From a6a8d9f87eb8510a8f53672ea87703f62185d75f Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman <sekharan@us.ibm.com>
Date: Thu, 1 May 2008 14:49:46 -0700
Subject: [SCSI] scsi_dh: add infrastructure for SCSI Device Handlers

Some storage devices that can be accessed through multiple paths need
special handling to
	1. Activate the passive path of the storage access.
	2. Decode and handle the special sense codes returned by the devices.
	3. Handle the I/Os sent to the passive path, especially during
	   device probe time.

Today this special device handling is done at the dm-multipath layer
using dm hardware handlers. That works well for (1); handling (2) at the
dm layer would require exporting SCSI sense information from SCSI to the
dm layer, which is not very attractive; and (3) cannot be done at the dm
layer at all.

The device handler infrastructure has therefore been moved to SCSI,
mainly so that (2) and (3) can be handled properly.
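
For illustration, a minimal handler built on top of this infrastructure
would look roughly like the sketch below; the handler name and callback
bodies are placeholders, the real handlers follow in the next patches:

#include <linux/module.h>
#include <scsi/scsi_dh.h>

static int example_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* attach on BUS_NOTIFY_ADD_DEVICE, detach on BUS_NOTIFY_DEL_DEVICE */
	return 0;
}

static int example_activate(struct scsi_device *sdev)
{
	/* issue whatever command switches this path to the active state */
	return SCSI_DH_OK;
}

static struct scsi_device_handler example_dh = {
	.name			= "example",
	.module			= THIS_MODULE,
	.nb.notifier_call	= example_bus_notify,
	.activate		= example_activate,
};

static int __init example_init(void)
{
	return scsi_register_device_handler(&example_dh);
}

static void __exit example_exit(void)
{
	scsi_unregister_device_handler(&example_dh);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");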

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig                  |   2 +
 drivers/scsi/Makefile                 |   1 +
 drivers/scsi/device_handler/Kconfig   |  12 +++
 drivers/scsi/device_handler/Makefile  |   4 +
 drivers/scsi/device_handler/scsi_dh.c | 162 ++++++++++++++++++++++++++++++++++
 drivers/scsi/scsi_error.c             |  11 +++
 drivers/scsi/scsi_lib.c               |   8 ++
 drivers/scsi/scsi_sysfs.c             |   1 +
 8 files changed, 201 insertions(+)
 create mode 100644 drivers/scsi/device_handler/Kconfig
 create mode 100644 drivers/scsi/device_handler/Makefile
 create mode 100644 drivers/scsi/device_handler/scsi_dh.c

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 81ccbd7f9e34..7886ec6e5f87 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1771,4 +1771,6 @@ endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/pcmcia/Kconfig"
 
+source "drivers/scsi/device_handler/Kconfig"
+
 endmenu
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6c775e350c98..aa8272e188ea 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS)	+= scsi_transport_iscsi.o
 obj-$(CONFIG_SCSI_SAS_ATTRS)	+= scsi_transport_sas.o
 obj-$(CONFIG_SCSI_SAS_LIBSAS)	+= libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
+obj-$(CONFIG_SCSI_DH)		+= device_handler/
 
 obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644
index 000000000000..228241d1d98c
--- /dev/null
+++ b/drivers/scsi/device_handler/Kconfig
@@ -0,0 +1,12 @@
+#
+# SCSI Device Handler configuration
+#
+
+menuconfig SCSI_DH
+	tristate "SCSI Device Handlers"
+	depends on SCSI
+	default n
+	help
+	  SCSI Device Handlers provide device specific support for
+	  devices utilized in multipath configurations. Say Y here to
+	  select support for specific hardware.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644
index 000000000000..f306e44e3837
--- /dev/null
+++ b/drivers/scsi/device_handler/Makefile
@@ -0,0 +1,4 @@
+#
+# SCSI Device Handler
+#
+obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644
index 000000000000..ab6c21cd9689
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -0,0 +1,162 @@
+/*
+ * SCSI device handler infrastructure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ *      Authors:
+ *               Chandra Seetharaman <sekharan@us.ibm.com>
+ *               Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <scsi/scsi_dh.h>
+#include "../scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+static struct scsi_device_handler *get_device_handler(const char *name)
+{
+	struct scsi_device_handler *tmp, *found = NULL;
+
+	spin_lock(&list_lock);
+	list_for_each_entry(tmp, &scsi_dh_list, list) {
+		if (!strcmp(tmp->name, name)) {
+			found = tmp;
+			break;
+		}
+	}
+	spin_unlock(&list_lock);
+	return found;
+}
+
+static int scsi_dh_notifier_add(struct device *dev, void *data)
+{
+	struct scsi_device_handler *scsi_dh = data;
+
+	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
+	return 0;
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	int ret = -EBUSY;
+	struct scsi_device_handler *tmp;
+
+	tmp = get_device_handler(scsi_dh->name);
+	if (tmp)
+		goto done;
+
+	ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+	spin_lock(&list_lock);
+	list_add(&scsi_dh->list, &scsi_dh_list);
+	spin_unlock(&list_lock);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+	struct scsi_device_handler *scsi_dh = data;
+
+	scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
+	return 0;
+}
+
+/*
+ * scsi_unregister_device_handler - unregister a device handler personality
+ *      module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+	int ret = -ENODEV;
+	struct scsi_device_handler *tmp;
+
+	tmp = get_device_handler(scsi_dh->name);
+	if (!tmp)
+		goto done;
+
+	ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+	bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
+					scsi_dh_notifier_remove);
+	spin_lock(&list_lock);
+	list_del(&scsi_dh->list);
+	spin_unlock(&list_lock);
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ *      corresponding to the given request queue.
+ * @q - Request queue that is associated with the scsi_device to be
+ *      activated.
+ */
+int scsi_dh_activate(struct request_queue *q)
+{
+	int err = 0;
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (sdev && sdev->scsi_dh_data)
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+	if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+		err = SCSI_DH_NOSYS;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (err)
+		return err;
+
+	if (scsi_dh->activate)
+		err = scsi_dh->activate(sdev);
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
+
+/*
+ * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
+ *	the given name. FALSE(0) otherwise.
+ * @name - name of the device handler.
+ */
+int scsi_dh_handler_exist(const char *name)
+{
+	return (get_device_handler(name) != NULL);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+MODULE_DESCRIPTION("SCSI device handler");
+MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index eaf5a8add1ba..006a95916f72 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
  */
 static int scsi_check_sense(struct scsi_cmnd *scmd)
 {
+	struct scsi_device *sdev = scmd->device;
 	struct scsi_sense_hdr sshdr;
 
 	if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 	if (scsi_sense_is_deferred(&sshdr))
 		return NEEDS_RETRY;
 
+	if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
+			sdev->scsi_dh_data->scsi_dh->check_sense) {
+		int rc;
+
+		rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+		if (rc != SCSI_RETURN_NOT_HANDLED)
+			return rc;
+		/* handler does not care. Drop down to default handling */
+	}
+
 	/*
 	 * Previous logic looked for FILEMARK, EOM or ILI which are
 	 * mainly associated with tapes and returned SUCCESS.
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a82d2fe80fb5..033c58a65f50 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1160,6 +1160,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 
 	if (ret != BLKPREP_OK)
 		return ret;
+
+	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
+
 	/*
 	 * Filesystem requests must transfer data.
 	 */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93d2b6714453..b6e561059779 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
 	.resume		= scsi_bus_resume,
 	.remove		= scsi_bus_remove,
 };
+EXPORT_SYMBOL_GPL(scsi_bus_type);
 
 int scsi_sysfs_register(void)
 {
-- 
cgit v1.2.3


From fbd7ab3eb53a3b88fefa7873139a62e439860155 Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman <sekharan@us.ibm.com>
Date: Thu, 1 May 2008 14:49:52 -0700
Subject: [SCSI] scsi_dh: add lsi rdac device handler

This patch provides the device handler to support LSI RDAC SCSI-based
storage devices.
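
As a rough sketch (the helper below is hypothetical), a caller such as
dm-multipath is expected to drive a failover through this handler with
the interface added in the previous patch:

#include <scsi/scsi_dh.h>
#include <scsi/scsi_device.h>

static int example_switch_to_path(struct scsi_device *sdev)
{
	/* nothing to do if the rdac handler was never built or loaded */
	if (!scsi_dh_handler_exist("rdac"))
		return SCSI_DH_NOSYS;

	/*
	 * Resolves the handler bound to this request queue and calls
	 * rdac_activate(), which issues the MODE SELECT that transfers
	 * LUN ownership to the controller behind this path.
	 */
	return scsi_dh_activate(sdev->request_queue);
}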

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/device_handler/Kconfig        |   6 +
 drivers/scsi/device_handler/Makefile       |   1 +
 drivers/scsi/device_handler/scsi_dh_rdac.c | 691 +++++++++++++++++++++++++++++
 3 files changed, 698 insertions(+)
 create mode 100644 drivers/scsi/device_handler/scsi_dh_rdac.c

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 228241d1d98c..404f37732627 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -10,3 +10,9 @@ menuconfig SCSI_DH
 	  SCSI Device Handlers provide device specific support for
 	  devices utilized in multipath configurations. Say Y here to
 	  select support for specific hardware.
+
+config SCSI_DH_RDAC
+	tristate "LSI RDAC Device Handler"
+	depends on SCSI_DH
+	help
+	If you have an LSI RDAC, select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index f306e44e3837..1a42b3498318 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -2,3 +2,4 @@
 # SCSI Device Handler
 #
 obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
+obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644
index 000000000000..6fff077a888d
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -0,0 +1,691 @@
+/*
+ * Engenio/LSI RDAC SCSI Device Handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define RDAC_NAME "rdac"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS	0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_TIMEOUT	(60 * HZ)
+#define RDAC_RETRIES	3
+
+struct rdac_mode_6_hdr {
+	u8	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u8	block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+	u16	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u16	reserved;
+	u16	block_desc_len;
+};
+
+struct rdac_mode_common {
+	u8	controller_serial[16];
+	u8	alt_controller_serial[16];
+	u8	rdac_mode[2];
+	u8	alt_rdac_mode[2];
+	u8	quiescence_timeout;
+	u8	rdac_options;
+};
+
+struct rdac_pg_legacy {
+	struct rdac_mode_6_hdr hdr;
+	u8	page_code;
+	u8	page_len;
+	struct rdac_mode_common common;
+#define MODE6_MAX_LUN	32
+	u8	lun_table[MODE6_MAX_LUN];
+	u8	reserved2[32];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct rdac_pg_expanded {
+	struct rdac_mode_10_hdr hdr;
+	u8	page_code;
+	u8	subpage_code;
+	u8	page_len[2];
+	struct rdac_mode_common common;
+	u8	lun_table[256];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+struct c9_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC9 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "vace" */
+	u8	avte_cvp;
+	u8	path_prio;
+	u8	reserved2[38];
+};
+
+#define SUBSYS_ID_LEN	16
+#define SLOT_ID_LEN	2
+
+struct c4_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC4 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "subs" */
+	u8	subsys_id[SUBSYS_ID_LEN];
+	u8	revision[4];
+	u8	slot_id[SLOT_ID_LEN];
+	u8	reserved[2];
+};
+
+struct rdac_controller {
+	u8			subsys_id[SUBSYS_ID_LEN];
+	u8			slot_id[SLOT_ID_LEN];
+	int			use_ms10;
+	struct kref		kref;
+	struct list_head	node; /* list of all controllers */
+	union			{
+		struct rdac_pg_legacy legacy;
+		struct rdac_pg_expanded expanded;
+	} mode_select;
+};
+struct c8_inquiry {
+	u8	peripheral_info;
+	u8	page_code; /* 0xC8 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4]; /* "edid" */
+	u8	reserved2[3];
+	u8	vol_uniq_id_len;
+	u8	vol_uniq_id[16];
+	u8	vol_user_label_len;
+	u8	vol_user_label[60];
+	u8	array_uniq_id_len;
+	u8	array_unique_id[16];
+	u8	array_user_label_len;
+	u8	array_user_label[60];
+	u8	lun[8];
+};
+
+struct c2_inquiry {
+	u8	peripheral_info;
+	u8	page_code;	/* 0xC2 */
+	u8	reserved1;
+	u8	page_len;
+	u8	page_id[4];	/* "swr4" */
+	u8	sw_version[3];
+	u8	sw_date[3];
+	u8	features_enabled;
+	u8	max_lun_supported;
+	u8	partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_dh_data {
+	struct rdac_controller	*ctlr;
+#define UNINITIALIZED_LUN	(1 << 8)
+	unsigned		lun;
+#define RDAC_STATE_ACTIVE	0
+#define RDAC_STATE_PASSIVE	1
+	unsigned char		state;
+	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
+	union			{
+		struct c2_inquiry c2;
+		struct c4_inquiry c4;
+		struct c8_inquiry c8;
+		struct c9_inquiry c9;
+	} inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct rdac_dh_data *) scsi_dh_data->buf);
+}
+
+static struct request *get_rdac_req(struct scsi_device *sdev,
+			void *buffer, unsigned buflen, int rw)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	rq = blk_get_request(q, rw, GFP_KERNEL);
+
+	if (!rq) {
+		sdev_printk(KERN_INFO, sdev,
+				"get_rdac_req: blk_get_request failed.\n");
+		return NULL;
+	}
+
+	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+		blk_put_request(rq);
+		sdev_printk(KERN_INFO, sdev,
+				"get_rdac_req: blk_rq_map_kern failed.\n");
+		return NULL;
+	}
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->sense = h->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->retries = RDAC_RETRIES;
+	rq->timeout = RDAC_TIMEOUT;
+
+	return rq;
+}
+
+static struct request *rdac_failover_get(struct scsi_device *sdev)
+{
+	struct request *rq;
+	struct rdac_mode_common *common;
+	unsigned data_size;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	if (h->ctlr->use_ms10) {
+		struct rdac_pg_expanded *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_expanded);
+		rdac_pg = &h->ctlr->mode_select.expanded;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+		rdac_pg->subpage_code = 0x1;
+		rdac_pg->page_len[0] = 0x01;
+		rdac_pg->page_len[1] = 0x28;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	} else {
+		struct rdac_pg_legacy *rdac_pg;
+
+		data_size = sizeof(struct rdac_pg_legacy);
+		rdac_pg = &h->ctlr->mode_select.legacy;
+		memset(rdac_pg, 0, data_size);
+		common = &rdac_pg->common;
+		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+		rdac_pg->page_len = 0x68;
+		rdac_pg->lun_table[h->lun] = 0x81;
+	}
+	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+	/* get request for block layer packet command */
+	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
+	if (!rq)
+		return NULL;
+
+	/* Prepare the command. */
+	if (h->ctlr->use_ms10) {
+		rq->cmd[0] = MODE_SELECT_10;
+		rq->cmd[7] = data_size >> 8;
+		rq->cmd[8] = data_size & 0xff;
+	} else {
+		rq->cmd[0] = MODE_SELECT;
+		rq->cmd[4] = data_size;
+	}
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	return rq;
+}
+
+static void release_controller(struct kref *kref)
+{
+	struct rdac_controller *ctlr;
+	ctlr = container_of(kref, struct rdac_controller, kref);
+
+	spin_lock(&list_lock);
+	list_del(&ctlr->node);
+	spin_unlock(&list_lock);
+	kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+	struct rdac_controller *ctlr, *tmp;
+
+	spin_lock(&list_lock);
+
+	list_for_each_entry(tmp, &ctlr_list, node) {
+		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+			kref_get(&tmp->kref);
+			spin_unlock(&list_lock);
+			return tmp;
+		}
+	}
+	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+	if (!ctlr)
+		goto done;
+
+	/* initialize fields of controller */
+	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+	kref_init(&ctlr->kref);
+	ctlr->use_ms10 = -1;
+	list_add(&ctlr->node, &ctlr_list);
+done:
+	spin_unlock(&list_lock);
+	return ctlr;
+}
+
+static int submit_inquiry(struct scsi_device *sdev, int page_code,
+		unsigned int len)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = get_rdac_req(sdev, &h->inq, len, READ);
+	if (!rq)
+		goto done;
+
+	/* Prepare the command. */
+	rq->cmd[0] = INQUIRY;
+	rq->cmd[1] = 1;
+	rq->cmd[2] = page_code;
+	rq->cmd[4] = len;
+	rq->cmd_len = COMMAND_SIZE(INQUIRY);
+	err = blk_execute_rq(q, NULL, rq, 1);
+	if (err == -EIO)
+		err = SCSI_DH_IO;
+done:
+	return err;
+}
+
+static int get_lun(struct scsi_device *sdev)
+{
+	int err;
+	struct c8_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c8;
+		h->lun = inqp->lun[7]; /* currently it uses only one byte */
+	}
+	return err;
+}
+
+#define RDAC_OWNED	0
+#define RDAC_UNOWNED	1
+#define RDAC_FAILED	2
+static int check_ownership(struct scsi_device *sdev)
+{
+	int err;
+	struct c9_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+	if (err == SCSI_DH_OK) {
+		err = RDAC_UNOWNED;
+		inqp = &h->inq.c9;
+		/*
+		 * If in AVT mode or if the path already owns the LUN,
+		 * return RDAC_OWNED;
+		 */
+		if (((inqp->avte_cvp >> 7) == 0x1) ||
+				 ((inqp->avte_cvp & 0x1) != 0))
+			err = RDAC_OWNED;
+	} else
+		err = RDAC_FAILED;
+	return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev)
+{
+	int err;
+	struct c4_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c4;
+		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+		if (!h->ctlr)
+			err = SCSI_DH_RES_TEMP_UNAVAIL;
+	}
+	return err;
+}
+
+static int set_mode_select(struct scsi_device *sdev)
+{
+	int err;
+	struct c2_inquiry *inqp;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+	if (err == SCSI_DH_OK) {
+		inqp = &h->inq.c2;
+		/*
+		 * If MODE6_MAX_LUN or more LUNs are supported, use
+		 * MODE SELECT(10).
+		 */
+		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
+			h->ctlr->use_ms10 = 1;
+		else
+			h->ctlr->use_ms10 = 0;
+	}
+	return err;
+}
+
+static int mode_select_handle_sense(struct scsi_device *sdev)
+{
+	struct scsi_sense_hdr sense_hdr;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int sense, err = SCSI_DH_IO, ret;
+
+	ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+	if (!ret)
+		goto done;
+
+	err = SCSI_DH_OK;
+	sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+			sense_hdr.ascq;
+	/* If it is retryable failure, submit the c9 inquiry again */
+	if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+			    sense == 0x62900) {
+		/* 0x59136    - Command lock contention
+		 * 0x[6b]8b02 - Quiescence in progress or achieved
+		 * 0x62900    - Power On, Reset, or Bus Device Reset
+		 */
+		err = SCSI_DH_RETRY;
+	}
+
+	if (sense)
+		sdev_printk(KERN_INFO, sdev,
+			"MODE_SELECT failed with sense 0x%x.\n", sense);
+done:
+	return err;
+}
+
+static int send_mode_select(struct scsi_device *sdev)
+{
+	struct request *rq;
+	struct request_queue *q = sdev->request_queue;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	rq = rdac_failover_get(sdev);
+	if (!rq)
+		goto done;
+
+	sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+
+	err = blk_execute_rq(q, NULL, rq, 1);
+	if (err != SCSI_DH_OK)
+		err = mode_select_handle_sense(sdev);
+	if (err == SCSI_DH_OK)
+		h->state = RDAC_STATE_ACTIVE;
+done:
+	return err;
+}
+
+static int rdac_activate(struct scsi_device *sdev)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int err = SCSI_DH_OK;
+
+	if (h->lun == UNINITIALIZED_LUN) {
+		err = get_lun(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	err = check_ownership(sdev);
+	switch (err) {
+	case RDAC_UNOWNED:
+		break;
+	case RDAC_OWNED:
+		err = SCSI_DH_OK;
+		goto done;
+	case RDAC_FAILED:
+	default:
+		err = SCSI_DH_IO;
+		goto done;
+	}
+
+	if (!h->ctlr) {
+		err = initialize_controller(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	if (h->ctlr->use_ms10 == -1) {
+		err = set_mode_select(sdev);
+		if (err != SCSI_DH_OK)
+			goto done;
+	}
+
+	err = send_mode_select(sdev);
+done:
+	return err;
+}
+
+static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	int ret = BLKPREP_OK;
+
+	if (h->state != RDAC_STATE_ACTIVE) {
+		ret = BLKPREP_KILL;
+		req->cmd_flags |= REQ_QUIET;
+	}
+	return ret;
+
+}
+
+static int rdac_check_sense(struct scsi_device *sdev,
+				struct scsi_sense_hdr *sense_hdr)
+{
+	struct rdac_dh_data *h = get_rdac_data(sdev);
+	switch (sense_hdr->sense_key) {
+	case NOT_READY:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
+			/* LUN Not Ready - Storage firmware incompatible
+			 * Manual code synchronisation required.
+			 *
+			 * Nothing we can do here. Try to bypass the path.
+			 */
+			return SUCCESS;
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
+			/* LUN Not Ready - Quiescence in progress
+			 *
+			 * Just retry and wait.
+			 */
+			return NEEDS_RETRY;
+		break;
+	case ILLEGAL_REQUEST:
+		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
+			/* Invalid Request - Current Logical Unit Ownership.
+			 * The controller is not the current owner of the LUN;
+			 * fail the path so that the other path can be used.
+			 */
+			h->state = RDAC_STATE_PASSIVE;
+			return SUCCESS;
+		}
+		break;
+	case UNIT_ATTENTION:
+		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+			/*
+			 * Power On, Reset, or Bus Device Reset, just retry.
+			 */
+			return NEEDS_RETRY;
+		break;
+	}
+	/* not handled here -- let scsi-ml fall back to its default sense handling */
+	return SCSI_RETURN_NOT_HANDLED;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} rdac_dev_list[] = {
+	{"IBM", "1722"},
+	{"IBM", "1724"},
+	{"IBM", "1726"},
+	{"IBM", "1742"},
+	{"IBM", "1814"},
+	{"IBM", "1815"},
+	{"IBM", "1818"},
+	{"IBM", "3526"},
+	{"SGI", "TP9400"},
+	{"SGI", "TP9500"},
+	{"SGI", "IS"},
+	{"STK", "OPENstorage D280"},
+	{"SUN", "CSM200_R"},
+	{"SUN", "LCSM100_F"},
+	{NULL, NULL},
+};
+
+static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler rdac_dh = {
+	.name = RDAC_NAME,
+	.module = THIS_MODULE,
+	.nb.notifier_call = rdac_bus_notify,
+	.prep_fn = rdac_prep_fn,
+	.check_sense = rdac_check_sense,
+	.activate = rdac_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int rdac_bus_notify(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	struct rdac_dh_data *h;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; rdac_dev_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+				     strlen(rdac_dev_list[i].vendor)) &&
+			    !strncmp(sdev->model, rdac_dev_list[i].model,
+				     strlen(rdac_dev_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(*h) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+				    RDAC_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &rdac_dh;
+		h = (struct rdac_dh_data *) scsi_dh_data->buf;
+		h->lun = UNINITIALIZED_LUN;
+		h->state = RDAC_STATE_ACTIVE;
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		try_module_get(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
+
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+				sdev->scsi_dh_data->scsi_dh != &rdac_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		h = (struct rdac_dh_data *) scsi_dh_data->buf;
+		if (h->ctlr)
+			kref_put(&h->ctlr->kref, release_controller);
+		kfree(scsi_dh_data);
+		module_put(THIS_MODULE);
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
+	}
+
+out:
+	return 0;
+}
+
+static int __init rdac_init(void)
+{
+	int r;
+
+	r = scsi_register_device_handler(&rdac_dh);
+	if (r != 0)
+		printk(KERN_ERR "Failed to register scsi device handler.");
+	return r;
+}
+
+static void __exit rdac_exit(void)
+{
+	scsi_unregister_device_handler(&rdac_dh);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
-- 
cgit v1.2.3


From f6dd337ee4c440f29a873da3779eb3af44bd1623 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Thu, 1 May 2008 14:49:59 -0700
Subject: [SCSI] scsi_dh: add hp sw device handler

This patch provides the device handler to support the older HP boxes
whose firmware cannot be upgraded.
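
The activation command sent by this handler is a plain 6-byte START STOP
UNIT with the START bit set; the annotated CDB below is only a reference
sketch of what hp_sw_activate() builds:

#include <scsi/scsi.h>

/*
 * byte 0: 0x1B  START STOP UNIT opcode (START_STOP)
 * byte 1: 0x00  IMMED = 0, wait for the unit to finish starting
 * byte 4: 0x01  LOEJ = 0, START = 1 -- spin up / take over the LU
 */
static const unsigned char example_start_stop_cdb[6] = {
	START_STOP, 0, 0, 0, 1, 0
};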

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/device_handler/Kconfig         |   8 ++
 drivers/scsi/device_handler/Makefile        |   1 +
 drivers/scsi/device_handler/scsi_dh_hp_sw.c | 202 ++++++++++++++++++++++++++++
 3 files changed, 211 insertions(+)
 create mode 100644 drivers/scsi/device_handler/scsi_dh_hp_sw.c

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 404f37732627..61578d84fa6e 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -16,3 +16,11 @@ config SCSI_DH_RDAC
 	depends on SCSI_DH
 	help
 	If you have an LSI RDAC, select y. Otherwise, say N.
+
+config SCSI_DH_HP_SW
+	tristate "HP/COMPAQ MSA Device Handler"
+	depends on SCSI_DH
+	help
+	If you have an HP/COMPAQ MSA device that requires START_STOP to
+	be sent to start it and cannot upgrade the firmware, then select y.
+	Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 1a42b3498318..08ec1943f19e 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -3,3 +3,4 @@
 #
 obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
 obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
+obj-$(CONFIG_SCSI_DH_HP_SW)	+= scsi_dh_hp_sw.o
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644
index 000000000000..12ceab7b3662
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -0,0 +1,202 @@
+/*
+ * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be
+ * upgraded.
+ *
+ * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define HP_SW_NAME	"hp_sw"
+
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+struct hp_sw_dh_data {
+	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+	int retries;
+};
+
+static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
+}
+
+static int hp_sw_done(struct scsi_device *sdev)
+{
+	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct scsi_sense_hdr sshdr;
+	int rc;
+
+	sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+
+	rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+	if (!rc)
+		goto done;
+	switch (sshdr.sense_key) {
+	case NOT_READY:
+		if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+			rc = SCSI_DH_RETRY;
+			h->retries++;
+			break;
+		}
+		/* fall through */
+	default:
+		h->retries++;
+		rc = SCSI_DH_IMM_RETRY;
+	}
+
+done:
+	if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
+		h->retries = 0;
+	else if (h->retries > HP_SW_RETRIES) {
+		h->retries = 0;
+		rc = SCSI_DH_IO;
+	}
+	return rc;
+}
+
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+	struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+	struct request *req;
+	int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+
+	req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+	if (!req)
+		goto done;
+
+	sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_len = COMMAND_SIZE(START_STOP);
+	memset(req->cmd, 0, MAX_COMMAND_SIZE);
+	req->cmd[0] = START_STOP;
+	req->cmd[4] = 1;	/* Start spin cycle */
+	req->timeout = HP_SW_TIMEOUT;
+	req->sense = h->sense;
+	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	req->sense_len = 0;
+
+	ret = blk_execute_rq(req->q, NULL, req, 1);
+	if (!ret) /* SUCCESS */
+		ret = hp_sw_done(sdev);
+	else
+		ret = SCSI_DH_IO;
+done:
+	return ret;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} hp_sw_dh_data_list[] = {
+	{"COMPAQ", "MSA"},
+	{"HP", "HSV"},
+	{"DEC", "HSG80"},
+	{NULL, NULL},
+};
+
+static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler hp_sw_dh = {
+	.name		= HP_SW_NAME,
+	.module		= THIS_MODULE,
+	.nb.notifier_call = hp_sw_bus_notify,
+	.activate	= hp_sw_activate,
+};
+
+static int hp_sw_bus_notify(struct notifier_block *nb,
+			    unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+				     strlen(hp_sw_dh_data_list[i].vendor)) &&
+			    !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+				     strlen(hp_sw_dh_data_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
+				    HP_SW_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &hp_sw_dh;
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		try_module_get(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+				sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+		module_put(THIS_MODULE);
+
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
+
+		kfree(scsi_dh_data);
+	}
+
+out:
+	return 0;
+}
+
+static int __init hp_sw_init(void)
+{
+	return scsi_register_device_handler(&hp_sw_dh);
+}
+
+static void __exit hp_sw_exit(void)
+{
+	scsi_unregister_device_handler(&hp_sw_dh);
+}
+
+module_init(hp_sw_init);
+module_exit(hp_sw_exit);
+
+MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
+MODULE_LICENSE("GPL");
-- 
cgit v1.2.3


From 5e7dccad3621f6e2b572f309cf830a2c902cae80 Mon Sep 17 00:00:00 2001
From: Chandra Seetharaman <sekharan@us.ibm.com>
Date: Thu, 1 May 2008 14:50:04 -0700
Subject: [SCSI] scsi_dh: add EMC Clariion device handler

This adds support for EMC CLARiiON arrays. This patch has the features
that currently exist in mainline plus the advanced features from Ed's
patches.
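
The handler decides ownership from EMC's vendor-specific VPD page 0xC0;
the struct below is only an illustrative view of the bytes that
parse_sp_info_reply() consumes (the driver indexes the raw buffer
directly):

struct emc_vpd_c0_view {		/* hypothetical layout, for reading the code */
	unsigned char hdr[4];
	unsigned char lun_state;	/* buffer[4]: 2 = this path's SP owns the LU,
					 *            1 = it does not, else unbound LU/LUNZ */
	unsigned char default_sp;	/* buffer[5]: default SP (0 = SP A, 1 = SP B) */
	unsigned char rsvd1[2];
	unsigned char current_sp;	/* buffer[8]: SP that currently owns the LU */
	unsigned char rsvd2[39];
	unsigned char ndu_in_prog;	/* buffer[48]: non-zero while a ucode upgrade (NDU) runs */
};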

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/device_handler/Kconfig       |   6 +
 drivers/scsi/device_handler/Makefile      |   1 +
 drivers/scsi/device_handler/scsi_dh_emc.c | 499 ++++++++++++++++++++++++++++++
 3 files changed, 506 insertions(+)
 create mode 100644 drivers/scsi/device_handler/scsi_dh_emc.c

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 61578d84fa6e..2adc0f666b68 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -24,3 +24,9 @@ config SCSI_DH_HP_SW
 	If you have an HP/COMPAQ MSA device that requires START_STOP to
 	be sent to start it and cannot upgrade the firmware, then select y.
 	Otherwise, say N.
+
+config SCSI_DH_EMC
+	tristate "EMC CLARiiON Device Handler"
+	depends on SCSI_DH
+	help
+	If you have an EMC CLARiiON, select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 08ec1943f19e..35272e93b1c8 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_SCSI_DH)		+= scsi_dh.o
 obj-$(CONFIG_SCSI_DH_RDAC)	+= scsi_dh_rdac.o
 obj-$(CONFIG_SCSI_DH_HP_SW)	+= scsi_dh_hp_sw.o
+obj-$(CONFIG_SCSI_DH_EMC)	+= scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644
index 000000000000..ed53f14007a2
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -0,0 +1,499 @@
+/*
+ * Target driver for EMC CLARiiON AX/CX-series hardware.
+ * Based on code from Lars Marowsky-Bree <lmb@suse.de>
+ * and Ed Goggin <egoggin@emc.com>.
+ *
+ * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <scsi/scsi_device.h>
+
+#define CLARIION_NAME			"emc_clariion"
+
+#define CLARIION_TRESPASS_PAGE		0x22
+#define CLARIION_BUFFER_SIZE		0x80
+#define CLARIION_TIMEOUT		(60 * HZ)
+#define CLARIION_RETRIES		3
+#define CLARIION_UNBOUND_LU		-1
+
+static unsigned char long_trespass[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x09,			/* Page length - 2 */
+	0x81,			/* Trespass code + Honor reservation bit */
+	0xff, 0xff,		/* Trespass target */
+	0, 0, 0, 0, 0, 0	/* Reserved bytes / unknown */
+};
+
+static unsigned char long_trespass_hr[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x09,			/* Page length - 2 */
+	0x01,			/* Trespass code + Honor reservation bit */
+	0xff, 0xff,		/* Trespass target */
+	0, 0, 0, 0, 0, 0	/* Reserved bytes / unknown */
+};
+
+static unsigned char short_trespass[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x02,			/* Page length - 2 */
+	0x81,			/* Trespass code + Honor reservation bit */
+	0xff,			/* Trespass target */
+};
+
+static unsigned char short_trespass_hr[] = {
+	0, 0, 0, 0,
+	CLARIION_TRESPASS_PAGE,	/* Page code */
+	0x02,			/* Page length - 2 */
+	0x01,			/* Trespass code + Honor reservation bit */
+	0xff,			/* Trespass target */
+};
+
+struct clariion_dh_data {
+	/*
+	 * Use short trespass command (FC-series) or the long version
+	 * (default for AX/CX CLARiiON arrays).
+	 */
+	unsigned short_trespass;
+	/*
+	 * Whether or not (default) to honor SCSI reservations when
+	 * initiating a switch-over.
+	 */
+	unsigned hr;
+	/* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+	char buffer[CLARIION_BUFFER_SIZE];
+	/*
+	 * SCSI sense buffer for commands -- assumes serial issuance
+	 * and completion sequence of all commands for same multipath.
+	 */
+	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+	/* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+	int default_sp;
+	/* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+	int current_sp;
+};
+
+static inline struct clariion_dh_data
+			*get_clariion_data(struct scsi_device *sdev)
+{
+	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+	BUG_ON(scsi_dh_data == NULL);
+	return ((struct clariion_dh_data *) scsi_dh_data->buf);
+}
+
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int trespass_endio(struct scsi_device *sdev, int result)
+{
+	int err = SCSI_DH_OK;
+	struct scsi_sense_hdr sshdr;
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	char *sense = csdev->sense;
+
+	if (status_byte(result) == CHECK_CONDITION &&
+	    scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+		sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+			    "0x%2x, 0x%2x while sending CLARiiON trespass "
+			    "command.\n", sshdr.sense_key, sshdr.asc,
+			     sshdr.ascq);
+
+		if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
+		     (sshdr.ascq == 0x00)) {
+			/*
+			 * Array based copy in progress -- do not send
+			 * mode_select or copy will be aborted mid-stream.
+			 */
+			sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+				    "progress while sending CLARiiON trespass "
+				    "command.\n");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
+			    (sshdr.ascq == 0x03)) {
+			/*
+			 * LUN Not Ready - Manual Intervention Required
+			 * indicates in-progress ucode upgrade (NDU).
+			 */
+			sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+				    "ucode upgrade NDU operation while sending "
+				    "CLARiiON trespass command.\n");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else
+			err = SCSI_DH_DEV_FAILED;
+	} else if (result) {
+		sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
+			    "CLARiiON trespass command.\n", result);
+		err = SCSI_DH_IO;
+	}
+
+	return err;
+}
+
+static int parse_sp_info_reply(struct scsi_device *sdev, int result,
+		int *default_sp, int *current_sp, int *new_current_sp)
+{
+	int err = SCSI_DH_OK;
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+
+	if (result == 0) {
+		/* check for in-progress ucode upgrade (NDU) */
+		if (csdev->buffer[48] != 0) {
+			sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
+			       "ucode upgrade NDU operation while finding "
+			       "current active SP.");
+			err = SCSI_DH_DEV_TEMP_BUSY;
+		} else {
+			*default_sp = csdev->buffer[5];
+
+			if (csdev->buffer[4] == 2)
+				/* SP for path is current */
+				*current_sp = csdev->buffer[8];
+			else {
+				if (csdev->buffer[4] == 1)
+					/* SP for this path is NOT current */
+					if (csdev->buffer[8] == 0)
+						*current_sp = 1;
+					else
+						*current_sp = 0;
+				else
+					/* unbound LU or LUNZ */
+					*current_sp = CLARIION_UNBOUND_LU;
+			}
+			*new_current_sp =  csdev->buffer[8];
+		}
+	} else {
+		struct scsi_sense_hdr sshdr;
+
+		err = SCSI_DH_IO;
+
+		if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+							   &sshdr))
+			sdev_printk(KERN_ERR, sdev, "Found valid sense data "
+			      "0x%2x, 0x%2x, 0x%2x while finding current "
+			      "active SP.", sshdr.sense_key, sshdr.asc,
+			      sshdr.ascq);
+		else
+			sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
+			      "current active SP.", result);
+	}
+
+	return err;
+}
+
+static int sp_info_endio(struct scsi_device *sdev, int result,
+					int mode_select_sent, int *done)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	int err_flags, default_sp, current_sp, new_current_sp;
+
+	err_flags = parse_sp_info_reply(sdev, result, &default_sp,
+					     &current_sp, &new_current_sp);
+
+	if (err_flags != SCSI_DH_OK)
+		goto done;
+
+	if (mode_select_sent) {
+		csdev->default_sp = default_sp;
+		csdev->current_sp = current_sp;
+	} else {
+		/*
+		 * Issue the actual mode_select request IFF either
+		 * (1) we do not know the identity of the current SP OR
+		 * (2) what we think we know is actually correct.
+		 */
+		if ((current_sp != CLARIION_UNBOUND_LU) &&
+		    (new_current_sp != current_sp)) {
+
+			csdev->default_sp = default_sp;
+			csdev->current_sp = current_sp;
+
+			sdev_printk(KERN_INFO, sdev, "Ignoring path group "
+			       "switch-over command for CLARiiON SP%s since "
+			       " mapped device is already initialized.",
+			       current_sp ? "B" : "A");
+			if (done)
+				*done = 1; /* as good as doing it */
+		}
+	}
+done:
+	return err_flags;
+}
+
+/*
+* Get block request for REQ_BLOCK_PC command issued to path.  Currently
+* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+*
+* Uses data and sense buffers in hardware handler context structure and
+* assumes serial servicing of commands, both issuance and completion.
+*/
+static struct request *get_req(struct scsi_device *sdev, int cmd)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	struct request *rq;
+	unsigned char *page22;
+	int len = 0;
+
+	rq = blk_get_request(sdev->request_queue,
+			(cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+	if (!rq) {
+		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
+		return NULL;
+	}
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->cmd[0] = cmd;
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	switch (cmd) {
+	case MODE_SELECT:
+		if (csdev->short_trespass) {
+			page22 = csdev->hr ? short_trespass_hr : short_trespass;
+			len = sizeof(short_trespass);
+		} else {
+			page22 = csdev->hr ? long_trespass_hr : long_trespass;
+			len = sizeof(long_trespass);
+		}
+		/*
+		 * Can't DMA from kernel BSS -- must copy selected trespass
+		 * command mode page contents to context buffer which is
+		 * allocated by kmalloc.
+		 */
+		BUG_ON((len > CLARIION_BUFFER_SIZE));
+		memcpy(csdev->buffer, page22, len);
+		rq->cmd_flags |= REQ_RW;
+		rq->cmd[1] = 0x10;
+		break;
+	case INQUIRY:
+		rq->cmd[1] = 0x1;
+		rq->cmd[2] = 0xC0;
+		len = CLARIION_BUFFER_SIZE;
+		memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	rq->cmd[4] = len;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_FAILFAST;
+	rq->timeout = CLARIION_TIMEOUT;
+	rq->retries = CLARIION_RETRIES;
+
+	rq->sense = csdev->sense;
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
+							len, GFP_ATOMIC)) {
+		__blk_put_request(rq->q, rq);
+		return NULL;
+	}
+
+	return rq;
+}
+
+static int send_cmd(struct scsi_device *sdev, int cmd)
+{
+	struct request *rq = get_req(sdev, cmd);
+
+	if (!rq)
+		return SCSI_DH_RES_TEMP_UNAVAIL;
+
+	return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+	int result, done = 0;
+
+	result = send_cmd(sdev, INQUIRY);
+	result = sp_info_endio(sdev, result, 0, &done);
+	if (result || done)
+		goto done;
+
+	result = send_cmd(sdev, MODE_SELECT);
+	result = trespass_endio(sdev, result);
+	if (result)
+		goto done;
+
+	result = send_cmd(sdev, INQUIRY);
+	result = sp_info_endio(sdev, result, 1, NULL);
+done:
+	return result;
+}
+
+static int clariion_check_sense(struct scsi_device *sdev,
+				struct scsi_sense_hdr *sense_hdr)
+{
+	switch (sense_hdr->sense_key) {
+	case NOT_READY:
+		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
+			/*
+			 * LUN Not Ready - Manual Intervention Required
+			 * indicates this is a passive path.
+			 *
+			 * FIXME: However, if this is seen and EVPD C0
+			 * indicates that this is due to a NDU in
+			 * progress, we should set FAIL_PATH too.
+			 * This indicates we might have to do a SCSI
+			 * inquiry in the end_io path. Ugh.
+			 *
+			 * Can return FAILED only when we want the error
+			 * recovery process to kick in.
+			 */
+			return SUCCESS;
+		break;
+	case ILLEGAL_REQUEST:
+		if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
+			/*
+			 * An array based copy is in progress. Do not
+			 * fail the path, do not bypass to another PG,
+			 * do not retry. Fail the IO immediately.
+			 * (Actually this is the same conclusion as in
+			 * the default handler, but lets make sure.)
+			 *
+			 * Can return FAILED only when we want the error
+			 * recovery process to kick in.
+			 */
+			return SUCCESS;
+		break;
+	case UNIT_ATTENTION:
+		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+			/*
+			 * Unit Attention Code. This is the first IO
+			 * to the new path, so just retry.
+			 */
+			return NEEDS_RETRY;
+		break;
+	}
+
+	/* success just means we do not care what scsi-ml does */
+	return SUCCESS;
+}
+
+static const struct {
+	char *vendor;
+	char *model;
+} clariion_dev_list[] = {
+	{"DGC", "RAID"},
+	{"DGC", "DISK"},
+	{NULL, NULL},
+};
+
+static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler clariion_dh = {
+	.name		= CLARIION_NAME,
+	.module		= THIS_MODULE,
+	.nb.notifier_call = clariion_bus_notify,
+	.check_sense	= clariion_check_sense,
+	.activate	= clariion_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int clariion_bus_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct scsi_dh_data *scsi_dh_data;
+	struct clariion_dh_data *h;
+	int i, found = 0;
+	unsigned long flags;
+
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		for (i = 0; clariion_dev_list[i].vendor; i++) {
+			if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+				     strlen(clariion_dev_list[i].vendor)) &&
+			    !strncmp(sdev->model, clariion_dev_list[i].model,
+				     strlen(clariion_dev_list[i].model))) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			goto out;
+
+		scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+				+ sizeof(*h) , GFP_KERNEL);
+		if (!scsi_dh_data) {
+			sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+				    CLARIION_NAME);
+			goto out;
+		}
+
+		scsi_dh_data->scsi_dh = &clariion_dh;
+		h = (struct clariion_dh_data *) scsi_dh_data->buf;
+		h->default_sp = CLARIION_UNBOUND_LU;
+		h->current_sp = CLARIION_UNBOUND_LU;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		sdev->scsi_dh_data = scsi_dh_data;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
+		try_module_get(THIS_MODULE);
+
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (sdev->scsi_dh_data == NULL ||
+				sdev->scsi_dh_data->scsi_dh != &clariion_dh)
+			goto out;
+
+		spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+		scsi_dh_data = sdev->scsi_dh_data;
+		sdev->scsi_dh_data = NULL;
+		spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+		sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
+			    CLARIION_NAME);
+
+		kfree(scsi_dh_data);
+		module_put(THIS_MODULE);
+	}
+
+out:
+	return 0;
+}
+
+static int __init clariion_init(void)
+{
+	int r;
+
+	r = scsi_register_device_handler(&clariion_dh);
+	if (r != 0)
+		printk(KERN_ERR "Failed to register scsi device handler.");
+	return r;
+}
+
+static void __exit clariion_exit(void)
+{
+	scsi_unregister_device_handler(&clariion_dh);
+}
+
+module_init(clariion_init);
+module_exit(clariion_exit);
+
+MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
-- 
cgit v1.2.3


From 13a17fdeedbb156463a9a007378366ec0a0c30ef Mon Sep 17 00:00:00 2001
From: Harvey Harrison <harvey.harrison@gmail.com>
Date: Thu, 8 May 2008 12:08:53 -0700
Subject: [SCSI] aacraid: linit.c make aac_show_serial_number static

drivers/scsi/aacraid/linit.c:865:9: warning: symbol 'aac_show_serial_number' was not declared. Should it be static?

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Mark Salyzyn <Mark_Salyzyn@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/aacraid/linit.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 1f7c83607f84..5797b2c42f08 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -862,7 +862,7 @@ static ssize_t aac_show_bios_version(struct device *device,
 	return len;
 }
 
-ssize_t aac_show_serial_number(struct device *device,
+static ssize_t aac_show_serial_number(struct device *device,
 			       struct device_attribute *attr, char *buf)
 {
 	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
-- 
cgit v1.2.3


From 19c4158bcdf42ee3b2394342caf14f8471d2c78e Mon Sep 17 00:00:00 2001
From: Alan Stern <stern@rowland.harvard.edu>
Date: Fri, 7 Mar 2008 11:20:25 -0500
Subject: [SCSI] SCSI: remove dev->power.power_state from mesh driver

power.power_state is scheduled for removal.  This patch (as1055)
removes all uses of that field from the SCSI mesh driver.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Acked-by: Paul Mackerras <paulus@au.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/mesh.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index fd63b06d9ef1..11aa917629ac 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
 	default:
 		return 0;
 	}
-	if (mesg.event == mdev->ofdev.dev.power.power_state.event)
+	if (ms->phase == sleeping)
 		return 0;
 
 	scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
 	disable_irq(ms->meshintr);
 	set_mesh_power(ms, 0);
 
-	mdev->ofdev.dev.power.power_state = mesg;
-
 	return 0;
 }
 
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
 	unsigned long flags;
 
-	if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
+	if (ms->phase != sleeping)
 		return 0;
 
 	set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
 	enable_irq(ms->meshintr);
 	scsi_unblock_requests(ms->host);
 
-	mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
-
 	return 0;
 }
 
-- 
cgit v1.2.3


From 427e59f09fdba387547106de7bab980b7fff77be Mon Sep 17 00:00:00 2001
From: James Bottomley <James.Bottomley@HansenPartnership.com>
Date: Sat, 8 Mar 2008 18:24:17 -0600
Subject: [SCSI] make use of the residue value

USB sometimes doesn't return an error but instead returns a residue
value indicating that part (or all) of the command wasn't completed.  So
if the driver's ->done() error processing indicates the command was
fully processed, subtract off the residue so that this USB error gets
propagated.
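
A worked example with made-up numbers -- a 4096-byte READ where the USB
bridge reports success but leaves a 512-byte residue:

/* sketch of the new completion-length rule; values are illustrative only */
static unsigned int example_completed_bytes(unsigned int bufflen,  /* 4096 */
					    unsigned int done_ret, /* drv->done() result */
					    unsigned int resid)    /* 512 */
{
	unsigned int good_bytes = bufflen;

	if (done_ret == good_bytes)	/* drv->done() saw nothing wrong */
		good_bytes -= resid;	/* 4096 - 512 = 3584 */

	return good_bytes;		/* scsi_io_completion() finishes only 3584 bytes */
}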

Cc: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 110e776d1a07..36c92f961e15 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 
 	good_bytes = scsi_bufflen(cmd);
         if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+		int old_good_bytes = good_bytes;
 		drv = scsi_cmd_to_driver(cmd);
 		if (drv->done)
 			good_bytes = drv->done(cmd);
+		/*
+		 * USB may not give sense identifying bad sector and
+		 * simply return a residue instead, so subtract off the
+		 * residue if drv->done() error processing indicates no
+		 * change to the completion length.
+		 */
+		if (good_bytes == old_good_bytes)
+			good_bytes -= scsi_get_resid(cmd);
 	}
 	scsi_io_completion(cmd, good_bytes);
 }
-- 
cgit v1.2.3


From 40753caa364bfba60ebd5e2a8bdf366ef175d03c Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:53:56 -0500
Subject: [SCSI] iscsi class, iscsi_tcp/iser: add host arg to session creation

iscsi offload drivers (bnx2i and qla4xxx) allocate a scsi host per hba,
so the session creation path needs a shost/host_no argument.
Software iscsi/iser will follow the same behavior as before,
where it allocates a host per session, but in the future iser
will probably look more like bnx2i, where the host's parent is
the hardware (an rnic for iser, a nic for bnx2i), because
it does not use a socket layer the way iscsi_tcp does.
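
As a hedged illustration (the helper name here is invented; the real
logic lives in iscsi_if_create_session() in the hunk below), the class
only looks up an existing host for bound/offload sessions, while
software iscsi still passes no host and learns its host_no from the LLD:

	/*
	 * Sketch only: UINT_MAX means "no host supplied", i.e. the
	 * existing software-iscsi path; anything else names an offload
	 * host that must already be registered.
	 */
	static struct Scsi_Host *lookup_bound_host(uint32_t host_no)
	{
		if (host_no == UINT_MAX)
			return NULL;
		return scsi_host_lookup(host_no); /* may be ERR_PTR() */
	}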

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c |  1 +
 drivers/scsi/iscsi_tcp.c                 |  5 ++--
 drivers/scsi/scsi_transport_iscsi.c      | 46 +++++++++++++++++++++++++-------
 include/scsi/iscsi_if.h                  |  7 +++++
 include/scsi/libiscsi.h                  |  1 +
 include/scsi/scsi_transport_iscsi.h      |  4 +--
 6 files changed, 51 insertions(+), 13 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index aeb58cae9a3f..efc121986c50 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -368,6 +368,7 @@ static struct iscsi_transport iscsi_iser_transport;
 static struct iscsi_cls_session *
 iscsi_iser_session_create(struct iscsi_transport *iscsit,
 			 struct scsi_transport_template *scsit,
+			 struct Scsi_Host *shost,
 			 uint16_t cmds_max, uint16_t qdepth,
 			 uint32_t initial_cmdsn, uint32_t *hostno)
 {
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 72b9b2a0eba3..81c421a7d477 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1871,8 +1871,9 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 static struct iscsi_cls_session *
 iscsi_tcp_session_create(struct iscsi_transport *iscsit,
 			 struct scsi_transport_template *scsit,
-			 uint16_t cmds_max, uint16_t qdepth,
-			 uint32_t initial_cmdsn, uint32_t *hostno)
+			 struct Scsi_Host *shost, uint16_t cmds_max,
+			 uint16_t qdepth, uint32_t initial_cmdsn,
+			 uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 65d1737eb664..2a6669d967cb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1017,21 +1017,38 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev,
+			uint32_t host_no, uint32_t initial_cmdsn,
+			uint16_t cmds_max, uint16_t queue_depth)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
-	uint32_t hostno;
+	struct Scsi_Host *shost = NULL;
 
-	session = transport->create_session(transport, &priv->t,
-					    ev->u.c_session.cmds_max,
-					    ev->u.c_session.queue_depth,
-					    ev->u.c_session.initial_cmdsn,
-					    &hostno);
+	/*
+	 * Software iscsi allocates a host per session, but
+	 * offload drivers (and possibly iser one day) allocate a host per
+	 * hba/nic/rnic. Offload will match a host here, but software will
+	 * return a new hostno after the create_session callback has returned.
+	 */
+	if (host_no != UINT_MAX) {
+		shost = scsi_host_lookup(host_no);
+		if (IS_ERR(shost)) {
+			printk(KERN_ERR "Could not find host no %u to "
+			       "create session\n", host_no);
+			return -ENODEV;
+		}
+	}
+
+	session = transport->create_session(transport, &priv->t, shost,
+					    cmds_max, queue_depth,
+					    initial_cmdsn, &host_no);
+	if (shost)
+		scsi_host_put(shost);
 	if (!session)
 		return -ENOMEM;
 
-	ev->r.c_session_ret.host_no = hostno;
+	ev->r.c_session_ret.host_no = host_no;
 	ev->r.c_session_ret.sid = session->sid;
 	return 0;
 }
@@ -1190,6 +1207,7 @@ static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int err = 0;
+	uint32_t host_no = UINT_MAX;
 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
 	struct iscsi_transport *transport = NULL;
 	struct iscsi_internal *priv;
@@ -1208,7 +1226,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
-		err = iscsi_if_create_session(priv, ev);
+		err = iscsi_if_create_session(priv, ev, host_no,
+					      ev->u.c_session.initial_cmdsn,
+					      ev->u.c_session.cmds_max,
+					      ev->u.c_session.queue_depth);
+		break;
+	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+		err = iscsi_if_create_session(priv, ev,
+					ev->u.c_bound_session.host_no,
+					ev->u.c_bound_session.initial_cmdsn,
+					ev->u.c_bound_session.cmds_max,
+					ev->u.c_bound_session.queue_depth);
 		break;
 	case ISCSI_UEVENT_DESTROY_SESSION:
 		session = iscsi_session_lookup(ev->u.d_session.sid);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index e19e58423166..1883c85cd3ee 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -50,6 +50,7 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_TGT_DSCVR		= UEVENT_BASE + 15,
 	ISCSI_UEVENT_SET_HOST_PARAM	= UEVENT_BASE + 16,
 	ISCSI_UEVENT_UNBIND_SESSION	= UEVENT_BASE + 17,
+	ISCSI_UEVENT_CREATE_BOUND_SESSION	= UEVENT_BASE + 18,
 
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -78,6 +79,12 @@ struct iscsi_uevent {
 			uint16_t	cmds_max;
 			uint16_t	queue_depth;
 		} c_session;
+		struct msg_create_bound_session {
+			uint32_t	host_no;
+			uint32_t	initial_cmdsn;
+			uint16_t	cmds_max;
+			uint16_t	queue_depth;
+		} c_bound_session;
 		struct msg_destroy_session {
 			uint32_t	sid;
 		} d_session;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index cd3ca63d4fb1..f24cf0246739 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -24,6 +24,7 @@
 #define LIBISCSI_H
 
 #include <linux/types.h>
+#include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index aab1eae2ec4c..02a852000be7 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -92,8 +92,8 @@ struct iscsi_transport {
 	unsigned int max_conn;
 	unsigned int max_cmd_len;
 	struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
-		struct scsi_transport_template *t, uint16_t, uint16_t,
-		uint32_t sn, uint32_t *hn);
+		struct scsi_transport_template *t, struct Scsi_Host *shost,
+		uint16_t cmds_max, uint16_t qdepth, uint32_t sn, uint32_t *hn);
 	void (*destroy_session) (struct iscsi_cls_session *session);
 	struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
 				uint32_t cid);
-- 
cgit v1.2.3


From d3826721b198001c55353b1c54e10843068aae63 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:53:57 -0500
Subject: [SCSI] iscsi class, iscsi drivers: remove unused iscsi_transport
 attrs

max_cmd_len and max_conn are not really used. max_cmd_len is
always 16 and can be set by the LLD on its own host. max_conn is
always 1 since we do not support MCS (multiple connections per
session).

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
 drivers/infiniband/ulp/iser/iscsi_iser.h | 1 -
 drivers/scsi/iscsi_tcp.c                 | 2 --
 drivers/scsi/libiscsi.c                  | 2 +-
 drivers/scsi/scsi_transport_iscsi.c      | 4 ----
 include/scsi/scsi_transport_iscsi.h      | 2 --
 6 files changed, 1 insertion(+), 11 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index efc121986c50..32f5d5e79abf 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -592,7 +592,6 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.host_template          = &iscsi_iser_sht,
 	.conndata_size		= sizeof(struct iscsi_conn),
 	.max_lun                = ISCSI_ISER_MAX_LUN,
-	.max_cmd_len            = ISCSI_ISER_MAX_CMD_LEN,
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
 	.destroy_session        = iscsi_session_teardown,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index a8c1b300e34d..66a2f30ada01 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -96,7 +96,6 @@
 					/* support upto 512KB in one RDMA */
 #define ISCSI_ISER_SG_TABLESIZE         (0x80000 >> SHIFT_4K)
 #define ISCSI_ISER_MAX_LUN		256
-#define ISCSI_ISER_MAX_CMD_LEN		16
 
 /* QP settings */
 /* Maximal bounds on received asynchronous PDUs */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 81c421a7d477..aecadbdce9df 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1978,8 +1978,6 @@ static struct iscsi_transport iscsi_tcp_transport = {
 				  ISCSI_HOST_NETDEV_NAME,
 	.host_template		= &iscsi_sht,
 	.conndata_size		= sizeof(struct iscsi_conn),
-	.max_conn		= 1,
-	.max_cmd_len		= 16,
 	/* session management */
 	.create_session		= iscsi_tcp_session_create,
 	.destroy_session	= iscsi_tcp_session_destroy,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b43bf1d60dac..01a1a4d36f21 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1830,7 +1830,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 	shost->max_id = 1;
 	shost->max_channel = 0;
 	shost->max_lun = iscsit->max_lun;
-	shost->max_cmd_len = iscsit->max_cmd_len;
+	shost->max_cmd_len = 16;
 	shost->transportt = scsit;
 	shost->transportt->create_work_queue = 1;
 	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2a6669d967cb..e6a090e9c8a4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -102,15 +102,11 @@ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
 
 show_transport_attr(caps, "0x%x");
 show_transport_attr(max_lun, "%d");
-show_transport_attr(max_conn, "%d");
-show_transport_attr(max_cmd_len, "%d");
 
 static struct attribute *iscsi_transport_attrs[] = {
 	&dev_attr_handle.attr,
 	&dev_attr_caps.attr,
 	&dev_attr_max_lun.attr,
-	&dev_attr_max_conn.attr,
-	&dev_attr_max_cmd_len.attr,
 	NULL,
 };
 
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 02a852000be7..afed70ec2aa5 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -89,8 +89,6 @@ struct iscsi_transport {
 	/* LLD session data size */
 	int sessiondata_size;
 	int max_lun;
-	unsigned int max_conn;
-	unsigned int max_cmd_len;
 	struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
 		struct scsi_transport_template *t, struct Scsi_Host *shost,
 		uint16_t cmds_max, uint16_t qdepth, uint32_t sn, uint32_t *hn);
-- 
cgit v1.2.3


From 32c6e1b9a2e27076b7070a9ec56a9e5437ebd725 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:53:58 -0500
Subject: [SCSI] iscsi class: rename iscsi_host to iscsi_cls_host

This renames struct iscsi_host to iscsi_cls_host to match the other
class structs, because libiscsi wants to use the iscsi_host name in
the future.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_transport_iscsi.c | 24 +++++++++++-------------
 include/scsi/scsi_transport_iscsi.h |  2 +-
 2 files changed, 12 insertions(+), 14 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e6a090e9c8a4..5577a60bec4e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -114,13 +114,11 @@ static struct attribute_group iscsi_transport_group = {
 	.attrs = iscsi_transport_attrs,
 };
 
-
-
 static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
 			    struct device *cdev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	memset(ihost, 0, sizeof(*ihost));
 	INIT_LIST_HEAD(&ihost->sessions);
@@ -140,7 +138,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
 			     struct device *cdev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	destroy_workqueue(ihost->scan_workq);
 	return 0;
@@ -293,7 +291,7 @@ static int iscsi_is_session_dev(const struct device *dev)
  */
 int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	/*
 	 * qla4xxx will have kicked off some session unblocks before calling
 	 * scsi_scan_host, so just wait for them to complete.
@@ -305,7 +303,7 @@ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
 static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
 			   uint id, uint lun)
 {
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	struct iscsi_cls_session *session;
 
 	mutex_lock(&ihost->mutex);
@@ -325,7 +323,7 @@ static void iscsi_scan_session(struct work_struct *work)
 	struct iscsi_cls_session *session =
 			container_of(work, struct iscsi_cls_session, scan_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
 	spin_lock_irqsave(&session->lock, flags);
@@ -377,7 +375,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
 			container_of(work, struct iscsi_cls_session,
 				     unblock_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
 	/*
@@ -445,7 +443,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
 			container_of(work, struct iscsi_cls_session,
 				     unbind_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
@@ -463,7 +461,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
 static int iscsi_unbind_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	return queue_work(ihost->scan_workq, &session->unbind_work);
 }
@@ -505,7 +503,7 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost;
+	struct iscsi_cls_host *ihost;
 	unsigned long flags;
 	int err;
 
@@ -591,7 +589,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
 void iscsi_remove_session(struct iscsi_cls_session *session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
-	struct iscsi_host *ihost = shost->shost_data;
+	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 	int err;
 
@@ -1619,7 +1617,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
 	priv->t.host_attrs.ac.class = &iscsi_host_class.class;
 	priv->t.host_attrs.ac.match = iscsi_host_match;
-	priv->t.host_size = sizeof(struct iscsi_host);
+	priv->t.host_size = sizeof(struct iscsi_cls_host);
 	transport_container_register(&priv->t.host_attrs);
 
 	SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index afed70ec2aa5..728702292a80 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -201,7 +201,7 @@ struct iscsi_cls_session {
 #define starget_to_session(_stgt) \
 	iscsi_dev_to_session(_stgt->dev.parent)
 
-struct iscsi_host {
+struct iscsi_cls_host {
 	struct list_head sessions;
 	atomic_t nr_scans;
 	struct mutex mutex;
-- 
cgit v1.2.3


From 756135215ec743be6fdce2bdebe8cdb9f8a231f6 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:53:59 -0500
Subject: [SCSI] iscsi: remove session and host binding in libiscsi

bnx2i allocates a host per netdevice but will use libiscsi,
so this patch unbinds the session from the host in libiscsi.

This will also be useful for the upcoming iser parent-device DMA
settings fixes.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c |  74 ++++++++---
 drivers/scsi/iscsi_tcp.c                 | 102 ++++++++-------
 drivers/scsi/libiscsi.c                  | 213 ++++++++++++++-----------------
 drivers/scsi/qla4xxx/ql4_os.c            |   1 -
 drivers/scsi/scsi_transport_iscsi.c      |   5 +-
 include/scsi/iscsi_if.h                  |   7 -
 include/scsi/libiscsi.h                  |  31 ++---
 include/scsi/scsi_transport_iscsi.h      |   8 +-
 8 files changed, 225 insertions(+), 216 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 32f5d5e79abf..5a750042e2b2 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -74,6 +74,10 @@
 
 #include "iscsi_iser.h"
 
+static struct scsi_host_template iscsi_iser_sht;
+static struct iscsi_transport iscsi_iser_transport;
+static struct scsi_transport_template *iscsi_iser_scsi_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
@@ -363,40 +367,64 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 	return iscsi_conn_start(cls_conn);
 }
 
-static struct iscsi_transport iscsi_iser_transport;
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+	iscsi_session_teardown(cls_session);
+	scsi_remove_host(shost);
+	iscsi_host_teardown(shost);
+	scsi_host_put(shost);
+}
 
 static struct iscsi_cls_session *
-iscsi_iser_session_create(struct iscsi_transport *iscsit,
-			 struct scsi_transport_template *scsit,
-			 struct Scsi_Host *shost,
-			 uint16_t cmds_max, uint16_t qdepth,
-			 uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_iser_session_create(struct Scsi_Host *shost,
+			  uint16_t cmds_max, uint16_t qdepth,
+			  uint32_t initial_cmdsn, uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	int i;
-	uint32_t hn;
 	struct iscsi_cmd_task  *ctask;
 	struct iscsi_mgmt_task *mtask;
 	struct iscsi_iser_cmd_task *iser_ctask;
 	struct iser_desc *desc;
 
+	if (shost) {
+		printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
+		       shost->host_no);
+		return NULL;
+	}
+
+	shost = scsi_host_alloc(&iscsi_iser_sht, 0);
+	if (!shost)
+		return NULL;
+	shost->transportt = iscsi_iser_scsi_transport;
+	shost->max_lun = iscsi_max_lun;
+	shost->max_id = 0;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	iscsi_host_setup(shost, qdepth);
+
+	if (scsi_add_host(shost, NULL))
+		goto free_host;
+	*hostno = shost->host_no;
+
 	/*
 	 * we do not support setting can_queue cmd_per_lun from userspace yet
 	 * because we preallocate so many resources
 	 */
-	cls_session = iscsi_session_setup(iscsit, scsit,
+	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
 					  ISCSI_DEF_XMIT_CMDS_MAX,
-					  ISCSI_MAX_CMD_PER_LUN,
 					  sizeof(struct iscsi_iser_cmd_task),
 					  sizeof(struct iser_desc),
-					  initial_cmdsn, &hn);
+					  initial_cmdsn);
 	if (!cls_session)
-	return NULL;
-
-	*hostno = hn;
-	session = class_to_transport_session(cls_session);
+		goto remove_host;
+	session = cls_session->dd_data;
 
+	shost->can_queue = session->cmds_max;
 	/* libiscsi setup itts, data and pool so just set desc fields */
 	for (i = 0; i < session->cmds_max; i++) {
 		ctask      = session->cmds[i];
@@ -413,6 +441,13 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
 	}
 
 	return cls_session;
+
+remove_host:
+	scsi_remove_host(shost);
+free_host:
+	iscsi_host_teardown(shost);
+	scsi_host_put(shost);
+	return NULL;
 }
 
 static int
@@ -589,12 +624,11 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_NETDEV_NAME |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.host_template          = &iscsi_iser_sht,
 	.conndata_size		= sizeof(struct iscsi_conn),
-	.max_lun                = ISCSI_ISER_MAX_LUN,
+	.sessiondata_size	= sizeof(struct iscsi_session),
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
-	.destroy_session        = iscsi_session_teardown,
+	.destroy_session        = iscsi_iser_session_destroy,
 	/* connection management */
 	.create_conn            = iscsi_iser_conn_create,
 	.bind_conn              = iscsi_iser_conn_bind,
@@ -633,8 +667,6 @@ static int __init iser_init(void)
 		return -EINVAL;
 	}
 
-	iscsi_iser_transport.max_lun = iscsi_max_lun;
-
 	memset(&ig, 0, sizeof(struct iser_global));
 
 	ig.desc_cache = kmem_cache_create("iser_descriptors",
@@ -650,7 +682,9 @@ static int __init iser_init(void)
 	mutex_init(&ig.connlist_mutex);
 	INIT_LIST_HEAD(&ig.connlist);
 
-	if (!iscsi_register_transport(&iscsi_iser_transport)) {
+	iscsi_iser_scsi_transport = iscsi_register_transport(
+							&iscsi_iser_transport);
+	if (!iscsi_iser_scsi_transport) {
 		iser_err("iscsi_register_transport failed\n");
 		err = -EINVAL;
 		goto register_transport_failure;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index aecadbdce9df..8cdcaf33fb4e 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
 #define BUG_ON(expr)
 #endif
 
+static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sht;
+static struct iscsi_transport iscsi_tcp_transport;
+
 static unsigned int iscsi_max_lun = 512;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
@@ -1623,6 +1627,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
 		    int is_leading)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct iscsi_host *ihost = shost_priv(shost);
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct sock *sk;
@@ -1646,8 +1652,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	if (err)
 		goto free_socket;
 
-	err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
-				&conn->local_port, kernel_getsockname);
+	err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+				&ihost->local_port, kernel_getsockname);
 	if (err)
 		goto free_socket;
 
@@ -1821,29 +1827,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
 	return len;
 }
 
-static int
-iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
-			 char *buf)
-{
-        struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
-	int len;
-
-	switch (param) {
-	case ISCSI_HOST_PARAM_IPADDRESS:
-		spin_lock_bh(&session->lock);
-		if (!session->leadconn)
-			len = -ENODEV;
-		else
-			len = sprintf(buf, "%s\n",
-				     session->leadconn->local_address);
-		spin_unlock_bh(&session->lock);
-		break;
-	default:
-		return iscsi_host_get_param(shost, param, buf);
-	}
-	return len;
-}
-
 static void
 iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 {
@@ -1869,26 +1852,44 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 }
 
 static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_transport *iscsit,
-			 struct scsi_transport_template *scsit,
-			 struct Scsi_Host *shost, uint16_t cmds_max,
+iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 			 uint16_t qdepth, uint32_t initial_cmdsn,
 			 uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
-	uint32_t hn;
 	int cmd_i;
 
-	cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
-					 sizeof(struct iscsi_tcp_cmd_task),
-					 sizeof(struct iscsi_tcp_mgmt_task),
-					 initial_cmdsn, &hn);
-	if (!cls_session)
+	if (shost) {
+		printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
+		       shost->host_no);
+		return NULL;
+	}
+
+	shost = scsi_host_alloc(&iscsi_sht, sizeof(struct iscsi_host));
+	if (!shost)
 		return NULL;
-	*hostno = hn;
+	shost->transportt = iscsi_tcp_scsi_transport;
+	shost->max_lun = iscsi_max_lun;
+	shost->max_id = 0;
+	shost->max_channel = 0;
+	shost->max_cmd_len = 16;
+
+	iscsi_host_setup(shost, qdepth);
+
+	if (scsi_add_host(shost, NULL))
+		goto free_host;
+	*hostno = shost->host_no;
+
+	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+					  sizeof(struct iscsi_tcp_cmd_task),
+					  sizeof(struct iscsi_tcp_mgmt_task),
+					  initial_cmdsn);
+	if (!cls_session)
+		goto remove_host;
+	session = cls_session->dd_data;
 
-	session = class_to_transport_session(cls_session);
+	shost->can_queue = session->cmds_max;
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
 		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
@@ -1904,20 +1905,30 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
 		mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
 	}
 
-	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
-		goto r2tpool_alloc_fail;
-
+	if (iscsi_r2tpool_alloc(session))
+		goto remove_session;
 	return cls_session;
 
-r2tpool_alloc_fail:
+remove_session:
 	iscsi_session_teardown(cls_session);
+remove_host:
+	scsi_remove_host(shost);
+free_host:
+	iscsi_host_teardown(shost);
+	scsi_host_put(shost);
 	return NULL;
 }
 
 static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 {
-	iscsi_r2tpool_free(class_to_transport_session(cls_session));
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+	iscsi_r2tpool_free(cls_session->dd_data);
 	iscsi_session_teardown(cls_session);
+
+	scsi_remove_host(shost);
+	iscsi_host_teardown(shost);
+	scsi_host_put(shost);
 }
 
 static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@@ -1976,8 +1987,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME |
 				  ISCSI_HOST_NETDEV_NAME,
-	.host_template		= &iscsi_sht,
 	.conndata_size		= sizeof(struct iscsi_conn),
+	.sessiondata_size	= sizeof(struct iscsi_session),
 	/* session management */
 	.create_session		= iscsi_tcp_session_create,
 	.destroy_session	= iscsi_tcp_session_destroy,
@@ -1991,7 +2002,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	.start_conn		= iscsi_conn_start,
 	.stop_conn		= iscsi_tcp_conn_stop,
 	/* iscsi host params */
-	.get_host_param		= iscsi_tcp_host_get_param,
+	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
@@ -2013,9 +2024,10 @@ iscsi_tcp_init(void)
 		       iscsi_max_lun);
 		return -EINVAL;
 	}
-	iscsi_tcp_transport.max_lun = iscsi_max_lun;
 
-	if (!iscsi_register_transport(&iscsi_tcp_transport))
+	iscsi_tcp_scsi_transport = iscsi_register_transport(
+							&iscsi_tcp_transport);
+	if (!iscsi_tcp_scsi_transport)
 		return -ENODEV;
 
 	return 0;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 01a1a4d36f21..64b1dd827366 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,14 +38,6 @@
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/libiscsi.h>
 
-struct iscsi_session *
-class_to_transport_session(struct iscsi_cls_session *cls_session)
-{
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	return iscsi_hostdata(shost->hostdata);
-}
-EXPORT_SYMBOL_GPL(class_to_transport_session);
-
 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
 #define SNA32_CHECK 2147483648UL
 
@@ -1096,6 +1088,7 @@ enum {
 
 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
+	struct iscsi_cls_session *cls_session;
 	struct Scsi_Host *host;
 	int reason = 0;
 	struct iscsi_session *session;
@@ -1109,10 +1102,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	host = sc->device->host;
 	spin_unlock(host->host_lock);
 
-	session = iscsi_hostdata(host->hostdata);
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
 	spin_lock(&session->lock);
 
-	reason = iscsi_session_chkready(session_to_cls(session));
+	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
 		sc->result = reason;
 		goto fault;
@@ -1222,7 +1216,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
 
 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 
 	spin_lock_bh(&session->lock);
 	if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1236,9 +1230,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
 
 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
-	struct iscsi_conn *conn = session->leadconn;
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
+	struct iscsi_conn *conn;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+	conn = session->leadconn;
 
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
@@ -1405,7 +1403,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
 
 	cls_session = starget_to_session(scsi_target(scmd->device));
-	session = class_to_transport_session(cls_session);
+	session = cls_session->dd_data;
 
 	debug_scsi("scsi cmd %p timedout\n", scmd);
 
@@ -1507,13 +1505,16 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_cmd_task *ctask;
 	struct iscsi_tm *hdr;
 	int rc, age;
 
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
 	/*
@@ -1630,12 +1631,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
 
 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 {
-	struct Scsi_Host *host = sc->device->host;
-	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_tm *hdr;
 	int rc = FAILED;
 
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
 	debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
 
 	mutex_lock(&session->eh_mutex);
@@ -1760,55 +1764,53 @@ void iscsi_pool_free(struct iscsi_pool *q)
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
 
-/*
- * iSCSI Session's hostdata organization:
- *
- *    *------------------* <== hostdata_session(host->hostdata)
- *    | ptr to class sess|
- *    |------------------| <== iscsi_hostdata(host->hostdata)
- *    | iscsi_session    |
- *    *------------------*
- */
+void iscsi_host_setup(struct Scsi_Host *shost, uint16_t qdepth)
+{
+	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+		if (qdepth != 0)
+			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+			       "Queue depth must be between 1 and %d.\n",
+			       qdepth, ISCSI_MAX_CMD_PER_LUN);
+		qdepth = ISCSI_DEF_CMD_PER_LUN;
+	}
+
+	shost->transportt->create_work_queue = 1;
+	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+	shost->cmd_per_lun = qdepth;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_setup);
 
-#define hostdata_privsize(_sz)	(sizeof(unsigned long) + _sz + \
-				 _sz % sizeof(unsigned long))
+void iscsi_host_teardown(struct Scsi_Host *shost)
+{
+	struct iscsi_host *ihost = shost_priv(shost);
 
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+	kfree(ihost->netdev);
+	kfree(ihost->hwaddress);
+	kfree(ihost->initiatorname);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_teardown);
 
 /**
  * iscsi_session_setup - create iscsi cls session and host and session
- * @scsit: scsi transport template
  * @iscsit: iscsi transport template
- * @cmds_max: scsi host can queue
- * @qdepth: scsi host cmds per lun
+ * @shost: scsi host
+ * @cmds_max: session can queue
  * @cmd_task_size: LLD ctask private data size
  * @mgmt_task_size: LLD mtask private data size
  * @initial_cmdsn: initial CmdSN
- * @hostno: host no allocated
  *
  * This can be used by software iscsi_transports that allocate
  * a session per scsi host.
- **/
+ */
 struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *iscsit,
-		    struct scsi_transport_template *scsit,
-		    uint16_t cmds_max, uint16_t qdepth,
-		    int cmd_task_size, int mgmt_task_size,
-		    uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+		    uint16_t cmds_max, int cmd_task_size, int mgmt_task_size,
+		    uint32_t initial_cmdsn)
 {
-	struct Scsi_Host *shost;
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
 	int cmd_i;
 
-	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
-		if (qdepth != 0)
-			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
-			      "Queue depth must be between 1 and %d.\n",
-			      qdepth, ISCSI_MAX_CMD_PER_LUN);
-		qdepth = ISCSI_DEF_CMD_PER_LUN;
-	}
-
 	if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
 	    cmds_max < 2) {
 		if (cmds_max != 0)
@@ -1819,25 +1821,11 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
 	}
 
-	shost = scsi_host_alloc(iscsit->host_template,
-				hostdata_privsize(sizeof(*session)));
-	if (!shost)
+	cls_session = iscsi_alloc_session(shost, iscsit);
+	if (!cls_session)
 		return NULL;
-
-	/* the iscsi layer takes one task for reserve */
-	shost->can_queue = cmds_max - 1;
-	shost->cmd_per_lun = qdepth;
-	shost->max_id = 1;
-	shost->max_channel = 0;
-	shost->max_lun = iscsit->max_lun;
-	shost->max_cmd_len = 16;
-	shost->transportt = scsit;
-	shost->transportt->create_work_queue = 1;
-	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
-	*hostno = shost->host_no;
-
-	session = iscsi_hostdata(shost->hostdata);
-	memset(session, 0, sizeof(struct iscsi_session));
+	session = cls_session->dd_data;
+	session->cls_session = cls_session;
 	session->host = shost;
 	session->state = ISCSI_STATE_FREE;
 	session->fast_abort = 1;
@@ -1851,6 +1839,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 	session->max_r2t = 1;
 	session->tt = iscsit;
 	mutex_init(&session->eh_mutex);
+	spin_lock_init(&session->lock);
 
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
@@ -1868,8 +1857,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 		INIT_LIST_HEAD(&ctask->running);
 	}
 
-	spin_lock_init(&session->lock);
-
 	/* initialize immediate command pool */
 	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
 			   (void***)&session->mgmt_cmds,
@@ -1887,49 +1874,37 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
 		INIT_LIST_HEAD(&mtask->running);
 	}
 
-	if (scsi_add_host(shost, NULL))
-		goto add_host_fail;
-
 	if (!try_module_get(iscsit->owner))
-		goto cls_session_fail;
-
-	cls_session = iscsi_create_session(shost, iscsit, 0);
-	if (!cls_session)
-		goto module_put;
-	*(unsigned long*)shost->hostdata = (unsigned long)cls_session;
+		goto module_get_fail;
 
+	if (iscsi_add_session(cls_session, 0))
+		goto cls_session_fail;
 	return cls_session;
 
-module_put:
-	module_put(iscsit->owner);
 cls_session_fail:
-	scsi_remove_host(shost);
-add_host_fail:
+	module_put(iscsit->owner);
+module_get_fail:
 	iscsi_pool_free(&session->mgmtpool);
 mgmtpool_alloc_fail:
 	iscsi_pool_free(&session->cmdpool);
 cmdpool_alloc_fail:
-	scsi_host_put(shost);
+	iscsi_free_session(cls_session);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_setup);
 
 /**
  * iscsi_session_teardown - destroy session, host, and cls_session
- * shost: scsi host
+ * @cls_session: iscsi session
  *
- * This can be used by software iscsi_transports that allocate
- * a session per scsi host.
- **/
+ * The driver must have called iscsi_remove_session before
+ * calling this.
+ */
 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct module *owner = cls_session->transport->owner;
 
-	iscsi_remove_session(cls_session);
-	scsi_remove_host(shost);
-
 	iscsi_pool_free(&session->mgmtpool);
 	iscsi_pool_free(&session->cmdpool);
 
@@ -1938,12 +1913,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->username);
 	kfree(session->username_in);
 	kfree(session->targetname);
-	kfree(session->netdev);
-	kfree(session->hwaddress);
-	kfree(session->initiatorname);
 
-	iscsi_free_session(cls_session);
-	scsi_host_put(shost);
+	iscsi_destroy_session(cls_session);
 	module_put(owner);
 }
 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1956,7 +1927,7 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 struct iscsi_cls_conn *
 iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn;
 	struct iscsi_cls_conn *cls_conn;
 	char *data;
@@ -2140,7 +2111,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 	}
 	spin_unlock_bh(&session->lock);
 
-	iscsi_unblock_session(session_to_cls(session));
+	iscsi_unblock_session(session->cls_session);
 	wake_up(&conn->ehwait);
 	return 0;
 }
@@ -2225,7 +2196,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 		if (session->state == ISCSI_STATE_IN_RECOVERY &&
 		    old_stop_stage != STOP_CONN_RECOVER) {
 			debug_scsi("blocking session\n");
-			iscsi_block_session(session_to_cls(session));
+			iscsi_block_session(session->cls_session);
 		}
 	}
 
@@ -2260,7 +2231,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
 		    struct iscsi_cls_conn *cls_conn, int is_leading)
 {
-	struct iscsi_session *session = class_to_transport_session(cls_session);
+	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn = cls_conn->dd_data;
 
 	spin_lock_bh(&session->lock);
@@ -2410,8 +2381,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 			    enum iscsi_param param, char *buf)
 {
-	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_session *session = cls_session->dd_data;
 	int len;
 
 	switch(param) {
@@ -2525,29 +2495,34 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 			 char *buf)
 {
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_host *ihost = shost_priv(shost);
 	int len;
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!session->netdev)
+		if (!ihost->netdev)
 			len = sprintf(buf, "%s\n", "default");
 		else
-			len = sprintf(buf, "%s\n", session->netdev);
+			len = sprintf(buf, "%s\n", ihost->netdev);
 		break;
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!session->hwaddress)
+		if (!ihost->hwaddress)
 			len = sprintf(buf, "%s\n", "default");
 		else
-			len = sprintf(buf, "%s\n", session->hwaddress);
+			len = sprintf(buf, "%s\n", ihost->hwaddress);
 		break;
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
+		if (!ihost->initiatorname)
 			len = sprintf(buf, "%s\n", "unknown");
 		else
-			len = sprintf(buf, "%s\n", session->initiatorname);
+			len = sprintf(buf, "%s\n", ihost->initiatorname);
 		break;
-
+	case ISCSI_HOST_PARAM_IPADDRESS:
+		if (!strlen(ihost->local_address))
+			len = sprintf(buf, "%s\n", "unknown");
+		else
+			len = sprintf(buf, "%s\n",
+				      ihost->local_address);
 	default:
 		return -ENOSYS;
 	}
@@ -2559,20 +2534,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 			 char *buf, int buflen)
 {
-	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+	struct iscsi_host *ihost = shost_priv(shost);
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_NETDEV_NAME:
-		if (!session->netdev)
-			session->netdev = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->netdev)
+			ihost->netdev = kstrdup(buf, GFP_KERNEL);
 		break;
 	case ISCSI_HOST_PARAM_HWADDRESS:
-		if (!session->hwaddress)
-			session->hwaddress = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->hwaddress)
+			ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
 		break;
 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
-		if (!session->initiatorname)
-			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+		if (!ihost->initiatorname)
+			ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
 		break;
 	default:
 		return -ENOSYS;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0c786944d2c2..6c6ee0f34995 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -114,7 +114,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 				  ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME,
 	.sessiondata_size	= sizeof(struct ddb_entry),
-	.host_template		= &qla4xxx_driver_template,
 
 	.tgt_dscvr		= qla4xxx_tgt_dscvr,
 	.get_conn_param		= qla4xxx_conn_get_param,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 5577a60bec4e..9c00a157b485 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -101,12 +101,10 @@ show_transport_##name(struct device *dev, 				\
 static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
 
 show_transport_attr(caps, "0x%x");
-show_transport_attr(max_lun, "%d");
 
 static struct attribute *iscsi_transport_attrs[] = {
 	&dev_attr_handle.attr,
 	&dev_attr_caps.attr,
-	&dev_attr_max_lun.attr,
 	NULL,
 };
 
@@ -1034,8 +1032,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev,
 		}
 	}
 
-	session = transport->create_session(transport, &priv->t, shost,
-					    cmds_max, queue_depth,
+	session = transport->create_session(shost, cmds_max, queue_depth,
 					    initial_cmdsn, &host_no);
 	if (shost)
 		scsi_host_put(shost);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 1883c85cd3ee..801a677777cc 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -310,13 +310,6 @@ enum iscsi_host_param {
 
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
-
-/**
- * iscsi_hostdata - get LLD hostdata from scsi_host
- * @_hostdata: pointer to scsi host's hostdata
- **/
-#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
 
 /*
  * These flags presents iSCSI Data-Path capabilities.
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index f24cf0246739..8a6271c20935 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -209,9 +209,6 @@ struct iscsi_conn {
 	/* remote portal currently connected to */
 	int			portal_port;
 	char			portal_address[ISCSI_ADDRESS_BUF_LEN];
-	/* local address */
-	int			local_port;
-	char			local_address[ISCSI_ADDRESS_BUF_LEN];
 
 	/* MIB-statistics */
 	uint64_t		txdata_octets;
@@ -247,6 +244,7 @@ enum {
 };
 
 struct iscsi_session {
+	struct iscsi_cls_session *cls_session;
 	/*
 	 * Syncs up the scsi eh thread with the iscsi eh thread when sending
 	 * task management functions. This must be taken before the session
@@ -282,10 +280,6 @@ struct iscsi_session {
 	char			*password;
 	char			*password_in;
 	char			*targetname;
-	char			*initiatorname;
-	/* hw address or netdev iscsi connection is bound to */
-	char			*hwaddress;
-	char			*netdev;
 	/* control data */
 	struct iscsi_transport	*tt;
 	struct Scsi_Host	*host;
@@ -307,6 +301,16 @@ struct iscsi_session {
 	struct iscsi_pool	mgmtpool;	/* Mgmt PDU's pool */
 };
 
+struct iscsi_host {
+	char			*initiatorname;
+	/* hw address or netdev iscsi connection is bound to */
+	char			*hwaddress;
+	char			*netdev;
+	/* local address */
+	int			local_port;
+	char			local_address[ISCSI_ADDRESS_BUF_LEN];
+};
+
 /*
  * scsi host template
  */
@@ -326,27 +330,24 @@ extern int iscsi_host_set_param(struct Scsi_Host *shost,
 				int buflen);
 extern int iscsi_host_get_param(struct Scsi_Host *shost,
 				enum iscsi_host_param param, char *buf);
+extern void iscsi_host_setup(struct Scsi_Host *shost, uint16_t qdepth);
+extern void iscsi_host_teardown(struct Scsi_Host *shost);
 
 /*
  * session management
  */
 extern struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
-		    uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+		    uint16_t, int, int, uint32_t);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
-extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 			   enum iscsi_param param, char *buf, int buflen);
 extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 				   enum iscsi_param param, char *buf);
 
-#define session_to_cls(_sess) \
-	hostdata_session(_sess->host->hostdata)
-
 #define iscsi_session_printk(prefix, _sess, fmt, a...)	\
-	iscsi_cls_session_printk(prefix,		\
-		(struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+	iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
 
 /*
  * connection management
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 728702292a80..702eda2904d7 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -83,15 +83,13 @@ struct iscsi_transport {
 	/* LLD sets this to indicate what values it can export to sysfs */
 	uint64_t param_mask;
 	uint64_t host_param_mask;
-	struct scsi_host_template *host_template;
 	/* LLD connection data size */
 	int conndata_size;
 	/* LLD session data size */
 	int sessiondata_size;
-	int max_lun;
-	struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
-		struct scsi_transport_template *t, struct Scsi_Host *shost,
-		uint16_t cmds_max, uint16_t qdepth, uint32_t sn, uint32_t *hn);
+	struct iscsi_cls_session *(*create_session) (struct Scsi_Host *shost,
+					uint16_t cmds_max, uint16_t qdepth,
+					uint32_t sn, uint32_t *hn);
 	void (*destroy_session) (struct iscsi_cls_session *session);
 	struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
 				uint32_t cid);
-- 
cgit v1.2.3


From a4804cd6eb19318ae8d08ea967cfeaaf5c5b68a6 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:00 -0500
Subject: [SCSI] iscsi: add iscsi host helpers

This finishes the host/session unbinding by adding helpers
to add and remove hosts and the sessions they manage.
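
A hedged usage sketch of the new helpers (condensed from the iscsi_tcp
conversion in this patch; the function name is illustrative only):

	/* allocation side: iscsi_host_alloc() + iscsi_host_add(), then
	 * iscsi_session_setup() against the returned host */
	static void example_host_teardown(struct Scsi_Host *shost)
	{
		/* also tears down any sessions still attached */
		iscsi_host_remove(shost);
		/* frees the ihost strings and drops the host reference */
		iscsi_host_free(shost);
	}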

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c | 17 ++++-----
 drivers/scsi/iscsi_tcp.c                 | 18 ++++------
 drivers/scsi/libiscsi.c                  | 60 ++++++++++++++++++++++++++++----
 drivers/scsi/scsi_transport_iscsi.c      | 20 +++++++++++
 include/scsi/libiscsi.h                  | 11 ++++--
 include/scsi/scsi_transport_iscsi.h      |  4 +++
 6 files changed, 99 insertions(+), 31 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a750042e2b2..62e35e503e49 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -371,10 +371,8 @@ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 
-	iscsi_session_teardown(cls_session);
-	scsi_remove_host(shost);
-	iscsi_host_teardown(shost);
-	scsi_host_put(shost);
+	iscsi_host_remove(shost);
+	iscsi_host_free(shost);
 }
 
 static struct iscsi_cls_session *
@@ -396,7 +394,7 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 		return NULL;
 	}
 
-	shost = scsi_host_alloc(&iscsi_iser_sht, 0);
+	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
 	if (!shost)
 		return NULL;
 	shost->transportt = iscsi_iser_scsi_transport;
@@ -405,9 +403,7 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	shost->max_channel = 0;
 	shost->max_cmd_len = 16;
 
-	iscsi_host_setup(shost, qdepth);
-
-	if (scsi_add_host(shost, NULL))
+	if (iscsi_host_add(shost, NULL))
 		goto free_host;
 	*hostno = shost->host_no;
 
@@ -443,10 +439,9 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	return cls_session;
 
 remove_host:
-	scsi_remove_host(shost);
+	iscsi_host_remove(shost);
 free_host:
-	iscsi_host_teardown(shost);
-	scsi_host_put(shost);
+	iscsi_host_free(shost);
 	return NULL;
 }
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8cdcaf33fb4e..e19d92f2d753 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1866,7 +1866,7 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 		return NULL;
 	}
 
-	shost = scsi_host_alloc(&iscsi_sht, sizeof(struct iscsi_host));
+	shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
 	if (!shost)
 		return NULL;
 	shost->transportt = iscsi_tcp_scsi_transport;
@@ -1874,10 +1874,9 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 	shost->max_id = 0;
 	shost->max_channel = 0;
 	shost->max_cmd_len = 16;
+	shost->can_queue = cmds_max;
 
-	iscsi_host_setup(shost, qdepth);
-
-	if (scsi_add_host(shost, NULL))
+	if (iscsi_host_add(shost, NULL))
 		goto free_host;
 	*hostno = shost->host_no;
 
@@ -1912,10 +1911,9 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 remove_session:
 	iscsi_session_teardown(cls_session);
 remove_host:
-	scsi_remove_host(shost);
+	iscsi_host_remove(shost);
 free_host:
-	iscsi_host_teardown(shost);
-	scsi_host_put(shost);
+	iscsi_host_free(shost);
 	return NULL;
 }
 
@@ -1924,11 +1922,9 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 
 	iscsi_r2tpool_free(cls_session->dd_data);
-	iscsi_session_teardown(cls_session);
 
-	scsi_remove_host(shost);
-	iscsi_host_teardown(shost);
-	scsi_host_put(shost);
+	iscsi_host_remove(shost);
+	iscsi_host_free(shost);
 }
 
 static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 64b1dd827366..73c37c04ca66 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1764,8 +1764,39 @@ void iscsi_pool_free(struct iscsi_pool *q)
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
 
-void iscsi_host_setup(struct Scsi_Host *shost, uint16_t qdepth)
+/**
+ * iscsi_host_add - add host to system
+ * @shost: scsi host
+ * @pdev: parent device
+ *
+ * This should be called by partial offload and software iscsi drivers
+ * to add a host to the system.
+ */
+int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+{
+	return scsi_add_host(shost, pdev);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_add);
+
+/**
+ * iscsi_host_alloc - allocate a host and driver data
+ * @sht: scsi host template
+ * @dd_data_size: driver host data size
+ * @qdepth: default device queue depth
+ *
+ * This should be called by partial offload and software iscsi drivers.
+ * To access the driver specific memory use the iscsi_host_priv() macro.
+ */
+struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+				   int dd_data_size, uint16_t qdepth)
 {
+	struct Scsi_Host *shost;
+
+	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+	if (!shost)
+		return NULL;
+	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+
 	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
 		if (qdepth != 0)
 			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
@@ -1773,22 +1804,37 @@ void iscsi_host_setup(struct Scsi_Host *shost, uint16_t qdepth)
 			       qdepth, ISCSI_MAX_CMD_PER_LUN);
 		qdepth = ISCSI_DEF_CMD_PER_LUN;
 	}
-
-	shost->transportt->create_work_queue = 1;
-	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
 	shost->cmd_per_lun = qdepth;
+	return shost;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+
+/**
+ * iscsi_host_remove - remove host and sessions
+ * @shost: scsi host
+ *
+ * This will also remove any sessions attached to the host, but if userspace
+ * is managing the session at the same time this will break. TODO: add
+ * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+ * does not remove the memory from under us.
+ */
+void iscsi_host_remove(struct Scsi_Host *shost)
+{
+	iscsi_host_for_each_session(shost, iscsi_session_teardown);
+	scsi_remove_host(shost);
 }
-EXPORT_SYMBOL_GPL(iscsi_host_setup);
+EXPORT_SYMBOL_GPL(iscsi_host_remove);
 
-void iscsi_host_teardown(struct Scsi_Host *shost)
+void iscsi_host_free(struct Scsi_Host *shost)
 {
 	struct iscsi_host *ihost = shost_priv(shost);
 
 	kfree(ihost->netdev);
 	kfree(ihost->hwaddress);
 	kfree(ihost->initiatorname);
+	scsi_host_put(shost);
 }
-EXPORT_SYMBOL_GPL(iscsi_host_teardown);
+EXPORT_SYMBOL_GPL(iscsi_host_free);
 
 /**
  * iscsi_session_setup - create iscsi cls session and host and session
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9c00a157b485..6fdaa2ee6632 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -279,6 +279,24 @@ static int iscsi_is_session_dev(const struct device *dev)
 	return dev->release == iscsi_session_release;
 }
 
+static int iscsi_iter_session_fn(struct device *dev, void *data)
+{
+	void (* fn) (struct iscsi_cls_session *) = data;
+
+	if (!iscsi_is_session_dev(dev))
+		return 0;
+	fn(iscsi_dev_to_session(dev));
+	return 0;
+}
+
+void iscsi_host_for_each_session(struct Scsi_Host *shost,
+				 void (*fn)(struct iscsi_cls_session *))
+{
+	device_for_each_child(&shost->shost_gendev, fn,
+			      iscsi_iter_session_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+
 /**
  * iscsi_scan_finished - helper to report when running scans are done
  * @shost: scsi host
@@ -1599,6 +1617,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	priv->daemon_pid = -1;
 	priv->iscsi_transport = tt;
 	priv->t.user_scan = iscsi_user_scan;
+	if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+		priv->t.create_work_queue = 1;
 
 	priv->dev.class = &iscsi_transport_class;
 	snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 8a6271c20935..9a26d715a953 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -32,6 +32,7 @@
 #include <scsi/iscsi_if.h>
 
 struct scsi_transport_template;
+struct scsi_host_template;
 struct scsi_device;
 struct Scsi_Host;
 struct scsi_cmnd;
@@ -41,6 +42,7 @@ struct iscsi_cls_session;
 struct iscsi_cls_conn;
 struct iscsi_session;
 struct iscsi_nopin;
+struct device;
 
 /* #define DEBUG_SCSI */
 #ifdef DEBUG_SCSI
@@ -311,6 +313,8 @@ struct iscsi_host {
 	char			local_address[ISCSI_ADDRESS_BUF_LEN];
 };
 
+#define iscsi_host_priv(_shost) \
+	(shost_priv(_shost) + sizeof(struct iscsi_host))
 /*
  * scsi host template
  */
@@ -330,8 +334,11 @@ extern int iscsi_host_set_param(struct Scsi_Host *shost,
 				int buflen);
 extern int iscsi_host_get_param(struct Scsi_Host *shost,
 				enum iscsi_host_param param, char *buf);
-extern void iscsi_host_setup(struct Scsi_Host *shost, uint16_t qdepth);
-extern void iscsi_host_teardown(struct Scsi_Host *shost);
+extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+					  int dd_data_size, uint16_t qdepth);
+extern void iscsi_host_remove(struct Scsi_Host *shost);
+extern void iscsi_host_free(struct Scsi_Host *shost);
 
 /*
  * session management
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 702eda2904d7..761f62da7cc8 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -207,6 +207,10 @@ struct iscsi_cls_host {
 	char scan_workq_name[KOBJ_NAME_LEN];
 };
 
+extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+				void (*fn)(struct iscsi_cls_session *));
+
+
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
  */
-- 
cgit v1.2.3
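
For orientation, a minimal sketch of how a software iSCSI LLD might use the
host lifecycle helpers added in the patch above (iscsi_host_alloc/iscsi_host_add
on setup, iscsi_host_remove/iscsi_host_free on teardown). The host template,
transport pointer and per-host struct below are hypothetical placeholders, not
symbols from any in-tree driver:

#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

/* hypothetical driver-private per-host data, reached via iscsi_host_priv() */
struct my_iscsi_host {
        int example_flag;
};

static struct scsi_host_template my_sht;             /* filled in elsewhere */
static struct scsi_transport_template *my_scsi_transport; /* assumed: from iscsi_register_transport() */

static struct Scsi_Host *my_create_host(struct device *parent)
{
        struct Scsi_Host *shost;
        struct my_iscsi_host *my_host;

        /* allocates the Scsi_Host plus struct iscsi_host plus our data */
        shost = iscsi_host_alloc(&my_sht, sizeof(struct my_iscsi_host), 0);
        if (!shost)
                return NULL;
        shost->transportt = my_scsi_transport;

        my_host = iscsi_host_priv(shost);
        my_host->example_flag = 1;

        /* registers the host with the SCSI midlayer */
        if (iscsi_host_add(shost, parent)) {
                iscsi_host_free(shost);
                return NULL;
        }
        return shost;
}

static void my_destroy_host(struct Scsi_Host *shost)
{
        /* tears down any sessions still attached, then unregisters the host */
        iscsi_host_remove(shost);
        /* drops the reference taken by iscsi_host_alloc() */
        iscsi_host_free(shost);
}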


From 5d91e209fb21fb9cc765729d4c6a85a9fb6c9187 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:01 -0500
Subject: [SCSI] iscsi: remove session/conn_data_size from iscsi_transport

This removes the session and conn data_size fields from the iscsi_transport.
Instead, the size is passed in directly, as is already done for host
allocation. This patch also makes it so the LLD's iscsi_conn data is
allocated along with the iscsi_cls_conn.
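
A minimal sketch of what an LLD's conn_create callback looks like with the
new dd_size parameter; the driver struct and function names are hypothetical:

#include <linux/spinlock.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/* hypothetical per-connection LLD data, now carved out of the same
 * allocation as the iscsi_cls_conn/iscsi_conn instead of a separate kzalloc */
struct my_conn {
        spinlock_t lock;
};

static struct iscsi_cls_conn *
my_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_conn *conn;
        struct my_conn *my_conn;

        /* dd_size tells libiscsi how much space to reserve after
         * struct iscsi_conn for the LLD's private data */
        cls_conn = iscsi_conn_setup(cls_session, sizeof(*my_conn), conn_idx);
        if (!cls_conn)
                return NULL;

        conn = cls_conn->dd_data;
        my_conn = conn->dd_data;        /* points just past struct iscsi_conn */
        spin_lock_init(&my_conn->lock);

        return cls_conn;
}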

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c | 16 +++-------------
 drivers/scsi/iscsi_tcp.c                 | 19 +++++--------------
 drivers/scsi/libiscsi.c                  | 15 ++++++++++-----
 drivers/scsi/qla4xxx/ql4_os.c            |  7 +++----
 drivers/scsi/scsi_transport_iscsi.c      | 24 ++++++++++++------------
 include/scsi/libiscsi.h                  |  8 ++++----
 include/scsi/scsi_transport_iscsi.h      |  9 +++------
 7 files changed, 40 insertions(+), 58 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 62e35e503e49..9b34946eb00d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -279,7 +279,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_iser_conn *iser_conn;
 
-	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
@@ -290,10 +290,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	 */
 	conn->max_recv_dlength = 128;
 
-	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
-	if (!iser_conn)
-		goto conn_alloc_fail;
-
+	iser_conn = conn->dd_data;
 	/* currently this is the only field which need to be initiated */
 	rwlock_init(&iser_conn->lock);
 
@@ -301,10 +298,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	iser_conn->iscsi_conn = conn;
 
 	return cls_conn;
-
-conn_alloc_fail:
-	iscsi_conn_teardown(cls_conn);
-	return NULL;
 }
 
 static void
@@ -313,10 +306,9 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
-	iscsi_conn_teardown(cls_conn);
 	if (iser_conn->ib_conn)
 		iser_conn->ib_conn->iser_conn = NULL;
-	kfree(iser_conn);
+	iscsi_conn_teardown(cls_conn);
 }
 
 static int
@@ -619,8 +611,6 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_NETDEV_NAME |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.conndata_size		= sizeof(struct iscsi_conn),
-	.sessiondata_size	= sizeof(struct iscsi_session),
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
 	.destroy_session        = iscsi_iser_session_destroy,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e19d92f2d753..dfaf9fa57340 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1490,7 +1490,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_tcp_conn *tcp_conn;
 
-	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
@@ -1500,18 +1500,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	 */
 	conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
 
-	tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
-	if (!tcp_conn)
-		goto tcp_conn_alloc_fail;
-
-	conn->dd_data = tcp_conn;
+	tcp_conn = conn->dd_data;
 	tcp_conn->iscsi_conn = conn;
 
 	tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
 						  CRYPTO_ALG_ASYNC);
 	tcp_conn->tx_hash.flags = 0;
 	if (IS_ERR(tcp_conn->tx_hash.tfm))
-		goto free_tcp_conn;
+		goto free_conn;
 
 	tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
 						  CRYPTO_ALG_ASYNC);
@@ -1523,14 +1519,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 
 free_tx_tfm:
 	crypto_free_hash(tcp_conn->tx_hash.tfm);
-free_tcp_conn:
+free_conn:
 	iscsi_conn_printk(KERN_ERR, conn,
 			  "Could not create connection due to crc32c "
 			  "loading error. Make sure the crc32c "
 			  "module is built as a module or into the "
 			  "kernel\n");
-	kfree(tcp_conn);
-tcp_conn_alloc_fail:
 	iscsi_conn_teardown(cls_conn);
 	return NULL;
 }
@@ -1563,14 +1557,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 
 	iscsi_tcp_release_conn(conn);
-	iscsi_conn_teardown(cls_conn);
 
 	if (tcp_conn->tx_hash.tfm)
 		crypto_free_hash(tcp_conn->tx_hash.tfm);
 	if (tcp_conn->rx_hash.tfm)
 		crypto_free_hash(tcp_conn->rx_hash.tfm);
 
-	kfree(tcp_conn);
+	iscsi_conn_teardown(cls_conn);
 }
 
 static void
@@ -1983,8 +1976,6 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME |
 				  ISCSI_HOST_NETDEV_NAME,
-	.conndata_size		= sizeof(struct iscsi_conn),
-	.sessiondata_size	= sizeof(struct iscsi_session),
 	/* session management */
 	.create_session		= iscsi_tcp_session_create,
 	.destroy_session	= iscsi_tcp_session_destroy,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 73c37c04ca66..784a935fad4a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1867,7 +1867,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
 	}
 
-	cls_session = iscsi_alloc_session(shost, iscsit);
+	cls_session = iscsi_alloc_session(shost, iscsit,
+					  sizeof(struct iscsi_session));
 	if (!cls_session)
 		return NULL;
 	session = cls_session->dd_data;
@@ -1968,22 +1969,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
 /**
  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
  * @cls_session: iscsi_cls_session
+ * @dd_size: private driver data size
  * @conn_idx: cid
- **/
+ */
 struct iscsi_cls_conn *
-iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+		 uint32_t conn_idx)
 {
 	struct iscsi_session *session = cls_session->dd_data;
 	struct iscsi_conn *conn;
 	struct iscsi_cls_conn *cls_conn;
 	char *data;
 
-	cls_conn = iscsi_create_conn(cls_session, conn_idx);
+	cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+				     conn_idx);
 	if (!cls_conn)
 		return NULL;
 	conn = cls_conn->dd_data;
-	memset(conn, 0, sizeof(*conn));
+	memset(conn, 0, sizeof(*conn) + dd_size);
 
+	conn->dd_data = cls_conn->dd_data + sizeof(*conn);
 	conn->session = session;
 	conn->cls_conn = cls_conn;
 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6c6ee0f34995..5822dd595826 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -113,8 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME,
-	.sessiondata_size	= sizeof(struct ddb_entry),
-
 	.tgt_dscvr		= qla4xxx_tgt_dscvr,
 	.get_conn_param		= qla4xxx_conn_get_param,
 	.get_session_param	= qla4xxx_sess_get_param,
@@ -274,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
 		return err;
 	}
 
-	ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
+	ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
 	if (!ddb_entry->conn) {
 		iscsi_remove_session(ddb_entry->sess);
 		DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -291,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
 	struct ddb_entry *ddb_entry;
 	struct iscsi_cls_session *sess;
 
-	sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
+	sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
+				   sizeof(struct ddb_entry));
 	if (!sess)
 		return NULL;
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 6fdaa2ee6632..6b8516a0970b 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -483,12 +483,12 @@ static int iscsi_unbind_session(struct iscsi_cls_session *session)
 }
 
 struct iscsi_cls_session *
-iscsi_alloc_session(struct Scsi_Host *shost,
-		    struct iscsi_transport *transport)
+iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+		    int dd_size)
 {
 	struct iscsi_cls_session *session;
 
-	session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+	session = kzalloc(sizeof(*session) + dd_size,
 			  GFP_KERNEL);
 	if (!session)
 		return NULL;
@@ -510,7 +510,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 	session->dev.parent = &shost->shost_gendev;
 	session->dev.release = iscsi_session_release;
 	device_initialize(&session->dev);
-	if (transport->sessiondata_size)
+	if (dd_size)
 		session->dd_data = &session[1];
 	return session;
 }
@@ -558,18 +558,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
  * iscsi_create_session - create iscsi class session
  * @shost: scsi host
  * @transport: iscsi transport
+ * @dd_size: private driver data size
  * @target_id: which target
  *
  * This can be called from a LLD or iscsi_transport.
  */
 struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost,
-		     struct iscsi_transport *transport,
-		     unsigned int target_id)
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+		     int dd_size, unsigned int target_id)
 {
 	struct iscsi_cls_session *session;
 
-	session = iscsi_alloc_session(shost, transport);
+	session = iscsi_alloc_session(shost, transport, dd_size);
 	if (!session)
 		return NULL;
 
@@ -671,6 +671,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
 /**
  * iscsi_create_conn - create iscsi class connection
  * @session: iscsi cls session
+ * @dd_size: private driver data size
  * @cid: connection id
  *
  * This can be called from a LLD or iscsi_transport. The connection
@@ -683,18 +684,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
  * non-zero.
  */
 struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
 {
 	struct iscsi_transport *transport = session->transport;
 	struct iscsi_cls_conn *conn;
 	unsigned long flags;
 	int err;
 
-	conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+	conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
 	if (!conn)
 		return NULL;
-
-	if (transport->conndata_size)
+	if (dd_size)
 		conn->dd_data = &conn[1];
 
 	INIT_LIST_HEAD(&conn->conn_list);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 9a26d715a953..4e1c14f20ddd 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -313,8 +313,6 @@ struct iscsi_host {
 	char			local_address[ISCSI_ADDRESS_BUF_LEN];
 };
 
-#define iscsi_host_priv(_shost) \
-	(shost_priv(_shost) + sizeof(struct iscsi_host))
 /*
  * scsi host template
  */
@@ -325,10 +323,12 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
 extern int iscsi_queuecommand(struct scsi_cmnd *sc,
 			      void (*done)(struct scsi_cmnd *));
 
-
 /*
  * iSCSI host helpers.
  */
+#define iscsi_host_priv(_shost) \
+	(shost_priv(_shost) + sizeof(struct iscsi_host))
+
 extern int iscsi_host_set_param(struct Scsi_Host *shost,
 				enum iscsi_host_param param, char *buf,
 				int buflen);
@@ -360,7 +360,7 @@ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
  * connection management
  */
 extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
-					       uint32_t);
+					       int, uint32_t);
 extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
 extern int iscsi_conn_start(struct iscsi_cls_conn *);
 extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 761f62da7cc8..4028f121d548 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -83,10 +83,6 @@ struct iscsi_transport {
 	/* LLD sets this to indicate what values it can export to sysfs */
 	uint64_t param_mask;
 	uint64_t host_param_mask;
-	/* LLD connection data size */
-	int conndata_size;
-	/* LLD session data size */
-	int sessiondata_size;
 	struct iscsi_cls_session *(*create_session) (struct Scsi_Host *shost,
 					uint16_t cmds_max, uint16_t qdepth,
 					uint32_t sn, uint32_t *hn);
@@ -222,19 +218,20 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 
 extern int iscsi_session_chkready(struct iscsi_cls_session *session);
 extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
-					struct iscsi_transport *transport);
+				struct iscsi_transport *transport, int dd_size);
 extern int iscsi_add_session(struct iscsi_cls_session *session,
 			     unsigned int target_id);
 extern int iscsi_session_event(struct iscsi_cls_session *session,
 			       enum iscsi_uevent_e event);
 extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
 						struct iscsi_transport *t,
+						int dd_size,
 						unsigned int target_id);
 extern void iscsi_remove_session(struct iscsi_cls_session *session);
 extern void iscsi_free_session(struct iscsi_cls_session *session);
 extern int iscsi_destroy_session(struct iscsi_cls_session *session);
 extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
-					    uint32_t cid);
+						int dd_size, uint32_t cid);
 extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
 extern void iscsi_unblock_session(struct iscsi_cls_session *session);
 extern void iscsi_block_session(struct iscsi_cls_session *session);
-- 
cgit v1.2.3


From b40977d95fb3a1898ace6a7d97e4ed1a33a440a4 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:03 -0500
Subject: [SCSI] iser: fix handling of scsi cmnds during recovery.

After the stop_conn callback has returned, the LLD must not touch the
scsi cmds. iscsi_tcp and libiscsi use the conn->recv_lock and suspend_rx
field to halt recv path processing, but iser has no such protection.

This patch modifies iser so that userspace can simply call the
ep_disconnect callback, which halts all recv IO, before calling the
stop_conn callback; that way we do not have to worry about the
conn->recv_lock and suspend_rx field. iser only needs to stop the send
side from accessing the ib conn.

Includes a fixup from Erez to handle the case where the ep poll fails
and ep_disconnect is called.
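
A condensed sketch of the resulting ep_disconnect ordering (the complete
change is in the hunks below, and the iser-internal types from iscsi_iser.h
are assumed); error handling and logging are omitted:

static void example_ep_disconnect(struct iser_conn *ib_conn)
{
        /* the ep may never have been bound if the ep poll failed */
        if (ib_conn->iser_conn)
                /* quiesce the send side so nothing touches ib_conn while
                 * it is being torn down */
                iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);

        /* start the RDMA disconnect; the final iser_conn_put() frees ib_conn */
        iser_conn_terminate(ib_conn);
}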

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c | 42 +++++++++++++++++++++++++++++---
 drivers/infiniband/ulp/iser/iscsi_iser.h |  5 ++++
 drivers/infiniband/ulp/iser/iser_verbs.c | 14 ++++++++++-
 drivers/scsi/libiscsi.c                  |  3 ++-
 4 files changed, 59 insertions(+), 5 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b34946eb00d..8a1bfb7277c8 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -305,10 +305,18 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
 
-	if (iser_conn->ib_conn)
-		iser_conn->ib_conn->iser_conn = NULL;
 	iscsi_conn_teardown(cls_conn);
+	/*
+	 * Userspace will normally call the stop callback and
+	 * already have freed the ib_conn, but if it goofed up then
+	 * we free it here.
+	 */
+	if (ib_conn) {
+		ib_conn->iser_conn = NULL;
+		iser_conn_put(ib_conn);
+	}
 }
 
 static int
@@ -340,12 +348,29 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
+	iser_conn_get(ib_conn);
 
 	conn->recv_lock = &iser_conn->lock;
 
 	return 0;
 }
 
+static void
+iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+	iscsi_conn_stop(cls_conn, flag);
+	/*
+	 * There is no unbind event so the stop callback
+	 * must release the ref from the bind.
+	 */
+	iser_conn_put(ib_conn);
+	iser_conn->ib_conn = NULL;
+}
+
 static int
 iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
 {
@@ -564,6 +589,17 @@ iscsi_iser_ep_disconnect(__u64 ep_handle)
 	if (!ib_conn)
 		return;
 
+	if (ib_conn->iser_conn)
+		/*
+		 * Must suspend xmit path if the ep is bound to the
+		 * iscsi_conn, so we know we are not accessing the ib_conn
+		 * when we free it.
+		 *
+		 * This may not be bound if the ep poll failed.
+		 */
+		iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+
+
 	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
@@ -622,7 +658,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.get_conn_param		= iscsi_conn_get_param,
 	.get_session_param	= iscsi_session_get_param,
 	.start_conn             = iscsi_iser_conn_start,
-	.stop_conn              = iscsi_conn_stop,
+	.stop_conn              = iscsi_iser_conn_stop,
 	/* iscsi host params */
 	.get_host_param		= iscsi_host_get_param,
 	.set_host_param		= iscsi_host_set_param,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 66a2f30ada01..bd5c1a554ea6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -242,6 +242,7 @@ struct iser_device {
 struct iser_conn {
 	struct iscsi_iser_conn       *iser_conn; /* iser conn for upcalls  */
 	enum iser_ib_conn_state	     state;	    /* rdma connection state   */
+	atomic_t		     refcount;
 	spinlock_t		     lock;	    /* used for state changes  */
 	struct iser_device           *device;       /* device context          */
 	struct rdma_cm_id            *cma_id;       /* CMA ID		       */
@@ -314,6 +315,10 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
 
 int  iser_conn_init(struct iser_conn **ib_conn);
 
+void iser_conn_get(struct iser_conn *ib_conn);
+
+void iser_conn_put(struct iser_conn *ib_conn);
+
 void iser_conn_terminate(struct iser_conn *ib_conn);
 
 void iser_rcv_completion(struct iser_desc *desc,
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index d19cfe605ebb..5daed2bd710e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -328,6 +328,17 @@ static void iser_conn_release(struct iser_conn *ib_conn)
 	kfree(ib_conn);
 }
 
+void iser_conn_get(struct iser_conn *ib_conn)
+{
+	atomic_inc(&ib_conn->refcount);
+}
+
+void iser_conn_put(struct iser_conn *ib_conn)
+{
+	if (atomic_dec_and_test(&ib_conn->refcount))
+		iser_conn_release(ib_conn);
+}
+
 /**
  * triggers start of the disconnect procedures and wait for them to be done
  */
@@ -349,7 +360,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
 	wait_event_interruptible(ib_conn->wait,
 				 ib_conn->state == ISER_CONN_DOWN);
 
-	iser_conn_release(ib_conn);
+	iser_conn_put(ib_conn);
 }
 
 static void iser_connect_error(struct rdma_cm_id *cma_id)
@@ -496,6 +507,7 @@ int iser_conn_init(struct iser_conn **ibconn)
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
 	atomic_set(&ib_conn->post_send_buf_count, 0);
+	atomic_set(&ib_conn->refcount, 1);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);
 
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 784a935fad4a..79bc49fd7f12 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1383,11 +1383,12 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
 	}
 }
 
-static void iscsi_suspend_tx(struct iscsi_conn *conn)
+void iscsi_suspend_tx(struct iscsi_conn *conn)
 {
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	scsi_flush_work(conn->session->host);
 }
+EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
-- 
cgit v1.2.3


From 0af967f5d4f2dd1e00618d34ac988037d37a6c3b Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:04 -0500
Subject: [SCSI] libiscsi, iscsi_tcp, iser: add session cmds array accessor

Currently, to get a ctask from the session cmd array you have to know
to apply the itt modifier. To make this easier on LLDs, and so that in
the future we can easily kill the session array and use the host shared
map instead, this patch adds a wrapper that strips the itt into a
session->cmds index and returns the ctask.
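
For illustration, roughly how an LLD's response handling looks with the new
accessor; the surrounding function and its name are hypothetical:

#include <scsi/libiscsi.h>

static int my_handle_scsi_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                              char *data, int datalen)
{
        struct iscsi_cmd_task *ctask;

        /* replaces open-coded session->cmds[get_itt(hdr->itt)] lookups: the
         * helper verifies the itt (age, range, active command) and returns
         * NULL if it cannot be matched to a task */
        ctask = iscsi_itt_to_ctask(conn, hdr->itt);
        if (!ctask)
                return ISCSI_ERR_BAD_ITT;

        /* ... use ctask->sc, ctask->dd_data, etc. ... */
        return iscsi_complete_pdu(conn, hdr, data, datalen);
}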

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c     |  8 +---
 drivers/infiniband/ulp/iser/iser_initiator.c | 23 +++++------
 drivers/scsi/iscsi_tcp.c                     | 13 ++++--
 drivers/scsi/libiscsi.c                      | 60 ++++++++++++++++++++--------
 include/scsi/libiscsi.h                      |  4 +-
 5 files changed, 65 insertions(+), 43 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8a1bfb7277c8..7b1468869066 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -98,7 +98,6 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 		struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
 {
 	int rc = 0;
-	uint32_t ret_itt;
 	int datalen;
 	int ahslen;
 
@@ -114,12 +113,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
 	/* read AHS */
 	ahslen = hdr->hlength * 4;
 
-	/* verify itt (itt encoding: age+cid+itt) */
-	rc = iscsi_verify_itt(conn, hdr, &ret_itt);
-
-	if (!rc)
-		rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
-
+	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
 	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
 		goto error;
 
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 08dc81c46f41..b82a5f2d4d37 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -537,13 +537,11 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 {
 	struct iser_dto        *dto = &rx_desc->dto;
 	struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
-	struct iscsi_session *session = conn->iscsi_conn->session;
 	struct iscsi_cmd_task *ctask;
 	struct iscsi_iser_cmd_task *iser_ctask;
 	struct iscsi_hdr *hdr;
 	char   *rx_data = NULL;
 	int     rx_data_len = 0;
-	unsigned int itt;
 	unsigned char opcode;
 
 	hdr = &rx_desc->iscsi_header;
@@ -559,19 +557,18 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-	        itt = get_itt(hdr->itt); /* mask out cid and age bits */
-		if (!(itt < session->cmds_max))
+		ctask = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+		if (!ctask)
 			iser_err("itt can't be matched to task!!! "
-				 "conn %p opcode %d cmds_max %d itt %d\n",
-				 conn->iscsi_conn,opcode,session->cmds_max,itt);
-		/* use the mapping given with the cmds array indexed by itt */
-		ctask = (struct iscsi_cmd_task *)session->cmds[itt];
-		iser_ctask = ctask->dd_data;
-		iser_dbg("itt %d ctask %p\n",itt,ctask);
-		iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
-		iser_ctask_rdma_finalize(iser_ctask);
+				 "conn %p opcode %d itt %d\n",
+				 conn->iscsi_conn, opcode, hdr->itt);
+		else {
+			iser_ctask = ctask->dd_data;
+			iser_dbg("itt %d ctask %p\n",hdr->itt, ctask);
+			iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
+			iser_ctask_rdma_finalize(iser_ctask);
+		}
 	}
-
 	iser_dto_buffs_release(dto);
 
 	iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index dfaf9fa57340..f2a08f7ed902 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -740,7 +740,6 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_cmd_task *ctask;
-	uint32_t itt;
 
 	/* verify PDU length */
 	tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -758,7 +757,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 	/* verify itt (itt encoding: age+cid+itt) */
-	rc = iscsi_verify_itt(conn, hdr, &itt);
+	rc = iscsi_verify_itt(conn, hdr->itt);
 	if (rc)
 		return rc;
 
@@ -767,7 +766,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
-		ctask = session->cmds[itt];
+		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!ctask)
+			return ISCSI_ERR_BAD_ITT;
+
 		spin_lock(&conn->session->lock);
 		rc = iscsi_data_rsp(conn, ctask);
 		spin_unlock(&conn->session->lock);
@@ -810,7 +812,10 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		ctask = session->cmds[itt];
+		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!ctask)
+			return ISCSI_ERR_BAD_ITT;
+
 		if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
 		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 79bc49fd7f12..4bc63c4b3c10 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -640,6 +640,10 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	uint32_t itt;
 
 	conn->last_recv = jiffies;
+	rc = iscsi_verify_itt(conn, hdr->itt);
+	if (rc)
+		return rc;
+
 	if (hdr->itt != RESERVED_ITT)
 		itt = get_itt(hdr->itt);
 	else
@@ -776,27 +780,22 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 }
 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
 
-/* verify itt (itt encoding: age+cid+itt) */
-int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-		     uint32_t *ret_itt)
+int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
 	struct iscsi_cmd_task *ctask;
-	uint32_t itt;
 
-	if (hdr->itt != RESERVED_ITT) {
-		if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
-		    (session->age << ISCSI_AGE_SHIFT)) {
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "received itt %x expected session "
-					  "age (%x)\n", (__force u32)hdr->itt,
-					  session->age & ISCSI_AGE_MASK);
-			return ISCSI_ERR_BAD_ITT;
-		}
+	if (itt == RESERVED_ITT)
+		return 0;
 
-		itt = get_itt(hdr->itt);
-	} else
-		itt = ~0U;
+	if (((__force u32)itt & ISCSI_AGE_MASK) !=
+	    (session->age << ISCSI_AGE_SHIFT)) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "received itt %x expected session age (%x)\n",
+				  (__force u32)itt,
+				  session->age & ISCSI_AGE_MASK);
+		return ISCSI_ERR_BAD_ITT;
+	}
 
 	if (itt < session->cmds_max) {
 		ctask = session->cmds[itt];
@@ -817,11 +816,38 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		}
 	}
 
-	*ret_itt = itt;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
 
+struct iscsi_cmd_task *
+iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+{
+	struct iscsi_session *session = conn->session;
+	struct iscsi_cmd_task *ctask;
+	uint32_t i;
+
+	if (iscsi_verify_itt(conn, itt))
+		return NULL;
+
+	if (itt == RESERVED_ITT)
+		return NULL;
+
+	i = get_itt(itt);
+	if (i >= session->cmds_max)
+		return NULL;
+
+	ctask = session->cmds[i];
+	if (!ctask->sc)
+		return NULL;
+
+	if (ctask->sc->SCp.phase != session->age)
+		return NULL;
+
+	return ctask;
+}
+EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
+
 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 {
 	struct iscsi_session *session = conn->session;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index f54aeb1e8ae3..9be6a70faff5 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -385,8 +385,8 @@ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
 				char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 			      char *, int);
-extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
-			    uint32_t *);
+extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+extern struct iscsi_cmd_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
 extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
 				 struct iscsi_mgmt_task *mtask);
-- 
cgit v1.2.3


From 052d014485d2ce5bb7fa8dd0df875dafd1db77df Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:05 -0500
Subject: [SCSI] libiscsi: modify libiscsi so it supports offloaded data paths

This patch modifies libiscsi so that drivers like bnx2i and iser can
execute a command directly from queuecommand/send_pdu instead of having
it queued and run from a workqueue.
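
The rough shape of an offload LLD that opts into this path; the handler and
transport below are hypothetical, and only the fields relevant here are shown:

#include <scsi/iscsi_if.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/* hypothetical: hand the already-prepped task straight to the hw/firmware */
static int my_xmit_cmd_task(struct iscsi_conn *conn,
                            struct iscsi_cmd_task *ctask)
{
        return 0;
}

static struct iscsi_transport my_offload_transport = {
        .name           = "my_offload",
        /* CAP_DATA_PATH_OFFLOAD makes libiscsi call xmit_cmd_task() directly
         * from queuecommand/send_pdu instead of queueing xmitwork */
        .caps           = CAP_DATA_PATH_OFFLOAD,
        .xmit_cmd_task  = my_xmit_cmd_task,
        /* session/conn create, destroy, etc. omitted for brevity */
};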

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/libiscsi.c | 179 ++++++++++++++++++++++++++++--------------------
 1 file changed, 104 insertions(+), 75 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4bc63c4b3c10..1e605de07cff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -79,9 +79,11 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 		 * xmit thread
 		 */
 		if (!list_empty(&session->leadconn->xmitqueue) ||
-		    !list_empty(&session->leadconn->mgmtqueue))
-			scsi_queue_work(session->host,
-					&session->leadconn->xmitwork);
+		    !list_empty(&session->leadconn->mgmtqueue)) {
+			if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+				scsi_queue_work(session->host,
+						&session->leadconn->xmitwork);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -298,8 +300,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	WARN_ON(hdrlength >= 256);
 	hdr->hlength = hdrlength & 0xFF;
 
-	if (conn->session->tt->init_cmd_task(conn->ctask))
-		return EIO;
+	if (conn->session->tt->init_cmd_task &&
+	    conn->session->tt->init_cmd_task(ctask))
+		return -EIO;
+
+	ctask->state = ISCSI_TASK_RUNNING;
+	list_move_tail(&ctask->running, &conn->run_list);
 
 	conn->scsicmd_pdus_cnt++;
 	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
@@ -334,7 +340,9 @@ static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 		conn->ctask = NULL;
 	list_del_init(&ctask->running);
 	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
-	sc->scsi_done(sc);
+
+	if (sc->scsi_done)
+		sc->scsi_done(sc);
 }
 
 static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
@@ -403,6 +411,50 @@ void iscsi_free_mgmt_task(struct iscsi_conn *conn,
 }
 EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
 
+static int iscsi_prep_mtask(struct iscsi_conn *conn,
+			    struct iscsi_mgmt_task *mtask)
+{
+	struct iscsi_session *session = conn->session;
+	struct iscsi_hdr *hdr = mtask->hdr;
+	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+		return -ENOTCONN;
+
+	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+	/*
+	 * pre-format CmdSN for outgoing PDU.
+	 */
+	nop->cmdsn = cpu_to_be32(session->cmdsn);
+	if (hdr->itt != RESERVED_ITT) {
+		hdr->itt = build_itt(mtask->itt, session->age);
+		/*
+		 * TODO: We always use immediate, so we never hit this.
+		 * If we start to send tmfs or nops as non-immediate then
+		 * we should start checking the cmdsn numbers for mgmt tasks.
+		 */
+		if (conn->c_stage == ISCSI_CONN_STARTED &&
+		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+			session->queued_cmdsn++;
+			session->cmdsn++;
+		}
+	}
+
+	if (session->tt->init_mgmt_task)
+		session->tt->init_mgmt_task(conn, mtask);
+
+	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+		session->state = ISCSI_STATE_LOGGING_OUT;
+
+	list_move_tail(&mtask->running, &conn->mgmt_run_list);
+	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+		   mtask->data_count);
+	return 0;
+}
+
 static struct iscsi_mgmt_task *
 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		      char *data, uint32_t data_size)
@@ -429,6 +481,12 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (!__kfifo_get(session->mgmtpool.queue,
 				 (void*)&mtask, sizeof(void*)))
 			return NULL;
+
+		if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+		     hdr->ttt == RESERVED_ITT) {
+			conn->ping_mtask = mtask;
+			conn->last_ping = jiffies;
+		}
 	}
 
 	if (data_size) {
@@ -440,6 +498,19 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
 	INIT_LIST_HEAD(&mtask->running);
 	list_add_tail(&mtask->running, &conn->mgmtqueue);
+
+	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+		if (iscsi_prep_mtask(conn, mtask)) {
+			iscsi_free_mgmt_task(conn, mtask);
+			return NULL;
+		}
+
+		if (session->tt->xmit_mgmt_task(conn, mtask))
+			mtask = NULL;
+
+	} else
+		scsi_queue_work(conn->session->host, &conn->xmitwork);
+
 	return mtask;
 }
 
@@ -454,7 +525,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
 		err = -EPERM;
 	spin_unlock_bh(&session->lock);
-	scsi_queue_work(session->host, &conn->xmitwork);
 	return err;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -581,17 +651,8 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 		hdr.ttt = RESERVED_ITT;
 
 	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-	if (!mtask) {
+	if (!mtask)
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
-		return;
-	}
-
-	/* only track our nops */
-	if (!rhdr) {
-		conn->ping_mtask = mtask;
-		conn->last_ping = jiffies;
-	}
-	scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -868,56 +929,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
-static void iscsi_prep_mtask(struct iscsi_conn *conn,
-			     struct iscsi_mgmt_task *mtask)
-{
-	struct iscsi_session *session = conn->session;
-	struct iscsi_hdr *hdr = mtask->hdr;
-	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
-
-	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
-	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
-		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
-	/*
-	 * pre-format CmdSN for outgoing PDU.
-	 */
-	nop->cmdsn = cpu_to_be32(session->cmdsn);
-	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(mtask->itt, session->age);
-		/*
-		 * TODO: We always use immediate, so we never hit this.
-		 * If we start to send tmfs or nops as non-immediate then
-		 * we should start checking the cmdsn numbers for mgmt tasks.
-		 */
-		if (conn->c_stage == ISCSI_CONN_STARTED &&
-		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
-			session->queued_cmdsn++;
-			session->cmdsn++;
-		}
-	}
-
-	if (session->tt->init_mgmt_task)
-		session->tt->init_mgmt_task(conn, mtask);
-
-	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
-		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
-		   mtask->data_count);
-}
-
 static int iscsi_xmit_mtask(struct iscsi_conn *conn)
 {
-	struct iscsi_hdr *hdr = conn->mtask->hdr;
 	int rc;
 
-	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
-		conn->session->state = ISCSI_STATE_LOGGING_OUT;
 	spin_unlock_bh(&conn->session->lock);
-
 	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
 	spin_lock_bh(&conn->session->lock);
 	if (rc)
 		return rc;
-
 	/* done with this in-progress mtask */
 	conn->mtask = NULL;
 	return 0;
@@ -961,7 +981,8 @@ static int iscsi_xmit_ctask(struct iscsi_conn *conn)
  * @ctask: ctask to requeue
  *
  * LLDs that need to run a ctask from the session workqueue should call
- * this. The session lock must be held.
+ * this. The session lock must be held. This should only be called
+ * by software drivers.
  */
 void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
 {
@@ -1013,14 +1034,11 @@ check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->mtask = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_mgmt_task, running);
-		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+		if (iscsi_prep_mtask(conn, conn->mtask)) {
 			iscsi_free_mgmt_task(conn, conn->mtask);
 			conn->mtask = NULL;
 			continue;
 		}
-
-		iscsi_prep_mtask(conn, conn->mtask);
-		list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
 		rc = iscsi_xmit_mtask(conn);
 		if (rc)
 			goto again;
@@ -1041,9 +1059,6 @@ check_mgmt:
 			fail_command(conn, conn->ctask, DID_ABORT << 16);
 			continue;
 		}
-
-		conn->ctask->state = ISCSI_TASK_RUNNING;
-		list_move_tail(conn->xmitqueue.next, &conn->run_list);
 		rc = iscsi_xmit_ctask(conn);
 		if (rc)
 			goto again;
@@ -1192,8 +1207,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		reason = FAILURE_OOM;
 		goto reject;
 	}
-	session->queued_cmdsn++;
-
 	sc->SCp.phase = session->age;
 	sc->SCp.ptr = (char *)ctask;
 
@@ -1202,11 +1215,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	ctask->conn = conn;
 	ctask->sc = sc;
 	INIT_LIST_HEAD(&ctask->running);
-
 	list_add_tail(&ctask->running, &conn->xmitqueue);
-	spin_unlock(&session->lock);
 
-	scsi_queue_work(host, &conn->xmitwork);
+	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+		if (iscsi_prep_scsi_cmd_pdu(ctask)) {
+			sc->result = DID_ABORT << 16;
+			sc->scsi_done = NULL;
+			iscsi_complete_command(ctask);
+			goto fault;
+		}
+		if (session->tt->xmit_cmd_task(conn, ctask)) {
+			sc->scsi_done = NULL;
+			iscsi_complete_command(ctask);
+			reason = FAILURE_SESSION_NOT_READY;
+			goto reject;
+		}
+	} else
+		scsi_queue_work(session->host, &conn->xmitwork);
+
+	session->queued_cmdsn++;
+	spin_unlock(&session->lock);
 	spin_lock(host->host_lock);
 	return 0;
 
@@ -1225,7 +1253,7 @@ fault:
 		scsi_out(sc)->resid = scsi_out(sc)->length;
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
-	sc->scsi_done(sc);
+	done(sc);
 	spin_lock(host->host_lock);
 	return 0;
 }
@@ -1344,7 +1372,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
-	scsi_queue_work(session->host, &conn->xmitwork);
 
 	/*
 	 * block eh thread until:
@@ -1412,14 +1439,16 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
 void iscsi_suspend_tx(struct iscsi_conn *conn)
 {
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	scsi_flush_work(conn->session->host);
+	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+		scsi_flush_work(conn->session->host);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-	scsi_queue_work(conn->session->host, &conn->xmitwork);
+	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+		scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
 static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
-- 
cgit v1.2.3


From 3e5c28ad0391389959ccae81c938c7533efb3490 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:06 -0500
Subject: [SCSI] libiscsi: merge iscsi_mgmt_task and iscsi_cmd_task

There is no need to keep the mgmt and cmd tasks in separate structs.
The split used to save a lot of memory when we overpreallocated memory
for tasks, but the next patches will set up the drivers so that in the
future they can use a mempool or some other common scsi command
allocator and common tagging.
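
A minimal sketch of the reference rule the merged task follows (see the
refcount comment added in __iscsi_conn_send_pdu below): libiscsi drops the
reference when the matching response PDU completes, and an LLD drops it
itself once it has transmitted a PDU for which no response is expected. The
callback below is hypothetical:

#include <scsi/libiscsi.h>

static void my_hw_tx_done(struct iscsi_cmd_task *ctask, bool response_expected)
{
        /* e.g. a nop-out sent in reply to a target nop-in gets no response,
         * so the LLD releases the task reference itself */
        if (!response_expected)
                iscsi_put_ctask(ctask);
}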

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/libiscsi.c             | 518 +++++++++++++++++-------------------
 include/scsi/libiscsi.h             |  29 +-
 include/scsi/scsi_transport_iscsi.h |  41 ++-
 3 files changed, 272 insertions(+), 316 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 1e605de07cff..ef92b1b0f16e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -197,7 +197,7 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
 
 /**
  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
- * @ctask: iscsi cmd task
+ * @ctask: iscsi task
  *
  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
  * fields like dlength or final based on how much data it sends
@@ -300,31 +300,31 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	WARN_ON(hdrlength >= 256);
 	hdr->hlength = hdrlength & 0xFF;
 
-	if (conn->session->tt->init_cmd_task &&
-	    conn->session->tt->init_cmd_task(ctask))
+	if (conn->session->tt->init_task &&
+	    conn->session->tt->init_task(ctask))
 		return -EIO;
 
 	ctask->state = ISCSI_TASK_RUNNING;
 	list_move_tail(&ctask->running, &conn->run_list);
 
 	conn->scsicmd_pdus_cnt++;
-	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
-		"len %d bidi_len %d cmdsn %d win %d]\n",
-		scsi_bidi_cmnd(sc) ? "bidirectional" :
-		     sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-		conn->id, sc, sc->cmnd[0], ctask->itt,
-		scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
-		session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+		   "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+		   "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+		   "write" : "read", conn->id, sc, sc->cmnd[0], ctask->itt,
+		   scsi_bufflen(sc),
+		   scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+		   session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
 	return 0;
 }
 
 /**
- * iscsi_complete_command - return command back to scsi-ml
+ * iscsi_complete_command - finish a task
  * @ctask: iscsi cmd task
  *
  * Must be called with session lock.
- * This function returns the scsi command to scsi-ml and returns
- * the cmd task to the pool of available cmd tasks.
+ * This function returns the scsi command to scsi-ml or cleans
+ * up mgmt tasks then returns the task to the pool.
  */
 static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 {
@@ -332,17 +332,34 @@ static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = ctask->sc;
 
+	list_del_init(&ctask->running);
 	ctask->state = ISCSI_TASK_COMPLETED;
 	ctask->sc = NULL;
-	/* SCSI eh reuses commands to verify us */
-	sc->SCp.ptr = NULL;
+
 	if (conn->ctask == ctask)
 		conn->ctask = NULL;
-	list_del_init(&ctask->running);
+	/*
+	 * login ctask is preallocated so do not free
+	 */
+	if (conn->login_ctask == ctask)
+		return;
+
 	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
 
-	if (sc->scsi_done)
-		sc->scsi_done(sc);
+	if (conn->ping_ctask == ctask)
+		conn->ping_ctask = NULL;
+
+	if (sc) {
+		ctask->sc = NULL;
+		/* SCSI eh reuses commands to verify us */
+		sc->SCp.ptr = NULL;
+		/*
+		 * queue command may call this to free the task, but
+		 * not have setup the sc callback
+		 */
+		if (sc->scsi_done)
+			sc->scsi_done(sc);
+	}
 }
 
 static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
@@ -356,6 +373,16 @@ static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
 		iscsi_complete_command(ctask);
 }
 
+void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+{
+	struct iscsi_session *session = ctask->conn->session;
+
+	spin_lock_bh(&session->lock);
+	__iscsi_put_ctask(ctask);
+	spin_unlock_bh(&session->lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_ctask);
+
 /*
  * session lock must be held
  */
@@ -375,47 +402,28 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 		 */
 		conn->session->queued_cmdsn--;
 	else
-		conn->session->tt->cleanup_cmd_task(conn, ctask);
+		conn->session->tt->cleanup_task(conn, ctask);
 
 	sc->result = err;
+
 	if (!scsi_bidi_cmnd(sc))
 		scsi_set_resid(sc, scsi_bufflen(sc));
 	else {
 		scsi_out(sc)->resid = scsi_out(sc)->length;
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
+
 	if (conn->ctask == ctask)
 		conn->ctask = NULL;
 	/* release ref from queuecommand */
 	__iscsi_put_ctask(ctask);
 }
 
-/**
- * iscsi_free_mgmt_task - return mgmt task back to pool
- * @conn: iscsi connection
- * @mtask: mtask
- *
- * Must be called with session lock.
- */
-void iscsi_free_mgmt_task(struct iscsi_conn *conn,
-			  struct iscsi_mgmt_task *mtask)
-{
-	list_del_init(&mtask->running);
-	if (conn->login_mtask == mtask)
-		return;
-
-	if (conn->ping_mtask == mtask)
-		conn->ping_mtask = NULL;
-	__kfifo_put(conn->session->mgmtpool.queue,
-		    (void*)&mtask, sizeof(void*));
-}
-EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
-
-static int iscsi_prep_mtask(struct iscsi_conn *conn,
-			    struct iscsi_mgmt_task *mtask)
+static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+				struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_hdr *hdr = mtask->hdr;
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)ctask->hdr;
 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
 
 	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -429,7 +437,7 @@ static int iscsi_prep_mtask(struct iscsi_conn *conn,
 	 */
 	nop->cmdsn = cpu_to_be32(session->cmdsn);
 	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(mtask->itt, session->age);
+		hdr->itt = build_itt(ctask->itt, session->age);
 		/*
 		 * TODO: We always use immediate, so we never hit this.
 		 * If we start to send tmfs or nops as non-immediate then
@@ -442,25 +450,25 @@ static int iscsi_prep_mtask(struct iscsi_conn *conn,
 		}
 	}
 
-	if (session->tt->init_mgmt_task)
-		session->tt->init_mgmt_task(conn, mtask);
+	if (session->tt->init_task)
+		session->tt->init_task(ctask);
 
 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
 		session->state = ISCSI_STATE_LOGGING_OUT;
 
-	list_move_tail(&mtask->running, &conn->mgmt_run_list);
+	list_move_tail(&ctask->running, &conn->mgmt_run_list);
 	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
 		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
-		   mtask->data_count);
+		   ctask->data_count);
 	return 0;
 }
 
-static struct iscsi_mgmt_task *
+static struct iscsi_cmd_task *
 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		      char *data, uint32_t data_size)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_mgmt_task *mtask;
+	struct iscsi_cmd_task *ctask;
 
 	if (session->state == ISCSI_STATE_TERMINATE)
 		return NULL;
@@ -470,48 +478,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		/*
 		 * Login and Text are sent serially, in
 		 * request-followed-by-response sequence.
-		 * Same mtask can be used. Same ITT must be used.
-		 * Note that login_mtask is preallocated at conn_create().
+		 * Same task can be used. Same ITT must be used.
+		 * Note that login_task is preallocated at conn_create().
 		 */
-		mtask = conn->login_mtask;
+		ctask = conn->login_ctask;
 	else {
 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
-		if (!__kfifo_get(session->mgmtpool.queue,
-				 (void*)&mtask, sizeof(void*)))
+		if (!__kfifo_get(session->cmdpool.queue,
+				 (void*)&ctask, sizeof(void*)))
 			return NULL;
 
 		if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
 		     hdr->ttt == RESERVED_ITT) {
-			conn->ping_mtask = mtask;
+			conn->ping_ctask = ctask;
 			conn->last_ping = jiffies;
 		}
 	}
+	/*
+	 * released in complete pdu for task we expect a response for, and
+	 * released by the lld when it has transmitted the task for
+	 * pdus we do not expect a response for.
+	 */
+	atomic_set(&ctask->refcount, 1);
+	ctask->conn = conn;
+	ctask->sc = NULL;
 
 	if (data_size) {
-		memcpy(mtask->data, data, data_size);
-		mtask->data_count = data_size;
+		memcpy(ctask->data, data, data_size);
+		ctask->data_count = data_size;
 	} else
-		mtask->data_count = 0;
+		ctask->data_count = 0;
 
-	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
-	INIT_LIST_HEAD(&mtask->running);
-	list_add_tail(&mtask->running, &conn->mgmtqueue);
+	memcpy(ctask->hdr, hdr, sizeof(struct iscsi_hdr));
+	INIT_LIST_HEAD(&ctask->running);
+	list_add_tail(&ctask->running, &conn->mgmtqueue);
 
 	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
-		if (iscsi_prep_mtask(conn, mtask)) {
-			iscsi_free_mgmt_task(conn, mtask);
+		if (iscsi_prep_mgmt_task(conn, ctask)) {
+			__iscsi_put_ctask(ctask);
 			return NULL;
 		}
 
-		if (session->tt->xmit_mgmt_task(conn, mtask))
-			mtask = NULL;
+		if (session->tt->xmit_task(ctask))
+			ctask = NULL;
 
 	} else
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 
-	return mtask;
+	return ctask;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -538,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
  * @datalen: len of buffer
  *
  * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
- * then completes the command and task.
+ * then completes the command and ctask.
  **/
 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			       struct iscsi_cmd_task *ctask, char *data,
@@ -634,9 +650,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
-	struct iscsi_mgmt_task *mtask;
+	struct iscsi_cmd_task *ctask;
 
-	if (!rhdr && conn->ping_mtask)
+	if (!rhdr && conn->ping_ctask)
 		return;
 
 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -650,8 +666,8 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 	} else
 		hdr.ttt = RESERVED_ITT;
 
-	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-	if (!mtask)
+	ctask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+	if (!ctask)
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
 }
 
@@ -697,7 +713,6 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	struct iscsi_session *session = conn->session;
 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
 	struct iscsi_cmd_task *ctask;
-	struct iscsi_mgmt_task *mtask;
 	uint32_t itt;
 
 	conn->last_recv = jiffies;
@@ -710,93 +725,10 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	else
 		itt = ~0U;
 
-	if (itt < session->cmds_max) {
-		ctask = session->cmds[itt];
-
-		debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
-			   opcode, conn->id, ctask->itt, datalen);
-
-		switch(opcode) {
-		case ISCSI_OP_SCSI_CMD_RSP:
-			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-			iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
-					   datalen);
-			break;
-		case ISCSI_OP_SCSI_DATA_IN:
-			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-			if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-				conn->scsirsp_pdus_cnt++;
-				__iscsi_put_ctask(ctask);
-			}
-			break;
-		case ISCSI_OP_R2T:
-			/* LLD handles this for now */
-			break;
-		default:
-			rc = ISCSI_ERR_BAD_OPCODE;
-			break;
-		}
-	} else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
-		   itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
-		mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
-
-		debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
-			   opcode, conn->id, mtask->itt, datalen);
+	debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+		   opcode, conn->id, itt, datalen);
 
-		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
-		switch(opcode) {
-		case ISCSI_OP_LOGOUT_RSP:
-			if (datalen) {
-				rc = ISCSI_ERR_PROTO;
-				break;
-			}
-			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-			/* fall through */
-		case ISCSI_OP_LOGIN_RSP:
-		case ISCSI_OP_TEXT_RSP:
-			/*
-			 * login related PDU's exp_statsn is handled in
-			 * userspace
-			 */
-			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
-				rc = ISCSI_ERR_CONN_FAILED;
-			iscsi_free_mgmt_task(conn, mtask);
-			break;
-		case ISCSI_OP_SCSI_TMFUNC_RSP:
-			if (datalen) {
-				rc = ISCSI_ERR_PROTO;
-				break;
-			}
-
-			iscsi_tmf_rsp(conn, hdr);
-			iscsi_free_mgmt_task(conn, mtask);
-			break;
-		case ISCSI_OP_NOOP_IN:
-			if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
-			    datalen) {
-				rc = ISCSI_ERR_PROTO;
-				break;
-			}
-			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-
-			if (conn->ping_mtask != mtask) {
-				/*
-				 * If this is not in response to one of our
-				 * nops then it must be from userspace.
-				 */
-				if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
-						   datalen))
-					rc = ISCSI_ERR_CONN_FAILED;
-			} else
-				mod_timer(&conn->transport_timer,
-					  jiffies + conn->recv_timeout);
-			iscsi_free_mgmt_task(conn, mtask);
-			break;
-		default:
-			rc = ISCSI_ERR_BAD_OPCODE;
-			break;
-		}
-	} else if (itt == ~0U) {
+	if (itt == ~0U) {
 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
 
 		switch(opcode) {
@@ -823,9 +755,88 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			rc = ISCSI_ERR_BAD_OPCODE;
 			break;
 		}
-	} else
-		rc = ISCSI_ERR_BAD_ITT;
+		goto out;
+	}
+
+	ctask = session->cmds[itt];
+	switch(opcode) {
+	case ISCSI_OP_SCSI_CMD_RSP:
+		if (!ctask->sc) {
+			rc = ISCSI_ERR_NO_SCSI_CMD;
+			break;
+		}
+		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
+		iscsi_scsi_cmd_rsp(conn, hdr, ctask, data, datalen);
+		break;
+	case ISCSI_OP_SCSI_DATA_IN:
+		if (!ctask->sc) {
+			rc = ISCSI_ERR_NO_SCSI_CMD;
+			break;
+		}
+		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
+		if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+			conn->scsirsp_pdus_cnt++;
+			iscsi_update_cmdsn(session,
+					   (struct iscsi_nopin*) hdr);
+			__iscsi_put_ctask(ctask);
+		}
+		break;
+	case ISCSI_OP_R2T:
+		/* LLD handles this for now */
+		break;
+	case ISCSI_OP_LOGOUT_RSP:
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+		if (datalen) {
+			rc = ISCSI_ERR_PROTO;
+			break;
+		}
+		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+		goto recv_pdu;
+	case ISCSI_OP_LOGIN_RSP:
+	case ISCSI_OP_TEXT_RSP:
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+		/*
+		 * login related PDU's exp_statsn is handled in
+		 * userspace
+		 */
+		goto recv_pdu;
+	case ISCSI_OP_SCSI_TMFUNC_RSP:
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+		if (datalen) {
+			rc = ISCSI_ERR_PROTO;
+			break;
+		}
+
+		iscsi_tmf_rsp(conn, hdr);
+		__iscsi_put_ctask(ctask);
+		break;
+	case ISCSI_OP_NOOP_IN:
+		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+			rc = ISCSI_ERR_PROTO;
+			break;
+		}
+		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+
+		if (conn->ping_ctask != ctask)
+			/*
+			 * If this is not in response to one of our
+			 * nops then it must be from userspace.
+			 */
+			goto recv_pdu;
+		__iscsi_put_ctask(ctask);
+		break;
+	default:
+		rc = ISCSI_ERR_BAD_OPCODE;
+		break;
+	}
 
+out:
+	return rc;
+recv_pdu:
+	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+		rc = ISCSI_ERR_CONN_FAILED;
+	__iscsi_put_ctask(ctask);
 	return rc;
 }
 
@@ -845,6 +856,7 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
 	struct iscsi_cmd_task *ctask;
+	uint32_t i;
 
 	if (itt == RESERVED_ITT)
 		return 0;
@@ -858,25 +870,22 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 		return ISCSI_ERR_BAD_ITT;
 	}
 
-	if (itt < session->cmds_max) {
-		ctask = session->cmds[itt];
-
-		if (!ctask->sc) {
-			iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
-					  "with itt 0x%x\n", ctask->itt);
-			/* force drop */
-			return ISCSI_ERR_NO_SCSI_CMD;
-		}
-
-		if (ctask->sc->SCp.phase != session->age) {
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "iscsi: ctask's session age %d, "
-					  "expected %d\n", ctask->sc->SCp.phase,
-					  session->age);
-			return ISCSI_ERR_SESSION_FAILED;
-		}
+	i = get_itt(itt);
+	if (i >= session->cmds_max) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "received invalid itt index %u (max cmds "
+				  "%u).\n", i, session->cmds_max);
+		return ISCSI_ERR_BAD_ITT;
 	}
 
+	ctask = session->cmds[i];
+	if (ctask->sc && ctask->sc->SCp.phase != session->age) {
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "iscsi: ctask's session age %d, "
+				  "expected %d\n", ctask->sc->SCp.phase,
+				  session->age);
+		return ISCSI_ERR_SESSION_FAILED;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
@@ -929,20 +938,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
 
-static int iscsi_xmit_mtask(struct iscsi_conn *conn)
-{
-	int rc;
-
-	spin_unlock_bh(&conn->session->lock);
-	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
-	spin_lock_bh(&conn->session->lock);
-	if (rc)
-		return rc;
-	/* done with this in-progress mtask */
-	conn->mtask = NULL;
-	return 0;
-}
-
 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 {
 	struct iscsi_session *session = conn->session;
@@ -967,7 +962,7 @@ static int iscsi_xmit_ctask(struct iscsi_conn *conn)
 
 	__iscsi_get_ctask(ctask);
 	spin_unlock_bh(&conn->session->lock);
-	rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+	rc = conn->session->tt->xmit_task(ctask);
 	spin_lock_bh(&conn->session->lock);
 	__iscsi_put_ctask(ctask);
 	if (!rc)
@@ -1015,12 +1010,6 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 
 	if (conn->ctask) {
 		rc = iscsi_xmit_ctask(conn);
-		if (rc)
-			goto again;
-	}
-
-	if (conn->mtask) {
-		rc = iscsi_xmit_mtask(conn);
 	        if (rc)
 		        goto again;
 	}
@@ -1032,14 +1021,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 */
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
-		conn->mtask = list_entry(conn->mgmtqueue.next,
-					 struct iscsi_mgmt_task, running);
-		if (iscsi_prep_mtask(conn, conn->mtask)) {
-			iscsi_free_mgmt_task(conn, conn->mtask);
-			conn->mtask = NULL;
+		conn->ctask = list_entry(conn->mgmtqueue.next,
+					 struct iscsi_cmd_task, running);
+		if (iscsi_prep_mgmt_task(conn, conn->ctask)) {
+			__iscsi_put_ctask(conn->ctask);
+			conn->ctask = NULL;
 			continue;
 		}
-		rc = iscsi_xmit_mtask(conn);
+		rc = iscsi_xmit_ctask(conn);
 		if (rc)
 			goto again;
 	}
@@ -1224,7 +1213,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 			iscsi_complete_command(ctask);
 			goto fault;
 		}
-		if (session->tt->xmit_cmd_task(conn, ctask)) {
+		if (session->tt->xmit_task(ctask)) {
 			sc->scsi_done = NULL;
 			iscsi_complete_command(ctask);
 			reason = FAILURE_SESSION_NOT_READY;
@@ -1347,16 +1336,16 @@ static void iscsi_tmf_timedout(unsigned long data)
 	spin_unlock(&session->lock);
 }
 
-static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+static int iscsi_exec_ctask_mgmt_fn(struct iscsi_conn *conn,
 				   struct iscsi_tm *hdr, int age,
 				   int timeout)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_mgmt_task *mtask;
+	struct iscsi_cmd_task *ctask;
 
-	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+	ctask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				      NULL, 0);
-	if (!mtask) {
+	if (!ctask) {
 		spin_unlock_bh(&session->lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		spin_lock_bh(&session->lock);
@@ -1390,7 +1379,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
-	/* if the session drops it will clean up the mtask */
+	/* if the session drops it will clean up the ctask */
 	if (age != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
 		return -ENOTCONN;
@@ -1497,7 +1486,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 			   jiffies))
 		rc = EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
-	if (conn->ping_mtask)
+	if (conn->ping_ctask)
 		rc = EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
@@ -1521,7 +1510,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
 	recv_timeout *= HZ;
 	last_recv = conn->last_recv;
-	if (conn->ping_mtask &&
+	if (conn->ping_ctask &&
 	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
 			   jiffies)) {
 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1547,7 +1536,7 @@ done:
 	spin_unlock(&session->lock);
 }
 
-static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+static void iscsi_prep_abort_ctask_pdu(struct iscsi_cmd_task *ctask,
 				      struct iscsi_tm *hdr)
 {
 	memset(hdr, 0, sizeof(*hdr));
@@ -1619,9 +1608,9 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->tmf_state = TMF_QUEUED;
 
 	hdr = &conn->tmhdr;
-	iscsi_prep_abort_task_pdu(ctask, hdr);
+	iscsi_prep_abort_ctask_pdu(ctask, hdr);
 
-	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+	if (iscsi_exec_ctask_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
 		rc = FAILED;
 		goto failed;
 	}
@@ -1631,7 +1620,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		spin_unlock_bh(&session->lock);
 		iscsi_suspend_tx(conn);
 		/*
-		 * clean up task if aborted. grab the recv lock as a writer
+		 * clean up ctask if aborted. grab the recv lock as a writer
 		 */
 		write_lock_bh(conn->recv_lock);
 		spin_lock(&session->lock);
@@ -1716,7 +1705,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	hdr = &conn->tmhdr;
 	iscsi_prep_lun_reset_pdu(sc, hdr);
 
-	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+	if (iscsi_exec_ctask_mgmt_fn(conn, hdr, session->age,
 				    session->lu_reset_timeout)) {
 		rc = FAILED;
 		goto unlock;
@@ -1897,8 +1886,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
  * @iscsit: iscsi transport template
  * @shost: scsi host
  * @cmds_max: session can queue
- * @cmd_task_size: LLD ctask private data size
- * @mgmt_task_size: LLD mtask private data size
+ * @cmd_ctask_size: LLD ctask private data size
  * @initial_cmdsn: initial CmdSN
  *
  * This can be used by software iscsi_transports that allocate
@@ -1906,22 +1894,26 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
  */
 struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
-		    uint16_t cmds_max, int cmd_task_size, int mgmt_task_size,
+		    uint16_t scsi_cmds_max, int cmd_ctask_size,
 		    uint32_t initial_cmdsn)
 {
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
-	int cmd_i;
+	int cmd_i, cmds_max;
 
-	if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
-	    cmds_max < 2) {
-		if (cmds_max != 0)
-			printk(KERN_ERR "iscsi: invalid can_queue of %d. "
-			       "can_queue must be a power of 2 and between "
-			       "2 and %d - setting to %d.\n", cmds_max,
-			       ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
-		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+	/*
+	 * The iscsi layer needs some ctasks for nop handling and tmfs.
+	 */
+	if (scsi_cmds_max < 1)
+		scsi_cmds_max = ISCSI_MGMT_CMDS_MAX;
+	if ((scsi_cmds_max + ISCSI_MGMT_CMDS_MAX) >= ISCSI_MGMT_ITT_OFFSET) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. "
+		       "can_queue must be less than %d.\n",
+		       scsi_cmds_max,
+		       ISCSI_MGMT_ITT_OFFSET - ISCSI_MGMT_CMDS_MAX);
+		scsi_cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
 	}
+	cmds_max = roundup_pow_of_two(scsi_cmds_max + ISCSI_MGMT_CMDS_MAX);
 
 	cls_session = iscsi_alloc_session(shost, iscsit,
 					  sizeof(struct iscsi_session));
@@ -1934,7 +1926,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	session->fast_abort = 1;
 	session->lu_reset_timeout = 15;
 	session->abort_timeout = 10;
-	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
+	session->scsi_cmds_max = scsi_cmds_max;
 	session->cmds_max = cmds_max;
 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
 	session->exp_cmdsn = initial_cmdsn + 1;
@@ -1947,36 +1939,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
 			    (void***)&session->cmds,
-			    cmd_task_size + sizeof(struct iscsi_cmd_task)))
+			    cmd_ctask_size + sizeof(struct iscsi_cmd_task)))
 		goto cmdpool_alloc_fail;
 
 	/* pre-format cmds pool with ITT */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
 
-		if (cmd_task_size)
+		if (cmd_ctask_size)
 			ctask->dd_data = &ctask[1];
 		ctask->itt = cmd_i;
 		INIT_LIST_HEAD(&ctask->running);
 	}
 
-	/* initialize immediate command pool */
-	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
-			   (void***)&session->mgmt_cmds,
-			   mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
-		goto mgmtpool_alloc_fail;
-
-
-	/* pre-format immediate cmds pool with ITT */
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-
-		if (mgmt_task_size)
-			mtask->dd_data = &mtask[1];
-		mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
-		INIT_LIST_HEAD(&mtask->running);
-	}
-
 	if (!try_module_get(iscsit->owner))
 		goto module_get_fail;
 
@@ -1987,8 +1962,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 cls_session_fail:
 	module_put(iscsit->owner);
 module_get_fail:
-	iscsi_pool_free(&session->mgmtpool);
-mgmtpool_alloc_fail:
 	iscsi_pool_free(&session->cmdpool);
 cmdpool_alloc_fail:
 	iscsi_free_session(cls_session);
@@ -2008,7 +1981,6 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	struct iscsi_session *session = cls_session->dd_data;
 	struct module *owner = cls_session->transport->owner;
 
-	iscsi_pool_free(&session->mgmtpool);
 	iscsi_pool_free(&session->cmdpool);
 
 	kfree(session->password);
@@ -2063,30 +2035,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->requeue);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
-	/* allocate login_mtask used for the login/text sequences */
+	/* allocate login_ctask used for the login/text sequences */
 	spin_lock_bh(&session->lock);
-	if (!__kfifo_get(session->mgmtpool.queue,
-                         (void*)&conn->login_mtask,
+	if (!__kfifo_get(session->cmdpool.queue,
+                         (void*)&conn->login_ctask,
 			 sizeof(void*))) {
 		spin_unlock_bh(&session->lock);
-		goto login_mtask_alloc_fail;
+		goto login_ctask_alloc_fail;
 	}
 	spin_unlock_bh(&session->lock);
 
 	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
 	if (!data)
-		goto login_mtask_data_alloc_fail;
-	conn->login_mtask->data = conn->data = data;
+		goto login_ctask_data_alloc_fail;
+	conn->login_ctask->data = conn->data = data;
 
 	init_timer(&conn->tmf_timer);
 	init_waitqueue_head(&conn->ehwait);
 
 	return cls_conn;
 
-login_mtask_data_alloc_fail:
-	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+login_ctask_data_alloc_fail:
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_ctask,
 		    sizeof(void*));
-login_mtask_alloc_fail:
+login_ctask_alloc_fail:
 	iscsi_destroy_conn(cls_conn);
 	return NULL;
 }
@@ -2146,7 +2118,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	spin_lock_bh(&session->lock);
 	kfree(conn->data);
 	kfree(conn->persistent_address);
-	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_ctask,
 		    sizeof(void*));
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
@@ -2227,21 +2199,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
 static void
 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-	struct iscsi_mgmt_task *mtask, *tmp;
+	struct iscsi_cmd_task *ctask, *tmp;
 
 	/* handle pending */
-	list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
-		debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
-		iscsi_free_mgmt_task(conn, mtask);
+	list_for_each_entry_safe(ctask, tmp, &conn->mgmtqueue, running) {
+		debug_scsi("flushing pending mgmt ctask itt 0x%x\n", ctask->itt);
+		/* release ref from prep ctask */
+		__iscsi_put_ctask(ctask);
 	}
 
 	/* handle running */
-	list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
-		debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
-		iscsi_free_mgmt_task(conn, mtask);
+	list_for_each_entry_safe(ctask, tmp, &conn->mgmt_run_list, running) {
+		debug_scsi("flushing running mgmt ctask itt 0x%x\n", ctask->itt);
+		/* release ref from prep ctask */
+		__iscsi_put_ctask(ctask);
 	}
 
-	conn->mtask = NULL;
+	conn->ctask = NULL;
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2272,7 +2246,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 
 	/*
 	 * When this is called for the in_login state, we only want to clean
-	 * up the login task and connection. We do not need to block and set
+	 * up the login ctask and connection. We do not need to block and set
 	 * the recovery state again
 	 */
 	if (flag == STOP_CONN_TERM)
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 9be6a70faff5..d1c36759b350 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -85,18 +85,6 @@ enum {
 	ISCSI_DIGEST_SIZE = sizeof(__u32),
 };
 
-struct iscsi_mgmt_task {
-	/*
-	 * Becuae LLDs allocate their hdr differently, this is a pointer to
-	 * that storage. It must be setup at session creation time.
-	 */
-	struct iscsi_hdr	*hdr;
-	char			*data;		/* mgmt payload */
-	unsigned		data_count;	/* counts data to be sent */
-	uint32_t		itt;		/* this ITT */
-	void			*dd_data;	/* driver/transport data */
-	struct list_head	running;
-};
 
 enum {
 	ISCSI_TASK_COMPLETED,
@@ -121,6 +109,7 @@ struct iscsi_cmd_task {
 	/* offset in unsolicited stream (bytes); */
 	unsigned		unsol_offset;
 	unsigned		data_count;	/* remaining Data-Out */
+	char			*data;		/* mgmt payload */
 	struct scsi_cmnd	*sc;		/* associated SCSI cmd*/
 	struct iscsi_conn	*conn;		/* used connection    */
 
@@ -162,7 +151,7 @@ struct iscsi_conn {
 	unsigned long		last_ping;
 	int			ping_timeout;
 	int			recv_timeout;
-	struct iscsi_mgmt_task	*ping_mtask;
+	struct iscsi_cmd_task 	*ping_ctask;
 
 	/* iSCSI connection-wide sequencing */
 	uint32_t		exp_statsn;
@@ -178,9 +167,8 @@ struct iscsi_conn {
 	 * should always fit in this buffer
 	 */
 	char			*data;
-	struct iscsi_mgmt_task	*login_mtask;	/* mtask used for login/text */
-	struct iscsi_mgmt_task	*mtask;		/* xmit mtask in progress */
-	struct iscsi_cmd_task	*ctask;		/* xmit ctask in progress */
+	struct iscsi_cmd_task 	*login_ctask;	/* ctask used for login/text */
+	struct iscsi_cmd_task	*ctask;		/* xmit task in progress */
 
 	/* xmit */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
@@ -295,12 +283,10 @@ struct iscsi_session {
 	int			state;		/* session state           */
 	int			age;		/* counts session re-opens */
 
+	int			scsi_cmds_max; 	/* max scsi commands */
 	int			cmds_max;	/* size of cmds array */
 	struct iscsi_cmd_task	**cmds;		/* Original Cmds arr */
 	struct iscsi_pool	cmdpool;	/* PDU's pool */
-	int			mgmtpool_max;	/* size of mgmt array */
-	struct iscsi_mgmt_task	**mgmt_cmds;	/* Original mgmt arr */
-	struct iscsi_pool	mgmtpool;	/* Mgmt PDU's pool */
 };
 
 struct iscsi_host {
@@ -345,7 +331,7 @@ extern void iscsi_host_free(struct Scsi_Host *shost);
  */
 extern struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
-		    uint16_t, int, int, uint32_t);
+		    uint16_t, int, uint32_t);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
@@ -388,8 +374,7 @@ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
-extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
-				 struct iscsi_mgmt_task *mtask);
+extern void iscsi_put_ctask(struct iscsi_cmd_task *ctask);
 
 /*
  * generic helpers
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 4028f121d548..3f24503dfdf9 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -34,7 +34,6 @@ struct Scsi_Host;
 struct iscsi_cls_conn;
 struct iscsi_conn;
 struct iscsi_cmd_task;
-struct iscsi_mgmt_task;
 struct sockaddr;
 
 /**
@@ -58,19 +57,22 @@ struct sockaddr;
  * @stop_conn:		suspend/recover/terminate connection
  * @send_pdu:		send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
  * @session_recovery_timedout: notify LLD a block during recovery timed out
- * @init_cmd_task:	Initialize a iscsi_cmd_task and any internal structs.
- *			Called from queuecommand with session lock held.
- * @init_mgmt_task:	Initialize a iscsi_mgmt_task and any internal structs.
- *			Called from iscsi_conn_send_generic with xmitmutex.
- * @xmit_cmd_task:	Requests LLD to transfer cmd task. Returns 0 or the
+ * @init_task:		Initialize an iscsi_task and any internal structs.
+ *			When offloading the data path, this is called from
+ *			queuecommand with the session lock, or from the
+ *			iscsi_conn_send_pdu context with the session lock.
+ *			When not offloading the data path, this is called
+ *			from the scsi work queue without the session lock.
+ * @xmit_task:		Requests LLD to transfer cmd task. Returns 0 or
  *			the number of bytes transferred on success, and -Exyz
- *			value on error.
- * @xmit_mgmt_task:	Requests LLD to transfer mgmt task. Returns 0 or the
- *			the number of bytes transferred on success, and -Exyz
- *			value on error.
- * @cleanup_cmd_task:	requests LLD to fail cmd task. Called with xmitmutex
- *			and session->lock after the connection has been
- *			suspended and terminated during recovery. If called
+ *			value on error. When offloading the data path, this
+ *			is called from queuecommand with the session lock, or
+ *			from the iscsi_conn_send_pdu context with the session
+ *			lock. When not offloading the data path, this is called
+ *			from the scsi work queue without the session lock.
+ * @cleanup_task:	requests LLD to fail task. Called with session lock
+ *			and after the connection has been suspended and
+ *			terminated during recovery. If called
  *			from abort task then connection is not suspended
  *			or terminated but sk_callback_lock is held
  *
@@ -110,15 +112,10 @@ struct iscsi_transport {
 			 char *data, uint32_t data_size);
 	void (*get_stats) (struct iscsi_cls_conn *conn,
 			   struct iscsi_stats *stats);
-	int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
-	void (*init_mgmt_task) (struct iscsi_conn *conn,
-				struct iscsi_mgmt_task *mtask);
-	int (*xmit_cmd_task) (struct iscsi_conn *conn,
-			      struct iscsi_cmd_task *ctask);
-	void (*cleanup_cmd_task) (struct iscsi_conn *conn,
-				  struct iscsi_cmd_task *ctask);
-	int (*xmit_mgmt_task) (struct iscsi_conn *conn,
-			       struct iscsi_mgmt_task *mtask);
+	int (*init_task) (struct iscsi_cmd_task *task);
+	int (*xmit_task) (struct iscsi_cmd_task *task);
+	void (*cleanup_task) (struct iscsi_conn *conn,
+				  struct iscsi_cmd_task *task);
 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
 	int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
 			   uint64_t *ep_handle);
-- 
cgit v1.2.3


From fbc514b4e262bc0596faae8640ebc0b9142a1cdd Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:07 -0500
Subject: [SCSI] iscsi_tcp: convert iscsi_tcp to support merged tasks

Convert iscsi_tcp to the merged task model: the separate mgmt task pool
and struct iscsi_tcp_mgmt_task go away, and management PDUs (login/text,
nops, tmfs) are prepared and transmitted through the same ctask
callbacks as SCSI commands.
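
Each callback tells the two cases apart by whether a scsi_cmnd is
attached to the ctask. A condensed sketch of the pattern, drawn from the
hunks below (illustrative only, not the complete function):

	static int iscsi_tcp_task_init(struct iscsi_cmd_task *ctask)
	{
		struct iscsi_conn *conn = ctask->conn;

		if (!ctask->sc) {
			/* mgmt PDU: no scatterlist, just prep the header
			 * and any immediate data payload */
			iscsi_tcp_send_hdr_prep(conn, ctask->hdr,
						sizeof(*ctask->hdr));
			if (ctask->data_count)
				iscsi_tcp_send_linear_data_prepare(conn,
						ctask->data,
						ctask->data_count);
			return 0;
		}

		/* ctask->sc != NULL: the usual SCSI READ/WRITE setup in
		 * the full function runs here, unchanged by this patch */
		return 0;
	}

The same NULL-sc check drives iscsi_tcp_cleanup_task and
iscsi_tcp_task_xmit, which is what lets the dedicated mtask init/xmit
paths be deleted.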

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/iscsi_tcp.c | 130 ++++++++++++++++++++---------------------------
 drivers/scsi/iscsi_tcp.h |   5 --
 2 files changed, 54 insertions(+), 81 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index f2a08f7ed902..517bad160bea 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -498,11 +498,15 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  * must be called with session lock
  */
 static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct iscsi_r2t_info *r2t;
 
+	/* nothing to do for mgmt ctasks */
+	if (!ctask->sc)
+		return;
+
 	/* flush ctask's r2t queues */
 	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
 		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
@@ -521,7 +525,7 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 /**
  * iscsi_data_rsp - SCSI Data-In Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @ctask: scsi command ctask
  **/
 static int
 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
@@ -578,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 /**
  * iscsi_solicit_data_init - initialize first Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @ctask: scsi command ctask
  * @r2t: R2T info
  *
  * Notes:
@@ -620,7 +624,7 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 /**
  * iscsi_r2t_rsp - iSCSI R2T Response processing
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @ctask: scsi command ctask
  **/
 static int
 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
@@ -646,7 +650,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		return ISCSI_ERR_R2TSN;
 	}
 
-	/* fill-in new R2T associated with the task */
+	/* fill-in new R2T associated with the ctask */
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 
 	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
@@ -769,6 +773,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!ctask)
 			return ISCSI_ERR_BAD_ITT;
+		if (!ctask->sc)
+			return ISCSI_ERR_NO_SCSI_CMD;
 
 		spin_lock(&conn->session->lock);
 		rc = iscsi_data_rsp(conn, ctask);
@@ -815,6 +821,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!ctask)
 			return ISCSI_ERR_BAD_ITT;
+		if (!ctask->sc)
+			return ISCSI_ERR_NO_SCSI_CMD;
 
 		if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
@@ -1194,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 
 	/* If header digest is enabled, compute the CRC and
 	 * place the digest into the same buffer. We make
-	 * sure that both iscsi_tcp_ctask and mtask have
+	 * sure that ctasks for both SCSI and mgmt PDUs have
 	 * sufficient room.
 	 */
 	if (conn->hdrdgst_en) {
@@ -1269,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 /**
  * iscsi_solicit_data_cont - initialize next Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @ctask: scsi command ctask
  * @r2t: R2T info
  * @left: bytes left to transfer
  *
@@ -1316,19 +1324,37 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 }
 
 /**
- * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  * @conn: iscsi connection
- * @ctask: scsi command task
+ * @ctask: scsi command ctask
  * @sc: scsi command
  **/
 static int
-iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_init(struct iscsi_cmd_task *ctask)
 {
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct iscsi_conn *conn = ctask->conn;
 	struct scsi_cmnd *sc = ctask->sc;
 	int err;
 
+	if (!sc) {
+		/*
+		 * mgmt ctasks do not have a scatterlist since they come
+		 * in from the iscsi interface.
+		 */
+		debug_scsi("mgmt ctask deq [cid %d itt 0x%x]\n", conn->id,
+			   ctask->itt);
+
+		/* Prepare PDU, optionally w/ immediate data */
+		iscsi_tcp_send_hdr_prep(conn, ctask->hdr, sizeof(*ctask->hdr));
+
+		/* If we have immediate data, attach a payload */
+		if (ctask->data_count)
+			iscsi_tcp_send_linear_data_prepare(conn, ctask->data,
+							   ctask->data_count);
+		return 0;
+	}
+
 	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
 	tcp_ctask->sent = 0;
 	tcp_ctask->exp_datasn = 0;
@@ -1353,52 +1379,21 @@ iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
 	return 0;
 }
 
-/**
- * iscsi_tcp_mtask_xmit - xmit management(immediate) task
- * @conn: iscsi connection
- * @mtask: task management task
- *
- * Notes:
- *	The function can return -EAGAIN in which case caller must
- *	call it again later, or recover. '0' return code means successful
- *	xmit.
- **/
-static int
-iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-	int rc;
-
-	/* Flush any pending data first. */
-	rc = iscsi_tcp_flush(conn);
-	if (rc < 0)
-		return rc;
-
-	if (mtask->hdr->itt == RESERVED_ITT) {
-		struct iscsi_session *session = conn->session;
-
-		spin_lock_bh(&session->lock);
-		iscsi_free_mgmt_task(conn, mtask);
-		spin_unlock_bh(&session->lock);
-	}
-
-	return 0;
-}
-
 /*
- * iscsi_tcp_ctask_xmit - xmit normal PDU task
- * @conn: iscsi connection
- * @ctask: iscsi command task
+ * iscsi_tcp_task_xmit - xmit normal PDU ctask
+ * @ctask: iscsi command ctask
  *
  * We're expected to return 0 when everything was transmitted succesfully,
  * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  * of error.
  */
 static int
-iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_xmit(struct iscsi_cmd_task *ctask)
 {
+	struct iscsi_conn *conn = ctask->conn;
 	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
 	struct scsi_cmnd *sc = ctask->sc;
-	struct scsi_data_buffer *sdb = scsi_out(sc);
+	struct scsi_data_buffer *sdb;
 	int rc = 0;
 
 flush:
@@ -1407,10 +1402,18 @@ flush:
 	if (rc < 0)
 		return rc;
 
+	/* mgmt command */
+	if (!sc) {
+		if (ctask->hdr->itt == RESERVED_ITT)
+			iscsi_put_ctask(ctask);
+		return 0;
+	}
+
 	/* Are we done already? */
 	if (sc->sc_data_direction != DMA_TO_DEVICE)
 		return 0;
 
+	sdb = scsi_out(sc);
 	if (ctask->unsol_count != 0) {
 		struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
 
@@ -1688,21 +1691,6 @@ free_socket:
 	return err;
 }
 
-/* called with host lock */
-static void
-iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
-	debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
-
-	/* Prepare PDU, optionally w/ immediate data */
-	iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
-
-	/* If we have immediate data, attach a payload */
-	if (mtask->data_count)
-		iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
-						   mtask->data_count);
-}
-
 static int
 iscsi_r2tpool_alloc(struct iscsi_session *session)
 {
@@ -1710,7 +1698,7 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 	int cmd_i;
 
 	/*
-	 * initialize per-task: R2T pool and xmit queue
+	 * initialize per-ctask: R2T pool and xmit queue
 	 */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
 	        struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
@@ -1880,13 +1868,12 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 
 	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
 					  sizeof(struct iscsi_tcp_cmd_task),
-					  sizeof(struct iscsi_tcp_mgmt_task),
 					  initial_cmdsn);
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
 
-	shost->can_queue = session->cmds_max;
+	shost->can_queue = session->scsi_cmds_max;
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
 		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
@@ -1895,13 +1882,6 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
 	}
 
-	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
-		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
-
-		mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
-	}
-
 	if (iscsi_r2tpool_alloc(session))
 		goto remove_session;
 	return cls_session;
@@ -1999,11 +1979,9 @@ static struct iscsi_transport iscsi_tcp_transport = {
 	/* IO */
 	.send_pdu		= iscsi_conn_send_pdu,
 	.get_stats		= iscsi_conn_get_stats,
-	.init_cmd_task		= iscsi_tcp_ctask_init,
-	.init_mgmt_task		= iscsi_tcp_mtask_init,
-	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
-	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
-	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
+	.init_task		= iscsi_tcp_task_init,
+	.xmit_task		= iscsi_tcp_task_xmit,
+	.cleanup_task		= iscsi_tcp_cleanup_task,
 	/* recovery */
 	.session_recovery_timedout = iscsi_session_recovery_timedout,
 };
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ed0b991d1e72..c9c8633c41a6 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -103,11 +103,6 @@ struct iscsi_data_task {
 	char			hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
 };
 
-struct iscsi_tcp_mgmt_task {
-	struct iscsi_hdr	hdr;
-	char			hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
-};
-
 struct iscsi_r2t_info {
 	__be32			ttt;		/* copied from R2T */
 	__be32			exp_statsn;	/* copied from R2T */
-- 
cgit v1.2.3


From 9c19a7d0387124a508d2cdb38ebf8cd484631ad0 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:09 -0500
Subject: [SCSI] libiscsi: rename iscsi_cmd_task to iscsi_task

This is the second part of the iscsi task merging, and
all it does is rename iscsi_cmd_task to iscsi_task and
mtask/ctask to just task.
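
For an LLD the change is mechanical. A before/after sketch of the
library calls this patch renames (taken from the hunks below):

	/* before */
	void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
	void iscsi_put_ctask(struct iscsi_cmd_task *ctask);

	/* after */
	void iscsi_requeue_task(struct iscsi_task *task);
	void iscsi_put_task(struct iscsi_task *task);

iscsi_itt_to_ctask keeps its name here but now returns a
struct iscsi_task *.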

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/libiscsi.c             | 514 ++++++++++++++++++------------------
 include/scsi/libiscsi.h             |  22 +-
 include/scsi/scsi_transport_iscsi.h |   8 +-
 3 files changed, 273 insertions(+), 271 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ef92b1b0f16e..92ee6d94aaf9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -88,61 +88,61 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
 
-void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
 				   struct iscsi_data *hdr)
 {
-	struct iscsi_conn *conn = ctask->conn;
+	struct iscsi_conn *conn = task->conn;
 
 	memset(hdr, 0, sizeof(struct iscsi_data));
 	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
-	hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
-	ctask->unsol_datasn++;
+	hdr->datasn = cpu_to_be32(task->unsol_datasn);
+	task->unsol_datasn++;
 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
 
-	hdr->itt = ctask->hdr->itt;
+	hdr->itt = task->hdr->itt;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-	hdr->offset = cpu_to_be32(ctask->unsol_offset);
+	hdr->offset = cpu_to_be32(task->unsol_offset);
 
-	if (ctask->unsol_count > conn->max_xmit_dlength) {
+	if (task->unsol_count > conn->max_xmit_dlength) {
 		hton24(hdr->dlength, conn->max_xmit_dlength);
-		ctask->data_count = conn->max_xmit_dlength;
-		ctask->unsol_offset += ctask->data_count;
+		task->data_count = conn->max_xmit_dlength;
+		task->unsol_offset += task->data_count;
 		hdr->flags = 0;
 	} else {
-		hton24(hdr->dlength, ctask->unsol_count);
-		ctask->data_count = ctask->unsol_count;
+		hton24(hdr->dlength, task->unsol_count);
+		task->data_count = task->unsol_count;
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	}
 }
 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
 
-static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
 {
-	unsigned exp_len = ctask->hdr_len + len;
+	unsigned exp_len = task->hdr_len + len;
 
-	if (exp_len > ctask->hdr_max) {
+	if (exp_len > task->hdr_max) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
 	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
-	ctask->hdr_len = exp_len;
+	task->hdr_len = exp_len;
 	return 0;
 }
 
 /*
  * make an extended cdb AHS
  */
-static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
 {
-	struct scsi_cmnd *cmd = ctask->sc;
+	struct scsi_cmnd *cmd = task->sc;
 	unsigned rlen, pad_len;
 	unsigned short ahslength;
 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
 	int rc;
 
-	ecdb_ahdr = iscsi_next_hdr(ctask);
+	ecdb_ahdr = iscsi_next_hdr(task);
 	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
 
 	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
@@ -150,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
 
 	pad_len = iscsi_padding(rlen);
 
-	rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
+	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
 	                   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
 	if (rc)
 		return rc;
@@ -165,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
 
 	debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
 		   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
-		   cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
+		   cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
 
 	return 0;
 }
 
-static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
 {
-	struct scsi_cmnd *sc = ctask->sc;
+	struct scsi_cmnd *sc = task->sc;
 	struct iscsi_rlength_ahdr *rlen_ahdr;
 	int rc;
 
-	rlen_ahdr = iscsi_next_hdr(ctask);
-	rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
+	rlen_ahdr = iscsi_next_hdr(task);
+	rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
 	if (rc)
 		return rc;
 
@@ -197,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
 
 /**
  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
- * @ctask: iscsi task
+ * @task: iscsi task
  *
  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
  * fields like dlength or final based on how much data it sends
  */
-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
+	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd *hdr = ctask->hdr;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct iscsi_cmd *hdr = task->hdr;
+	struct scsi_cmnd *sc = task->sc;
 	unsigned hdrlength, cmd_len;
 	int rc;
 
-	ctask->hdr_len = 0;
-	rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+	task->hdr_len = 0;
+	rc = iscsi_add_hdr(task, sizeof(*hdr));
 	if (rc)
 		return rc;
 	hdr->opcode = ISCSI_OP_SCSI_CMD;
 	hdr->flags = ISCSI_ATTR_SIMPLE;
 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
-	hdr->itt = build_itt(ctask->itt, session->age);
+	hdr->itt = build_itt(task->itt, session->age);
 	hdr->cmdsn = cpu_to_be32(session->cmdsn);
 	session->cmdsn++;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
@@ -226,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	if (cmd_len < ISCSI_CDB_SIZE)
 		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
 	else if (cmd_len > ISCSI_CDB_SIZE) {
-		rc = iscsi_prep_ecdb_ahs(ctask);
+		rc = iscsi_prep_ecdb_ahs(task);
 		if (rc)
 			return rc;
 		cmd_len = ISCSI_CDB_SIZE;
 	}
 	memcpy(hdr->cdb, sc->cmnd, cmd_len);
 
-	ctask->imm_count = 0;
+	task->imm_count = 0;
 	if (scsi_bidi_cmnd(sc)) {
 		hdr->flags |= ISCSI_FLAG_CMD_READ;
-		rc = iscsi_prep_bidi_ahs(ctask);
+		rc = iscsi_prep_bidi_ahs(task);
 		if (rc)
 			return rc;
 	}
@@ -258,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 		 *
 		 *      pad_count       bytes to be sent as zero-padding
 		 */
-		ctask->unsol_count = 0;
-		ctask->unsol_offset = 0;
-		ctask->unsol_datasn = 0;
+		task->unsol_count = 0;
+		task->unsol_offset = 0;
+		task->unsol_datasn = 0;
 
 		if (session->imm_data_en) {
 			if (out_len >= session->first_burst)
-				ctask->imm_count = min(session->first_burst,
+				task->imm_count = min(session->first_burst,
 							conn->max_xmit_dlength);
 			else
-				ctask->imm_count = min(out_len,
+				task->imm_count = min(out_len,
 							conn->max_xmit_dlength);
-			hton24(hdr->dlength, ctask->imm_count);
+			hton24(hdr->dlength, task->imm_count);
 		} else
 			zero_data(hdr->dlength);
 
 		if (!session->initial_r2t_en) {
-			ctask->unsol_count = min(session->first_burst, out_len)
-							     - ctask->imm_count;
-			ctask->unsol_offset = ctask->imm_count;
+			task->unsol_count = min(session->first_burst, out_len)
+							     - task->imm_count;
+			task->unsol_offset = task->imm_count;
 		}
 
-		if (!ctask->unsol_count)
+		if (!task->unsol_count)
 			/* No unsolicit Data-Out's */
 			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
 	} else {
@@ -292,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	}
 
 	/* calculate size of additional header segments (AHSs) */
-	hdrlength = ctask->hdr_len - sizeof(*hdr);
+	hdrlength = task->hdr_len - sizeof(*hdr);
 
 	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
 	hdrlength /= ISCSI_PAD_LEN;
@@ -301,17 +301,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 	hdr->hlength = hdrlength & 0xFF;
 
 	if (conn->session->tt->init_task &&
-	    conn->session->tt->init_task(ctask))
+	    conn->session->tt->init_task(task))
 		return -EIO;
 
-	ctask->state = ISCSI_TASK_RUNNING;
-	list_move_tail(&ctask->running, &conn->run_list);
+	task->state = ISCSI_TASK_RUNNING;
+	list_move_tail(&task->running, &conn->run_list);
 
 	conn->scsicmd_pdus_cnt++;
 	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
 		   "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
 		   "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
-		   "write" : "read", conn->id, sc, sc->cmnd[0], ctask->itt,
+		   "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
 		   scsi_bufflen(sc),
 		   scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
 		   session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
@@ -320,37 +320,37 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
 
 /**
  * iscsi_complete_command - finish a task
- * @ctask: iscsi cmd task
+ * @task: iscsi cmd task
  *
  * Must be called with session lock.
  * This function returns the scsi command to scsi-ml or cleans
  * up mgmt tasks then returns the task to the pool.
  */
-static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+static void iscsi_complete_command(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
+	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct scsi_cmnd *sc = task->sc;
 
-	list_del_init(&ctask->running);
-	ctask->state = ISCSI_TASK_COMPLETED;
-	ctask->sc = NULL;
+	list_del_init(&task->running);
+	task->state = ISCSI_TASK_COMPLETED;
+	task->sc = NULL;
 
-	if (conn->ctask == ctask)
-		conn->ctask = NULL;
+	if (conn->task == task)
+		conn->task = NULL;
 	/*
-	 * login ctask is preallocated so do not free
+	 * login task is preallocated so do not free
 	 */
-	if (conn->login_ctask == ctask)
+	if (conn->login_task == task)
 		return;
 
-	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
+	__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
 
-	if (conn->ping_ctask == ctask)
-		conn->ping_ctask = NULL;
+	if (conn->ping_task == task)
+		conn->ping_task = NULL;
 
 	if (sc) {
-		ctask->sc = NULL;
+		task->sc = NULL;
 		/* SCSI eh reuses commands to verify us */
 		sc->SCp.ptr = NULL;
 		/*
@@ -362,47 +362,47 @@ static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 	}
 }
 
-static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+static void __iscsi_get_task(struct iscsi_task *task)
 {
-	atomic_inc(&ctask->refcount);
+	atomic_inc(&task->refcount);
 }
 
-static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+static void __iscsi_put_task(struct iscsi_task *task)
 {
-	if (atomic_dec_and_test(&ctask->refcount))
-		iscsi_complete_command(ctask);
+	if (atomic_dec_and_test(&task->refcount))
+		iscsi_complete_command(task);
 }
 
-void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_put_task(struct iscsi_task *task)
 {
-	struct iscsi_session *session = ctask->conn->session;
+	struct iscsi_session *session = task->conn->session;
 
 	spin_lock_bh(&session->lock);
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 	spin_unlock_bh(&session->lock);
 }
-EXPORT_SYMBOL_GPL(iscsi_put_ctask);
+EXPORT_SYMBOL_GPL(iscsi_put_task);
 
 /*
  * session lock must be held
  */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
 			 int err)
 {
 	struct scsi_cmnd *sc;
 
-	sc = ctask->sc;
+	sc = task->sc;
 	if (!sc)
 		return;
 
-	if (ctask->state == ISCSI_TASK_PENDING)
+	if (task->state == ISCSI_TASK_PENDING)
 		/*
 		 * cmd never made it to the xmit thread, so we should not count
 		 * the cmd in the sequencing
 		 */
 		conn->session->queued_cmdsn--;
 	else
-		conn->session->tt->cleanup_task(conn, ctask);
+		conn->session->tt->cleanup_task(conn, task);
 
 	sc->result = err;
 
@@ -413,17 +413,17 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
 
-	if (conn->ctask == ctask)
-		conn->ctask = NULL;
+	if (conn->task == task)
+		conn->task = NULL;
 	/* release ref from queuecommand */
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 }
 
 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
-				struct iscsi_cmd_task *ctask)
+				struct iscsi_task *task)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_hdr *hdr = (struct iscsi_hdr *)ctask->hdr;
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
 
 	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -437,7 +437,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 	 */
 	nop->cmdsn = cpu_to_be32(session->cmdsn);
 	if (hdr->itt != RESERVED_ITT) {
-		hdr->itt = build_itt(ctask->itt, session->age);
+		hdr->itt = build_itt(task->itt, session->age);
 		/*
 		 * TODO: We always use immediate, so we never hit this.
 		 * If we start to send tmfs or nops as non-immediate then
@@ -451,24 +451,24 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
 	}
 
 	if (session->tt->init_task)
-		session->tt->init_task(ctask);
+		session->tt->init_task(task);
 
 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
 		session->state = ISCSI_STATE_LOGGING_OUT;
 
-	list_move_tail(&ctask->running, &conn->mgmt_run_list);
+	list_move_tail(&task->running, &conn->mgmt_run_list);
 	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
 		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
-		   ctask->data_count);
+		   task->data_count);
 	return 0;
 }
 
-static struct iscsi_cmd_task *
+static struct iscsi_task *
 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		      char *data, uint32_t data_size)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 
 	if (session->state == ISCSI_STATE_TERMINATE)
 		return NULL;
@@ -481,18 +481,18 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		 * Same task can be used. Same ITT must be used.
 		 * Note that login_task is preallocated at conn_create().
 		 */
-		ctask = conn->login_ctask;
+		task = conn->login_task;
 	else {
 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
 		if (!__kfifo_get(session->cmdpool.queue,
-				 (void*)&ctask, sizeof(void*)))
+				 (void*)&task, sizeof(void*)))
 			return NULL;
 
 		if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
 		     hdr->ttt == RESERVED_ITT) {
-			conn->ping_ctask = ctask;
+			conn->ping_task = task;
 			conn->last_ping = jiffies;
 		}
 	}
@@ -501,33 +501,33 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	 * released by the lld when it has transmitted the task for
 	 * pdus we do not expect a response for.
 	 */
-	atomic_set(&ctask->refcount, 1);
-	ctask->conn = conn;
-	ctask->sc = NULL;
+	atomic_set(&task->refcount, 1);
+	task->conn = conn;
+	task->sc = NULL;
 
 	if (data_size) {
-		memcpy(ctask->data, data, data_size);
-		ctask->data_count = data_size;
+		memcpy(task->data, data, data_size);
+		task->data_count = data_size;
 	} else
-		ctask->data_count = 0;
+		task->data_count = 0;
 
-	memcpy(ctask->hdr, hdr, sizeof(struct iscsi_hdr));
-	INIT_LIST_HEAD(&ctask->running);
-	list_add_tail(&ctask->running, &conn->mgmtqueue);
+	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+	INIT_LIST_HEAD(&task->running);
+	list_add_tail(&task->running, &conn->mgmtqueue);
 
 	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
-		if (iscsi_prep_mgmt_task(conn, ctask)) {
-			__iscsi_put_ctask(ctask);
+		if (iscsi_prep_mgmt_task(conn, task)) {
+			__iscsi_put_task(task);
 			return NULL;
 		}
 
-		if (session->tt->xmit_task(ctask))
-			ctask = NULL;
+		if (session->tt->xmit_task(task))
+			task = NULL;
 
 	} else
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 
-	return ctask;
+	return task;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -549,20 +549,20 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
  * iscsi_cmd_rsp - SCSI Command Response processing
  * @conn: iscsi connection
  * @hdr: iscsi header
- * @ctask: scsi command task
+ * @task: scsi command task
  * @data: cmd data buffer
  * @datalen: len of buffer
  *
  * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
- * then completes the command and ctask.
+ * then completes the command and task.
  **/
 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-			       struct iscsi_cmd_task *ctask, char *data,
+			       struct iscsi_task *task, char *data,
 			       int datalen)
 {
 	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
 	struct iscsi_session *session = conn->session;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct scsi_cmnd *sc = task->sc;
 
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
@@ -622,10 +622,10 @@ invalid_datalen:
 	}
 out:
 	debug_scsi("done [sc %lx res %d itt 0x%x]\n",
-		   (long)sc, sc->result, ctask->itt);
+		   (long)sc, sc->result, task->itt);
 	conn->scsirsp_pdus_cnt++;
 
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 }
 
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -650,9 +650,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 
-	if (!rhdr && conn->ping_ctask)
+	if (!rhdr && conn->ping_task)
 		return;
 
 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -666,8 +666,8 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 	} else
 		hdr.ttt = RESERVED_ITT;
 
-	ctask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-	if (!ctask)
+	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+	if (!task)
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
 }
 
@@ -712,7 +712,7 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 {
 	struct iscsi_session *session = conn->session;
 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	uint32_t itt;
 
 	conn->last_recv = jiffies;
@@ -758,27 +758,27 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		goto out;
 	}
 
-	ctask = session->cmds[itt];
+	task = session->cmds[itt];
 	switch(opcode) {
 	case ISCSI_OP_SCSI_CMD_RSP:
-		if (!ctask->sc) {
+		if (!task->sc) {
 			rc = ISCSI_ERR_NO_SCSI_CMD;
 			break;
 		}
-		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
-		iscsi_scsi_cmd_rsp(conn, hdr, ctask, data, datalen);
+		BUG_ON((void*)task != task->sc->SCp.ptr);
+		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
 		break;
 	case ISCSI_OP_SCSI_DATA_IN:
-		if (!ctask->sc) {
+		if (!task->sc) {
 			rc = ISCSI_ERR_NO_SCSI_CMD;
 			break;
 		}
-		BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
+		BUG_ON((void*)task != task->sc->SCp.ptr);
 		if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
 			conn->scsirsp_pdus_cnt++;
 			iscsi_update_cmdsn(session,
 					   (struct iscsi_nopin*) hdr);
-			__iscsi_put_ctask(ctask);
+			__iscsi_put_task(task);
 		}
 		break;
 	case ISCSI_OP_R2T:
@@ -808,7 +808,7 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		}
 
 		iscsi_tmf_rsp(conn, hdr);
-		__iscsi_put_ctask(ctask);
+		__iscsi_put_task(task);
 		break;
 	case ISCSI_OP_NOOP_IN:
 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -818,13 +818,15 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		}
 		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 
-		if (conn->ping_ctask != ctask)
+		if (conn->ping_task != task)
 			/*
 			 * If this is not in response to one of our
 			 * nops then it must be from userspace.
 			 */
 			goto recv_pdu;
-		__iscsi_put_ctask(ctask);
+
+		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+		__iscsi_put_task(task);
 		break;
 	default:
 		rc = ISCSI_ERR_BAD_OPCODE;
@@ -836,7 +838,7 @@ out:
 recv_pdu:
 	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
 		rc = ISCSI_ERR_CONN_FAILED;
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 	return rc;
 }
 
@@ -855,7 +857,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	uint32_t i;
 
 	if (itt == RESERVED_ITT)
@@ -878,11 +880,11 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 		return ISCSI_ERR_BAD_ITT;
 	}
 
-	ctask = session->cmds[i];
-	if (ctask->sc && ctask->sc->SCp.phase != session->age) {
+	task = session->cmds[i];
+	if (task->sc && task->sc->SCp.phase != session->age) {
 		iscsi_conn_printk(KERN_ERR, conn,
-				  "iscsi: ctask's session age %d, "
-				  "expected %d\n", ctask->sc->SCp.phase,
+				  "iscsi: task's session age %d, "
+				  "expected %d\n", task->sc->SCp.phase,
 				  session->age);
 		return ISCSI_ERR_SESSION_FAILED;
 	}
@@ -890,11 +892,11 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 }
 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
 
-struct iscsi_cmd_task *
+struct iscsi_task *
 iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	uint32_t i;
 
 	if (iscsi_verify_itt(conn, itt))
@@ -907,14 +909,14 @@ iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 	if (i >= session->cmds_max)
 		return NULL;
 
-	ctask = session->cmds[i];
-	if (!ctask->sc)
+	task = session->cmds[i];
+	if (!task->sc)
 		return NULL;
 
-	if (ctask->sc->SCp.phase != session->age)
+	if (task->sc->SCp.phase != session->age)
 		return NULL;
 
-	return ctask;
+	return task;
 }
 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
 
@@ -955,38 +957,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 	return 0;
 }
 
-static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn)
 {
-	struct iscsi_cmd_task *ctask = conn->ctask;
+	struct iscsi_task *task = conn->task;
 	int rc;
 
-	__iscsi_get_ctask(ctask);
+	__iscsi_get_task(task);
 	spin_unlock_bh(&conn->session->lock);
-	rc = conn->session->tt->xmit_task(ctask);
+	rc = conn->session->tt->xmit_task(task);
 	spin_lock_bh(&conn->session->lock);
-	__iscsi_put_ctask(ctask);
+	__iscsi_put_task(task);
 	if (!rc)
-		/* done with this ctask */
-		conn->ctask = NULL;
+		/* done with this task */
+		conn->task = NULL;
 	return rc;
 }
 
 /**
- * iscsi_requeue_ctask - requeue ctask to run from session workqueue
- * @ctask: ctask to requeue
+ * iscsi_requeue_task - requeue task to run from session workqueue
+ * @task: task to requeue
  *
- * LLDs that need to run a ctask from the session workqueue should call
+ * LLDs that need to run a task from the session workqueue should call
  * this. The session lock must be held. This should only be called
  * by software drivers.
  */
-void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_requeue_task(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
+	struct iscsi_conn *conn = task->conn;
 
-	list_move_tail(&ctask->running, &conn->requeue);
+	list_move_tail(&task->running, &conn->requeue);
 	scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
-EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+EXPORT_SYMBOL_GPL(iscsi_requeue_task);
 
 /**
  * iscsi_data_xmit - xmit any command into the scheduled connection
@@ -1008,8 +1010,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		return -ENODATA;
 	}
 
-	if (conn->ctask) {
-		rc = iscsi_xmit_ctask(conn);
+	if (conn->task) {
+		rc = iscsi_xmit_task(conn);
 	        if (rc)
 		        goto again;
 	}
@@ -1021,14 +1023,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 */
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
-		conn->ctask = list_entry(conn->mgmtqueue.next,
-					 struct iscsi_cmd_task, running);
-		if (iscsi_prep_mgmt_task(conn, conn->ctask)) {
-			__iscsi_put_ctask(conn->ctask);
-			conn->ctask = NULL;
+		conn->task = list_entry(conn->mgmtqueue.next,
+					 struct iscsi_task, running);
+		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+			__iscsi_put_task(conn->task);
+			conn->task = NULL;
 			continue;
 		}
-		rc = iscsi_xmit_ctask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 	}
@@ -1038,21 +1040,21 @@ check_mgmt:
 		if (conn->tmf_state == TMF_QUEUED)
 			break;
 
-		conn->ctask = list_entry(conn->xmitqueue.next,
-					 struct iscsi_cmd_task, running);
+		conn->task = list_entry(conn->xmitqueue.next,
+					 struct iscsi_task, running);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-			fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
 			continue;
 		}
-		if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
-			fail_command(conn, conn->ctask, DID_ABORT << 16);
+		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+			fail_command(conn, conn->task, DID_ABORT << 16);
 			continue;
 		}
-		rc = iscsi_xmit_ctask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 		/*
-		 * we could continuously get new ctask requests so
+		 * we could continuously get new task requests so
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
@@ -1070,11 +1072,11 @@ check_mgmt:
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
 			break;
 
-		conn->ctask = list_entry(conn->requeue.next,
-					 struct iscsi_cmd_task, running);
-		conn->ctask->state = ISCSI_TASK_RUNNING;
+		conn->task = list_entry(conn->requeue.next,
+					 struct iscsi_task, running);
+		conn->task->state = ISCSI_TASK_RUNNING;
 		list_move_tail(conn->requeue.next, &conn->run_list);
-		rc = iscsi_xmit_ctask(conn);
+		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto again;
 		if (!list_empty(&conn->mgmtqueue))
@@ -1123,7 +1125,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	int reason = 0;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	struct iscsi_cmd_task *ctask = NULL;
+	struct iscsi_task *task = NULL;
 
 	sc->scsi_done = done;
 	sc->result = 0;
@@ -1191,31 +1193,31 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		goto reject;
 	}
 
-	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+	if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
 			 sizeof(void*))) {
 		reason = FAILURE_OOM;
 		goto reject;
 	}
 	sc->SCp.phase = session->age;
-	sc->SCp.ptr = (char *)ctask;
+	sc->SCp.ptr = (char *)task;
 
-	atomic_set(&ctask->refcount, 1);
-	ctask->state = ISCSI_TASK_PENDING;
-	ctask->conn = conn;
-	ctask->sc = sc;
-	INIT_LIST_HEAD(&ctask->running);
-	list_add_tail(&ctask->running, &conn->xmitqueue);
+	atomic_set(&task->refcount, 1);
+	task->state = ISCSI_TASK_PENDING;
+	task->conn = conn;
+	task->sc = sc;
+	INIT_LIST_HEAD(&task->running);
+	list_add_tail(&task->running, &conn->xmitqueue);
 
 	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
-		if (iscsi_prep_scsi_cmd_pdu(ctask)) {
+		if (iscsi_prep_scsi_cmd_pdu(task)) {
 			sc->result = DID_ABORT << 16;
 			sc->scsi_done = NULL;
-			iscsi_complete_command(ctask);
+			iscsi_complete_command(task);
 			goto fault;
 		}
-		if (session->tt->xmit_task(ctask)) {
+		if (session->tt->xmit_task(task)) {
 			sc->scsi_done = NULL;
-			iscsi_complete_command(ctask);
+			iscsi_complete_command(task);
 			reason = FAILURE_SESSION_NOT_READY;
 			goto reject;
 		}
@@ -1336,16 +1338,16 @@ static void iscsi_tmf_timedout(unsigned long data)
 	spin_unlock(&session->lock);
 }
 
-static int iscsi_exec_ctask_mgmt_fn(struct iscsi_conn *conn,
+static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 				   struct iscsi_tm *hdr, int age,
 				   int timeout)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 
-	ctask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
 				      NULL, 0);
-	if (!ctask) {
+	if (!task) {
 		spin_unlock_bh(&session->lock);
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		spin_lock_bh(&session->lock);
@@ -1379,7 +1381,7 @@ static int iscsi_exec_ctask_mgmt_fn(struct iscsi_conn *conn,
 
 	mutex_lock(&session->eh_mutex);
 	spin_lock_bh(&session->lock);
-	/* if the session drops it will clean up the ctask */
+	/* if the session drops it will clean up the task */
 	if (age != session->age ||
 	    session->state != ISCSI_STATE_LOGGED_IN)
 		return -ENOTCONN;
@@ -1393,34 +1395,34 @@ static int iscsi_exec_ctask_mgmt_fn(struct iscsi_conn *conn,
 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
 			      int error)
 {
-	struct iscsi_cmd_task *ctask, *tmp;
+	struct iscsi_task *task, *tmp;
 
-	if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
-		conn->ctask = NULL;
+	if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+		conn->task = NULL;
 
 	/* flush pending */
-	list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing pending sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, error << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, error << 16);
 		}
 	}
 
-	list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing requeued sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, error << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, error << 16);
 		}
 	}
 
 	/* fail all other running */
-	list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
-		if (lun == ctask->sc->device->lun || lun == -1) {
+	list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+		if (lun == task->sc->device->lun || lun == -1) {
 			debug_scsi("failing in progress sc %p itt 0x%x\n",
-				   ctask->sc, ctask->itt);
-			fail_command(conn, ctask, DID_BUS_BUSY << 16);
+				   task->sc, task->itt);
+			fail_command(conn, task, DID_BUS_BUSY << 16);
 		}
 	}
 }
@@ -1486,7 +1488,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 			   jiffies))
 		rc = EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
-	if (conn->ping_ctask)
+	if (conn->ping_task)
 		rc = EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
@@ -1510,7 +1512,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
 	recv_timeout *= HZ;
 	last_recv = conn->last_recv;
-	if (conn->ping_ctask &&
+	if (conn->ping_task &&
 	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
 			   jiffies)) {
 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1536,16 +1538,16 @@ done:
 	spin_unlock(&session->lock);
 }
 
-static void iscsi_prep_abort_ctask_pdu(struct iscsi_cmd_task *ctask,
+static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
 				      struct iscsi_tm *hdr)
 {
 	memset(hdr, 0, sizeof(*hdr));
 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
-	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-	hdr->rtt = ctask->hdr->itt;
-	hdr->refcmdsn = ctask->hdr->cmdsn;
+	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+	hdr->rtt = task->hdr->itt;
+	hdr->refcmdsn = task->hdr->cmdsn;
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
@@ -1553,7 +1555,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 	struct iscsi_tm *hdr;
 	int rc, age;
 
@@ -1588,17 +1590,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->eh_abort_cnt++;
 	age = session->age;
 
-	ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
-	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+	task = (struct iscsi_task *)sc->SCp.ptr;
+	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
 
-	/* ctask completed before time out */
-	if (!ctask->sc) {
+	/* task completed before time out */
+	if (!task->sc) {
 		debug_scsi("sc completed while abort in progress\n");
 		goto success;
 	}
 
-	if (ctask->state == ISCSI_TASK_PENDING) {
-		fail_command(conn, ctask, DID_ABORT << 16);
+	if (task->state == ISCSI_TASK_PENDING) {
+		fail_command(conn, task, DID_ABORT << 16);
 		goto success;
 	}
 
@@ -1608,9 +1610,9 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->tmf_state = TMF_QUEUED;
 
 	hdr = &conn->tmhdr;
-	iscsi_prep_abort_ctask_pdu(ctask, hdr);
+	iscsi_prep_abort_task_pdu(task, hdr);
 
-	if (iscsi_exec_ctask_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
 		rc = FAILED;
 		goto failed;
 	}
@@ -1620,11 +1622,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 		spin_unlock_bh(&session->lock);
 		iscsi_suspend_tx(conn);
 		/*
-		 * clean up ctask if aborted. grab the recv lock as a writer
+		 * clean up task if aborted. grab the recv lock as a writer
 		 */
 		write_lock_bh(conn->recv_lock);
 		spin_lock(&session->lock);
-		fail_command(conn, ctask, DID_ABORT << 16);
+		fail_command(conn, task, DID_ABORT << 16);
 		conn->tmf_state = TMF_INITIAL;
 		spin_unlock(&session->lock);
 		write_unlock_bh(conn->recv_lock);
@@ -1637,7 +1639,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	case TMF_NOT_FOUND:
 		if (!sc->SCp.ptr) {
 			conn->tmf_state = TMF_INITIAL;
-			/* ctask completed before tmf abort response */
+			/* task completed before tmf abort response */
 			debug_scsi("sc completed while abort in progress\n");
 			goto success;
 		}
@@ -1650,7 +1652,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 success:
 	spin_unlock_bh(&session->lock);
 success_unlocked:
-	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 
@@ -1658,7 +1660,7 @@ failed:
 	spin_unlock_bh(&session->lock);
 failed_unlocked:
 	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
-		    ctask ? ctask->itt : 0);
+		    task ? task->itt : 0);
 	mutex_unlock(&session->eh_mutex);
 	return FAILED;
 }
@@ -1705,7 +1707,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	hdr = &conn->tmhdr;
 	iscsi_prep_lun_reset_pdu(sc, hdr);
 
-	if (iscsi_exec_ctask_mgmt_fn(conn, hdr, session->age,
+	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
 				    session->lu_reset_timeout)) {
 		rc = FAILED;
 		goto unlock;
@@ -1886,7 +1888,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
  * @iscsit: iscsi transport template
  * @shost: scsi host
  * @cmds_max: session can queue
- * @cmd_ctask_size: LLD ctask private data size
+ * @cmd_task_size: LLD task private data size
  * @initial_cmdsn: initial CmdSN
  *
  * This can be used by software iscsi_transports that allocate
@@ -1894,7 +1896,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
  */
 struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
-		    uint16_t scsi_cmds_max, int cmd_ctask_size,
+		    uint16_t scsi_cmds_max, int cmd_task_size,
 		    uint32_t initial_cmdsn)
 {
 	struct iscsi_session *session;
@@ -1902,7 +1904,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	int cmd_i, cmds_max;
 
 	/*
-	 * The iscsi layer needs some ctasks for nop handling and tmfs.
+	 * The iscsi layer needs some tasks for nop handling and tmfs.
 	 */
 	if (scsi_cmds_max < 1)
 		scsi_cmds_max = ISCSI_MGMT_CMDS_MAX;
@@ -1939,17 +1941,17 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	/* initialize SCSI PDU commands pool */
 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
 			    (void***)&session->cmds,
-			    cmd_ctask_size + sizeof(struct iscsi_cmd_task)))
+			    cmd_task_size + sizeof(struct iscsi_task)))
 		goto cmdpool_alloc_fail;
 
 	/* pre-format cmds pool with ITT */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+		struct iscsi_task *task = session->cmds[cmd_i];
 
-		if (cmd_ctask_size)
-			ctask->dd_data = &ctask[1];
-		ctask->itt = cmd_i;
-		INIT_LIST_HEAD(&ctask->running);
+		if (cmd_task_size)
+			task->dd_data = &task[1];
+		task->itt = cmd_i;
+		INIT_LIST_HEAD(&task->running);
 	}
 
 	if (!try_module_get(iscsit->owner))
@@ -2035,30 +2037,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->requeue);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
-	/* allocate login_ctask used for the login/text sequences */
+	/* allocate login_task used for the login/text sequences */
 	spin_lock_bh(&session->lock);
 	if (!__kfifo_get(session->cmdpool.queue,
-                         (void*)&conn->login_ctask,
+                         (void*)&conn->login_task,
 			 sizeof(void*))) {
 		spin_unlock_bh(&session->lock);
-		goto login_ctask_alloc_fail;
+		goto login_task_alloc_fail;
 	}
 	spin_unlock_bh(&session->lock);
 
 	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
 	if (!data)
-		goto login_ctask_data_alloc_fail;
-	conn->login_ctask->data = conn->data = data;
+		goto login_task_data_alloc_fail;
+	conn->login_task->data = conn->data = data;
 
 	init_timer(&conn->tmf_timer);
 	init_waitqueue_head(&conn->ehwait);
 
 	return cls_conn;
 
-login_ctask_data_alloc_fail:
-	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_ctask,
+login_task_data_alloc_fail:
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
-login_ctask_alloc_fail:
+login_task_alloc_fail:
 	iscsi_destroy_conn(cls_conn);
 	return NULL;
 }
@@ -2118,7 +2120,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 	spin_lock_bh(&session->lock);
 	kfree(conn->data);
 	kfree(conn->persistent_address);
-	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_ctask,
+	__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
 		    sizeof(void*));
 	if (session->leadconn == conn)
 		session->leadconn = NULL;
@@ -2199,23 +2201,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
 static void
 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-	struct iscsi_cmd_task *ctask, *tmp;
+	struct iscsi_task *task, *tmp;
 
 	/* handle pending */
-	list_for_each_entry_safe(ctask, tmp, &conn->mgmtqueue, running) {
-		debug_scsi("flushing pending mgmt ctask itt 0x%x\n", ctask->itt);
-		/* release ref from prep ctask */
-		__iscsi_put_ctask(ctask);
+	list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+		debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+		/* release ref from prep task */
+		__iscsi_put_task(task);
 	}
 
 	/* handle running */
-	list_for_each_entry_safe(ctask, tmp, &conn->mgmt_run_list, running) {
-		debug_scsi("flushing running mgmt ctask itt 0x%x\n", ctask->itt);
-		/* release ref from prep ctask */
-		__iscsi_put_ctask(ctask);
+	list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+		debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+		/* release ref from prep task */
+		__iscsi_put_task(task);
 	}
 
-	conn->ctask = NULL;
+	conn->task = NULL;
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2246,7 +2248,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 
 	/*
 	 * When this is called for the in_login state, we only want to clean
-	 * up the login ctask and connection. We do not need to block and set
+	 * up the login task and connection. We do not need to block and set
 	 * the recovery state again
 	 */
 	if (flag == STOP_CONN_TERM)
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index d1c36759b350..176353c117b6 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -92,7 +92,7 @@ enum {
 	ISCSI_TASK_RUNNING,
 };
 
-struct iscsi_cmd_task {
+struct iscsi_task {
 	/*
 	 * Because LLDs allocate their hdr differently, this is a pointer
 	 * and length to that storage. It must be setup at session
@@ -120,9 +120,9 @@ struct iscsi_cmd_task {
 	void			*dd_data;	/* driver/transport data */
 };
 
-static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+static inline void* iscsi_next_hdr(struct iscsi_task *task)
 {
-	return (void*)ctask->hdr + ctask->hdr_len;
+	return (void*)task->hdr + task->hdr_len;
 }
 
 /* Connection's states */
@@ -151,7 +151,7 @@ struct iscsi_conn {
 	unsigned long		last_ping;
 	int			ping_timeout;
 	int			recv_timeout;
-	struct iscsi_cmd_task 	*ping_ctask;
+	struct iscsi_task 	*ping_task;
 
 	/* iSCSI connection-wide sequencing */
 	uint32_t		exp_statsn;
@@ -167,8 +167,8 @@ struct iscsi_conn {
 	 * should always fit in this buffer
 	 */
 	char			*data;
-	struct iscsi_cmd_task 	*login_ctask;	/* mtask used for login/text */
-	struct iscsi_cmd_task	*ctask;		/* xmit task in progress */
+	struct iscsi_task 	*login_task;	/* mtask used for login/text */
+	struct iscsi_task	*task;		/* xmit task in progress */
 
 	/* xmit */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
@@ -285,7 +285,7 @@ struct iscsi_session {
 
 	int			scsi_cmds_max; 	/* max scsi commands */
 	int			cmds_max;	/* size of cmds array */
-	struct iscsi_cmd_task	**cmds;		/* Original Cmds arr */
+	struct iscsi_task	**cmds;		/* Original Cmds arr */
 	struct iscsi_pool	cmdpool;	/* PDU's pool */
 };
 
@@ -365,16 +365,16 @@ extern void iscsi_suspend_tx(struct iscsi_conn *conn);
  * pdu and task processing
  */
 extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
 					struct iscsi_data *hdr);
 extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
 				char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 			      char *, int);
 extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
-extern struct iscsi_cmd_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
-extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
-extern void iscsi_put_ctask(struct iscsi_cmd_task *ctask);
+extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+extern void iscsi_requeue_task(struct iscsi_task *task);
+extern void iscsi_put_task(struct iscsi_task *task);
 
 /*
  * generic helpers
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 3f24503dfdf9..0553240796e9 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -33,7 +33,7 @@ struct iscsi_transport;
 struct Scsi_Host;
 struct iscsi_cls_conn;
 struct iscsi_conn;
-struct iscsi_cmd_task;
+struct iscsi_task;
 struct sockaddr;
 
 /**
@@ -112,10 +112,10 @@ struct iscsi_transport {
 			 char *data, uint32_t data_size);
 	void (*get_stats) (struct iscsi_cls_conn *conn,
 			   struct iscsi_stats *stats);
-	int (*init_task) (struct iscsi_cmd_task *task);
-	int (*xmit_task) (struct iscsi_cmd_task *task);
+	int (*init_task) (struct iscsi_task *task);
+	int (*xmit_task) (struct iscsi_task *task);
 	void (*cleanup_task) (struct iscsi_conn *conn,
-				  struct iscsi_cmd_task *task);
+				  struct iscsi_task *task);
 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
 	int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
 			   uint64_t *ep_handle);
-- 
cgit v1.2.3
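
A minimal sketch of the per-task layout established by the rename patch
above (the structure and helper names are hypothetical, not from the
patch): struct iscsi_task is allocated with the LLD's cmd_task_size
bytes immediately behind it, and dd_data is pre-formatted in
iscsi_session_setup() to point at that area, so an LLD can reach its
private state like this:

	/* hypothetical LLD per-task private data */
	struct my_lld_task {
		unsigned int exp_datasn;	/* whatever the LLD tracks per task */
	};

	static inline struct my_lld_task *my_task_priv(struct iscsi_task *task)
	{
		return task->dd_data;		/* &task[1], sized by cmd_task_size */
	}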


From 135a8ad4e09309d36dcb8b5c7f55db0b6a15b2d6 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:10 -0500
Subject: [SCSI] iscsi_tcp: handle iscsi_cmd_task rename

This converts iscsi_tcp to the new iscsi_task naming and renames its
per-task structure from iscsi_tcp_cmd_task to iscsi_tcp_task to match.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/iscsi_tcp.c | 262 +++++++++++++++++++++++------------------------
 drivers/scsi/iscsi_tcp.h |   2 +-
 2 files changed, 132 insertions(+), 132 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 517bad160bea..33cd0ca7cc8d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -498,43 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
  * must be called with session lock
  */
 static void
-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
 {
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_r2t_info *r2t;
 
-	/* nothing to do for mgmt ctasks */
-	if (!ctask->sc)
+	/* nothing to do for mgmt tasks */
+	if (!task->sc)
 		return;
 
-	/* flush ctask's r2t queues */
-	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
-		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+	/* flush task's r2t queues */
+	while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 			    sizeof(void*));
-		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+		debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
 	}
 
-	r2t = tcp_ctask->r2t;
+	r2t = tcp_task->r2t;
 	if (r2t != NULL) {
-		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 			    sizeof(void*));
-		tcp_ctask->r2t = NULL;
+		tcp_task->r2t = NULL;
 	}
 }
 
 /**
  * iscsi_data_rsp - SCSI Data-In Response processing
  * @conn: iscsi connection
- * @ctask: scsi command ctask
+ * @task: scsi command task
  **/
 static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
 	struct iscsi_session *session = conn->session;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct scsi_cmnd *sc = task->sc;
 	int datasn = be32_to_cpu(rhdr->datasn);
 	unsigned total_in_length = scsi_in(sc)->length;
 
@@ -542,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	if (tcp_conn->in.datalen == 0)
 		return 0;
 
-	if (tcp_ctask->exp_datasn != datasn) {
-		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
-		          __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+	if (tcp_task->exp_datasn != datasn) {
+		debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+		          __FUNCTION__, tcp_task->exp_datasn, datasn);
 		return ISCSI_ERR_DATASN;
 	}
 
-	tcp_ctask->exp_datasn++;
+	tcp_task->exp_datasn++;
 
-	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
-	if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
+	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
 		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
-		          __FUNCTION__, tcp_ctask->data_offset,
+		          __FUNCTION__, tcp_task->data_offset,
 		          tcp_conn->in.datalen, total_in_length);
 		return ISCSI_ERR_DATA_OFFSET;
 	}
@@ -582,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 /**
  * iscsi_solicit_data_init - initialize first Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command ctask
+ * @task: scsi command task
  * @r2t: R2T info
  *
  * Notes:
@@ -592,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
  *	This function is called with connection lock taken.
  **/
 static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
 			struct iscsi_r2t_info *r2t)
 {
 	struct iscsi_data *hdr;
@@ -603,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
 	r2t->solicit_datasn++;
 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-	hdr->itt = ctask->hdr->itt;
+	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+	hdr->itt = task->hdr->itt;
 	hdr->exp_statsn = r2t->exp_statsn;
 	hdr->offset = cpu_to_be32(r2t->data_offset);
 	if (r2t->data_length > conn->max_xmit_dlength) {
@@ -624,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 /**
  * iscsi_r2t_rsp - iSCSI R2T Response processing
  * @conn: iscsi connection
- * @ctask: scsi command ctask
+ * @task: scsi command task
  **/
 static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 {
 	struct iscsi_r2t_info *r2t;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
 	int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -644,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 		return ISCSI_ERR_DATALEN;
 	}
 
-	if (tcp_ctask->exp_datasn != r2tsn){
-		debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
-		          __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+	if (tcp_task->exp_datasn != r2tsn){
+		debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+		          __FUNCTION__, tcp_task->exp_datasn, r2tsn);
 		return ISCSI_ERR_R2TSN;
 	}
 
-	/* fill-in new R2T associated with the ctask */
+	/* fill-in new R2T associated with the task */
 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
 
-	if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
 		iscsi_conn_printk(KERN_INFO, conn,
 				  "dropping R2T itt %d in recovery.\n",
-				  ctask->itt);
+				  task->itt);
 		return 0;
 	}
 
-	rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+	rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
 	BUG_ON(!rc);
 
 	r2t->exp_statsn = rhdr->statsn;
@@ -668,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	if (r2t->data_length == 0) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "invalid R2T with zero data len\n");
-		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 			    sizeof(void*));
 		return ISCSI_ERR_DATALEN;
 	}
@@ -679,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 			    r2t->data_length, session->max_burst);
 
 	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
-	if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
+	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "invalid R2T with data len %u at offset %u "
 				  "and total length %d\n", r2t->data_length,
-				  r2t->data_offset, scsi_out(ctask->sc)->length);
-		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+				  r2t->data_offset, scsi_out(task->sc)->length);
+		__kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
 			    sizeof(void*));
 		return ISCSI_ERR_DATALEN;
 	}
@@ -692,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
 	r2t->ttt = rhdr->ttt; /* no flip */
 	r2t->solicit_datasn = 0;
 
-	iscsi_solicit_data_init(conn, ctask, r2t);
+	iscsi_solicit_data_init(conn, task, r2t);
 
-	tcp_ctask->exp_datasn = r2tsn + 1;
-	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+	tcp_task->exp_datasn = r2tsn + 1;
+	__kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
 	conn->r2t_pdus_cnt++;
 
-	iscsi_requeue_ctask(ctask);
+	iscsi_requeue_task(task);
 	return 0;
 }
 
@@ -743,7 +743,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 	int rc = 0, opcode, ahslen;
 	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_cmd_task *ctask;
+	struct iscsi_task *task;
 
 	/* verify PDU length */
 	tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -770,21 +770,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
-		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
-		if (!ctask)
+		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!task)
 			return ISCSI_ERR_BAD_ITT;
-		if (!ctask->sc)
+		if (!task->sc)
 			return ISCSI_ERR_NO_SCSI_CMD;
 
 		spin_lock(&conn->session->lock);
-		rc = iscsi_data_rsp(conn, ctask);
+		rc = iscsi_data_rsp(conn, task);
 		spin_unlock(&conn->session->lock);
 		if (rc)
 			return rc;
 		if (tcp_conn->in.datalen) {
-			struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+			struct iscsi_tcp_task *tcp_task = task->dd_data;
 			struct hash_desc *rx_hash = NULL;
-			struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
+			struct scsi_data_buffer *sdb = scsi_in(task->sc);
 
 			/*
 			 * Setup copy of Data-In into the Scsi_Cmnd
@@ -799,12 +799,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 			debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
 				  "datalen=%d)\n", tcp_conn,
-				  tcp_ctask->data_offset,
+				  tcp_task->data_offset,
 				  tcp_conn->in.datalen);
 			return iscsi_segment_seek_sg(&tcp_conn->in.segment,
 						     sdb->table.sgl,
 						     sdb->table.nents,
-						     tcp_ctask->data_offset,
+						     tcp_task->data_offset,
 						     tcp_conn->in.datalen,
 						     iscsi_tcp_process_data_in,
 						     rx_hash);
@@ -818,17 +818,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		ctask = iscsi_itt_to_ctask(conn, hdr->itt);
-		if (!ctask)
+		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!task)
 			return ISCSI_ERR_BAD_ITT;
-		if (!ctask->sc)
+		if (!task->sc)
 			return ISCSI_ERR_NO_SCSI_CMD;
 
 		if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
-		else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
 			spin_lock(&session->lock);
-			rc = iscsi_r2t_rsp(conn, ctask);
+			rc = iscsi_r2t_rsp(conn, task);
 			spin_unlock(&session->lock);
 		} else
 			rc = ISCSI_ERR_PROTO;
@@ -1202,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 
 	/* If header digest is enabled, compute the CRC and
 	 * place the digest into the same buffer. We make
-	 * sure that both iscsi_tcp_cmd_task and mctask have
+	 * sure that both iscsi_tcp_task and mtask have
 	 * sufficient room.
 	 */
 	if (conn->hdrdgst_en) {
@@ -1277,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 /**
  * iscsi_solicit_data_cont - initialize next Data-Out
  * @conn: iscsi connection
- * @ctask: scsi command ctask
+ * @task: scsi command task
  * @r2t: R2T info
  * @left: bytes left to transfer
  *
@@ -1288,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
  *	Called under connection lock.
  **/
 static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
 			struct iscsi_r2t_info *r2t)
 {
 	struct iscsi_data *hdr;
@@ -1305,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
 	r2t->solicit_datasn++;
 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
-	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
-	hdr->itt = ctask->hdr->itt;
+	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+	hdr->itt = task->hdr->itt;
 	hdr->exp_statsn = r2t->exp_statsn;
 	new_offset = r2t->data_offset + r2t->sent;
 	hdr->offset = cpu_to_be32(new_offset);
@@ -1326,73 +1326,73 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 /**
  * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
  * @conn: iscsi connection
- * @ctask: scsi command ctask
+ * @task: scsi command task
  * @sc: scsi command
  **/
 static int
-iscsi_tcp_task_init(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_init(struct iscsi_task *task)
 {
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-	struct iscsi_conn *conn = ctask->conn;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct iscsi_conn *conn = task->conn;
+	struct scsi_cmnd *sc = task->sc;
 	int err;
 
 	if (!sc) {
 		/*
-		 * mgmt ctasks do not have a scatterlist since they come
+		 * mgmt tasks do not have a scatterlist since they come
 		 * in from the iscsi interface.
 		 */
-		debug_scsi("mctask deq [cid %d itt 0x%x]\n", conn->id,
-			   ctask->itt);
+		debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+			   task->itt);
 
 		/* Prepare PDU, optionally w/ immediate data */
-		iscsi_tcp_send_hdr_prep(conn, ctask->hdr, sizeof(*ctask->hdr));
+		iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
 
 		/* If we have immediate data, attach a payload */
-		if (ctask->data_count)
-			iscsi_tcp_send_linear_data_prepare(conn, ctask->data,
-							   ctask->data_count);
+		if (task->data_count)
+			iscsi_tcp_send_linear_data_prepare(conn, task->data,
+							   task->data_count);
 		return 0;
 	}
 
-	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-	tcp_ctask->sent = 0;
-	tcp_ctask->exp_datasn = 0;
+	BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+	tcp_task->sent = 0;
+	tcp_task->exp_datasn = 0;
 
 	/* Prepare PDU, optionally w/ immediate data */
-	debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
-		    conn->id, ctask->itt, ctask->imm_count,
-		    ctask->unsol_count);
-	iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+	debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+		    conn->id, task->itt, task->imm_count,
+		    task->unsol_count);
+	iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
 
-	if (!ctask->imm_count)
+	if (!task->imm_count)
 		return 0;
 
 	/* If we have immediate data, attach a payload */
 	err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
 				       scsi_out(sc)->table.nents,
-				       0, ctask->imm_count);
+				       0, task->imm_count);
 	if (err)
 		return err;
-	tcp_ctask->sent += ctask->imm_count;
-	ctask->imm_count = 0;
+	tcp_task->sent += task->imm_count;
+	task->imm_count = 0;
 	return 0;
 }
 
 /*
- * iscsi_tcp_task_xmit - xmit normal PDU ctask
- * @ctask: iscsi command ctask
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
  *
  * We're expected to return 0 when everything was transmitted succesfully,
  * -EAGAIN if there's still data in the queue, or != 0 for any other kind
  * of error.
  */
 static int
-iscsi_tcp_task_xmit(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = ctask->conn;
-	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-	struct scsi_cmnd *sc = ctask->sc;
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_tcp_task *tcp_task = task->dd_data;
+	struct scsi_cmnd *sc = task->sc;
 	struct scsi_data_buffer *sdb;
 	int rc = 0;
 
@@ -1404,8 +1404,8 @@ flush:
 
 	/* mgmt command */
 	if (!sc) {
-		if (ctask->hdr->itt == RESERVED_ITT)
-			iscsi_put_ctask(ctask);
+		if (task->hdr->itt == RESERVED_ITT)
+			iscsi_put_task(task);
 		return 0;
 	}
 
@@ -1414,27 +1414,27 @@ flush:
 		return 0;
 
 	sdb = scsi_out(sc);
-	if (ctask->unsol_count != 0) {
-		struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+	if (task->unsol_count != 0) {
+		struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
 
 		/* Prepare a header for the unsolicited PDU.
 		 * The amount of data we want to send will be
-		 * in ctask->data_count.
+		 * in task->data_count.
 		 * FIXME: return the data count instead.
 		 */
-		iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+		iscsi_prep_unsolicit_data_pdu(task, hdr);
 
 		debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
-				ctask->itt, tcp_ctask->sent, ctask->data_count);
+				task->itt, tcp_task->sent, task->data_count);
 
 		iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
 		rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
-					      sdb->table.nents, tcp_ctask->sent,
-					      ctask->data_count);
+					      sdb->table.nents, tcp_task->sent,
+					      task->data_count);
 		if (rc)
 			goto fail;
-		tcp_ctask->sent += ctask->data_count;
-		ctask->unsol_count -= ctask->data_count;
+		tcp_task->sent += task->data_count;
+		task->unsol_count -= task->data_count;
 		goto flush;
 	} else {
 		struct iscsi_session *session = conn->session;
@@ -1443,22 +1443,22 @@ flush:
 		/* All unsolicited PDUs sent. Check for solicited PDUs.
 		 */
 		spin_lock_bh(&session->lock);
-		r2t = tcp_ctask->r2t;
+		r2t = tcp_task->r2t;
 		if (r2t != NULL) {
 			/* Continue with this R2T? */
-			if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+			if (!iscsi_solicit_data_cont(conn, task, r2t)) {
 				debug_scsi("  done with r2t %p\n", r2t);
 
-				__kfifo_put(tcp_ctask->r2tpool.queue,
+				__kfifo_put(tcp_task->r2tpool.queue,
 					    (void*)&r2t, sizeof(void*));
-				tcp_ctask->r2t = r2t = NULL;
+				tcp_task->r2t = r2t = NULL;
 			}
 		}
 
 		if (r2t == NULL) {
-			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+			__kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
 				    sizeof(void*));
-			r2t = tcp_ctask->r2t;
+			r2t = tcp_task->r2t;
 		}
 		spin_unlock_bh(&session->lock);
 
@@ -1469,7 +1469,7 @@ flush:
 		}
 
 		debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
-			r2t, r2t->solicit_datasn - 1, ctask->itt,
+			r2t, r2t->solicit_datasn - 1, task->itt,
 			r2t->data_offset + r2t->sent, r2t->data_count);
 
 		iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1481,7 +1481,7 @@ flush:
 					      r2t->data_count);
 		if (rc)
 			goto fail;
-		tcp_ctask->sent += r2t->data_count;
+		tcp_task->sent += r2t->data_count;
 		r2t->sent += r2t->data_count;
 		goto flush;
 	}
@@ -1698,11 +1698,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 	int cmd_i;
 
 	/*
-	 * initialize per-ctask: R2T pool and xmit queue
+	 * initialize per-task: R2T pool and xmit queue
 	 */
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-	        struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+	        struct iscsi_task *task = session->cmds[cmd_i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
 
 		/*
 		 * pre-allocated x4 as much r2ts to handle race when
@@ -1711,16 +1711,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 		 */
 
 		/* R2T pool */
-		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+		if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
 				    sizeof(struct iscsi_r2t_info))) {
 			goto r2t_alloc_fail;
 		}
 
 		/* R2T xmit queue */
-		tcp_ctask->r2tqueue = kfifo_alloc(
+		tcp_task->r2tqueue = kfifo_alloc(
 		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
-		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
-			iscsi_pool_free(&tcp_ctask->r2tpool);
+		if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+			iscsi_pool_free(&tcp_task->r2tpool);
 			goto r2t_alloc_fail;
 		}
 	}
@@ -1729,11 +1729,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
 
 r2t_alloc_fail:
 	for (i = 0; i < cmd_i; i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-		kfifo_free(tcp_ctask->r2tqueue);
-		iscsi_pool_free(&tcp_ctask->r2tpool);
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
 	}
 	return -ENOMEM;
 }
@@ -1744,11 +1744,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
 	int i;
 
 	for (i = 0; i < session->cmds_max; i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-		kfifo_free(tcp_ctask->r2tqueue);
-		iscsi_pool_free(&tcp_ctask->r2tpool);
+		kfifo_free(tcp_task->r2tqueue);
+		iscsi_pool_free(&tcp_task->r2tpool);
 	}
 }
 
@@ -1867,7 +1867,7 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 	*hostno = shost->host_no;
 
 	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
-					  sizeof(struct iscsi_tcp_cmd_task),
+					  sizeof(struct iscsi_tcp_task),
 					  initial_cmdsn);
 	if (!cls_session)
 		goto remove_host;
@@ -1875,11 +1875,11 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 
 	shost->can_queue = session->scsi_cmds_max;
 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
-		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
-		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+		struct iscsi_task *task = session->cmds[cmd_i];
+		struct iscsi_tcp_task *tcp_task = task->dd_data;
 
-		ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
-		ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
+		task->hdr = &tcp_task->hdr.cmd_hdr;
+		task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
 	}
 
 	if (iscsi_r2tpool_alloc(session))
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index c9c8633c41a6..498d8ca39848 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -114,7 +114,7 @@ struct iscsi_r2t_info {
 	struct iscsi_data_task	dtask;		/* Data-Out header buf */
 };
 
-struct iscsi_tcp_cmd_task {
+struct iscsi_tcp_task {
 	struct iscsi_hdr_buff {
 		struct iscsi_cmd	cmd_hdr;
 		char			hdrextbuf[ISCSI_MAX_AHS_SIZE +
-- 
cgit v1.2.3
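
For orientation, a hedged sketch of how the renamed per-task hooks plug
into an iscsi_transport template after this patch; the template name and
the field subset shown are illustrative only, and the real iscsi_tcp
template sets many more fields (caps, parameter masks, session and
connection hooks):

	static struct iscsi_transport my_tcp_transport = {
		.owner		= THIS_MODULE,
		/* per-task hooks now take struct iscsi_task */
		.init_task	= iscsi_tcp_task_init,
		.xmit_task	= iscsi_tcp_task_xmit,
		.cleanup_task	= iscsi_tcp_cleanup_task,
	};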


From 7970634b81a6e3561954517bca42615542c4535b Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:12 -0500
Subject: [SCSI] iscsi class: use device_for_each_child instead of duplicating
 session list

Currently we duplicate the list of sessions, because we were using a
session's presence on the host list to indicate whether it was bound or
unbound. We can instead use the target_id, and fix up the class so that
drivers like bnx2i do not have to manage the target ID space.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c |   2 +-
 drivers/scsi/iscsi_tcp.c                 |   2 +-
 drivers/scsi/libiscsi.c                  |   4 +-
 drivers/scsi/scsi_transport_iscsi.c      | 125 ++++++++++++++++++++++++-------
 include/scsi/libiscsi.h                  |   2 +-
 include/scsi/scsi_transport_iscsi.h      |   6 +-
 6 files changed, 104 insertions(+), 37 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 86d9c42f0d33..3a89039e9a96 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -437,7 +437,7 @@ iscsi_iser_session_create(struct Scsi_Host *shost,
 	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
 					  ISCSI_DEF_XMIT_CMDS_MAX,
 					  sizeof(struct iscsi_iser_task),
-					  initial_cmdsn);
+					  initial_cmdsn, 0);
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 33cd0ca7cc8d..aa3c7f0c550d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1868,7 +1868,7 @@ iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
 
 	cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
 					  sizeof(struct iscsi_tcp_task),
-					  initial_cmdsn);
+					  initial_cmdsn, 0);
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 92ee6d94aaf9..e88b726ab2e0 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1897,7 +1897,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
 struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 		    uint16_t scsi_cmds_max, int cmd_task_size,
-		    uint32_t initial_cmdsn)
+		    uint32_t initial_cmdsn, unsigned int id)
 {
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
@@ -1957,7 +1957,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	if (!try_module_get(iscsit->owner))
 		goto module_get_fail;
 
-	if (iscsi_add_session(cls_session, 0))
+	if (iscsi_add_session(cls_session, id))
 		goto cls_session_fail;
 	return cls_session;
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 6b8516a0970b..ac9d298f54e7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -119,9 +119,8 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
 	struct iscsi_cls_host *ihost = shost->shost_data;
 
 	memset(ihost, 0, sizeof(*ihost));
-	INIT_LIST_HEAD(&ihost->sessions);
-	mutex_init(&ihost->mutex);
 	atomic_set(&ihost->nr_scans, 0);
+	mutex_init(&ihost->mutex);
 
 	snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
 		shost->host_no);
@@ -316,42 +315,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
 }
 EXPORT_SYMBOL_GPL(iscsi_scan_finished);
 
-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
-			   uint id, uint lun)
+struct iscsi_scan_data {
+	unsigned int channel;
+	unsigned int id;
+	unsigned int lun;
+};
+
+static int iscsi_user_scan_session(struct device *dev, void *data)
 {
-	struct iscsi_cls_host *ihost = shost->shost_data;
+	struct iscsi_scan_data *scan_data = data;
 	struct iscsi_cls_session *session;
+	struct Scsi_Host *shost;
+	struct iscsi_cls_host *ihost;
+	unsigned long flags;
+	unsigned int id;
+
+	if (!iscsi_is_session_dev(dev))
+		return 0;
+
+	session = iscsi_dev_to_session(dev);
+	shost = iscsi_session_to_shost(session);
+	ihost = shost->shost_data;
 
 	mutex_lock(&ihost->mutex);
-	list_for_each_entry(session, &ihost->sessions, host_list) {
-		if ((channel == SCAN_WILD_CARD || channel == 0) &&
-		    (id == SCAN_WILD_CARD || id == session->target_id))
-			scsi_scan_target(&session->dev, 0,
-					 session->target_id, lun, 1);
+	spin_lock_irqsave(&session->lock, flags);
+	if (session->state != ISCSI_SESSION_LOGGED_IN) {
+		spin_unlock_irqrestore(&session->lock, flags);
+		mutex_unlock(&ihost->mutex);
+		return 0;
 	}
-	mutex_unlock(&ihost->mutex);
+	id = session->target_id;
+	spin_unlock_irqrestore(&session->lock, flags);
 
+	if (id != ISCSI_MAX_TARGET) {
+		if ((scan_data->channel == SCAN_WILD_CARD ||
+		     scan_data->channel == 0) &&
+		    (scan_data->id == SCAN_WILD_CARD ||
+		     scan_data->id == id))
+			scsi_scan_target(&session->dev, 0, id,
+					 scan_data->lun, 1);
+	}
+	mutex_unlock(&ihost->mutex);
 	return 0;
 }
 
+static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+			   uint id, uint lun)
+{
+	struct iscsi_scan_data scan_data;
+
+	scan_data.channel = channel;
+	scan_data.id = id;
+	scan_data.lun = lun;
+
+	return device_for_each_child(&shost->shost_gendev, &scan_data,
+				     iscsi_user_scan_session);
+}
+
 static void iscsi_scan_session(struct work_struct *work)
 {
 	struct iscsi_cls_session *session =
 			container_of(work, struct iscsi_cls_session, scan_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
 	struct iscsi_cls_host *ihost = shost->shost_data;
-	unsigned long flags;
+	struct iscsi_scan_data scan_data;
 
-	spin_lock_irqsave(&session->lock, flags);
-	if (session->state != ISCSI_SESSION_LOGGED_IN) {
-		spin_unlock_irqrestore(&session->lock, flags);
-		goto done;
-	}
-	spin_unlock_irqrestore(&session->lock, flags);
+	scan_data.channel = 0;
+	scan_data.id = SCAN_WILD_CARD;
+	scan_data.lun = SCAN_WILD_CARD;
 
-	scsi_scan_target(&session->dev, 0, session->target_id,
-			 SCAN_WILD_CARD, 1);
-done:
+	iscsi_user_scan_session(&session->dev, &scan_data);
 	atomic_dec(&ihost->nr_scans);
 }
 
@@ -460,14 +493,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
 				     unbind_work);
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
 	struct iscsi_cls_host *ihost = shost->shost_data;
+	unsigned long flags;
 
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
-	if (list_empty(&session->host_list)) {
+	spin_lock_irqsave(&session->lock, flags);
+	if (session->target_id == ISCSI_MAX_TARGET) {
+		spin_unlock_irqrestore(&session->lock, flags);
 		mutex_unlock(&ihost->mutex);
 		return;
 	}
-	list_del_init(&session->host_list);
+	session->target_id = ISCSI_MAX_TARGET;
+	spin_unlock_irqrestore(&session->lock, flags);
 	mutex_unlock(&ihost->mutex);
 
 	scsi_remove_target(&session->dev);
@@ -497,7 +534,6 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 	session->recovery_tmo = 120;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
-	INIT_LIST_HEAD(&session->host_list);
 	INIT_LIST_HEAD(&session->sess_list);
 	INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
 	INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -516,16 +552,51 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);
 
+static int iscsi_get_next_target_id(struct device *dev, void *data)
+{
+	struct iscsi_cls_session *session;
+	unsigned long flags;
+	int err = 0;
+
+	if (!iscsi_is_session_dev(dev))
+		return 0;
+
+	session = iscsi_dev_to_session(dev);
+	spin_lock_irqsave(&session->lock, flags);
+	if (*((unsigned int *) data) == session->target_id)
+		err = -EEXIST;
+	spin_unlock_irqrestore(&session->lock, flags);
+	return err;
+}
+
 int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 {
 	struct Scsi_Host *shost = iscsi_session_to_shost(session);
 	struct iscsi_cls_host *ihost;
 	unsigned long flags;
+	unsigned int id = target_id;
 	int err;
 
 	ihost = shost->shost_data;
 	session->sid = atomic_add_return(1, &iscsi_session_nr);
-	session->target_id = target_id;
+
+	if (id == ISCSI_MAX_TARGET) {
+		for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+			err = device_for_each_child(&shost->shost_gendev, &id,
+						    iscsi_get_next_target_id);
+			if (!err)
+				break;
+		}
+
+		if (id == ISCSI_MAX_TARGET) {
+			iscsi_cls_session_printk(KERN_ERR, session,
+						 "Too many iscsi targets. Max "
+						 "number of targets is %d.\n",
+						 ISCSI_MAX_TARGET - 1);
+			goto release_host;
+		}
+	}
+	session->target_id = id;
 
 	snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
 		 session->sid);
@@ -541,10 +612,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
 	list_add(&session->sess_list, &sesslist);
 	spin_unlock_irqrestore(&sesslock, flags);
 
-	mutex_lock(&ihost->mutex);
-	list_add(&session->host_list, &ihost->sessions);
-	mutex_unlock(&ihost->mutex);
-
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
 	return 0;
 
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 176353c117b6..13c92d7ba969 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -331,7 +331,7 @@ extern void iscsi_host_free(struct Scsi_Host *shost);
  */
 extern struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
-		    uint16_t, int, uint32_t);
+		    uint16_t, int, uint32_t, unsigned int);
 extern void iscsi_session_teardown(struct iscsi_cls_session *);
 extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
 extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 0553240796e9..d6b823195563 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -161,9 +161,10 @@ enum {
 	ISCSI_SESSION_FREE,
 };
 
+#define ISCSI_MAX_TARGET -1
+
 struct iscsi_cls_session {
 	struct list_head sess_list;		/* item in session_list */
-	struct list_head host_list;
 	struct iscsi_transport *transport;
 	spinlock_t lock;
 	struct work_struct block_work;
@@ -175,7 +176,7 @@ struct iscsi_cls_session {
 	int recovery_tmo;
 	struct delayed_work recovery_work;
 
-	int target_id;
+	unsigned int target_id;
 
 	int state;
 	int sid;				/* session id */
@@ -193,7 +194,6 @@ struct iscsi_cls_session {
 	iscsi_dev_to_session(_stgt->dev.parent)
 
 struct iscsi_cls_host {
-	struct list_head sessions;
 	atomic_t nr_scans;
 	struct mutex mutex;
 	struct workqueue_struct *scan_workq;
-- 
cgit v1.2.3
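
The pattern the patch above leans on is the generic driver-model child
walk: hand device_for_each_child() a callback that skips non-session
children via iscsi_is_session_dev(). As a sketch (the callback and
counter names are illustrative, not part of the patch):

	static int my_count_sessions(struct device *dev, void *data)
	{
		unsigned int *count = data;

		if (!iscsi_is_session_dev(dev))
			return 0;		/* not a session device, keep walking */
		(*count)++;
		return 0;			/* returning non-zero stops the walk */
	}

	static unsigned int my_session_count(struct Scsi_Host *shost)
	{
		unsigned int count = 0;

		device_for_each_child(&shost->shost_gendev, &count,
				      my_count_sessions);
		return count;
	}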


From d82ff9be733a2e6da4f6c2ab4e9216f3f536503d Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:13 -0500
Subject: [SCSI] iscsi class: add endpoint class

Add a sysfs representation for the endpoint so that userspace can match
the host and session to the endpoint. This will allow us to set the
host's parent correctly at host creation time.

The next patches will convert tcp and iser, and fix iser's dma_mask
bug.
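
As an illustration only (the driver and structure names below are
hypothetical), an offload driver's ep_connect would now allocate the
endpoint object and stash its private state in the dd_data area sized by
iscsi_create_endpoint():

	struct my_ep {
		struct sockaddr dst_addr;
	};

	static struct iscsi_endpoint *
	my_ep_connect(struct sockaddr *dst_addr, int non_blocking)
	{
		struct iscsi_endpoint *ep;
		struct my_ep *priv;

		ep = iscsi_create_endpoint(sizeof(struct my_ep));
		if (!ep)
			return ERR_PTR(-ENOMEM);

		priv = ep->dd_data;	/* per-endpoint area, sized by dd_size */
		memcpy(&priv->dst_addr, dst_addr, sizeof(*dst_addr));
		/* kick off the actual transport-level connect here */
		return ep;
	}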

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_transport_iscsi.c | 190 +++++++++++++++++++++++++++++-------
 include/scsi/iscsi_if.h             |   2 +-
 include/scsi/scsi_transport_iscsi.h |  19 +++-
 3 files changed, 172 insertions(+), 39 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index ac9d298f54e7..c3c07ccccca7 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -33,6 +33,7 @@
 #define ISCSI_SESSION_ATTRS 19
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
+
 #define ISCSI_TRANSPORT_VERSION "2.0-869"
 
 struct iscsi_internal {
@@ -112,6 +113,123 @@ static struct attribute_group iscsi_transport_group = {
 	.attrs = iscsi_transport_attrs,
 };
 
+/*
+ * iSCSI endpoint attrs
+ */
+#define iscsi_dev_to_endpoint(_dev) \
+	container_of(_dev, struct iscsi_endpoint, dev)
+
+#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store)	\
+struct device_attribute dev_attr_##_prefix##_##_name =	\
+        __ATTR(_name,_mode,_show,_store)
+
+static void iscsi_endpoint_release(struct device *dev)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	kfree(ep);
+}
+
+static struct class iscsi_endpoint_class = {
+	.name = "iscsi_endpoint",
+	.dev_release = iscsi_endpoint_release,
+};
+
+static ssize_t
+show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	return sprintf(buf, "%u\n", ep->id);
+}
+static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+
+static struct attribute *iscsi_endpoint_attrs[] = {
+	&dev_attr_ep_handle.attr,
+	NULL,
+};
+
+static struct attribute_group iscsi_endpoint_group = {
+	.attrs = iscsi_endpoint_attrs,
+};
+
+#define ISCSI_MAX_EPID -1
+
+static int iscsi_match_epid(struct device *dev, void *data)
+{
+	struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+	unsigned int *epid = (unsigned int *) data;
+
+	return *epid == ep->id;
+}
+
+struct iscsi_endpoint *
+iscsi_create_endpoint(int dd_size)
+{
+	struct device *dev;
+	struct iscsi_endpoint *ep;
+	unsigned int id;
+	int err;
+
+	for (id = 1; id < ISCSI_MAX_EPID; id++) {
+		dev = class_find_device(&iscsi_endpoint_class, &id,
+					iscsi_match_epid);
+		if (!dev)
+			break;
+	}
+	if (id == ISCSI_MAX_EPID) {
+		printk(KERN_ERR "Too many connections. Max supported %u\n",
+		       ISCSI_MAX_EPID - 1);
+		return NULL;
+	}
+
+	ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+	if (!ep)
+		return NULL;
+
+	ep->id = id;
+	ep->dev.class = &iscsi_endpoint_class;
+	snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+	err = device_register(&ep->dev);
+        if (err)
+                goto free_ep;
+
+	err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+	if (err)
+		goto unregister_dev;
+
+	if (dd_size)
+		ep->dd_data = &ep[1];
+	return ep;
+
+unregister_dev:
+	device_unregister(&ep->dev);
+	return NULL;
+
+free_ep:
+	kfree(ep);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+
+void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+{
+	sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+	device_unregister(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+
+struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+{
+	struct device *dev;
+
+	dev = class_find_device(&iscsi_endpoint_class, &handle,
+				iscsi_match_epid);
+	if (!dev)
+		return NULL;
+
+	return iscsi_dev_to_endpoint(dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
 static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
 			    struct device *cdev)
 {
@@ -1094,33 +1212,16 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 EXPORT_SYMBOL_GPL(iscsi_session_event);
 
 static int
-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev,
-			uint32_t host_no, uint32_t initial_cmdsn,
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+			struct iscsi_uevent *ev, uint32_t initial_cmdsn,
 			uint16_t cmds_max, uint16_t queue_depth)
 {
 	struct iscsi_transport *transport = priv->iscsi_transport;
 	struct iscsi_cls_session *session;
-	struct Scsi_Host *shost = NULL;
+	uint32_t host_no;
 
-	/*
-	 * Software iscsi allocates a host per session, but
-	 * offload drivers (and possibly iser one day) allocate a host per
-	 * hba/nic/rnic. Offload will match a host here, but software will
-	 * return a new hostno after the create_session callback has returned.
-	 */
-	if (host_no != UINT_MAX) {
-		shost = scsi_host_lookup(host_no);
-		if (IS_ERR(shost)) {
-			printk(KERN_ERR "Could not find host no %u to "
-			       "create session\n", host_no);
-			return -ENODEV;
-		}
-	}
-
-	session = transport->create_session(shost, cmds_max, queue_depth,
+	session = transport->create_session(ep, cmds_max, queue_depth,
 					    initial_cmdsn, &host_no);
-	if (shost)
-		scsi_host_put(shost);
 	if (!session)
 		return -ENOMEM;
 
@@ -1199,6 +1300,7 @@ static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
 		      struct iscsi_uevent *ev, int msg_type)
 {
+	struct iscsi_endpoint *ep;
 	struct sockaddr *dst_addr;
 	int rc = 0;
 
@@ -1208,22 +1310,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
 			return -EINVAL;
 
 		dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
-		rc = transport->ep_connect(dst_addr,
-					   ev->u.ep_connect.non_blocking,
-					   &ev->r.ep_connect_ret.handle);
+		ep = transport->ep_connect(dst_addr,
+					   ev->u.ep_connect.non_blocking);
+		if (IS_ERR(ep))
+			return PTR_ERR(ep);
+
+		ev->r.ep_connect_ret.handle = ep->id;
 		break;
 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
 		if (!transport->ep_poll)
 			return -EINVAL;
 
-		ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+		ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+		if (!ep)
+			return -EINVAL;
+
+		ev->r.retcode = transport->ep_poll(ep,
 						   ev->u.ep_poll.timeout_ms);
 		break;
 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
 		if (!transport->ep_disconnect)
 			return -EINVAL;
 
-		transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+		ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+		if (!ep)
+			return -EINVAL;
+
+		transport->ep_disconnect(ep);
 		break;
 	}
 	return rc;
@@ -1283,12 +1396,12 @@ static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int err = 0;
-	uint32_t host_no = UINT_MAX;
 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
 	struct iscsi_transport *transport = NULL;
 	struct iscsi_internal *priv;
 	struct iscsi_cls_session *session;
 	struct iscsi_cls_conn *conn;
+	struct iscsi_endpoint *ep = NULL;
 
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
@@ -1302,14 +1415,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
-		err = iscsi_if_create_session(priv, ev, host_no,
+		err = iscsi_if_create_session(priv, ep, ev,
 					      ev->u.c_session.initial_cmdsn,
 					      ev->u.c_session.cmds_max,
 					      ev->u.c_session.queue_depth);
 		break;
 	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
-		err = iscsi_if_create_session(priv, ev,
-					ev->u.c_bound_session.host_no,
+		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+		if (!ep)
+			return -EINVAL;
+
+		err = iscsi_if_create_session(priv, ep, ev,
 					ev->u.c_bound_session.initial_cmdsn,
 					ev->u.c_bound_session.cmds_max,
 					ev->u.c_bound_session.queue_depth);
@@ -1774,6 +1890,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 
 unregister_dev:
 	device_unregister(&priv->dev);
+	return NULL;
 free_priv:
 	kfree(priv);
 	return NULL;
@@ -1821,10 +1938,14 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		return err;
 
-	err = transport_class_register(&iscsi_host_class);
+	err = class_register(&iscsi_endpoint_class);
 	if (err)
 		goto unregister_transport_class;
 
+	err = transport_class_register(&iscsi_host_class);
+	if (err)
+		goto unregister_endpoint_class;
+
 	err = transport_class_register(&iscsi_connection_class);
 	if (err)
 		goto unregister_host_class;
@@ -1833,8 +1954,8 @@ static __init int iscsi_transport_init(void)
 	if (err)
 		goto unregister_conn_class;
 
-	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
-			THIS_MODULE);
+	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+				    NULL, THIS_MODULE);
 	if (!nls) {
 		err = -ENOBUFS;
 		goto unregister_session_class;
@@ -1854,6 +1975,8 @@ unregister_conn_class:
 	transport_class_unregister(&iscsi_connection_class);
 unregister_host_class:
 	transport_class_unregister(&iscsi_host_class);
+unregister_endpoint_class:
+	class_unregister(&iscsi_endpoint_class);
 unregister_transport_class:
 	class_unregister(&iscsi_transport_class);
 	return err;
@@ -1866,6 +1989,7 @@ static void __exit iscsi_transport_exit(void)
 	transport_class_unregister(&iscsi_connection_class);
 	transport_class_unregister(&iscsi_session_class);
 	transport_class_unregister(&iscsi_host_class);
+	class_unregister(&iscsi_endpoint_class);
 	class_unregister(&iscsi_transport_class);
 }
 
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 801a677777cc..a0f13a280e71 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -80,7 +80,7 @@ struct iscsi_uevent {
 			uint16_t	queue_depth;
 		} c_session;
 		struct msg_create_bound_session {
-			uint32_t	host_no;
+			uint64_t	ep_handle;
 			uint32_t	initial_cmdsn;
 			uint16_t	cmds_max;
 			uint16_t	queue_depth;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index d6b823195563..f5444e033cc9 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -30,6 +30,7 @@
 
 struct scsi_transport_template;
 struct iscsi_transport;
+struct iscsi_endpoint;
 struct Scsi_Host;
 struct iscsi_cls_conn;
 struct iscsi_conn;
@@ -85,7 +86,7 @@ struct iscsi_transport {
 	/* LLD sets this to indicate what values it can export to sysfs */
 	uint64_t param_mask;
 	uint64_t host_param_mask;
-	struct iscsi_cls_session *(*create_session) (struct Scsi_Host *shost,
+	struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
 					uint16_t cmds_max, uint16_t qdepth,
 					uint32_t sn, uint32_t *hn);
 	void (*destroy_session) (struct iscsi_cls_session *session);
@@ -117,10 +118,10 @@ struct iscsi_transport {
 	void (*cleanup_task) (struct iscsi_conn *conn,
 				  struct iscsi_task *task);
 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
-	int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
-			   uint64_t *ep_handle);
-	int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
-	void (*ep_disconnect) (uint64_t ep_handle);
+	struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+					      int non_blocking);
+	int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+	void (*ep_disconnect) (struct iscsi_endpoint *ep);
 	int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
 			  uint32_t enable, struct sockaddr *dst_addr);
 };
@@ -203,6 +204,11 @@ struct iscsi_cls_host {
 extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 				void (*fn)(struct iscsi_cls_session *));
 
+struct iscsi_endpoint {
+	void *dd_data;			/* LLD private data */
+	struct device dev;
+	unsigned int id;
+};
 
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
@@ -233,5 +239,8 @@ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
 extern void iscsi_unblock_session(struct iscsi_cls_session *session);
 extern void iscsi_block_session(struct iscsi_cls_session *session);
 extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
 
 #endif
-- 
cgit v1.2.3


From 06520edea0fc7007985fa4cd51560149feb3f442 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:15 -0500
Subject: [SCSI] iscsi_tcp: hook iscsi_tcp into iscsi_endpoint code

iscsi_tcp creates its ep in userspace using sockets because
it is virtual, so we just check whether an ep was passed in
and fail if one was.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/iscsi_tcp.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index aa3c7f0c550d..92d031959002 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1838,17 +1838,17 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
 }
 
 static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct Scsi_Host *shost, uint16_t cmds_max,
+iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 			 uint16_t qdepth, uint32_t initial_cmdsn,
 			 uint32_t *hostno)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
+	struct Scsi_Host *shost;
 	int cmd_i;
 
-	if (shost) {
-		printk(KERN_ERR "iscsi_tcp: invalid shost %d.\n",
-		       shost->host_no);
+	if (ep) {
+		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
 		return NULL;
 	}
 
-- 
cgit v1.2.3


From 88dfd340b9dece8fcaa1a2d4c782338926c017f7 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:16 -0500
Subject: [SCSI] iscsi class: Add session initiatorname and ifacename sysfs
 attrs.

This adds two new attrs used for creating initiator ports and
binding sessions to hardware.

The session level initiatorname:

Since bnx2i does a scsi_host per host device, we need to add the
iface initiator port settings on the session, so we can create
multiple initiator ports (each with different inames) per device/scsi_host.

The current iname reflects that qla4xxx can have one iname per hba, and that
we allocate a host per session for software iscsi. The iname on the host will
remain so we can export and set the hba-level qla4xxx setting.

The ifacename attr:

To bind a session to some piece of hardware in userspace we maintain
some mappings, but during boot or an iscsid restart (iscsid contains the
userspace part of the driver) we need to be able to figure out which of
those host mapping abstractions maps to which sessions. This patch adds
an ifacename attr, which userspace can set to identify the host side of
the endpoint across pivot_roots and iscsid restarts.
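
As a usage note (the sysfs path below is shown only as an illustration of
how the session class typically exposes these attrs): once set, userspace
should be able to read the value back from something like
/sys/class/iscsi_session/session<N>/ifacename to re-establish the
session-to-iface mapping after a restart.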

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c |  3 +-
 drivers/scsi/iscsi_tcp.c                 |  3 +-
 drivers/scsi/libiscsi.c                  | 20 ++++++++
 drivers/scsi/scsi_transport_iscsi.c      |  6 ++-
 include/scsi/iscsi_if.h                  | 79 +++++++++++++++++---------------
 include/scsi/libiscsi.h                  |  2 +
 6 files changed, 74 insertions(+), 39 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 42e95b833092..08edbaf89223 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -629,7 +629,8 @@ static struct iscsi_transport iscsi_iser_transport = {
 				  ISCSI_USERNAME | ISCSI_PASSWORD |
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS |
 				  ISCSI_HOST_NETDEV_NAME |
 				  ISCSI_HOST_INITIATOR_NAME,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 92d031959002..7552dd8a88f3 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1957,7 +1957,8 @@ static struct iscsi_transport iscsi_tcp_transport = {
 				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
 				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
 				  ISCSI_LU_RESET_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO,
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
 	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
 				  ISCSI_HOST_INITIATOR_NAME |
 				  ISCSI_HOST_NETDEV_NAME,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e88b726ab2e0..c1af2aa8e4e0 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1990,6 +1990,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
 	kfree(session->username);
 	kfree(session->username_in);
 	kfree(session->targetname);
+	kfree(session->initiatorname);
+	kfree(session->ifacename);
 
 	iscsi_destroy_session(cls_session);
 	module_put(owner);
@@ -2453,6 +2455,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		if (!conn->persistent_address)
 			return -ENOMEM;
 		break;
+	case ISCSI_PARAM_IFACE_NAME:
+		if (!session->ifacename)
+			session->ifacename = kstrdup(buf, GFP_KERNEL);
+		break;
+	case ISCSI_PARAM_INITIATOR_NAME:
+		if (!session->initiatorname)
+			session->initiatorname = kstrdup(buf, GFP_KERNEL);
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -2519,6 +2529,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
 	case ISCSI_PARAM_PASSWORD_IN:
 		len = sprintf(buf, "%s\n", session->password_in);
 		break;
+	case ISCSI_PARAM_IFACE_NAME:
+		len = sprintf(buf, "%s\n", session->ifacename);
+		break;
+	case ISCSI_PARAM_INITIATOR_NAME:
+		if (!session->initiatorname)
+			len = sprintf(buf, "%s\n", "unknown");
+		else
+			len = sprintf(buf, "%s\n", session->initiatorname);
+		break;
 	default:
 		return -ENOSYS;
 	}
@@ -2606,6 +2625,7 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 		else
 			len = sprintf(buf, "%s\n",
 				      ihost->local_address);
+		break;
 	default:
 		return -ENOSYS;
 	}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index c3c07ccccca7..9fd5c6d87ed1 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,7 +30,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 #include <scsi/iscsi_if.h>
 
-#define ISCSI_SESSION_ATTRS 19
+#define ISCSI_SESSION_ATTRS 21
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
 
@@ -1634,6 +1634,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
 
 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1875,6 +1877,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
 	SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
 	SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+	SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+	SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
 	SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
 	SETUP_PRIV_SESSION_RD_ATTR(state);
 
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index a0f13a280e71..16be12f1cbe8 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -257,42 +257,49 @@ enum iscsi_param {
 
 	ISCSI_PARAM_PING_TMO,
 	ISCSI_PARAM_RECV_TMO,
+
+	ISCSI_PARAM_IFACE_NAME,
+	ISCSI_PARAM_ISID,
+	ISCSI_PARAM_INITIATOR_NAME,
 	/* must always be last */
 	ISCSI_PARAM_MAX,
 };
 
-#define ISCSI_MAX_RECV_DLENGTH		(1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
-#define ISCSI_MAX_XMIT_DLENGTH		(1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
-#define ISCSI_HDRDGST_EN		(1 << ISCSI_PARAM_HDRDGST_EN)
-#define ISCSI_DATADGST_EN		(1 << ISCSI_PARAM_DATADGST_EN)
-#define ISCSI_INITIAL_R2T_EN		(1 << ISCSI_PARAM_INITIAL_R2T_EN)
-#define ISCSI_MAX_R2T			(1 << ISCSI_PARAM_MAX_R2T)
-#define ISCSI_IMM_DATA_EN		(1 << ISCSI_PARAM_IMM_DATA_EN)
-#define ISCSI_FIRST_BURST		(1 << ISCSI_PARAM_FIRST_BURST)
-#define ISCSI_MAX_BURST			(1 << ISCSI_PARAM_MAX_BURST)
-#define ISCSI_PDU_INORDER_EN		(1 << ISCSI_PARAM_PDU_INORDER_EN)
-#define ISCSI_DATASEQ_INORDER_EN	(1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
-#define ISCSI_ERL			(1 << ISCSI_PARAM_ERL)
-#define ISCSI_IFMARKER_EN		(1 << ISCSI_PARAM_IFMARKER_EN)
-#define ISCSI_OFMARKER_EN		(1 << ISCSI_PARAM_OFMARKER_EN)
-#define ISCSI_EXP_STATSN		(1 << ISCSI_PARAM_EXP_STATSN)
-#define ISCSI_TARGET_NAME		(1 << ISCSI_PARAM_TARGET_NAME)
-#define ISCSI_TPGT			(1 << ISCSI_PARAM_TPGT)
-#define ISCSI_PERSISTENT_ADDRESS	(1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
-#define ISCSI_PERSISTENT_PORT		(1 << ISCSI_PARAM_PERSISTENT_PORT)
-#define ISCSI_SESS_RECOVERY_TMO		(1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
-#define ISCSI_CONN_PORT			(1 << ISCSI_PARAM_CONN_PORT)
-#define ISCSI_CONN_ADDRESS		(1 << ISCSI_PARAM_CONN_ADDRESS)
-#define ISCSI_USERNAME			(1 << ISCSI_PARAM_USERNAME)
-#define ISCSI_USERNAME_IN		(1 << ISCSI_PARAM_USERNAME_IN)
-#define ISCSI_PASSWORD			(1 << ISCSI_PARAM_PASSWORD)
-#define ISCSI_PASSWORD_IN		(1 << ISCSI_PARAM_PASSWORD_IN)
-#define ISCSI_FAST_ABORT		(1 << ISCSI_PARAM_FAST_ABORT)
-#define ISCSI_ABORT_TMO			(1 << ISCSI_PARAM_ABORT_TMO)
-#define ISCSI_LU_RESET_TMO		(1 << ISCSI_PARAM_LU_RESET_TMO)
-#define ISCSI_HOST_RESET_TMO		(1 << ISCSI_PARAM_HOST_RESET_TMO)
-#define ISCSI_PING_TMO			(1 << ISCSI_PARAM_PING_TMO)
-#define ISCSI_RECV_TMO			(1 << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_MAX_RECV_DLENGTH		(1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+#define ISCSI_MAX_XMIT_DLENGTH		(1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+#define ISCSI_HDRDGST_EN		(1ULL << ISCSI_PARAM_HDRDGST_EN)
+#define ISCSI_DATADGST_EN		(1ULL << ISCSI_PARAM_DATADGST_EN)
+#define ISCSI_INITIAL_R2T_EN		(1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+#define ISCSI_MAX_R2T			(1ULL << ISCSI_PARAM_MAX_R2T)
+#define ISCSI_IMM_DATA_EN		(1ULL << ISCSI_PARAM_IMM_DATA_EN)
+#define ISCSI_FIRST_BURST		(1ULL << ISCSI_PARAM_FIRST_BURST)
+#define ISCSI_MAX_BURST			(1ULL << ISCSI_PARAM_MAX_BURST)
+#define ISCSI_PDU_INORDER_EN		(1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+#define ISCSI_DATASEQ_INORDER_EN	(1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+#define ISCSI_ERL			(1ULL << ISCSI_PARAM_ERL)
+#define ISCSI_IFMARKER_EN		(1ULL << ISCSI_PARAM_IFMARKER_EN)
+#define ISCSI_OFMARKER_EN		(1ULL << ISCSI_PARAM_OFMARKER_EN)
+#define ISCSI_EXP_STATSN		(1ULL << ISCSI_PARAM_EXP_STATSN)
+#define ISCSI_TARGET_NAME		(1ULL << ISCSI_PARAM_TARGET_NAME)
+#define ISCSI_TPGT			(1ULL << ISCSI_PARAM_TPGT)
+#define ISCSI_PERSISTENT_ADDRESS	(1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+#define ISCSI_PERSISTENT_PORT		(1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+#define ISCSI_SESS_RECOVERY_TMO		(1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+#define ISCSI_CONN_PORT			(1ULL << ISCSI_PARAM_CONN_PORT)
+#define ISCSI_CONN_ADDRESS		(1ULL << ISCSI_PARAM_CONN_ADDRESS)
+#define ISCSI_USERNAME			(1ULL << ISCSI_PARAM_USERNAME)
+#define ISCSI_USERNAME_IN		(1ULL << ISCSI_PARAM_USERNAME_IN)
+#define ISCSI_PASSWORD			(1ULL << ISCSI_PARAM_PASSWORD)
+#define ISCSI_PASSWORD_IN		(1ULL << ISCSI_PARAM_PASSWORD_IN)
+#define ISCSI_FAST_ABORT		(1ULL << ISCSI_PARAM_FAST_ABORT)
+#define ISCSI_ABORT_TMO			(1ULL << ISCSI_PARAM_ABORT_TMO)
+#define ISCSI_LU_RESET_TMO		(1ULL << ISCSI_PARAM_LU_RESET_TMO)
+#define ISCSI_HOST_RESET_TMO		(1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+#define ISCSI_PING_TMO			(1ULL << ISCSI_PARAM_PING_TMO)
+#define ISCSI_RECV_TMO			(1ULL << ISCSI_PARAM_RECV_TMO)
+#define ISCSI_IFACE_NAME		(1ULL << ISCSI_PARAM_IFACE_NAME)
+#define ISCSI_ISID			(1ULL << ISCSI_PARAM_ISID)
+#define ISCSI_INITIATOR_NAME		(1ULL << ISCSI_PARAM_INITIATOR_NAME)
 
 /* iSCSI HBA params */
 enum iscsi_host_param {
@@ -303,10 +310,10 @@ enum iscsi_host_param {
 	ISCSI_HOST_PARAM_MAX,
 };
 
-#define ISCSI_HOST_HWADDRESS		(1 << ISCSI_HOST_PARAM_HWADDRESS)
-#define ISCSI_HOST_INITIATOR_NAME	(1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
-#define ISCSI_HOST_NETDEV_NAME		(1 << ISCSI_HOST_PARAM_NETDEV_NAME)
-#define ISCSI_HOST_IPADDRESS		(1 << ISCSI_HOST_PARAM_IPADDRESS)
+#define ISCSI_HOST_HWADDRESS		(1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+#define ISCSI_HOST_INITIATOR_NAME	(1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+#define ISCSI_HOST_NETDEV_NAME		(1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+#define ISCSI_HOST_IPADDRESS		(1ULL << ISCSI_HOST_PARAM_IPADDRESS)
 
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 13c92d7ba969..21cfb1d5483f 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -270,6 +270,8 @@ struct iscsi_session {
 	char			*password;
 	char			*password_in;
 	char			*targetname;
+	char			*ifacename;
+	char			*initiatorname;
 	/* control data */
 	struct iscsi_transport	*tt;
 	struct Scsi_Host	*host;
-- 
cgit v1.2.3


From 3cf7b233ffc45d4fc381221f74d24f10e692c4ea Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:17 -0500
Subject: [SCSI] libiscsi: fix cmds_max setting

Drivers expect that the cmds_max value they pass to the iscsi layer
is the max number of scsi commands + mgmt tasks. This patch implements
that and fixes some of the checks on the command count limits.
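
A minimal sketch of the new contract from the driver side (my_transport,
shost and my_task_data are placeholders, not real code): the value passed
as cmds_max now covers both SCSI and management tasks, so a power of two
such as ISCSI_DEF_XMIT_CMDS_MAX leaves ISCSI_MGMT_CMDS_MAX slots for nops
and other mgmt tasks and the rest for SCSI I/O:

	/* Hypothetical call site; names are placeholders for LLD objects. */
	cls_session = iscsi_session_setup(&my_transport, shost,
					  ISCSI_DEF_XMIT_CMDS_MAX, /* scsi + mgmt */
					  sizeof(struct my_task_data),
					  initial_cmdsn, 0 /* session id */);
	if (!cls_session)
		return NULL;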

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/libiscsi.c | 49 ++++++++++++++++++++++++++++++++++---------------
 include/scsi/libiscsi.h |  9 +++++----
 2 files changed, 39 insertions(+), 19 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c1af2aa8e4e0..c723e60f02b0 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1893,29 +1893,48 @@ EXPORT_SYMBOL_GPL(iscsi_host_free);
  *
  * This can be used by software iscsi_transports that allocate
  * a session per scsi host.
+ *
+ * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+ * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+ * for nop handling and login/logout requests.
  */
 struct iscsi_cls_session *
 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
-		    uint16_t scsi_cmds_max, int cmd_task_size,
+		    uint16_t cmds_max, int cmd_task_size,
 		    uint32_t initial_cmdsn, unsigned int id)
 {
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
-	int cmd_i, cmds_max;
-
+	int cmd_i, scsi_cmds, total_cmds = cmds_max;
 	/*
-	 * The iscsi layer needs some tasks for nop handling and tmfs.
+	 * The iscsi layer needs some tasks for nop handling and tmfs,
+	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+	 * + 1 command for scsi IO.
 	 */
-	if (scsi_cmds_max < 1)
-		scsi_cmds_max = ISCSI_MGMT_CMDS_MAX;
-	if ((scsi_cmds_max + ISCSI_MGMT_CMDS_MAX) >= ISCSI_MGMT_ITT_OFFSET) {
-		printk(KERN_ERR "iscsi: invalid can_queue of %d. "
-		       "can_queue must be less than %d.\n",
-		       scsi_cmds_max,
-		       ISCSI_MGMT_ITT_OFFSET - ISCSI_MGMT_CMDS_MAX);
-		scsi_cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of two that is at least %d.\n",
+		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
+		return NULL;
+	}
+
+	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of 2 less than or equal to %d.\n",
+		       cmds_max, ISCSI_TOTAL_CMDS_MAX);
+		total_cmds = ISCSI_TOTAL_CMDS_MAX;
+	}
+
+	if (!is_power_of_2(total_cmds)) {
+		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+		       "must be a power of 2.\n", total_cmds);
+		total_cmds = rounddown_pow_of_two(total_cmds);
+		if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+			return NULL;
+		printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+		       total_cmds);
 	}
-	cmds_max = roundup_pow_of_two(scsi_cmds_max + ISCSI_MGMT_CMDS_MAX);
+	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
 
 	cls_session = iscsi_alloc_session(shost, iscsit,
 					  sizeof(struct iscsi_session));
@@ -1928,8 +1947,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	session->fast_abort = 1;
 	session->lu_reset_timeout = 15;
 	session->abort_timeout = 10;
-	session->scsi_cmds_max = scsi_cmds_max;
-	session->cmds_max = cmds_max;
+	session->scsi_cmds_max = scsi_cmds;
+	session->cmds_max = total_cmds;
 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
 	session->exp_cmdsn = initial_cmdsn + 1;
 	session->max_cmdsn = initial_cmdsn + 1;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 21cfb1d5483f..5bf0187e7520 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -52,9 +52,7 @@ struct device;
 #endif
 
 #define ISCSI_DEF_XMIT_CMDS_MAX	128	/* must be power of 2 */
-#define ISCSI_MGMT_CMDS_MAX	16	/* must be power of 2 */
-
-#define ISCSI_MGMT_ITT_OFFSET	0xa00
+#define ISCSI_MGMT_CMDS_MAX	15
 
 #define ISCSI_DEF_CMD_PER_LUN		32
 #define ISCSI_MAX_CMD_PER_LUN		128
@@ -72,7 +70,10 @@ enum {
 /* Connection suspend "bit" */
 #define ISCSI_SUSPEND_BIT		1
 
-#define ISCSI_ITT_MASK			(0xfff)
+#define ISCSI_ITT_MASK			(0x1fff)
+#define ISCSI_TOTAL_CMDS_MAX		4096
+/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+#define ISCSI_TOTAL_CMDS_MIN		16
 #define ISCSI_AGE_SHIFT			28
 #define ISCSI_AGE_MASK			(0xf << ISCSI_AGE_SHIFT)
 
-- 
cgit v1.2.3


From 913e5bf435617aa529919a4f7567f849f9f35f9f Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Wed, 21 May 2008 15:54:18 -0500
Subject: [SCSI] libiscsi, iser, tcp: remove recv_lock

The recv lock was defined so the iscsi layer could block
the recv path from processing IO during recovery. It turns
out iser just assigned a lock to that pointer, which was pointless.

We now disconnect the transport connection before doing recovery,
so we do not need the recv lock. For iscsi_tcp we still stop
the recv path in case older tools are being used.

This patch also has callers of iscsi_itt_to_ctask grab the session
lock, and has the caller either access the task with the lock held
or take a ref to it, in case the target is broken and sends a tmf
success response and then sends data or a response for the command
that was supposed to be affected by the tmf.
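
As a sketch of the resulting lookup pattern in an LLD recv path (the
surrounding function and variables are omitted), the task is now looked
up and pinned under the session lock and released once processing is done:

	spin_lock(&conn->session->lock);
	task = iscsi_itt_to_ctask(conn, hdr->itt);
	if (task)
		__iscsi_get_task(task);	/* pin the task across the unlock */
	spin_unlock(&conn->session->lock);

	if (task) {
		/* ... process the response against the task ... */
		iscsi_put_task(task);
	}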

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c     |  20 ++--
 drivers/infiniband/ulp/iser/iscsi_iser.h     |   2 -
 drivers/infiniband/ulp/iser/iser_initiator.c |   6 ++
 drivers/scsi/iscsi_tcp.c                     |  73 +++++++------
 drivers/scsi/libiscsi.c                      | 152 +++++++++++++++------------
 include/scsi/libiscsi.h                      |   8 +-
 6 files changed, 144 insertions(+), 117 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 08edbaf89223..c02eabd383a1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -281,9 +281,6 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
 	conn->max_recv_dlength = 128;
 
 	iser_conn = conn->dd_data;
-	/* currently this is the only field which need to be initiated */
-	rwlock_init(&iser_conn->lock);
-
 	conn->dd_data = iser_conn;
 	iser_conn->iscsi_conn = conn;
 
@@ -342,9 +339,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
 	iser_conn_get(ib_conn);
-
-	conn->recv_lock = &iser_conn->lock;
-
 	return 0;
 }
 
@@ -355,12 +349,18 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 	struct iser_conn *ib_conn = iser_conn->ib_conn;
 
-	iscsi_conn_stop(cls_conn, flag);
 	/*
-	 * There is no unbind event so the stop callback
-	 * must release the ref from the bind.
+	 * Userspace may have goofed up and not bound the connection or
+	 * might have only partially setup the connection.
 	 */
-	iser_conn_put(ib_conn);
+	if (ib_conn) {
+		iscsi_conn_stop(cls_conn, flag);
+		/*
+		 * There is no unbind event so the stop callback
+		 * must release the ref from the bind.
+		 */
+		iser_conn_put(ib_conn);
+	}
 	iser_conn->ib_conn = NULL;
 }
 
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cdf48763b082..a547edeea969 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -263,8 +263,6 @@ struct iser_conn {
 struct iscsi_iser_conn {
 	struct iscsi_conn            *iscsi_conn;/* ptr to iscsi conn */
 	struct iser_conn             *ib_conn;   /* iSER IB conn      */
-
-	rwlock_t		     lock;
 };
 
 struct iscsi_iser_task {
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 35af60a23c61..c36083922134 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -558,7 +558,12 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
 
 	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+		spin_lock(&conn->iscsi_conn->session->lock);
 		task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+		if (task)
+			__iscsi_get_task(task);
+		spin_unlock(&conn->iscsi_conn->session->lock);
+
 		if (!task)
 			iser_err("itt can't be matched to task!!! "
 				 "conn %p opcode %d itt %d\n",
@@ -568,6 +573,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 			iser_dbg("itt %d task %p\n",hdr->itt, task);
 			iser_task->status = ISER_TASK_STATUS_COMPLETED;
 			iser_task_rdma_finalize(iser_task);
+			iscsi_put_task(task);
 		}
 	}
 	iser_dto_buffs_release(dto);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7552dd8a88f3..91cb1fd523f0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -741,7 +741,6 @@ static int
 iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
 	int rc = 0, opcode, ahslen;
-	struct iscsi_session *session = conn->session;
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 	struct iscsi_task *task;
 
@@ -770,17 +769,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
 	switch(opcode) {
 	case ISCSI_OP_SCSI_DATA_IN:
+		spin_lock(&conn->session->lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!task)
-			return ISCSI_ERR_BAD_ITT;
-		if (!task->sc)
-			return ISCSI_ERR_NO_SCSI_CMD;
+			rc = ISCSI_ERR_BAD_ITT;
+		else
+			rc = iscsi_data_rsp(conn, task);
+		if (rc) {
+			spin_unlock(&conn->session->lock);
+			break;
+		}
 
-		spin_lock(&conn->session->lock);
-		rc = iscsi_data_rsp(conn, task);
-		spin_unlock(&conn->session->lock);
-		if (rc)
-			return rc;
 		if (tcp_conn->in.datalen) {
 			struct iscsi_tcp_task *tcp_task = task->dd_data;
 			struct hash_desc *rx_hash = NULL;
@@ -801,15 +800,19 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 				  "datalen=%d)\n", tcp_conn,
 				  tcp_task->data_offset,
 				  tcp_conn->in.datalen);
-			return iscsi_segment_seek_sg(&tcp_conn->in.segment,
-						     sdb->table.sgl,
-						     sdb->table.nents,
-						     tcp_task->data_offset,
-						     tcp_conn->in.datalen,
-						     iscsi_tcp_process_data_in,
-						     rx_hash);
+			rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+						   sdb->table.sgl,
+						   sdb->table.nents,
+						   tcp_task->data_offset,
+						   tcp_conn->in.datalen,
+						   iscsi_tcp_process_data_in,
+						   rx_hash);
+			spin_unlock(&conn->session->lock);
+			return rc;
 		}
-		/* fall through */
+		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+		spin_unlock(&conn->session->lock);
+		break;
 	case ISCSI_OP_SCSI_CMD_RSP:
 		if (tcp_conn->in.datalen) {
 			iscsi_tcp_data_recv_prep(tcp_conn);
@@ -818,20 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
+		spin_lock(&conn->session->lock);
 		task = iscsi_itt_to_ctask(conn, hdr->itt);
 		if (!task)
-			return ISCSI_ERR_BAD_ITT;
-		if (!task->sc)
-			return ISCSI_ERR_NO_SCSI_CMD;
-
-		if (ahslen)
+			rc = ISCSI_ERR_BAD_ITT;
+		else if (ahslen)
 			rc = ISCSI_ERR_AHSLEN;
-		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
-			spin_lock(&session->lock);
+		else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
 			rc = iscsi_r2t_rsp(conn, task);
-			spin_unlock(&session->lock);
-		} else
+		else
 			rc = ISCSI_ERR_PROTO;
+		spin_unlock(&conn->session->lock);
 		break;
 	case ISCSI_OP_LOGIN_RSP:
 	case ISCSI_OP_TEXT_RSP:
@@ -1553,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
 
 	spin_lock_bh(&session->lock);
 	tcp_conn->sock = NULL;
-	conn->recv_lock = NULL;
 	spin_unlock_bh(&session->lock);
 	sockfd_put(sock);
 }
@@ -1578,6 +1577,19 @@ static void
 iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
 	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+	/* userspace may have goofed up and not bound us */
+	if (!tcp_conn->sock)
+		return;
+	/*
+	 * Make sure our recv side is stopped.
+	 * Older tools called conn stop before ep_disconnect
+	 * so IO could still be coming in.
+	 */
+	write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+	write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
 
 	iscsi_conn_stop(cls_conn, flag);
 	iscsi_tcp_release_conn(conn);
@@ -1671,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;
 
-	/* FIXME: disable Nagle's algorithm */
-
-	/*
-	 * Intercept TCP callbacks for sendfile like receive
-	 * processing.
-	 */
-	conn->recv_lock = &sk->sk_callback_lock;
 	iscsi_conn_set_callbacks(conn);
 	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
 	/*
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c723e60f02b0..9c267b440444 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -362,10 +362,11 @@ static void iscsi_complete_command(struct iscsi_task *task)
 	}
 }
 
-static void __iscsi_get_task(struct iscsi_task *task)
+void __iscsi_get_task(struct iscsi_task *task)
 {
 	atomic_inc(&task->refcount);
 }
+EXPORT_SYMBOL_GPL(__iscsi_get_task);
 
 static void __iscsi_put_task(struct iscsi_task *task)
 {
@@ -403,9 +404,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
 		conn->session->queued_cmdsn--;
 	else
 		conn->session->tt->cleanup_task(conn, task);
+	/*
+	 * Check if cleanup_task dropped the lock and the command completed,
+	 */
+	if (!task->sc)
+		return;
 
 	sc->result = err;
-
 	if (!scsi_bidi_cmnd(sc))
 		scsi_set_resid(sc, scsi_bufflen(sc));
 	else {
@@ -696,6 +701,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	return 0;
 }
 
+/**
+ * iscsi_itt_to_task - look up task by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for mgmt tasks like login and nops, or if
+ * the LDD's itt space does not include the session age.
+ *
+ * The session lock must be held.
+ */
+static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+{
+	struct iscsi_session *session = conn->session;
+	uint32_t i;
+
+	if (itt == RESERVED_ITT)
+		return NULL;
+
+	i = get_itt(itt);
+	if (i >= session->cmds_max)
+		return NULL;
+
+	return session->cmds[i];
+}
+
 /**
  * __iscsi_complete_pdu - complete pdu
  * @conn: iscsi conn
@@ -707,8 +737,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
  * queuecommand or send generic. session lock must be held and verify
  * itt must have been called.
  */
-static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
-				char *data, int datalen)
+int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+			 char *data, int datalen)
 {
 	struct iscsi_session *session = conn->session;
 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
@@ -758,22 +788,36 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		goto out;
 	}
 
-	task = session->cmds[itt];
 	switch(opcode) {
 	case ISCSI_OP_SCSI_CMD_RSP:
-		if (!task->sc) {
-			rc = ISCSI_ERR_NO_SCSI_CMD;
-			break;
-		}
-		BUG_ON((void*)task != task->sc->SCp.ptr);
+	case ISCSI_OP_SCSI_DATA_IN:
+		task = iscsi_itt_to_ctask(conn, hdr->itt);
+		if (!task)
+			return ISCSI_ERR_BAD_ITT;
+		break;
+	case ISCSI_OP_R2T:
+		/*
+		 * LLD handles R2Ts if they need to.
+		 */
+		return 0;
+	case ISCSI_OP_LOGOUT_RSP:
+	case ISCSI_OP_LOGIN_RSP:
+	case ISCSI_OP_TEXT_RSP:
+	case ISCSI_OP_SCSI_TMFUNC_RSP:
+	case ISCSI_OP_NOOP_IN:
+		task = iscsi_itt_to_task(conn, hdr->itt);
+		if (!task)
+			return ISCSI_ERR_BAD_ITT;
+		break;
+	default:
+		return ISCSI_ERR_BAD_OPCODE;
+	}
+
+	switch(opcode) {
+	case ISCSI_OP_SCSI_CMD_RSP:
 		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
 		break;
 	case ISCSI_OP_SCSI_DATA_IN:
-		if (!task->sc) {
-			rc = ISCSI_ERR_NO_SCSI_CMD;
-			break;
-		}
-		BUG_ON((void*)task != task->sc->SCp.ptr);
 		if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
 			conn->scsirsp_pdus_cnt++;
 			iscsi_update_cmdsn(session,
@@ -781,9 +825,6 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			__iscsi_put_task(task);
 		}
 		break;
-	case ISCSI_OP_R2T:
-		/* LLD handles this for now */
-		break;
 	case ISCSI_OP_LOGOUT_RSP:
 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
 		if (datalen) {
@@ -841,6 +882,7 @@ recv_pdu:
 	__iscsi_put_task(task);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
 
 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		       char *data, int datalen)
@@ -857,7 +899,6 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_task *task;
 	uint32_t i;
 
 	if (itt == RESERVED_ITT)
@@ -867,8 +908,7 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 	    (session->age << ISCSI_AGE_SHIFT)) {
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "received itt %x expected session age (%x)\n",
-				  (__force u32)itt,
-				  session->age & ISCSI_AGE_MASK);
+				  (__force u32)itt, session->age);
 		return ISCSI_ERR_BAD_ITT;
 	}
 
@@ -879,42 +919,36 @@ int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
 				   "%u.\n", i, session->cmds_max);
 		return ISCSI_ERR_BAD_ITT;
 	}
-
-	task = session->cmds[i];
-	if (task->sc && task->sc->SCp.phase != session->age) {
-		iscsi_conn_printk(KERN_ERR, conn,
-				  "iscsi: task's session age %d, "
-				  "expected %d\n", task->sc->SCp.phase,
-				  session->age);
-		return ISCSI_ERR_SESSION_FAILED;
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
 
-struct iscsi_task *
-iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+/**
+ * iscsi_itt_to_ctask - look up ctask by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for cmd tasks.
+ *
+ * The session lock must be held.
+ */
+struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
 {
-	struct iscsi_session *session = conn->session;
 	struct iscsi_task *task;
-	uint32_t i;
 
 	if (iscsi_verify_itt(conn, itt))
 		return NULL;
 
-	if (itt == RESERVED_ITT)
+	task = iscsi_itt_to_task(conn, itt);
+	if (!task || !task->sc)
 		return NULL;
 
-	i = get_itt(itt);
-	if (i >= session->cmds_max)
-		return NULL;
-
-	task = session->cmds[i];
-	if (!task->sc)
-		return NULL;
-
-	if (task->sc->SCp.phase != session->age)
+	if (task->sc->SCp.phase != conn->session->age) {
+		iscsi_session_printk(KERN_ERR, conn->session,
+				  "task's session age %d, expected %d\n",
+				  task->sc->SCp.phase, conn->session->age);
 		return NULL;
+	}
 
 	return task;
 }
@@ -1620,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	switch (conn->tmf_state) {
 	case TMF_SUCCESS:
 		spin_unlock_bh(&session->lock);
+		/*
+		 * stop tx side incase the target had sent a abort rsp but
+		 * the initiator was still writing out data.
+		 */
 		iscsi_suspend_tx(conn);
 		/*
-		 * clean up task if aborted. grab the recv lock as a writer
+		 * we do not stop the recv side because targets have been
+		 * good and have never sent us a successful tmf response
+		 * then sent more data for the cmd.
 		 */
-		write_lock_bh(conn->recv_lock);
 		spin_lock(&session->lock);
 		fail_command(conn, task, DID_ABORT << 16);
 		conn->tmf_state = TMF_INITIAL;
 		spin_unlock(&session->lock);
-		write_unlock_bh(conn->recv_lock);
 		iscsi_start_tx(conn);
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
@@ -1729,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
 	spin_unlock_bh(&session->lock);
 
 	iscsi_suspend_tx(conn);
-	/* need to grab the recv lock then session lock */
-	write_lock_bh(conn->recv_lock);
+
 	spin_lock(&session->lock);
 	fail_all_commands(conn, sc->device->lun, DID_ERROR);
 	conn->tmf_state = TMF_INITIAL;
 	spin_unlock(&session->lock);
-	write_unlock_bh(conn->recv_lock);
 
 	iscsi_start_tx(conn);
 	goto done;
@@ -2256,17 +2292,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 		return;
 	}
 
-	/*
-	 * The LLD either freed/unset the lock on us, or userspace called
-	 * stop but did not create a proper connection (connection was never
-	 * bound or it was unbound then stop was called).
-	 */
-	if (!conn->recv_lock) {
-		spin_unlock_bh(&session->lock);
-		mutex_unlock(&session->eh_mutex);
-		return;
-	}
-
 	/*
 	 * When this is called for the in_login state, we only want to clean
 	 * up the login task and connection. We do not need to block and set
@@ -2283,11 +2308,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 	spin_unlock_bh(&session->lock);
 
 	iscsi_suspend_tx(conn);
-
-	write_lock_bh(conn->recv_lock);
-	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-	write_unlock_bh(conn->recv_lock);
-
 	/*
 	 * for connection level recovery we should not calculate
 	 * header digest. conn->hdr_size used for optimization
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 5bf0187e7520..5e75bb7f311c 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -138,11 +138,6 @@ struct iscsi_conn {
 	struct iscsi_cls_conn	*cls_conn;	/* ptr to class connection */
 	void			*dd_data;	/* iscsi_transport data */
 	struct iscsi_session	*session;	/* parent session */
-	/*
-	 * LLDs should set this lock. It protects the transport recv
-	 * code
-	 */
-	rwlock_t		*recv_lock;
 	/*
 	 * conn_stop() flag: stop to recover, stop to terminate
 	 */
@@ -374,10 +369,13 @@ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
 				char *, uint32_t);
 extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 			      char *, int);
+extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+				char *, int);
 extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_task(struct iscsi_task *task);
 extern void iscsi_put_task(struct iscsi_task *task);
+extern void __iscsi_get_task(struct iscsi_task *task);
 
 /*
  * generic helpers
-- 
cgit v1.2.3


From 3a12b199fc820a52b8321c2b35172a1b3651120d Mon Sep 17 00:00:00 2001
From: Harvey Harrison <harvey.harrison@gmail.com>
Date: Wed, 21 May 2008 15:54:19 -0500
Subject: [SCSI] Replace __FUNCTION__ with __func__ in iscsi_tcp.

__FUNCTION__ is gcc-specific, use __func__

(Update diff by Mike Christie)

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/iscsi_tcp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 91cb1fd523f0..7c4ee39a1c43 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -544,7 +544,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 
 	if (tcp_task->exp_datasn != datasn) {
 		debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
-		          __FUNCTION__, tcp_task->exp_datasn, datasn);
+		          __func__, tcp_task->exp_datasn, datasn);
 		return ISCSI_ERR_DATASN;
 	}
 
@@ -553,7 +553,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 	tcp_task->data_offset = be32_to_cpu(rhdr->offset);
 	if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
 		debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
-		          __FUNCTION__, tcp_task->data_offset,
+		          __func__, tcp_task->data_offset,
 		          tcp_conn->in.datalen, total_in_length);
 		return ISCSI_ERR_DATA_OFFSET;
 	}
@@ -646,7 +646,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
 
 	if (tcp_task->exp_datasn != r2tsn){
 		debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
-		          __FUNCTION__, tcp_task->exp_datasn, r2tsn);
+		          __func__, tcp_task->exp_datasn, r2tsn);
 		return ISCSI_ERR_R2TSN;
 	}
 
@@ -1193,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
 {
 	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 
-	debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+	debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
 			conn->hdrdgst_en? ", digest enabled" : "");
 
 	/* Clear the data segment - needs to be filled in by the
@@ -1234,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 	struct hash_desc *tx_hash = NULL;
 	unsigned int hdr_spec_len;
 
-	debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+	debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
 			tcp_conn, offset, len,
 			conn->datadgst_en? ", digest enabled" : "");
 
@@ -1259,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
 	struct hash_desc *tx_hash = NULL;
 	unsigned int hdr_spec_len;
 
-	debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+	debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
 		  conn->datadgst_en? ", digest enabled" : "");
 
 	/* Make sure the datalen matches what the caller
-- 
cgit v1.2.3


From 8f333991ba8479fe8a9d72aea978db415e3c2f2a Mon Sep 17 00:00:00 2001
From: Harvey Harrison <harvey.harrison@gmail.com>
Date: Wed, 21 May 2008 15:54:20 -0500
Subject: [SCSI] scsi: use get_unaligned_* helpers

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/libiscsi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c267b440444..8b4e412a0974 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -591,7 +591,7 @@ invalid_datalen:
 			goto out;
 		}
 
-		senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+		senselen = get_unaligned_be16(data);
 		if (datalen < senselen)
 			goto invalid_datalen;
 
-- 
cgit v1.2.3


From 30e9ba9f2001f45960507f1963551311a67d0209 Mon Sep 17 00:00:00 2001
From: Boaz Harrosh <bharrosh@panasas.com>
Date: Mon, 26 May 2008 11:31:19 +0300
Subject: [SCSI] iscsi_tcp: Enable any size command

Let through commands up to the largest CDB size of 260 bytes defined by
the SCSI standard. The iscsi core supports this already. Now that scsi-ml
supports it we can start using large commands.

[jejb:rejections fixed up]
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Acked-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/iscsi_tcp.c   | 2 +-
 include/scsi/iscsi_proto.h | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7c4ee39a1c43..0bd8b3dc3c19 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1864,7 +1864,7 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	shost->max_lun = iscsi_max_lun;
 	shost->max_id = 0;
 	shost->max_channel = 0;
-	shost->max_cmd_len = 16;
+	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
 	shost->can_queue = cmds_max;
 
 	if (iscsi_host_add(shost, NULL))
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index e0593bfae622..f2a2c1169486 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -22,6 +22,7 @@
 #define ISCSI_PROTO_H
 
 #include <linux/types.h>
+#include <scsi/scsi.h>
 
 #define ISCSI_DRAFT20_VERSION	0x00
 
@@ -156,7 +157,7 @@ struct iscsi_ecdb_ahdr {
 	uint8_t ahstype;
 	uint8_t reserved;
 	/* 4-byte aligned extended CDB spillover */
-	uint8_t ecdb[260 - ISCSI_CDB_SIZE];
+	uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
 };
 
 /* SCSI Response Header */
-- 
cgit v1.2.3


From 597136ab70a6dccba5c40ee6eed88e881412429b Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen" <martin.petersen@oracle.com>
Date: Thu, 5 Jun 2008 00:12:59 -0400
Subject: [SCSI] scsi_debug: Runtime-configurable sector size

Make the scsi_debug sector size configurable at load time instead of
being a #define. Handy for testing 4 KB sectors.
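
As a usage note (the exact invocation is just an example): loading the
module with sector_size=4096, e.g. "modprobe scsi_debug sector_size=4096",
should emulate a 4 KB-sector disk; per the validation added below, only
512, 1024, 2048 and 4096 are accepted.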

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Douglas Gilbert <dougg@torque.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_debug.c | 98 ++++++++++++++++++++++++++++-------------------
 1 file changed, 59 insertions(+), 39 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f6600bfb5bde..3901125455c9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
 #define DEF_VIRTUAL_GB   0
 #define DEF_FAKE_RW	0
 #define DEF_VPD_USE_HOSTNO 1
+#define DEF_SECTOR_SIZE 512
 
 /* bit mask values for scsi_debug_opts */
 #define SCSI_DEBUG_OPT_NOISE   1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
 static int scsi_debug_fake_rw = DEF_FAKE_RW;
 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
 
 static int scsi_debug_cmnd_count = 0;
 
@@ -157,11 +159,6 @@ static int sdebug_heads;		/* heads per disk */
 static int sdebug_cylinders_per;	/* cylinders per surface */
 static int sdebug_sectors_per;		/* sectors per cylinder */
 
-/* default sector size is 512 bytes, 2**9 bytes */
-#define POW2_SECT_SIZE 9
-#define SECT_SIZE (1 << POW2_SECT_SIZE)
-#define SECT_SIZE_PER(TGT) SECT_SIZE
-
 #define SDEBUG_MAX_PARTS 4
 
 #define SDEBUG_SENSE_LEN 32
@@ -878,8 +875,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
 		arr[2] = 0xff;
 		arr[3] = 0xff;
 	}
-	arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-	arr[7] = SECT_SIZE_PER(target) & 0xff;
+	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
+	arr[7] = scsi_debug_sector_size & 0xff;
 	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
 }
 
@@ -902,10 +899,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
 	capac = sdebug_capacity - 1;
 	for (k = 0; k < 8; ++k, capac >>= 8)
 		arr[7 - k] = capac & 0xff;
-	arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-	arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-	arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-	arr[11] = SECT_SIZE_PER(target) & 0xff;
+	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
+	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
+	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
+	arr[11] = scsi_debug_sector_size & 0xff;
 	return fill_from_dev_buffer(scp, arr,
 				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
 }
@@ -1019,20 +1016,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
 
 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
 {       /* Format device page for mode_sense */
-        unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
-                                     0, 0, 0, 0, 0, 0, 0, 0,
-                                     0, 0, 0, 0, 0x40, 0, 0, 0};
-
-        memcpy(p, format_pg, sizeof(format_pg));
-        p[10] = (sdebug_sectors_per >> 8) & 0xff;
-        p[11] = sdebug_sectors_per & 0xff;
-        p[12] = (SECT_SIZE >> 8) & 0xff;
-        p[13] = SECT_SIZE & 0xff;
-        if (DEV_REMOVEABLE(target))
-                p[20] |= 0x20; /* should agree with INQUIRY */
-        if (1 == pcontrol)
-                memset(p + 2, 0, sizeof(format_pg) - 2);
-        return sizeof(format_pg);
+	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
+				     0, 0, 0, 0, 0, 0, 0, 0,
+				     0, 0, 0, 0, 0x40, 0, 0, 0};
+
+	memcpy(p, format_pg, sizeof(format_pg));
+	p[10] = (sdebug_sectors_per >> 8) & 0xff;
+	p[11] = sdebug_sectors_per & 0xff;
+	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
+	p[13] = scsi_debug_sector_size & 0xff;
+	if (DEV_REMOVEABLE(target))
+		p[20] |= 0x20; /* should agree with INQUIRY */
+	if (1 == pcontrol)
+		memset(p + 2, 0, sizeof(format_pg) - 2);
+	return sizeof(format_pg);
 }
 
 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1203,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
 			ap[2] = (sdebug_capacity >> 8) & 0xff;
 			ap[3] = sdebug_capacity & 0xff;
 		}
-        	ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-        	ap[7] = SECT_SIZE_PER(target) & 0xff;
+		ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
+		ap[7] = scsi_debug_sector_size & 0xff;
 		offset += bd_len;
 		ap = arr + offset;
 	} else if (16 == bd_len) {
@@ -1215,10 +1212,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
 
         	for (k = 0; k < 8; ++k, capac >>= 8)
                 	ap[7 - k] = capac & 0xff;
-        	ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
-        	ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
-        	ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
-        	ap[15] = SECT_SIZE_PER(target) & 0xff;
+		ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
+		ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
+		ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
+		ap[15] = scsi_debug_sector_size & 0xff;
 		offset += bd_len;
 		ap = arr + offset;
 	}
@@ -1519,10 +1516,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
 	if (block + num > sdebug_store_sectors)
 		rest = block + num - sdebug_store_sectors;
 
-	ret = func(scmd, fake_storep + (block * SECT_SIZE),
-		   (num - rest) * SECT_SIZE);
+	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
+		   (num - rest) * scsi_debug_sector_size);
 	if (!ret && rest)
-		ret = func(scmd, fake_storep, rest * SECT_SIZE);
+		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
 
 	return ret;
 }
@@ -1575,10 +1572,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 	write_unlock_irqrestore(&atomic_rw, iflags);
 	if (-1 == ret)
 		return (DID_ERROR << 16);
-	else if ((ret < (num * SECT_SIZE)) &&
+	else if ((ret < (num * scsi_debug_sector_size)) &&
 		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
-		       " IO sent=%d bytes\n", num * SECT_SIZE, ret);
+		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
 	return 0;
 }
 
@@ -2085,6 +2082,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
 		   S_IRUGO | S_IWUSR);
+module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
 
 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
 MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2104,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
 
 
 static char sdebug_info[256];
@@ -2158,8 +2157,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
 	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
 	    scsi_debug_cmnd_count, scsi_debug_delay,
 	    scsi_debug_max_luns, scsi_debug_scsi_level,
-	    SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
-	    num_aborts, num_dev_resets, num_bus_resets, num_host_resets);
+	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
+	    num_host_resets);
 	if (pos < offset) {
 		len = 0;
 		begin = pos;
@@ -2434,6 +2434,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
 	    sdebug_vpd_use_hostno_store);
 
+static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+}
+DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
+
 /* Note: The following function creates attribute files in the
    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
    files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2465,13 @@ static int do_create_driverfs_files(void)
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
 	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
+	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
 	return ret;
 }
 
 static void do_remove_driverfs_files(void)
 {
+	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
 	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2507,22 @@ static int __init scsi_debug_init(void)
 	int k;
 	int ret;
 
+	switch (scsi_debug_sector_size) {
+	case  512:
+	case 1024:
+	case 2048:
+	case 4096:
+		break;
+	default:
+		printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
+		       scsi_debug_sector_size);
+		return -EINVAL;
+	}
+
 	if (scsi_debug_dev_size_mb < 1)
 		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
 	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
-	sdebug_store_sectors = sz / SECT_SIZE;
+	sdebug_store_sectors = sz / scsi_debug_sector_size;
 	sdebug_capacity = get_sdebug_capacity();
 
 	/* play around with geometry, don't waste too much on track 0 */
-- 
cgit v1.2.3


From 090507157f3bc43dd925fda50f8aca7d03b616b6 Mon Sep 17 00:00:00 2001
From: Mark Salyzyn <Mark_Salyzyn@adaptec.com>
Date: Wed, 28 May 2008 15:32:55 -0400
Subject: [SCSI] aacraid: prevent copy_from_user() BUG!

Seen:

	kernel BUG at arch/i386/lib/usercopy.c:872

under a 2.6.18-8.el5 kernel. Traced it to a garbage-in/garbage-out
ioctl condition in the aacraid driver.

Adaptec's special ioctl SCB passthrough needs to validate the individual
scatter-gather count fields against the maximum the adapter supports.
Doing so has the side effect of preventing copy_from_user() from bugging
out while populating the DMA buffers. This is a hardening effort; the
issue was triggered by an errant version of the management tools, so the
BUG should not be seen in the field.
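
A rough sketch of the intended per-element validation, with the limit
computed up front (names mirror the driver; this is illustrative only,
not the exact hunk applied below):

    /* Reject any user-supplied SG element larger than the adapter can
     * transfer, before the kmalloc()/copy_from_user() that would
     * otherwise BUG on it.
     */
    u32 max_bytes = (dev->adapter_info.options & AAC_OPT_NEW_COMM) ?
                    (dev->scsi_host_ptr->max_sectors << 9) : 65536;

    for (i = 0; i < upsg->count; i++) {
            if (upsg->sg[i].count > max_bytes) {
                    rcode = -EINVAL;
                    goto cleanup;
            }
    }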

[jejb: fixed up compile failure]
Signed-off-by: Mark Salyzyn <aacraid@adaptec.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/aacraid/commctrl.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 5fd83deab36c..a7355260cfcf 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,6 +41,7 @@
 #include <linux/kthread.h>
 #include <linux/semaphore.h>
 #include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
 
 #include "aacraid.h"
 
@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			for (i = 0; i < upsg->count; i++) {
 				u64 addr;
 				void* p;
+				if (upsg->sg[i].count >
+				    (dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
 				/* Does this really need to be GFP_DMA? */
 				p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 				if(!p) {
@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			for (i = 0; i < usg->count; i++) {
 				u64 addr;
 				void* p;
+				if (usg->sg[i].count >
+				    (dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
 				/* Does this really need to be GFP_DMA? */
 				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 				if(!p) {
@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			for (i = 0; i < upsg->count; i++) {
 				uintptr_t addr;
 				void* p;
+				if (usg->sg[i].count >
+				    (dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
 				/* Does this really need to be GFP_DMA? */
 				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 				if(!p) {
@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			for (i = 0; i < upsg->count; i++) {
 				dma_addr_t addr;
 				void* p;
+				if (upsg->sg[i].count >
+				    (dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
 				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
 				if (!p) {
 					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
-- 
cgit v1.2.3


From 6362abd3e00d3161affad996fa53cc69a01fc6d1 Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen" <martin.petersen@oracle.com>
Date: Thu, 5 Jun 2008 23:30:03 -0400
Subject: [SCSI] Rename scsi_bidi_sdb_cache

The data integrity changes need to dynamically allocate
scsi_data_buffers too.  Rename scsi_bidi_sdb_cache for clarity.
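
For context, a minimal sketch of how the renamed cache is created and
used (mirrors the hunks below; error paths elided):

    static struct kmem_cache *scsi_sdb_cache;

    /* one-time setup in scsi_init_queue() */
    scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
                                       sizeof(struct scsi_data_buffer),
                                       0, 0, NULL);

    /* per-command allocation for bidirectional requests */
    struct scsi_data_buffer *bidi_sdb =
            kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
    /* ... attach to the request, and later ... */
    kmem_cache_free(scsi_sdb_cache, bidi_sdb);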

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_lib.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 033c58a65f50..aa8d5de58839 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-static struct kmem_cache *scsi_bidi_sdb_cache;
+static struct kmem_cache *scsi_sdb_cache;
 
 static void scsi_run_queue(struct request_queue *q);
 
@@ -775,7 +775,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 		struct scsi_data_buffer *bidi_sdb =
 			cmd->request->next_rq->special;
 		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
 		cmd->request->next_rq->special = NULL;
 	}
 }
@@ -1050,7 +1050,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 
 	if (blk_bidi_rq(cmd->request)) {
 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
-			scsi_bidi_sdb_cache, GFP_ATOMIC);
+			scsi_sdb_cache, GFP_ATOMIC);
 		if (!bidi_sdb) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
@@ -1692,11 +1692,11 @@ int __init scsi_init_queue(void)
 		return -ENOMEM;
 	}
 
-	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
-					sizeof(struct scsi_data_buffer),
-					0, 0, NULL);
-	if (!scsi_bidi_sdb_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+					   sizeof(struct scsi_data_buffer),
+					   0, 0, NULL);
+	if (!scsi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
 		goto cleanup_io_context;
 	}
 
@@ -1709,7 +1709,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1717,13 +1717,13 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 	}
 
 	return 0;
 
-cleanup_bidi_sdb:
+cleanup_sdb:
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		if (sgp->pool)
@@ -1731,7 +1731,7 @@ cleanup_bidi_sdb:
 		if (sgp->slab)
 			kmem_cache_destroy(sgp->slab);
 	}
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 cleanup_io_context:
 	kmem_cache_destroy(scsi_io_context_cache);
 
@@ -1743,7 +1743,7 @@ void scsi_exit_queue(void)
 	int i;
 
 	kmem_cache_destroy(scsi_io_context_cache);
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
-- 
cgit v1.2.3


From 915caaaf622172bd3451e7b76ba9cfcea80e87c7 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Sat, 14 Jun 2008 22:52:38 -0400
Subject: [SCSI] lpfc 8.2.7 : Change device reset behavior

The prior handler waited only for I/O on one LUN to finish before
returning completion. Now, wait for I/O on all LUNs on the target.
Also performed some rudimentary cleanup while in this code.
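
The rewritten handler bounds its waits by wall-clock time instead of loop
counters; a rough sketch of that pattern (simplified from the hunks
below):

    unsigned long later;

    /* Wait up to 2 * devloss_tmo seconds for outstanding I/O on the
     * target to drain, polling every 20ms.
     */
    later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
    while (time_after(later, jiffies) && cnt) {
            schedule_timeout_uninterruptible(msecs_to_jiffies(20));
            cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
                                    cmnd->device->lun, LPFC_CTX_TGT);
    }
    if (cnt)        /* some targets never abort; report the failure */
            ret = FAILED;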

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_scsi.c | 201 +++++++++++++++++-------------------------
 1 file changed, 80 insertions(+), 121 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0910a9ab76a5..3926affaf727 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -849,14 +849,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
+	int status;
 
 	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 
 	lpfc_cmd->rdata = rdata;
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 					   FCP_TARGET_RESET);
-	if (!ret)
+	if (!status)
 		return FAILED;
 
 	iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +870,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
 			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-	ret = lpfc_sli_issue_iocb_wait(phba,
+	status = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-	if (ret != IOCB_SUCCESS) {
-		if (ret == IOCB_TIMEDOUT)
+	if (status != IOCB_SUCCESS) {
+		if (status == IOCB_TIMEDOUT) {
 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+			ret = TIMEOUT_ERROR;
+		} else
+			ret = FAILED;
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 	} else {
 		ret = SUCCESS;
@@ -1142,121 +1146,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_iocbq *iocbq, *iocbqrsp;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
-	uint32_t cmd_result = 0, cmd_status = 0;
-	int ret = FAILED;
-	int iocb_status = IOCB_SUCCESS;
-	int cnt, loopcnt;
+	unsigned long later;
+	int ret = SUCCESS;
+	int status;
+	int cnt;
 
 	lpfc_block_error_handler(cmnd);
-	loopcnt = 0;
 	/*
 	 * If target is not in a MAPPED state, delay the reset until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	while (1) {
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies)) {
 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-			goto out;
-
-		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-			loopcnt++;
-			rdata = cmnd->device->hostdata;
-			if (!rdata ||
-				(loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
-				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-						 "0721 LUN Reset rport "
-						 "failure: cnt x%x rdata x%p\n",
-						 loopcnt, rdata);
-				goto out;
-			}
-			pnode = rdata->pnode;
-			if (!pnode || !NLP_CHK_NODE_ACT(pnode))
-				goto out;
-		}
+			return FAILED;
 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
 			break;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+		rdata = cmnd->device->hostdata;
+		if (!rdata)
+			break;
+		pnode = rdata->pnode;
+	}
+	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0721 LUN Reset rport "
+				 "failure: msec x%x rdata x%p\n",
+				 jiffies_to_msecs(jiffies - later), rdata);
+		return FAILED;
 	}
-
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL)
-		goto out;
-
+		return FAILED;
 	lpfc_cmd->timeout = 60;
 	lpfc_cmd->rdata = rdata;
 
-	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
-					   FCP_TARGET_RESET);
-	if (!ret)
-		goto out_free_scsi_buf;
-
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
+					      cmnd->device->lun,
+					      FCP_TARGET_RESET);
+	if (!status) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	iocbq = &lpfc_cmd->cur_iocbq;
 
 	/* get a buffer for this IOCB command response */
 	iocbqrsp = lpfc_sli_get_iocbq(phba);
-	if (iocbqrsp == NULL)
-		goto out_free_scsi_buf;
-
+	if (iocbqrsp == NULL) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
 			 "0703 Issue target reset to TGT %d LUN %d "
 			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
 			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-	iocb_status = lpfc_sli_issue_iocb_wait(phba,
-				       &phba->sli.ring[phba->sli.fcp_ring],
-				       iocbq, iocbqrsp, lpfc_cmd->timeout);
-
-	if (iocb_status == IOCB_TIMEDOUT)
+	status = lpfc_sli_issue_iocb_wait(phba,
+					  &phba->sli.ring[phba->sli.fcp_ring],
+					  iocbq, iocbqrsp, lpfc_cmd->timeout);
+	if (status == IOCB_TIMEDOUT) {
 		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-
-	if (iocb_status == IOCB_SUCCESS)
-		ret = SUCCESS;
-	else
-		ret = iocb_status;
-
-	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
-	cmd_status = iocbqrsp->iocb.ulpStatus;
-
+		ret = TIMEOUT_ERROR;
+	} else {
+		if (status != IOCB_SUCCESS)
+			ret = FAILED;
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0713 SCSI layer issued device reset (%d, %d) "
+			 "return x%x status x%x result x%x\n",
+			 cmnd->device->id, cmnd->device->lun, ret,
+			 iocbqrsp->iocb.ulpStatus,
+			 iocbqrsp->iocb.un.ulpWord[4]);
 	lpfc_sli_release_iocbq(phba, iocbqrsp);
-
-	/*
-	 * All outstanding txcmplq I/Os should have been aborted by the device.
-	 * Unfortunately, some targets do not abide by this forcing the driver
-	 * to double check.
-	 */
 	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
-				LPFC_CTX_LUN);
+				LPFC_CTX_TGT);
 	if (cnt)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    cmnd->device->id, cmnd->device->lun,
-				    LPFC_CTX_LUN);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
-
+				    LPFC_CTX_TGT);
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
-					cmnd->device->lun, LPFC_CTX_LUN);
+					cmnd->device->lun, LPFC_CTX_TGT);
 	}
-
 	if (cnt) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0719 device reset I/O flush failure: "
 				 "cnt x%x\n", cnt);
 		ret = FAILED;
 	}
-
-out_free_scsi_buf:
-	if (iocb_status != IOCB_TIMEDOUT) {
-		lpfc_release_scsi_buf(phba, lpfc_cmd);
-	}
-	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-			 "0713 SCSI layer issued device reset (%d, %d) "
-			 "return x%x status x%x result x%x\n",
-			 cmnd->device->id, cmnd->device->lun, ret,
-			 cmd_status, cmd_result);
-out:
 	return ret;
 }
 
@@ -1268,19 +1247,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nodelist *ndlp = NULL;
 	int match;
-	int ret = FAILED, i, err_count = 0;
-	int cnt, loopcnt;
+	int ret = SUCCESS, status, i;
+	int cnt;
 	struct lpfc_scsi_buf * lpfc_cmd;
+	unsigned long later;
 
 	lpfc_block_error_handler(cmnd);
-
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
-	if (lpfc_cmd == NULL)
-		goto out;
-
-	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
-	lpfc_cmd->timeout = 60;
-
 	/*
 	 * Since the driver manages a single bus device, reset all
 	 * targets known to the driver.  Should any target reset
@@ -1294,7 +1266,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 			if (!NLP_CHK_NODE_ACT(ndlp))
 				continue;
 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
-			    i == ndlp->nlp_sid &&
+			    ndlp->nlp_sid == i &&
 			    ndlp->rport) {
 				match = 1;
 				break;
@@ -1303,27 +1275,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		spin_unlock_irq(shost->host_lock);
 		if (!match)
 			continue;
-
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
-					  cmnd->device->lun,
-					  ndlp->rport->dd_data);
-		if (ret != SUCCESS) {
+		lpfc_cmd = lpfc_get_scsi_buf(phba);
+		if (lpfc_cmd) {
+			lpfc_cmd->timeout = 60;
+			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+						     cmnd->device->lun,
+						     ndlp->rport->dd_data);
+			if (status != TIMEOUT_ERROR)
+				lpfc_release_scsi_buf(phba, lpfc_cmd);
+		}
+		if (!lpfc_cmd || status != SUCCESS) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 					 "0700 Bus Reset on target %d failed\n",
 					 i);
-			err_count++;
-			break;
+			ret = FAILED;
 		}
 	}
-
-	if (ret != IOCB_TIMEDOUT)
-		lpfc_release_scsi_buf(phba, lpfc_cmd);
-
-	if (err_count == 0)
-		ret = SUCCESS;
-	else
-		ret = FAILED;
-
 	/*
 	 * All outstanding txcmplq I/Os should have been aborted by
 	 * the targets.  Unfortunately, some targets do not abide by
@@ -1333,27 +1300,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 	if (cnt)
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    0, 0, LPFC_CTX_HOST);
-	loopcnt = 0;
-	while(cnt) {
-		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
-		if (++loopcnt
-		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
-			break;
-
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
 	}
-
 	if (cnt) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0715 Bus Reset I/O flush failure: "
 				 "cnt x%x left x%x\n", cnt, i);
 		ret = FAILED;
 	}
-
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
-out:
 	return ret;
 }
 
-- 
cgit v1.2.3


From 0d2b6b83030d6a88cbf7db57f84f2daf0e0b251b Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Sat, 14 Jun 2008 22:52:47 -0400
Subject: [SCSI] lpfc 8.2.7 : Discovery Fixes

- Fix ADISC timeout on initiators causing devloss timeout on targets
- Correct FAN processing: port state vs unreg rpi's wasn't consistent
- Correct mismatches between ADISC and PLOGI that would skip PLOGI
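
A recurring cleanup in this patch is moving the NLP_DELAY_TMO test into
lpfc_cancel_retry_delay_tmo() itself, so callers may invoke it
unconditionally; a simplified sketch of that guard:

    void
    lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport,
                                struct lpfc_nodelist *nlp)
    {
            /* Nothing pending: make the cancel a cheap no-op. */
            if (!(nlp->nlp_flag & NLP_DELAY_TMO))
                    return;
            /* ... clear the flag, del_timer_sync(), drop the reference
             * held for the delayed retry ...
             */
    }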

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_els.c       | 141 +++++++++---------------------------
 drivers/scsi/lpfc/lpfc_hbadisc.c   |  23 ++----
 drivers/scsi/lpfc/lpfc_init.c      |   3 +
 drivers/scsi/lpfc/lpfc_nportdisc.c | 145 ++++++++++++++-----------------------
 4 files changed, 99 insertions(+), 213 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 886c5f1b11d2..d418c7c1251e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_work_evt *evtp;
 
+	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+		return;
 	spin_lock_irq(shost->host_lock);
 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
 	spin_unlock_irq(shost->host_lock);
 	del_timer_sync(&nlp->nlp_delayfunc);
 	nlp->nlp_last_elscmd = 0;
-
 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 		list_del_init(&nlp->els_retry_evt.evt_listp);
 		/* Decrement nlp reference count held for the delayed retry */
 		evtp = &nlp->els_retry_evt;
 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
 	}
-
 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
 		spin_lock_irq(shost->host_lock);
 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 		spin_unlock_irq(shost->host_lock);
 		if (vport->num_disc_nodes) {
-			/* Check to see if there are more
-			 * PLOGIs to be sent
-			 */
-			lpfc_more_plogi(vport);
-
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* Check if there are more ADISCs to be sent */
+				lpfc_more_adisc(vport);
+				if ((vport->num_disc_nodes == 0) &&
+				    (vport->fc_npr_cnt))
+					lpfc_els_disc_plogi(vport);
+			} else {
+				/* Check if there are more PLOGIs to be sent */
+				lpfc_more_plogi(vport);
+			}
 			if (vport->num_disc_nodes == 0) {
 				spin_lock_irq(shost->host_lock);
 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
 	unsigned long flags;
 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
 
-	ndlp = (struct lpfc_nodelist *) ptr;
-	phba = ndlp->vport->phba;
-	evtp = &ndlp->els_retry_evt;
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	npr = (PRLI *) pcmd;
 	vpd = &phba->vpd;
 	/*
-	 * If our firmware version is 3.20 or later,
-	 * set the following bits for FC-TAPE support.
+	 * If the remote port is a target and our firmware version is 3.20 or
+	 * later, set the following bits for FC-TAPE support.
 	 */
-	if (vpd->rev.feaLevelHigh >= 0x02) {
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vpd->rev.feaLevelHigh >= 0x02)) {
 		npr->ConfmComplAllowed = 1;
 		npr->Retry = 1;
 		npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp = NULL;
 
-	/* Look at all nodes effected by pending RSCNs and move
-	 * them to NPR state.
-	 */
-
+	/* Move all affected nodes by pending RSCNs to NPR state. */
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		if (!NLP_CHK_NODE_ACT(ndlp) ||
-		    ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
-		    lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
+		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
 			continue;
-
 		lpfc_disc_state_machine(vport, ndlp, NULL,
-						NLP_EVT_DEVICE_RECOVERY);
-
-		/*
-		 * Make sure NLP_DELAY_TMO is NOT running after a device
-		 * recovery event.
-		 */
-		if (ndlp->nlp_flag & NLP_DELAY_TMO)
-			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+					NLP_EVT_DEVICE_RECOVERY);
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	}
-
 	return 0;
 }
 
@@ -3781,91 +3772,27 @@ static int
 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		 struct lpfc_nodelist *fan_ndlp)
 {
-	struct lpfc_dmabuf *pcmd;
+	struct lpfc_hba *phba = vport->phba;
 	uint32_t *lp;
-	IOCB_t *icmd;
-	uint32_t cmd, did;
 	FAN *fp;
-	struct lpfc_nodelist *ndlp, *next_ndlp;
-	struct lpfc_hba *phba = vport->phba;
-
-	/* FAN received */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-			 "0265 FAN received\n");
-	icmd = &cmdiocb->iocb;
-	did = icmd->un.elsreq64.remoteID;
-	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
-	lp = (uint32_t *)pcmd->virt;
-
-	cmd = *lp++;
-	fp = (FAN *) lp;
 
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	fp = (FAN *) ++lp;
 	/* FAN received; Fan does not have a reply sequence */
-
-	if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
+	if ((vport == phba->pport) &&
+	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
-			sizeof(struct lpfc_name)) != 0) ||
+			    sizeof(struct lpfc_name))) ||
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
-			sizeof(struct lpfc_name)) != 0)) {
-			/*
-			 * This node has switched fabrics.  FLOGI is required
-			 * Clean up the old rpi's
-			 */
-
-			list_for_each_entry_safe(ndlp, next_ndlp,
-						 &vport->fc_nodes, nlp_listp) {
-				if (!NLP_CHK_NODE_ACT(ndlp))
-					continue;
-				if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-					continue;
-				if (ndlp->nlp_type & NLP_FABRIC) {
-					/*
-					 * Clean up old Fabric, Nameserver and
-					 * other NLP_FABRIC logins
-					 */
-					lpfc_drop_node(vport, ndlp);
-
-				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-					/* Fail outstanding I/O now since this
-					 * device is marked for PLOGI
-					 */
-					lpfc_unreg_rpi(vport, ndlp);
-				}
-			}
-
+			    sizeof(struct lpfc_name)))) {
+			/* This port has switched fabrics. FLOGI is required */
 			lpfc_initial_flogi(vport);
-			return 0;
-		}
-		/* Discovery not needed,
-		 * move the nodes to their original state.
-		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
-					 nlp_listp) {
-			if (!NLP_CHK_NODE_ACT(ndlp))
-				continue;
-			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
-				continue;
-
-			switch (ndlp->nlp_prev_state) {
-			case NLP_STE_UNMAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_UNMAPPED_NODE);
-				break;
-
-			case NLP_STE_MAPPED_NODE:
-				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(vport, ndlp,
-						   NLP_STE_MAPPED_NODE);
-				break;
-
-			default:
-				break;
-			}
+		} else {
+			/* FAN verified - skip FLOGI */
+			vport->fc_myDID = vport->fc_prevDID;
+			lpfc_issue_fabric_reglogin(vport);
 		}
-
-		/* Start discovery - this should just do CLEAR_LA */
-		lpfc_disc_start(vport);
 	}
 	return 0;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd..f3dc19dfac5b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1087,6 +1087,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+	/* Unblock ELS traffic */
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	/* Check for error */
 	if (mb->mbxStatus) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1652,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		ndlp->nlp_DID, old_state, state);
 
 	if (old_state == NLP_STE_NPR_NODE &&
-	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
 	    state != NLP_STE_NPR_NODE)
 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1688,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	spin_lock_irq(shost->host_lock);
@@ -1701,8 +1701,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2120,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	ndlp->nlp_last_elscmd = 0;
 	del_timer_sync(&ndlp->nlp_delayfunc);
 
-	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-		list_del_init(&ndlp->els_retry_evt.evt_listp);
-	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-		list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
 	lpfc_unreg_rpi(vport, ndlp);
 
@@ -2144,10 +2141,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	LPFC_MBOXQ_t *mbox;
 	int rc;
 
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
-
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
@@ -2317,8 +2311,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
 			/* Since this node is marked for discovery,
 			 * delay timeout is not needed.
 			 */
-			if (ndlp->nlp_flag & NLP_DELAY_TMO)
-				lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		} else
 			ndlp = NULL;
 	} else {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fa757b251f82..6fcddda58512 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -851,6 +851,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	lpfc_read_la(phba, pmb, mp);
 	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 	pmb->vport = vport;
+	/* Block ELS IOCBs until we have processed this mbox command */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
 	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		rc = 4;
@@ -866,6 +868,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
 	return;
 
 lpfc_handle_latt_free_mbuf:
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
 	kfree(mp);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d08c4c890744..6688a8689b56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			(iocb->iocb_cmpl) (phba, iocb, iocb);
 		}
 	}
-
-	/* If we are delaying issuing an ELS command, cancel it */
-	if (ndlp->nlp_flag & NLP_DELAY_TMO)
-		lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
 	return 0;
 }
 
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba    *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
-	struct lpfc_work_evt *evtp;
 	uint32_t *lp;
 	IOCB_t *icmd;
 	struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			ndlp, mbox);
 		return 1;
 	}
-
-	/* If the remote NPort logs into us, before we can initiate
-	 * discovery to them, cleanup the NPort from discovery accordingly.
-	 */
-	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-		spin_unlock_irq(shost->host_lock);
-		del_timer_sync(&ndlp->nlp_delayfunc);
-		ndlp->nlp_last_elscmd = 0;
-
-		if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
-			list_del_init(&ndlp->els_retry_evt.evt_listp);
-			/* Decrement ndlp reference count held for the
-			 * delayed retry
-			 */
-			evtp = &ndlp->els_retry_evt;
-			lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
-		}
-
-		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-			spin_lock_irq(shost->host_lock);
-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-			spin_unlock_irq(shost->host_lock);
-
-			if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
-			    (vport->num_disc_nodes)) {
-				/* Check to see if there are more
-				 * ADISCs to be sent
-				 */
-				lpfc_more_adisc(vport);
-
-				if ((vport->num_disc_nodes == 0) &&
-					(vport->fc_npr_cnt))
-					lpfc_els_disc_plogi(vport);
-
-				if (vport->num_disc_nodes == 0) {
-					spin_lock_irq(shost->host_lock);
-					vport->fc_flag &= ~FC_NDISC_ACTIVE;
-					spin_unlock_irq(shost->host_lock);
-					lpfc_can_disctmo(vport);
-					lpfc_end_rscn(vport);
-				}
-			}
-		}
-	} else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
-		   (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-		   (vport->num_disc_nodes)) {
-		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-		spin_unlock_irq(shost->host_lock);
-		/* Check to see if there are more
-		 * PLOGIs to be sent
-		 */
-		lpfc_more_plogi(vport);
-		if (vport->num_disc_nodes == 0) {
-			spin_lock_irq(shost->host_lock);
-			vport->fc_flag &= ~FC_NDISC_ACTIVE;
-			spin_unlock_irq(shost->host_lock);
-			lpfc_can_disctmo(vport);
-			lpfc_end_rscn(vport);
-		}
-	}
-
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
 	return 1;
-
 out:
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	else
 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
-	if (!(ndlp->nlp_type & NLP_FABRIC) ||
+	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
 	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
 		/* Only try to re-login if this is NOT a Fabric Node */
 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb = arg;
 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
 			NULL);
 	} else {
-		lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		    (vport->num_disc_nodes)) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			/* Check if there are more PLOGIs to be sent */
+			lpfc_more_plogi(vport);
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
 	} /* If our portname was less */
 
 	return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			   void *arg, uint32_t evt)
 {
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
 
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
-		return ndlp->nlp_state;
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
 
+			if (vport->num_disc_nodes) {
+				lpfc_more_adisc(vport);
+				if ((vport->num_disc_nodes == 0) &&
+				    (vport->fc_npr_cnt))
+					lpfc_els_disc_plogi(vport);
+				if (vport->num_disc_nodes == 0) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_NDISC_ACTIVE;
+					spin_unlock_irq(shost->host_lock);
+					lpfc_can_disctmo(vport);
+					lpfc_end_rscn(vport);
+				}
+			}
+		}
+		return ndlp->nlp_state;
+	}
 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
 
 	/* Ignore PLOGI if we have an outstanding LOGO */
-	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
+	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
 		return ndlp->nlp_state;
-	}
-
 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 		spin_lock_irq(shost->host_lock);
-		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
 		spin_unlock_irq(shost->host_lock);
-		return ndlp->nlp_state;
-	}
-
-	/* send PLOGI immediately, move to PLOGI issue state */
-	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
-		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		/* send PLOGI immediately, move to PLOGI issue state */
+		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
 	}
-
 	return ndlp->nlp_state;
 }
 
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
 
 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
-
 	/*
 	 * Do not start discovery if discovery is about to start
 	 * or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(shost->host_lock);
-	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-		lpfc_cancel_retry_delay_tmo(vport, ndlp);
-	}
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	return ndlp->nlp_state;
 }
 
-- 
cgit v1.2.3


From 5e9d9b8276980fc5dfa88ce34f6ec88ce3026232 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Sat, 14 Jun 2008 22:52:53 -0400
Subject: [SCSI] lpfc 8.2.7 : Rework the worker thread

Rework the worker thread to make it more efficient.
Use finer-grained notification of pending work so less time is spent
checking conditions. Also make other general cleanups.
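
The notification now reduces to a single "data ready" bit plus a wait
queue; a condensed sketch of the producer/consumer pairing (simplified
from the lpfc.h and lpfc_hbadisc.c hunks below):

    /* producer: mark work pending and kick the worker thread */
    static inline void lpfc_worker_wake_up(struct lpfc_hba *phba)
    {
            set_bit(LPFC_DATA_READY, &phba->data_flags);
            wake_up(&phba->work_waitq);
    }

    /* consumer (lpfc_do_work): sleep until the bit is set or we must stop */
    while (1) {
            rc = wait_event_interruptible(phba->work_waitq,
                            (test_and_clear_bit(LPFC_DATA_READY,
                                                &phba->data_flags)
                             || kthread_should_stop()));
            if (kthread_should_stop())
                    break;
            lpfc_work_done(phba);
    }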

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc.h         | 20 +++++++--
 drivers/scsi/lpfc/lpfc_ct.c      | 16 +++----
 drivers/scsi/lpfc/lpfc_els.c     | 33 ++++++--------
 drivers/scsi/lpfc/lpfc_hbadisc.c | 93 ++++++++++------------------------------
 drivers/scsi/lpfc/lpfc_init.c    | 13 +++---
 drivers/scsi/lpfc/lpfc_scsi.c    | 26 +++++------
 drivers/scsi/lpfc/lpfc_sli.c     | 36 ++++++++--------
 7 files changed, 97 insertions(+), 140 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ec0b0f6e5e1a..e3e5b540e36c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -59,6 +59,9 @@ struct lpfc_sli2_slim;
 
 #define MAX_HBAEVT	32
 
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
 enum lpfc_polling_flags {
 	ENABLE_FCP_RING_POLLING = 0x1,
 	DISABLE_FCP_RING_INT    = 0x2
@@ -425,9 +428,6 @@ struct lpfc_hba {
 
 	uint16_t pci_cfg_value;
 
-	uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION  4
-
 	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
 
 	uint32_t fc_eventTag;	/* event tag for link attention */
@@ -489,8 +489,9 @@ struct lpfc_hba {
 	uint32_t              work_hs;      /* HS stored in case of ERRAT */
 	uint32_t              work_status[2]; /* Extra status from SLIM */
 
-	wait_queue_head_t    *work_wait;
+	wait_queue_head_t    work_waitq;
 	struct task_struct   *worker_thread;
+	long data_flags;
 
 	uint32_t hbq_in_use;		/* HBQs in use flag */
 	struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
@@ -637,6 +638,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
 		phba->link_state == LPFC_HBA_READY;
 }
 
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
 #define FC_REG_DUMP_EVENT		0x10	/* Register for Dump events */
 #define FC_REG_TEMPERATURE_EVENT	0x20    /* Register for temperature
 						   event */
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 153afae567b5..5442ce33615a 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+	tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_FDMI_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d418c7c1251e..5d69dee85a8d 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1813,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
 	 * count until the queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-	evtp->evt       = LPFC_EVT_ELS_RETRY;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_ELS_RETRY;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
 		lpfc_worker_wake_up(phba);
-
+	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return;
 }
@@ -3802,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	spin_lock_irqsave(&vport->work_port_lock, iflag);
-	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_ELS_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
-	else
-		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -4769,18 +4766,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
 	unsigned long iflags;
 	uint32_t tmo_posted;
+
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
 	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f3dc19dfac5b..ba4873c9e2c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * count until this queued work is done
 	 */
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-	evtp->evt       = LPFC_EVT_DEV_LOSS;
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
 	spin_unlock_irq(&phba->hbalock);
 
 	return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-	wake_up(phba->work_wait);
-	return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 		|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
 			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
 			lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
 	lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-	struct lpfc_vport *vport;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-	int rc = 0;
-
-	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(vport, &phba->port_list, listentry) {
-		if (vport->work_port_events) {
-			rc = 1;
-			break;
-		}
-	}
-	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-		rc = 1;
-		phba->work_found++;
-	} else
-		phba->work_found = 0;
-	spin_unlock_irq(&phba->hbalock);
-	return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
 	struct lpfc_hba *phba = p;
 	int rc;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
 	set_user_nice(current, -20);
-	phba->work_wait = &work_waitq;
-	phba->work_found = 0;
+	phba->data_flags = 0;
 
 	while (1) {
-
-		rc = wait_event_interruptible(work_waitq,
-					      check_work_wait_done(phba));
-
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
 		BUG_ON(rc);
 
 		if (kthread_should_stop())
 			break;
 
+		/* Attend pending lpfc data processing */
 		lpfc_work_done(phba);
-
-		/* If there is alot of slow ring work, like during link up
-		 * check_work_wait_done() may cause this thread to not give
-		 * up the CPU for very long periods of time. This may cause
-		 * soft lockups or other problems. To avoid these situations
-		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-		 * consecutive iterations.
-		 */
-		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-			phba->work_found = 0;
-			schedule();
-		}
 	}
-	spin_lock_irq(&phba->hbalock);
-	phba->work_wait = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	return 0;
 }
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	list_add_tail(&evtp->evt_listp, &phba->work_list);
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	lpfc_worker_wake_up(phba);
+
 	return 1;
 }
 
@@ -2636,21 +2590,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
 	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
 	unsigned long flags = 0;
 
 	if (unlikely(!phba))
 		return;
 
-	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-		spin_lock_irqsave(&vport->work_port_lock, flags);
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
 		vport->work_port_events |= WORKER_DISC_TMO;
-		spin_unlock_irqrestore(&vport->work_port_lock, flags);
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-		spin_lock_irqsave(&phba->hbalock, flags);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6fcddda58512..53cedbafffba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -551,18 +551,18 @@ static void
 lpfc_hb_timeout(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
 	unsigned long iflag;
 
 	phba = (struct lpfc_hba *)ptr;
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
-	if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
 		phba->pport->work_port_events |= WORKER_HB_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	spin_lock_irqsave(&phba->hbalock, iflag);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -2104,6 +2104,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
 	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
 
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
 	/* Startup the kernel thread for this host adapter. */
 	phba->worker_thread = kthread_run(lpfc_do_work, phba,
 				       "lpfc_worker_%d", phba->brd_no);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3926affaf727..1e88b7a8a451 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
+	uint32_t evt_posted;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_DOWN_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
-
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
 	return;
 }
 
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 {
 	unsigned long flags;
 	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
 	atomic_inc(&phba->num_cmd_success);
 
 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
-	if ((phba->pport->work_port_events &
-		WORKER_RAMP_UP_QUEUE) == 0) {
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
-	}
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-	spin_lock_irqsave(&phba->hbalock, flags);
-	if (phba->work_wait)
-		wake_up(phba->work_wait);
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70a0a9eab211..3dba3a967ed1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 			phba->work_ha |= HA_ERATT;
 			phba->work_hs = HS_FFER3;
 
-			/* hbalock should already be held */
-			if (phba->work_wait)
-				lpfc_worker_wake_up(phba);
+			lpfc_worker_wake_up(phba);
 
 			return NULL;
 		}
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	phba->work_ha |= HA_ERATT;
 	phba->work_hs = HS_FFER3;
 
-	/* hbalock should already be held */
-	if (phba->work_wait)
-		lpfc_worker_wake_up(phba);
+	lpfc_worker_wake_up(phba);
 
 	return;
 }
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 
-	if (!tmo_posted) {
-		spin_lock_irqsave(&phba->hbalock, iflag);
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
-		spin_unlock_irqrestore(&phba->hbalock, iflag);
-	}
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
 }
 
 void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		prev_pring_flag = pring->flag;
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 		/*
 		 * Error everything on the txq since these iocbs have not been
 		 * given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	spin_lock_irqsave(&phba->hbalock, flags);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
 
 		/*
 		 * Error everything on the txq since these iocbs have not been
@@ -4159,7 +4160,7 @@ lpfc_intr_handler(int irq, void *dev_id)
 						"pwork:x%x hawork:x%x wait:x%x",
 						phba->work_ha, work_ha_copy,
 						(uint32_t)((unsigned long)
-						phba->work_wait));
+						&phba->work_waitq));
 
 					control &=
 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4173,7 @@ lpfc_intr_handler(int irq, void *dev_id)
 						"x%x hawork:x%x wait:x%x",
 						phba->work_ha, work_ha_copy,
 						(uint32_t)((unsigned long)
-						phba->work_wait));
+						&phba->work_waitq));
 				}
 				spin_unlock(&phba->hbalock);
 			}
@@ -4297,9 +4298,8 @@ send_current_mbox:
 
 		spin_lock(&phba->hbalock);
 		phba->work_ha |= work_ha_copy;
-		if (phba->work_wait)
-			lpfc_worker_wake_up(phba);
 		spin_unlock(&phba->hbalock);
+		lpfc_worker_wake_up(phba);
 	}
 
 	ha_copy &= ~(phba->work_ha_mask);
-- 
cgit v1.2.3


From 495a714c50e2c6ca6357129812f983b3ac0a32f2 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Sat, 14 Jun 2008 22:52:59 -0400
Subject: [SCSI] lpfc 8.2.7 : Miscellaneous Fixes

Miscellaneous Fixes:
- Fix bug in mbox sysfs interface that locked in EAGAIN if discovery stalled.
- Fix missing error message when npiv is enabled and the topology is loop
  when a link up event occurs.
- Fix panic in lpfc_scsi_cmd_iocb_cmpl: scsi_buf was NULL, but the earlier
  fix of clearing pCmd before calling scsi_done created race conditions
  with other code paths.
- Fix error in sysfs mailbox structure that didn't rezero on next use.
- Add missing mempool_free() to attachment failure path.
- Fix missing put of ndlp structure during driver unload.
- Fix applications unable to send mailbox commands during discovery.
- Remove unused argument (type) from the lpfc_post_buffer() API.
- Fix vport name not being shown after hbacmd vportcreate.
- Remove repeated code statements.

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc.h         |  1 +
 drivers/scsi/lpfc/lpfc_attr.c    |  3 +--
 drivers/scsi/lpfc/lpfc_crtn.h    |  3 ++-
 drivers/scsi/lpfc/lpfc_ct.c      |  6 +++---
 drivers/scsi/lpfc/lpfc_els.c     |  7 ++-----
 drivers/scsi/lpfc/lpfc_hbadisc.c |  4 ++++
 drivers/scsi/lpfc/lpfc_init.c    | 18 ++++++++++++++----
 drivers/scsi/lpfc/lpfc_scsi.c    |  5 ++---
 drivers/scsi/lpfc/lpfc_sli.c     | 13 +++++++------
 drivers/scsi/lpfc/lpfc_vport.c   | 16 +++++++++++++++-
 10 files changed, 51 insertions(+), 25 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e3e5b540e36c..e0e018d12653 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
 #define LPFC_MAX_SG_SEG_CNT	256	/* sg element count per scsi cmnd */
 #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
 
 /*
  * Following time intervals are used of adjusting SCSI device
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 960baaf11fb1..37bfa0bd1dae 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
 		/* Don't allow mailbox commands to be sent when blocked
 		 * or when in the middle of discovery
 		 */
-		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
-		    vport->fc_flag & FC_NDISC_ACTIVE) {
+		if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
 			sysfs_mbox_idle(phba);
 			spin_unlock_irq(&phba->hbalock);
 			return  -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7c9f8317d972..1b8245213b83 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
 int lpfc_hba_down_prep(struct lpfc_hba *);
 int lpfc_hba_down_post(struct lpfc_hba *);
 void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
 void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
 int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
 extern int lpfc_enable_npiv;
 
 int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *,	size_t);
 void lpfc_terminate_rport_io(struct fc_rport *);
 void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
 
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 5442ce33615a..7fc74cf5823b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		/* Not enough posted buffers; Try posting more buffers */
 		phba->fc_stat.NoRcvBuf++;
 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-			lpfc_post_buffer(phba, pring, 2, 1);
+			lpfc_post_buffer(phba, pring, 2);
 		return;
 	}
 
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			}
 			list_del(&iocbq->list);
 			lpfc_sli_release_iocbq(phba, iocbq);
-			lpfc_post_buffer(phba, pring, i, 1);
+			lpfc_post_buffer(phba, pring, i);
 		}
 	}
 }
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	return;
 }
 
-static int
+int
 lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
 	size_t size)
 {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5d69dee85a8d..f54e0f7eaee3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3857,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 		    els_command == ELS_CMD_FDISC)
 			continue;
 
-		if (vport != piocb->vport)
-			continue;
-
 		if (piocb->drvrTimeout > 0) {
 			if (piocb->drvrTimeout >= timeout)
 				piocb->drvrTimeout -= timeout;
@@ -4013,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
 	cmd = *payload;
 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
-		lpfc_post_buffer(phba, pring, 1, 1);
+		lpfc_post_buffer(phba, pring, 1);
 
 	did = icmd->un.rcvels.remoteID;
 	if (icmd->ulpStatus) {
@@ -4322,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		phba->fc_stat.NoRcvBuf++;
 		/* Not enough posted buffers; Try posting more buffers */
 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
-			lpfc_post_buffer(phba, pring, 0, 1);
+			lpfc_post_buffer(phba, pring, 0);
 		return;
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ba4873c9e2c3..a98d11bf3576 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -917,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+		if (phba->cfg_enable_npiv)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Up Event npiv not supported in loop "
+				"topology\n");
 				/* Get Loop Map information */
 		if (la->il)
 			vport->fc_flag |= FC_LBIT;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 53cedbafffba..5b6e5395c8eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
 		return -ERESTART;
 	}
 
-	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+		mempool_free(pmb, phba->mbox_mem_pool);
 		return -EINVAL;
+	}
 
 	/* Save information as VPD data */
 	vp->rev.rBit = 1;
@@ -1197,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 /*   Returns the number of buffers NOT posted.    */
 /**************************************************/
 int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
-		 int type)
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
 {
 	IOCB_t *icmd;
 	struct lpfc_iocbq *iocb;
@@ -1298,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
 	struct lpfc_sli *psli = &phba->sli;
 
 	/* Ring 0, ELS / CT buffers */
-	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
 	/* Ring 2 - FCP no buffers needed */
 
 	return 0;
@@ -1457,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
 
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					     NLP_EVT_DEVICE_RM);
+
+		/* nlp_type zero is not defined, nlp_flag zero also not defined,
+		 * nlp_state is unused, this happens when
+		 * an initiator has logged
+		 * into us so cleanup this ndlp.
+		 */
+		if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
+			(ndlp->nlp_state == 0))
+			lpfc_nlp_put(ndlp);
 	}
 
 	/* At this point, ALL ndlp's should be gone
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1e88b7a8a451..c94da4f2b8a6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -605,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	result = cmd->result;
 	sdev = cmd->device;
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
-	spin_lock_irqsave(sdev->host->host_lock, flags);
-	lpfc_cmd->pCmd = NULL;	/* This must be done before scsi_done */
-	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -616,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
+		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -686,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * wake up the thread.
 	 */
 	spin_lock_irqsave(sdev->host->host_lock, flags);
+	lpfc_cmd->pCmd = NULL;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
 	spin_unlock_irqrestore(sdev->host->host_lock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 3dba3a967ed1..f40aa7b905f7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3763,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 			   lpfc_ctx_cmd ctx_cmd)
 {
 	struct lpfc_scsi_buf *lpfc_cmd;
-	struct scsi_cmnd *cmnd;
 	int rc = 1;
 
 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
@@ -3773,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
 		return rc;
 
 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
-	cmnd = lpfc_cmd->pCmd;
 
-	if (cmnd == NULL)
+	if (lpfc_cmd->pCmd == NULL)
 		return rc;
 
 	switch (ctx_cmd) {
 	case LPFC_CTX_LUN:
-		if ((cmnd->device->id == tgt_id) &&
-		    (cmnd->device->lun == lun_id))
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
 			rc = 0;
 		break;
 	case LPFC_CTX_TGT:
-		if (cmnd->device->id == tgt_id)
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
 			rc = 0;
 		break;
 	case LPFC_CTX_HOST:
@@ -3995,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 	if (pmboxq->context1)
 		return MBX_NOT_FINISHED;
 
+	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
 	/* setup wake call as IOCB callback */
 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 	/* setup context field to pass wait_queue pointer to wake function  */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6feaf59b0b1b..109f89d98830 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 	int vpi;
 	int rc = VPORT_ERROR;
 	int status;
+	int size;
 
 	if ((phba->sli_rev < 3) ||
 		!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
 
 	memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
 	memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-
+	size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
+	if (size) {
+		vport->vname = kzalloc(size+1, GFP_KERNEL);
+		if (!vport->vname) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1814 Create VPORT failed. "
+					 "vname allocation failed.\n");
+			rc = VPORT_ERROR;
+			lpfc_free_vpi(phba, vpi);
+			destroy_port(vport);
+			goto error_out;
+		}
+		memcpy(vport->vname, fc_vport->symbolic_name, size+1);
+	}
 	if (fc_vport->node_name != 0)
 		u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
 	if (fc_vport->port_name != 0)
-- 
cgit v1.2.3


From ff0f4cb5ea322dcc32d08bab2d758c050ba1ab07 Mon Sep 17 00:00:00 2001
From: James Smart <James.Smart@Emulex.Com>
Date: Sat, 14 Jun 2008 22:53:02 -0400
Subject: [SCSI] lpfc 8.2.7 : Update version to 8.2.7

Update lpfc driver version to 8.2.7

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/lpfc/lpfc_version.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b22b893019f4..ad24cacfbe10 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.2.6"
+#define LPFC_DRIVER_VERSION "8.2.7"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
-- 
cgit v1.2.3


From c95fddc729fafb43f420747027eeb998c2e5e798 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Mon, 16 Jun 2008 10:11:32 -0500
Subject: [SCSI] iscsi class: fix refcount leak

Must do a module_put() if the endpoint lookup fails.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_transport_iscsi.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9fd5c6d87ed1..bc0f74d4ea09 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1422,8 +1422,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		break;
 	case ISCSI_UEVENT_CREATE_BOUND_SESSION:
 		ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
-		if (!ep)
-			return -EINVAL;
+		if (!ep) {
+			err = -EINVAL;
+			break;
+		}
 
 		err = iscsi_if_create_session(priv, ep, ev,
 					ev->u.c_bound_session.initial_cmdsn,
-- 
cgit v1.2.3


From 8e9a20cee4511be4560f9c858d9994eb6913731e Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Mon, 16 Jun 2008 10:11:33 -0500
Subject: [SCSI] libiscsi, iscsi_tcp, ib_iser: fix setting of can_queue with
 old tools.

This patch fixes two bugs that are related.

1. Old tools did not set can_queue/cmds_max. This patch modifies
libiscsi so that when we add the host we catch this and set it
to the default.

2. iscsi_tcp thought that the scsi command passed to the eh
functions needed an iscsi_cmd_task allocated for it. It only
needed a mgmt task, and now it does not matter since everything
comes from the same pool and libiscsi handles this for the
drivers. ib_iser had copied iscsi_tcp's code and set can_queue
to its max - 1 to handle this. So this patch removes the max - 1
and just sets it to the max.
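
For illustration only (not part of the patch), a minimal LLD-side sketch of
what this buys: a host that never sets can_queue, as the old tools left it,
now picks up the default when it is registered through iscsi_host_add().
The example_register_host() helper below is hypothetical.

#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

/*
 * Hypothetical LLD setup path that, like the old userspace tools,
 * never fills in can_queue.  With this change, iscsi_host_add()
 * notices the zero value and falls back to ISCSI_DEF_XMIT_CMDS_MAX.
 */
static int example_register_host(struct Scsi_Host *shost)
{
	/* shost->can_queue deliberately left at 0 here */
	if (iscsi_host_add(shost, NULL))
		return -ENOMEM;

	/* libiscsi has now set shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX */
	return 0;
}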

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
 drivers/scsi/iscsi_tcp.c                 | 1 -
 drivers/scsi/libiscsi.c                  | 6 ++++++
 3 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index c02eabd383a1..a56931e03976 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -595,7 +595,6 @@ static struct scsi_host_template iscsi_iser_sht = {
 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
-	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
 	.max_sectors		= 1024,
 	.cmd_per_lun            = ISCSI_MAX_CMD_PER_LUN,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0bd8b3dc3c19..2a2f0094570f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1865,7 +1865,6 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	shost->max_id = 0;
 	shost->max_channel = 0;
 	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
-	shost->can_queue = cmds_max;
 
 	if (iscsi_host_add(shost, NULL))
 		goto free_host;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8b4e412a0974..299e075a7b34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1857,6 +1857,9 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
  */
 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
 {
+	if (!shost->can_queue)
+		shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+
 	return scsi_add_host(shost, pdev);
 }
 EXPORT_SYMBOL_GPL(iscsi_host_add);
@@ -1942,6 +1945,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
 	int cmd_i, scsi_cmds, total_cmds = cmds_max;
+
+	if (!total_cmds)
+		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
 	/*
 	 * The iscsi layer needs some tasks for nop handling and tmfs,
 	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
-- 
cgit v1.2.3


From 4c2133c82385c31dd3eed76b07da1e986eb00294 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Mon, 16 Jun 2008 10:11:34 -0500
Subject: [SCSI] iscsi class: update version number

Update iscsi class version number.

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_transport_iscsi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index bc0f74d4ea09..8e34f3c08575 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,7 +34,7 @@
 #define ISCSI_CONN_ATTRS 13
 #define ISCSI_HOST_ATTRS 4
 
-#define ISCSI_TRANSPORT_VERSION "2.0-869"
+#define ISCSI_TRANSPORT_VERSION "2.0-870"
 
 struct iscsi_internal {
 	int daemon_pid;
-- 
cgit v1.2.3


From f80f868ec463b0463b332cdb704fe5438f013f98 Mon Sep 17 00:00:00 2001
From: Mike Christie <michaelc@cs.wisc.edu>
Date: Mon, 16 Jun 2008 10:11:35 -0500
Subject: [SCSI] iscsi class: fix endpoint leak

class_find_device gets a ref to the device so we must release it.
The class will serialize access to the ep, so we do not have to worry
about a removal racing with the caller's access, and we can simplify
the usage by dropping the ref right away.
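
For illustration, a generic sketch of the reference-counting pattern the fix
follows. struct my_obj and my_lookup() are hypothetical; only the
class_find_device()/put_device() pairing and the "drop the ref right away"
rule come from the patch.

#include <linux/device.h>

struct my_obj {				/* hypothetical object embedding a device */
	struct device dev;
	u64 id;
};

static struct my_obj *my_lookup(struct class *cls, u64 *handle,
				int (*match)(struct device *, void *))
{
	struct device *dev = class_find_device(cls, handle, match);

	if (!dev)
		return NULL;

	/*
	 * class_find_device() returned the device with an extra reference;
	 * drop it immediately, relying on the class (as the iscsi interface
	 * does) to keep removals from racing with this lookup.
	 */
	put_device(dev);
	return container_of(dev, struct my_obj, dev);
}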

Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_transport_iscsi.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8e34f3c08575..3af7cbcc5c5d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -219,6 +219,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
 
 struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 {
+	struct iscsi_endpoint *ep;
 	struct device *dev;
 
 	dev = class_find_device(&iscsi_endpoint_class, &handle,
@@ -226,7 +227,13 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 	if (!dev)
 		return NULL;
 
-	return iscsi_dev_to_endpoint(dev);
+	ep = iscsi_dev_to_endpoint(dev);
+	/*
+	 * we can drop this now because the interface will prevent
+	 * removals and lookups from racing.
+	 */
+	put_device(dev);
+	return ep;
 }
 EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
 
-- 
cgit v1.2.3


From eac6e8e449647cbb9efee53977c8bfee0aa7d69e Mon Sep 17 00:00:00 2001
From: Matthew Wilcox <matthew@wil.cx>
Date: Thu, 19 Jun 2008 10:02:58 -0600
Subject: [SCSI] scsi_debug: add support for rotation speed

Add support for VPD page b1 to scsi_debug

SCSI VPD page b1 reports the nominal rotation speed of the device.
Since scsi_debug is RAM-based, claim to be a non-rotating medium.
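
For reference, a hedged userspace-style sketch of how an initiator might
decode the field this page carries. Per SBC, bytes 4-5 of VPD page 0xB1
(Block Device Characteristics) hold the MEDIUM ROTATION RATE; 0001h, which
is what scsi_debug now returns, means a non-rotating medium.

#include <stdint.h>
#include <stdio.h>

/*
 * Decode the MEDIUM ROTATION RATE field from an INQUIRY VPD page 0xB1
 * response buffer: 0000h = not reported, 0001h = non-rotating medium,
 * otherwise the nominal rotation rate in rpm.
 */
static void print_rotation_rate(const uint8_t *vpd_b1)
{
	uint16_t rate = (vpd_b1[4] << 8) | vpd_b1[5];

	if (rate == 0x0000)
		printf("rotation rate not reported\n");
	else if (rate == 0x0001)
		printf("non-rotating (solid state) medium\n");
	else
		printf("nominal rotation rate: %u rpm\n", rate);
}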

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Douglas Gilbert <dougg@torque.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_debug.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3901125455c9..01d11a01ffbf 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -643,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
 	return sizeof(vpdb0_data);
 }
 
+static int inquiry_evpd_b1(unsigned char *arr)
+{
+	memset(arr, 0, 0x3c);
+	arr[0] = 0;
+	arr[1] = 1;
+
+	return 0x3c;
+}
 
 #define SDEBUG_LONG_INQ_SZ 96
 #define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -698,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
 			arr[n++] = 0x88;  /* SCSI ports */
 			arr[n++] = 0x89;  /* ATA information */
 			arr[n++] = 0xb0;  /* Block limits (SBC) */
+			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
 			arr[3] = n - 4;	  /* number of supported VPD pages */
 		} else if (0x80 == cmd[2]) { /* unit serial number */
 			arr[1] = cmd[2];	/*sanity */
@@ -737,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
 		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
 			arr[1] = cmd[2];        /*sanity */
 			arr[3] = inquiry_evpd_b0(&arr[4]);
+		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+			arr[1] = cmd[2];        /*sanity */
+			arr[3] = inquiry_evpd_b1(&arr[4]);
 		} else {
 			/* Illegal request, invalid field in cdb */
 			mk_sense_buffer(devip, ILLEGAL_REQUEST,
-- 
cgit v1.2.3


From a793804f25fb2c0fe2b784450092699ea3475332 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@sunset.davemloft.net>
Date: Sun, 30 Sep 2007 17:10:42 -0700
Subject: [SCSI] esp: Correct chip ID probing sequence.

The features enable bit has to be set in the config2 register
before we can be absolutely sure we will probe a correct
part unique ID and family code from the transfer-count-high
register.

Also, reload the CFACT, STP, SOFF, and TIMEO near the end of
esp_reset_esp().

From a patch by Maciej W. Rozycki.
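
Purely as an ordering sketch, assuming the esp_scsi.c context for the
esp_read8()/esp_write8() macros and using ESP_ID_REG as a placeholder name
for the transfer-count-high register (all other identifiers appear in the
patch), the probe sequence the fix establishes is:

/*
 * On fast ESP variants the features-enable bit must be written to CONFIG2
 * after the chip reset and before the ID read, or the part unique ID and
 * family code read back are not reliable.
 */
static u8 example_probe_chip_id(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_RC);			/* reset the chip */
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);/* enable features first */
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	return esp_read8(ESP_ID_REG);			/* ID is now valid */
}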

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/esp_scsi.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index a0b6d414953d..3d5ad243e77f 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
 	/* Now reset the ESP chip */
 	scsi_esp_cmd(esp, ESP_CMD_RC);
 	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+	if (esp->rev == FAST)
+		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
 	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 
-	/* Reload the configuration registers */
-	esp_write8(esp->cfact, ESP_CFACT);
-
-	esp->prev_stp = 0;
-	esp_write8(esp->prev_stp, ESP_STP);
-
-	esp->prev_soff = 0;
-	esp_write8(esp->prev_soff, ESP_SOFF);
-
-	esp_write8(esp->neg_defp, ESP_TIMEO);
-
 	/* This is the only point at which it is reliable to read
 	 * the ID-code for a fast ESP chip variants.
 	 */
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
 		break;
 	}
 
+	/* Reload the configuration registers */
+	esp_write8(esp->cfact, ESP_CFACT);
+
+	esp->prev_stp = 0;
+	esp_write8(esp->prev_stp, ESP_STP);
+
+	esp->prev_soff = 0;
+	esp_write8(esp->prev_soff, ESP_SOFF);
+
+	esp_write8(esp->neg_defp, ESP_TIMEO);
+
 	/* Eat any bitrot in the chip */
 	esp_read8(ESP_INTRPT);
 	udelay(100);
-- 
cgit v1.2.3


From dbfe54a9c9a9e31cdf71704e4e70d10d22a57264 Mon Sep 17 00:00:00 2001
From: Frans Pop <elendil@planet.nl>
Date: Thu, 19 Jun 2008 20:20:12 -0700
Subject: [SCSI] esp: correct module name in Kconfig help for SCSI_SUNESP

The module name was changed from esp to sun_esp some time ago.
Also correct the list of chips supported by the driver.

Signed-off-by: Frans Pop <elendil@planet.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7886ec6e5f87..994943f3e139 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1738,10 +1738,12 @@ config SCSI_SUNESP
 	select SCSI_SPI_ATTRS
 	help
 	  This is the driver for the Sun ESP SCSI host adapter. The ESP
-	  chipset is present in most SPARC SBUS-based computers.
+	  chipset is present in most SPARC SBUS-based computers and
+	  supports the Emulex family of ESP SCSI chips (esp100, esp100A,
+	  esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called esp.
+	  module will be called sun_esp.
 
 config ZFCP
 	tristate "FCP host bus adapter driver for IBM eServer zSeries"
-- 
cgit v1.2.3


From aa91696e56a0870db5754610e9f9b937e77507e0 Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen" <martin.petersen@oracle.com>
Date: Tue, 17 Jun 2008 12:47:32 -0400
Subject: [SCSI] sd: Move sd.h header file

Christoph objected to having sd.h in include/scsi since it is internal
to the sd driver.  Move it to drivers/scsi/sd.h.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/sd.c |  2 +-
 drivers/scsi/sd.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 include/scsi/sd.h | 57 -------------------------------------------------------
 3 files changed, 58 insertions(+), 58 deletions(-)
 create mode 100644 drivers/scsi/sd.h
 delete mode 100644 include/scsi/sd.h

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 01cefbb2d539..e19691f880b9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,8 +58,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
-#include <scsi/sd.h>
 
+#include "sd.h"
 #include "scsi_logging.h"
 
 MODULE_AUTHOR("Eric Youngdale");
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
new file mode 100644
index 000000000000..4f032d48cb6e
--- /dev/null
+++ b/drivers/scsi/sd.h
@@ -0,0 +1,57 @@
+#ifndef _SCSI_DISK_H
+#define _SCSI_DISK_H
+
+/*
+ * More than enough for everybody ;)  The huge number of majors
+ * is a leftover from 16bit dev_t days, we don't really need that
+ * much numberspace.
+ */
+#define SD_MAJORS	16
+
+/*
+ * This is limited by the naming scheme enforced in sd_probe,
+ * add another character to it if you really need more disks.
+ */
+#define SD_MAX_DISKS	(((26 * 26) + 26 + 1) * 26)
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+#define SD_TIMEOUT		(30 * HZ)
+#define SD_MOD_TIMEOUT		(75 * HZ)
+
+/*
+ * Number of allowed retries
+ */
+#define SD_MAX_RETRIES		5
+#define SD_PASSTHROUGH_RETRIES	1
+
+/*
+ * Size of the initial data buffer for mode and read capacity data
+ */
+#define SD_BUF_SIZE		512
+
+struct scsi_disk {
+	struct scsi_driver *driver;	/* always &sd_template */
+	struct scsi_device *device;
+	struct device	dev;
+	struct gendisk	*disk;
+	unsigned int	openers;	/* protected by BKL for now, yuck */
+	sector_t	capacity;	/* size in 512-byte sectors */
+	u32		index;
+	u8		media_present;
+	u8		write_prot;
+	unsigned	previous_state : 1;
+	unsigned	WCE : 1;	/* state of disk WCE bit */
+	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
+	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
+};
+#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+
+#define sd_printk(prefix, sdsk, fmt, a...)				\
+        (sdsk)->disk ?							\
+	sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,		\
+		    (sdsk)->disk->disk_name, ##a) :			\
+	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+
+#endif /* _SCSI_DISK_H */
diff --git a/include/scsi/sd.h b/include/scsi/sd.h
deleted file mode 100644
index 4f032d48cb6e..000000000000
--- a/include/scsi/sd.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _SCSI_DISK_H
-#define _SCSI_DISK_H
-
-/*
- * More than enough for everybody ;)  The huge number of majors
- * is a leftover from 16bit dev_t days, we don't really need that
- * much numberspace.
- */
-#define SD_MAJORS	16
-
-/*
- * This is limited by the naming scheme enforced in sd_probe,
- * add another character to it if you really need more disks.
- */
-#define SD_MAX_DISKS	(((26 * 26) + 26 + 1) * 26)
-
-/*
- * Time out in seconds for disks and Magneto-opticals (which are slower).
- */
-#define SD_TIMEOUT		(30 * HZ)
-#define SD_MOD_TIMEOUT		(75 * HZ)
-
-/*
- * Number of allowed retries
- */
-#define SD_MAX_RETRIES		5
-#define SD_PASSTHROUGH_RETRIES	1
-
-/*
- * Size of the initial data buffer for mode and read capacity data
- */
-#define SD_BUF_SIZE		512
-
-struct scsi_disk {
-	struct scsi_driver *driver;	/* always &sd_template */
-	struct scsi_device *device;
-	struct device	dev;
-	struct gendisk	*disk;
-	unsigned int	openers;	/* protected by BKL for now, yuck */
-	sector_t	capacity;	/* size in 512-byte sectors */
-	u32		index;
-	u8		media_present;
-	u8		write_prot;
-	unsigned	previous_state : 1;
-	unsigned	WCE : 1;	/* state of disk WCE bit */
-	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
-	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
-};
-#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
-
-#define sd_printk(prefix, sdsk, fmt, a...)				\
-        (sdsk)->disk ?							\
-	sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,		\
-		    (sdsk)->disk->disk_name, ##a) :			\
-	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
-
-#endif /* _SCSI_DISK_H */
-- 
cgit v1.2.3


From 5b635da11e3a6387172abd651d26d8ef54b1fbc7 Mon Sep 17 00:00:00 2001
From: "Martin K. Petersen" <martin.petersen@oracle.com>
Date: Wed, 25 Jun 2008 11:22:41 -0400
Subject: [SCSI] sd: Move scsi_disk() accessor function to sd.h

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/sd.c | 5 -----
 drivers/scsi/sd.h | 5 +++++
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e19691f880b9..71069d952dc7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
 	}
 }
 
-static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
-{
-	return container_of(disk->private_data, struct scsi_disk, driver);
-}
-
 static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = NULL;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 4f032d48cb6e..03a3d45cfa42 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -48,6 +48,11 @@ struct scsi_disk {
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+	return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
 #define sd_printk(prefix, sdsk, fmt, a...)				\
         (sdsk)->disk ?							\
 	sdev_printk(prefix, (sdsk)->device, "[%s] " fmt,		\
-- 
cgit v1.2.3


From 39120e11705782c77d3a47d7d2927676fd8e3aaa Mon Sep 17 00:00:00 2001
From: Brian King <brking@linux.vnet.ibm.com>
Date: Tue, 1 Jul 2008 13:03:19 -0500
Subject: [SCSI] sg: Add target reset support

Adds support for target reset to SG_SCSI_RESET.
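
For illustration, a hedged userspace sketch of how an application would
request the new reset type through the existing SG_SCSI_RESET ioctl.
/dev/sg0 is just an example node, and the #define guards against sg.h
headers that predate this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

#ifndef SG_SCSI_RESET_TARGET
#define SG_SCSI_RESET_TARGET 4	/* value added by this patch */
#endif

int main(void)
{
	int fd = open("/dev/sg0", O_RDWR);
	int val = SG_SCSI_RESET_TARGET;

	if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &val) < 0) {
		perror("target reset");
		return 1;
	}
	return 0;
}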

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Acked-by: Douglas Gilbert <dougg@torque.net>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/sg.c | 3 +++
 include/scsi/sg.h | 1 +
 2 files changed, 4 insertions(+)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ea0edd1b2e76..2010fa039cfe 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1026,6 +1026,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		case SG_SCSI_RESET_DEVICE:
 			val = SCSI_TRY_RESET_DEVICE;
 			break;
+		case SG_SCSI_RESET_TARGET:
+			val = SCSI_TRY_RESET_TARGET;
+			break;
 		case SG_SCSI_RESET_BUS:
 			val = SCSI_TRY_RESET_BUS;
 			break;
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 519c49a0fc11..934ae389671d 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -206,6 +206,7 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
 #define		SG_SCSI_RESET_DEVICE	1
 #define		SG_SCSI_RESET_BUS	2
 #define		SG_SCSI_RESET_HOST	3
+#define		SG_SCSI_RESET_TARGET	4
 
 /* synchronous SCSI command ioctl, (only in version 3 interface) */
 #define SG_IO 0x2285   /* similar effect as write() followed by read() */
-- 
cgit v1.2.3


From 072b91f9c6510d0ec4a49d07dbc318760c7da7b3 Mon Sep 17 00:00:00 2001
From: Brian King <brking@linux.vnet.ibm.com>
Date: Tue, 1 Jul 2008 13:14:30 -0500
Subject: [SCSI] ibmvfc: IBM Power Virtual Fibre Channel Adapter Client Driver

This patch adds a new device driver to support the Virtual Fibre Channel
interface on IBM Power based servers. The Virtual I/O Server on IBM Power
servers utilizes N-Port ID Virtualization to export a Virtual Fibre Channel
adapter to the client. This driver is the client device driver.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/Kconfig           |   19 +
 drivers/scsi/Makefile          |    1 +
 drivers/scsi/ibmvscsi/Makefile |    1 +
 drivers/scsi/ibmvscsi/ibmvfc.c | 3910 ++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/ibmvscsi/ibmvfc.h |  682 +++++++
 5 files changed, 4613 insertions(+)
 create mode 100644 drivers/scsi/ibmvscsi/ibmvfc.c
 create mode 100644 drivers/scsi/ibmvscsi/ibmvfc.h

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 994943f3e139..26be540d1dd3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
 	  To compile this driver as a module, choose M here: the
 	  module will be called ibmvstgt.
 
+config SCSI_IBMVFC
+	tristate "IBM Virtual FC support"
+	depends on PPC_PSERIES && SCSI
+	select SCSI_FC_ATTRS
+	help
+	  This is the IBM POWER Virtual FC Client
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ibmvfc.
+
+config SCSI_IBMVFC_TRACE
+	bool "enable driver internal trace"
+	depends on SCSI_IBMVFC
+	default y
+	help
+	  If you say Y here, the driver will trace all commands issued
+	  to the adapter. Performance impact is minimal. Trace can be
+	  dumped using /sys/class/scsi_host/hostXX/trace.
+
 config SCSI_INITIO
 	tristate "Initio 9100U(W) support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index aa8272e188ea..a8149677de23 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR)		+= ipr.o
 obj-$(CONFIG_SCSI_SRP)		+= libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 6ac0633d5452..a423d9633625 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES)	+= iseries_vscsi.o
 ibmvscsic-$(CONFIG_PPC_PSERIES)	+= rpa_vscsi.o 
 
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
+obj-$(CONFIG_SCSI_IBMVFC)	+= ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644
index 000000000000..eb702b96d57c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -0,0 +1,3910 @@
+/*
+ * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <linux/stringify.h>
+#include <asm/firmware.h>
+#include <asm/irq.h>
+#include <asm/vio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include "ibmvfc.h"
+
+static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
+static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
+static unsigned int max_lun = IBMVFC_MAX_LUN;
+static unsigned int max_targets = IBMVFC_MAX_TARGETS;
+static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
+static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
+static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
+static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static LIST_HEAD(ibmvfc_head);
+static DEFINE_SPINLOCK(ibmvfc_driver_lock);
+static struct scsi_transport_template *ibmvfc_transport_template;
+
+MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
+MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+
+module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
+		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
+module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_timeout,
+		 "Default timeout in seconds for initialization and EH commands. "
+		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
+module_param_named(max_requests, max_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
+		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_lun, max_lun, uint, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
+		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
+module_param_named(max_targets, max_targets, uint, S_IRUGO);
+MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
+		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
+module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
+		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
+module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable driver debug information. "
+		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
+module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
+		 "transport should insulate the loss of a remote port. Once this "
+		 "value is exceeded, the scsi target is removed. "
+		 "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
+module_param_named(log_level, log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
+		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+
+static const struct {
+	u16 status;
+	u16 error;
+	u8 result;
+	u8 retry;
+	int log;
+	char *name;
+} cmd_status [] = {
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
+
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
+
+	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
+	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
+
+	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+};
+
+static void ibmvfc_npiv_login(struct ibmvfc_host *);
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+
+static const char *unknown_error = "unknown error";
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_trc_start - Log a start trace entry
+ * @evt:		ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_start(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+	struct ibmvfc_trace_entry *entry;
+
+	entry = &vhost->trace[vhost->trace_index++];
+	entry->evt = evt;
+	entry->time = jiffies;
+	entry->fmt = evt->crq.format;
+	entry->type = IBMVFC_TRC_START;
+
+	switch (entry->fmt) {
+	case IBMVFC_CMD_FORMAT:
+		entry->op_code = vfc_cmd->iu.cdb[0];
+		entry->scsi_id = vfc_cmd->tgt_scsi_id;
+		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+		entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
+		break;
+	case IBMVFC_MAD_FORMAT:
+		entry->op_code = mad->opcode;
+		break;
+	default:
+		break;
+	};
+}
+
+/**
+ * ibmvfc_trc_end - Log an end trace entry
+ * @evt:		ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_end(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
+	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+
+	entry->evt = evt;
+	entry->time = jiffies;
+	entry->fmt = evt->crq.format;
+	entry->type = IBMVFC_TRC_END;
+
+	switch (entry->fmt) {
+	case IBMVFC_CMD_FORMAT:
+		entry->op_code = vfc_cmd->iu.cdb[0];
+		entry->scsi_id = vfc_cmd->tgt_scsi_id;
+		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+		entry->u.end.status = vfc_cmd->status;
+		entry->u.end.error = vfc_cmd->error;
+		entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
+		entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
+		entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+		break;
+	case IBMVFC_MAD_FORMAT:
+		entry->op_code = mad->opcode;
+		entry->u.end.status = mad->status;
+		break;
+	default:
+		break;
+
+	};
+}
+
+#else
+#define ibmvfc_trc_start(evt) do { } while (0)
+#define ibmvfc_trc_end(evt) do { } while (0)
+#endif
+
+/**
+ * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
+ * @status:		status / error class
+ * @error:		error
+ *
+ * Return value:
+ *	index into cmd_status / -EINVAL on failure
+ **/
+static int ibmvfc_get_err_index(u16 status, u16 error)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
+		if ((cmd_status[i].status & status) == cmd_status[i].status &&
+		    cmd_status[i].error == error)
+			return i;
+
+	return -EINVAL;
+}
+
+/**
+ * ibmvfc_get_cmd_error - Find the error description for the fcp response
+ * @status:		status / error class
+ * @error:		error
+ *
+ * Return value:
+ *	error description string
+ **/
+static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
+{
+	int rc = ibmvfc_get_err_index(status, error);
+	if (rc >= 0)
+		return cmd_status[rc].name;
+	return unknown_error;
+}
+
+/**
+ * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vfc_cmd:	ibmvfc command struct
+ *
+ * Return value:
+ *	SCSI result value to return for completed command
+ **/
+static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+{
+	int err;
+	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+	int fc_rsp_len = rsp->fcp_rsp_len;
+
+	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
+	    ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+	     rsp->data.info.rsp_code))
+		return DID_ERROR << 16;
+
+	if (!vfc_cmd->status) {
+		if (rsp->flags & FCP_RESID_OVER)
+			return rsp->scsi_status | (DID_ERROR << 16);
+		else
+			return rsp->scsi_status | (DID_OK << 16);
+	}
+
+	err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+	if (err >= 0)
+		return rsp->scsi_status | (cmd_status[err].result << 16);
+	return rsp->scsi_status | (DID_ERROR << 16);
+}
+
+/**
+ * ibmvfc_retry_cmd - Determine if error status is retryable
+ * @status:		status / error class
+ * @error:		error
+ *
+ * Return value:
+ *	1 if error should be retried / 0 if it should not
+ **/
+static int ibmvfc_retry_cmd(u16 status, u16 error)
+{
+	int rc = ibmvfc_get_err_index(status, error);
+
+	if (rc >= 0)
+		return cmd_status[rc].retry;
+	return 1;
+}
+
+static const char *unknown_fc_explain = "unknown fc explain";
+
+static const struct {
+	u16 fc_explain;
+	char *name;
+} ls_explain [] = {
+	{ 0x00, "no additional explanation" },
+	{ 0x01, "service parameter error - options" },
+	{ 0x03, "service parameter error - initiator control" },
+	{ 0x05, "service parameter error - recipient control" },
+	{ 0x07, "service parameter error - received data field size" },
+	{ 0x09, "service parameter error - concurrent seq" },
+	{ 0x0B, "service parameter error - credit" },
+	{ 0x0D, "invalid N_Port/F_Port_Name" },
+	{ 0x0E, "invalid node/Fabric Name" },
+	{ 0x0F, "invalid common service parameters" },
+	{ 0x11, "invalid association header" },
+	{ 0x13, "association header required" },
+	{ 0x15, "invalid originator S_ID" },
+	{ 0x17, "invalid OX_ID-RX-ID combination" },
+	{ 0x19, "command (request) already in progress" },
+	{ 0x1E, "N_Port Login requested" },
+	{ 0x1F, "Invalid N_Port_ID" },
+};
+
+static const struct {
+	u16 fc_explain;
+	char *name;
+} gs_explain [] = {
+	{ 0x00, "no additional explanation" },
+	{ 0x01, "port identifier not registered" },
+	{ 0x02, "port name not registered" },
+	{ 0x03, "node name not registered" },
+	{ 0x04, "class of service not registered" },
+	{ 0x06, "initial process associator not registered" },
+	{ 0x07, "FC-4 TYPEs not registered" },
+	{ 0x08, "symbolic port name not registered" },
+	{ 0x09, "symbolic node name not registered" },
+	{ 0x0A, "port type not registered" },
+	{ 0xF0, "authorization exception" },
+	{ 0xF1, "authentication exception" },
+	{ 0xF2, "data base full" },
+	{ 0xF3, "data base empty" },
+	{ 0xF4, "processing request" },
+	{ 0xF5, "unable to verify connection" },
+	{ 0xF6, "devices not in a common zone" },
+};
+
+/**
+ * ibmvfc_get_ls_explain - Return the FC Explain description text
+ * @status:	FC Explain status
+ *
+ * Returns:
+ *	error string
+ **/
+static const char *ibmvfc_get_ls_explain(u16 status)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
+		if (ls_explain[i].fc_explain == status)
+			return ls_explain[i].name;
+
+	return unknown_fc_explain;
+}
+
+/**
+ * ibmvfc_get_gs_explain - Return the FC Explain description text
+ * @status:	FC Explain status
+ *
+ * Returns:
+ *	error string
+ **/
+static const char *ibmvfc_get_gs_explain(u16 status)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
+		if (gs_explain[i].fc_explain == status)
+			return gs_explain[i].name;
+
+	return unknown_fc_explain;
+}
+
+static const struct {
+	enum ibmvfc_fc_type fc_type;
+	char *name;
+} fc_type [] = {
+	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
+	{ IBMVFC_PORT_REJECT, "port reject" },
+	{ IBMVFC_LS_REJECT, "ELS reject" },
+	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
+	{ IBMVFC_PORT_BUSY, "port busy" },
+	{ IBMVFC_BASIC_REJECT, "basic reject" },
+};
+
+static const char *unknown_fc_type = "unknown fc type";
+
+/**
+ * ibmvfc_get_fc_type - Return the FC Type description text
+ * @status:	FC Type error status
+ *
+ * Returns:
+ *	error string
+ **/
+static const char *ibmvfc_get_fc_type(u16 status)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
+		if (fc_type[i].fc_type == status)
+			return fc_type[i].name;
+
+	return unknown_fc_type;
+}
+
+/**
+ * ibmvfc_set_tgt_action - Set the next init action for the target
+ * @tgt:		ibmvfc target struct
+ * @action:		action to perform
+ *
+ **/
+static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+				  enum ibmvfc_target_action action)
+{
+	switch (tgt->action) {
+	case IBMVFC_TGT_ACTION_DEL_RPORT:
+		break;
+	default:
+		tgt->action = action;
+		break;
+	}
+}
+
+/**
+ * ibmvfc_set_host_state - Set the state for the host
+ * @vhost:		ibmvfc host struct
+ * @state:		state to set host to
+ *
+ * Returns:
+ *	0 if state changed / non-zero if not changed
+ **/
+static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
+				  enum ibmvfc_host_state state)
+{
+	int rc = 0;
+
+	switch (vhost->state) {
+	case IBMVFC_HOST_OFFLINE:
+		rc = -EINVAL;
+		break;
+	default:
+		vhost->state = state;
+		break;
+	};
+
+	return rc;
+}
+
+/**
+ * ibmvfc_set_host_action - Set the next init action for the host
+ * @vhost:		ibmvfc host struct
+ * @action:		action to perform
+ *
+ **/
+static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+				   enum ibmvfc_host_action action)
+{
+	switch (action) {
+	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
+			vhost->action = action;
+		break;
+	case IBMVFC_HOST_ACTION_INIT_WAIT:
+		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
+			vhost->action = action;
+		break;
+	case IBMVFC_HOST_ACTION_QUERY:
+		switch (vhost->action) {
+		case IBMVFC_HOST_ACTION_INIT_WAIT:
+		case IBMVFC_HOST_ACTION_NONE:
+		case IBMVFC_HOST_ACTION_TGT_ADD:
+			vhost->action = action;
+			break;
+		default:
+			break;
+		};
+		break;
+	case IBMVFC_HOST_ACTION_TGT_INIT:
+		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+			vhost->action = action;
+		break;
+	case IBMVFC_HOST_ACTION_INIT:
+	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+	case IBMVFC_HOST_ACTION_TGT_ADD:
+	case IBMVFC_HOST_ACTION_NONE:
+	default:
+		vhost->action = action;
+		break;
+	};
+}
+
+/**
+ * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
+ * @vhost:		ibmvfc host struct
+ *
+ * Return value:
+ *	nothing
+ **/
+static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
+{
+	if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+		scsi_block_requests(vhost->host);
+		ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+	} else
+		vhost->reinit = 1;
+
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_link_down - Handle a link down event from the adapter
+ * @vhost:	ibmvfc host struct
+ * @state:	ibmvfc host state to enter
+ *
+ **/
+static void ibmvfc_link_down(struct ibmvfc_host *vhost,
+			     enum ibmvfc_host_state state)
+{
+	struct ibmvfc_target *tgt;
+
+	ENTER;
+	scsi_block_requests(vhost->host);
+	list_for_each_entry(tgt, &vhost->targets, queue)
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+	ibmvfc_set_host_state(vhost, state);
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
+	wake_up(&vhost->work_wait_q);
+	LEAVE;
+}
+
+/**
+ * ibmvfc_init_host - Start host initialization
+ * @vhost:		ibmvfc host struct
+ *
+ * Return value:
+ *	nothing
+ **/
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_target *tgt;
+
+	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+			dev_err(vhost->dev,
+				"Host initialization retries exceeded. Taking adapter offline\n");
+			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+			return;
+		}
+	}
+
+	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+		list_for_each_entry(tgt, &vhost->targets, queue)
+			tgt->need_login = 1;
+		scsi_block_requests(vhost->host);
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+		vhost->job_step = ibmvfc_npiv_login;
+		wake_up(&vhost->work_wait_q);
+	}
+}
+
+/**
+ * ibmvfc_send_crq - Send a CRQ
+ * @vhost:	ibmvfc host struct
+ * @word1:	the first 64 bits of the data
+ * @word2:	the second 64 bits of the data
+ *
+ * Return value:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
+{
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvfc_send_crq_init - Send a CRQ init message
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
+{
+	ibmvfc_dbg(vhost, "Sending CRQ init\n");
+	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
+{
+	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
+	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
+ * @vhost:	ibmvfc host struct
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ **/
+static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
+{
+	long rc;
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+	ibmvfc_dbg(vhost, "Releasing CRQ\n");
+	free_irq(vdev->irq, vhost);
+	do {
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	vhost->state = IBMVFC_NO_CRQ;
+	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	free_page((unsigned long)crq->msgs);
+}
+
+/**
+ * ibmvfc_reenable_crq_queue - reenables the CRQ
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
+{
+	int rc;
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+	/* Re-enable the CRQ */
+	do {
+		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	if (rc)
+		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvfc_reset_crq - resets a crq after a failure
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+{
+	int rc;
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+	/* Close the CRQ */
+	do {
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	vhost->state = IBMVFC_NO_CRQ;
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+	/* Clean out the queue */
+	memset(crq->msgs, 0, PAGE_SIZE);
+	crq->cur = 0;
+
+	/* And re-open it again */
+	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+				crq->msg_token, PAGE_SIZE);
+
+	if (rc == H_CLOSED)
+		/* Adapter is good, but other end is not ready */
+		dev_warn(vhost->dev, "Partner adapter not ready\n");
+	else if (rc != 0)
+		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
+	return rc;
+}
+
+/**
+ * ibmvfc_valid_event - Determines if event is valid.
+ * @pool:	event_pool that contains the event
+ * @evt:	ibmvfc event to be checked for validity
+ *
+ * Return value:
+ *	1 if event is valid / 0 if event is not valid
+ **/
+static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
+			      struct ibmvfc_event *evt)
+{
+	int index = evt - pool->events;
+	if (index < 0 || index >= pool->size)	/* outside of bounds */
+		return 0;
+	if (evt != pool->events + index)	/* unaligned */
+		return 0;
+	return 1;
+}
+
+/**
+ * ibmvfc_free_event - Free the specified event
+ * @evt:	ibmvfc_event to be freed
+ *
+ **/
+static void ibmvfc_free_event(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_event_pool *pool = &vhost->pool;
+
+	BUG_ON(!ibmvfc_valid_event(pool, evt));
+	BUG_ON(atomic_inc_return(&evt->free) != 1);
+	list_add_tail(&evt->queue, &vhost->free);
+}
+
+/**
+ * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
+ * @evt:	ibmvfc event struct
+ *
+ * This function does not setup any error status, that must be done
+ * before this function gets called.
+ **/
+static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
+{
+	struct scsi_cmnd *cmnd = evt->cmnd;
+
+	if (cmnd) {
+		scsi_dma_unmap(cmnd);
+		cmnd->scsi_done(cmnd);
+	}
+
+	ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_fail_request - Fail request with specified error code
+ * @evt:		ibmvfc event struct
+ * @error_code:	error code to fail request with
+ *
+ * Return value:
+ *	none
+ **/
+static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
+{
+	if (evt->cmnd) {
+		evt->cmnd->result = (error_code << 16);
+		evt->done = ibmvfc_scsi_eh_done;
+	} else
+		evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
+
+	list_del(&evt->queue);
+	del_timer(&evt->timer);
+	ibmvfc_trc_end(evt);
+	evt->done(evt);
+}
+
+/**
+ * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
+ * @vhost:		ibmvfc host struct
+ * @error_code:	error code to fail requests with
+ *
+ * Return value:
+ *	none
+ **/
+static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
+{
+	struct ibmvfc_event *evt, *pos;
+
+	ibmvfc_dbg(vhost, "Purging all requests\n");
+	list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+		ibmvfc_fail_request(evt, error_code);
+}
+
+/**
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * @vhost:	struct ibmvfc host to reset
+ **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+	int rc;
+
+	scsi_block_requests(vhost->host);
+	ibmvfc_purge_requests(vhost, DID_ERROR);
+	if ((rc = ibmvfc_reset_crq(vhost)) ||
+	    (rc = ibmvfc_send_crq_init(vhost)) ||
+	    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+		dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+	} else
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost:	struct ibmvfc host to reset
+ **/
+static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	__ibmvfc_reset_host(vhost);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_retry_host_init - Retry host initialization if allowed
+ * @vhost:	ibmvfc host struct
+ *
+ **/
+static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+{
+	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+		if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+			dev_err(vhost->dev,
+				"Host initialization retries exceeded. Taking adapter offline\n");
+			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+		} else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+			__ibmvfc_reset_host(vhost);
+		else
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+	}
+
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * __ibmvfc_find_target - Find the specified scsi_target (no locking)
+ * @starget:	scsi target struct
+ *
+ * Return value:
+ *	ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	struct ibmvfc_target *tgt;
+
+	list_for_each_entry(tgt, &vhost->targets, queue)
+		if (tgt->target_id == starget->id)
+			return tgt;
+	return NULL;
+}
+
+/**
+ * ibmvfc_find_target - Find the specified scsi_target
+ * @starget:	scsi target struct
+ *
+ * Return value:
+ *	ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ibmvfc_target *tgt;
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	tgt = __ibmvfc_find_target(starget);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return tgt;
+}
+
+/**
+ * ibmvfc_get_host_speed - Get host port speed
+ * @shost:		scsi host struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
+{
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (vhost->state == IBMVFC_ACTIVE) {
+		switch (vhost->login_buf->resp.link_speed / 100) {
+		case 1:
+			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+			break;
+		case 2:
+			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+			break;
+		case 4:
+			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+			break;
+		case 8:
+			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+			break;
+		case 10:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+			break;
+		case 16:
+			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+			break;
+		default:
+			ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
+				   vhost->login_buf->resp.link_speed / 100);
+			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+			break;
+		}
+	} else
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_get_host_port_state - Get host port state
+ * @shost:		scsi host struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	switch (vhost->state) {
+	case IBMVFC_INITIALIZING:
+	case IBMVFC_ACTIVE:
+		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+		break;
+	case IBMVFC_LINK_DOWN:
+		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+		break;
+	case IBMVFC_LINK_DEAD:
+	case IBMVFC_HOST_OFFLINE:
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		break;
+	case IBMVFC_HALTED:
+		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
+		break;
+	default:
+		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
+		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+		break;
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
+ * @rport:		rport struct
+ * @timeout:	timeout value
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout;
+	else
+		rport->dev_loss_tmo = 1;
+}
+
+/**
+ * ibmvfc_get_starget_node_name - Get SCSI target's node name
+ * @starget:	scsi target struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
+{
+	struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_name - Get SCSI target's port name
+ * @starget:	scsi target struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
+{
+	struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_id - Get SCSI target's port ID
+ * @starget:	scsi target struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
+{
+	struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+}
+
+/**
+ * ibmvfc_wait_while_resetting - Wait while the host resets
+ * @vhost:		ibmvfc host struct
+ *
+ * Return value:
+ * 	0 on success / other on failure
+ **/
+static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
+{
+	long timeout = wait_event_timeout(vhost->init_wait_q,
+					  (vhost->state == IBMVFC_ACTIVE ||
+					   vhost->state == IBMVFC_HOST_OFFLINE ||
+					   vhost->state == IBMVFC_LINK_DEAD),
+					  (init_timeout * HZ));
+
+	return timeout ? 0 : -EIO;
+}
+
+/**
+ * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
+ * @shost:		scsi host struct
+ *
+ * Return value:
+ * 	0 on success / other on failure
+ **/
+static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+	struct ibmvfc_host *vhost = shost_priv(shost);
+
+	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
+	ibmvfc_reset_host(vhost);
+	return ibmvfc_wait_while_resetting(vhost);
+}
+
+/**
+ * ibmvfc_gather_partition_info - Gather info about the LPAR
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
+{
+	struct device_node *rootdn;
+	const char *name;
+	const unsigned int *num;
+
+	rootdn = of_find_node_by_path("/");
+	if (!rootdn)
+		return;
+
+	name = of_get_property(rootdn, "ibm,partition-name", NULL);
+	if (name)
+		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+	num = of_get_property(rootdn, "ibm,partition-no", NULL);
+	if (num)
+		vhost->partition_number = *num;
+	of_node_put(rootdn);
+}
+
+/**
+ * ibmvfc_set_login_info - Setup info for NPIV login
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+	struct device_node *of_node = vhost->dev->archdata.of_node;
+	const char *location;
+
+	memset(login_info, 0, sizeof(*login_info));
+
+	login_info->ostype = IBMVFC_OS_LINUX;
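+	/* IBMVFC_MAX_SECTORS is in 512-byte sectors, so shift by 9 for bytes */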
+	login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
+	login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
+	login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
+	login_info->partition_num = vhost->partition_number;
+	login_info->vfc_frame_version = 1;
+	login_info->fcp_version = 3;
+	if (vhost->client_migrated)
+		login_info->flags = IBMVFC_CLIENT_MIGRATED;
+
+	login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+	login_info->capabilities = IBMVFC_CAN_MIGRATE;
+	login_info->async.va = vhost->async_crq.msg_token;
+	login_info->async.len = vhost->async_crq.size;
+	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
+	strncpy(login_info->device_name,
+		vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
+
+	location = of_get_property(of_node, "ibm,loc-code", NULL);
+	location = location ? location : vhost->dev->bus_id;
+	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+}
+
+/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &vhost->pool;
+
+	ENTER;
+	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+	if (!pool->events)
+		return -ENOMEM;
+
+	pool->iu_storage = dma_alloc_coherent(vhost->dev,
+					      pool->size * sizeof(*pool->iu_storage),
+					      &pool->iu_token, 0);
+
+	if (!pool->iu_storage) {
+		kfree(pool->events);
+		return -ENOMEM;
+	}
+
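+	/*
+	 * Descriptive note: each event owns one IU slot in the coherent
+	 * iu_storage buffer; crq.ioba carries that slot's DMA address so the
+	 * server knows where to find the IU, and valid = 0x80 flags the
+	 * descriptor as a normal (non-init) CRQ message. All events start out
+	 * on the host's free list.
+	 */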
+	for (i = 0; i < pool->size; ++i) {
+		struct ibmvfc_event *evt = &pool->events[i];
+		atomic_set(&evt->free, 1);
+		evt->crq.valid = 0x80;
+		evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
+		evt->xfer_iu = pool->iu_storage + i;
+		evt->vhost = vhost;
+		evt->ext_list = NULL;
+		list_add_tail(&evt->queue, &vhost->free);
+	}
+
+	LEAVE;
+	return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &vhost->pool;
+
+	ENTER;
+	for (i = 0; i < pool->size; ++i) {
+		list_del(&pool->events[i].queue);
+		BUG_ON(atomic_read(&pool->events[i].free) != 1);
+		if (pool->events[i].ext_list)
+			dma_pool_free(vhost->sg_pool,
+				      pool->events[i].ext_list,
+				      pool->events[i].ext_list_token);
+	}
+
+	kfree(pool->events);
+	dma_free_coherent(vhost->dev,
+			  pool->size * sizeof(*pool->iu_storage),
+			  pool->iu_storage, pool->iu_token);
+	LEAVE;
+}
+
+/**
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @vhost:	ibmvfc host struct
+ *
+ * Returns a free event from the pool.
+ **/
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_event *evt;
+
+	BUG_ON(list_empty(&vhost->free));
+	evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
+	atomic_set(&evt->free, 0);
+	list_del(&evt->queue);
+	return evt;
+}
+
+/**
+ * ibmvfc_init_event - Initialize fields in an event struct that are always required
+ * @evt:	The event
+ * @done:	Routine to call when the event is responded to
+ * @format:	SRP or MAD format
+ **/
+static void ibmvfc_init_event(struct ibmvfc_event *evt,
+			      void (*done) (struct ibmvfc_event *), u8 format)
+{
+	evt->cmnd = NULL;
+	evt->sync_iu = NULL;
+	evt->crq.format = format;
+	evt->done = done;
+}
+
+/**
+ * ibmvfc_map_sg_list - Initialize scatterlist
+ * @scmd:	scsi command struct
+ * @nseg:	number of scatterlist segments
+ * @md:	memory descriptor list to initialize
+ **/
+static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
+			       struct srp_direct_buf *md)
+{
+	int i;
+	struct scatterlist *sg;
+
+	scsi_for_each_sg(scmd, sg, nseg, i) {
+		md[i].va = sg_dma_address(sg);
+		md[i].len = sg_dma_len(sg);
+		md[i].key = 0;
+	}
+}
+
+/**
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
+ * @scmd:		Scsi_Cmnd with the scatterlist
+ * @evt:		ibmvfc event struct
+ * @vfc_cmd:	vfc_cmd that contains the memory descriptor
+ * @dev:		device for which to map dma memory
+ *
+ * Returns:
+ *	0 on success / non-zero on failure
+ **/
+static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
+			      struct ibmvfc_event *evt,
+			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
+{
+	int sg_mapped;
+	struct srp_direct_buf *data = &vfc_cmd->ioba;
+	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+
+	sg_mapped = scsi_dma_map(scmd);
+	if (!sg_mapped) {
+		vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
+		return 0;
+	} else if (unlikely(sg_mapped < 0)) {
+		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
+		return sg_mapped;
+	}
+
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+		vfc_cmd->flags |= IBMVFC_WRITE;
+		vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+	} else {
+		vfc_cmd->flags |= IBMVFC_READ;
+		vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+	}
+
+	if (sg_mapped == 1) {
+		ibmvfc_map_sg_list(scmd, sg_mapped, data);
+		return 0;
+	}
+
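+	/*
+	 * Descriptive note: with more than one segment the transfer is
+	 * described indirectly. The event caches a DMA-able list of
+	 * srp_direct_buf entries (ext_list) and the command's single
+	 * descriptor is pointed at that list instead of at the data.
+	 */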
+	vfc_cmd->flags |= IBMVFC_SCATTERLIST;
+
+	if (!evt->ext_list) {
+		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
+					       &evt->ext_list_token);
+
+		if (!evt->ext_list) {
+			scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
+			return -ENOMEM;
+		}
+	}
+
+	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
+
+	data->va = evt->ext_list_token;
+	data->len = sg_mapped * sizeof(struct srp_direct_buf);
+	data->key = 0;
+	return 0;
+}
+
+/**
+ * ibmvfc_timeout - Internal command timeout handler
+ * @evt:	struct ibmvfc_event that timed out
+ *
+ * Called when an internally generated command times out
+ **/
+static void ibmvfc_timeout(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
+	ibmvfc_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
+ * @evt:		event to be sent
+ * @vhost:		ibmvfc host struct
+ * @timeout:	timeout in seconds - 0 means do not time command
+ *
+ * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
+ **/
+static int ibmvfc_send_event(struct ibmvfc_event *evt,
+			     struct ibmvfc_host *vhost, unsigned long timeout)
+{
+	u64 *crq_as_u64 = (u64 *) &evt->crq;
+	int rc;
+
+	/* Copy the IU into the transfer area */
+	*evt->xfer_iu = evt->iu;
+	if (evt->crq.format == IBMVFC_CMD_FORMAT)
+		evt->xfer_iu->cmd.tag = (u64)evt;
+	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
+		evt->xfer_iu->mad_common.tag = (u64)evt;
+	else
+		BUG();
+
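+	/*
+	 * Descriptive note: the event pointer doubles as the correlation tag.
+	 * The server echoes it back in the response CRQ's ioba field, which
+	 * ibmvfc_handle_crq() casts back to the event after validating it
+	 * against the event pool.
+	 */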
+	list_add_tail(&evt->queue, &vhost->sent);
+	init_timer(&evt->timer);
+
+	if (timeout) {
+		evt->timer.data = (unsigned long) evt;
+		evt->timer.expires = jiffies + (timeout * HZ);
+		evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
+		add_timer(&evt->timer);
+	}
+
+	if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
+		list_del(&evt->queue);
+		del_timer(&evt->timer);
+
+		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+		 * Firmware will send a CRQ with a transport event (0xFF) to
+		 * tell this client what has happened to the transport. This
+		 * will be handled in ibmvfc_handle_crq()
+		 */
+		if (rc == H_CLOSED) {
+			if (printk_ratelimit())
+				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
+			if (evt->cmnd)
+				scsi_dma_unmap(evt->cmnd);
+			ibmvfc_free_event(evt);
+			return SCSI_MLQUEUE_HOST_BUSY;
+		}
+
+		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
+		if (evt->cmnd) {
+			evt->cmnd->result = DID_ERROR << 16;
+			evt->done = ibmvfc_scsi_eh_done;
+		} else
+			evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
+
+		evt->done(evt);
+	} else
+		ibmvfc_trc_start(evt);
+
+	return 0;
+}
+
+/**
+ * ibmvfc_log_error - Log an error for the failed command if appropriate
+ * @evt:	ibmvfc event to log
+ *
+ **/
+static void ibmvfc_log_error(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+	struct scsi_cmnd *cmnd = evt->cmnd;
+	const char *err = unknown_error;
+	int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+	int logerr = 0;
+	int rsp_code = 0;
+
+	if (index >= 0) {
+		logerr = cmd_status[index].log;
+		err = cmd_status[index].name;
+	}
+
+	if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
+		return;
+
+	if (rsp->flags & FCP_RSP_LEN_VALID)
+		rsp_code = rsp->data.info.rsp_code;
+
+	scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
+		    cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
+}
+
+/**
+ * ibmvfc_scsi_done - Handle responses from commands
+ * @evt:	ibmvfc event to be handled
+ *
+ * Used as a callback when sending scsi cmds.
+ **/
+static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+	struct scsi_cmnd *cmnd = evt->cmnd;
+	int rsp_len = 0;
+	int sense_len = rsp->fcp_sense_len;
+
+	if (cmnd) {
+		if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
+			scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
+		else if (rsp->flags & FCP_RESID_UNDER)
+			scsi_set_resid(cmnd, rsp->fcp_resid);
+		else
+			scsi_set_resid(cmnd, 0);
+
+		if (vfc_cmd->status) {
+			cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+
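+			/*
+			 * Descriptive note: sense data in the FCP response
+			 * payload follows any response-info bytes, so skip
+			 * rsp_len and clamp the copy to the midlayer's sense
+			 * buffer size.
+			 */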
+			if (rsp->flags & FCP_RSP_LEN_VALID)
+				rsp_len = rsp->fcp_rsp_len;
+			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
+				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
+			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
+				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+
+			ibmvfc_log_error(evt);
+		}
+
+		if (!cmnd->result &&
+		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
+			cmnd->result = (DID_ERROR << 16);
+
+		scsi_dma_unmap(cmnd);
+		cmnd->scsi_done(cmnd);
+	}
+
+	ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_host_chkready - Check if the host can accept commands
+ * @vhost:	 struct ibmvfc host
+ *
+ * Returns:
+ *	0 if the host can accept commands / SCSI result (DID_* << 16) to fail them with if not
+ **/
+static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
+{
+	int result = 0;
+
+	switch (vhost->state) {
+	case IBMVFC_LINK_DEAD:
+	case IBMVFC_HOST_OFFLINE:
+		result = DID_NO_CONNECT << 16;
+		break;
+	case IBMVFC_NO_CRQ:
+	case IBMVFC_INITIALIZING:
+	case IBMVFC_HALTED:
+	case IBMVFC_LINK_DOWN:
+		result = DID_REQUEUE << 16;
+		break;
+	case IBMVFC_ACTIVE:
+		result = 0;
+		break;
+	}
+
+	return result;
+}
+
+/**
+ * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @cmnd:	struct scsi_cmnd to be executed
+ * @done:	Callback function to be called when cmnd is completed
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
+			       void (*done) (struct scsi_cmnd *))
+{
+	struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	struct ibmvfc_cmd *vfc_cmd;
+	struct ibmvfc_event *evt;
+	u8 tag[2];
+	int rc;
+
+	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
+	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+		cmnd->result = rc;
+		done(cmnd);
+		return 0;
+	}
+
+	cmnd->result = (DID_OK << 16);
+	evt = ibmvfc_get_event(vhost);
+	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+	evt->cmnd = cmnd;
+	cmnd->scsi_done = done;
+	vfc_cmd = &evt->iu.cmd;
+	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+	vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+	vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
+	vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
+	vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
+	vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
+	vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
+	vfc_cmd->tgt_scsi_id = rport->port_id;
+	if ((rport->supported_classes & FC_COS_CLASS3) &&
+	    (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
+		vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
+	vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
+	int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
+	memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+	if (scsi_populate_tag_msg(cmnd, tag)) {
+		vfc_cmd->task_tag = tag[1];
+		switch (tag[0]) {
+		case MSG_SIMPLE_TAG:
+			vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+			break;
+		case MSG_HEAD_TAG:
+			vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
+			break;
+		case MSG_ORDERED_TAG:
+			vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
+			break;
+		}
+	}
+
+	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
+		return ibmvfc_send_event(evt, vhost, 0);
+
+	ibmvfc_free_event(evt);
+	if (rc == -ENOMEM)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+		scmd_printk(KERN_ERR, cmnd,
+			    "Failed to map DMA buffer for command. rc=%d\n", rc);
+
+	cmnd->result = DID_ERROR << 16;
+	done(cmnd);
+	return 0;
+}
+
+/**
+ * ibmvfc_sync_completion - Signal that a synchronous command has completed
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
+{
+	/* copy the response back */
+	if (evt->sync_iu)
+		*evt->sync_iu = *evt->xfer_iu;
+
+	complete(&evt->comp);
+}
+
+/**
+ * ibmvfc_reset_device - Reset the device with the specified reset type
+ * @sdev:	scsi device to reset
+ * @type:	reset type
+ * @desc:	reset type description for log messages
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+{
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct ibmvfc_cmd *tmf;
+	struct ibmvfc_event *evt;
+	union ibmvfc_iu rsp_iu;
+	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+	int rsp_rc = -EBUSY;
+	unsigned long flags;
+	int rsp_code = 0;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	if (vhost->state == IBMVFC_ACTIVE) {
+		evt = ibmvfc_get_event(vhost);
+		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+		tmf = &evt->iu.cmd;
+		memset(tmf, 0, sizeof(*tmf));
+		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+		tmf->resp.len = sizeof(tmf->rsp);
+		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+		tmf->payload_len = sizeof(tmf->iu);
+		tmf->resp_len = sizeof(tmf->rsp);
+		tmf->cancel_key = (unsigned long)sdev->hostdata;
+		tmf->tgt_scsi_id = rport->port_id;
+		int_to_scsilun(sdev->lun, &tmf->iu.lun);
+		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+		tmf->iu.tmf_flags = type;
+		evt->sync_iu = &rsp_iu;
+
+		init_completion(&evt->comp);
+		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+	}
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (rsp_rc != 0) {
+		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
+			    desc, rsp_rc);
+		return -EIO;
+	}
+
+	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
+	wait_for_completion(&evt->comp);
+
+	if (rsp_iu.cmd.status) {
+		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+			rsp_code = fc_rsp->data.info.rsp_code;
+
+		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
+			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+			    desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    fc_rsp->scsi_status);
+		rsp_rc = -EIO;
+	} else
+		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_free_event(evt);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return rsp_rc;
+}
+
+/**
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev:	scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+{
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct ibmvfc_cmd *tmf;
+	struct ibmvfc_event *evt, *found_evt;
+	union ibmvfc_iu rsp_iu;
+	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+	int rsp_rc = -EBUSY;
+	unsigned long flags;
+	int rsp_code = 0;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	found_evt = NULL;
+	list_for_each_entry(evt, &vhost->sent, queue) {
+		if (evt->cmnd && evt->cmnd->device == sdev) {
+			found_evt = evt;
+			break;
+		}
+	}
+
+	if (!found_evt) {
+		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		return 0;
+	}
+
+	if (vhost->state == IBMVFC_ACTIVE) {
+		evt = ibmvfc_get_event(vhost);
+		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+		tmf = &evt->iu.cmd;
+		memset(tmf, 0, sizeof(*tmf));
+		tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+		tmf->resp.len = sizeof(tmf->rsp);
+		tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+		tmf->payload_len = sizeof(tmf->iu);
+		tmf->resp_len = sizeof(tmf->rsp);
+		tmf->cancel_key = (unsigned long)sdev->hostdata;
+		tmf->tgt_scsi_id = rport->port_id;
+		int_to_scsilun(sdev->lun, &tmf->iu.lun);
+		tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+		tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+		evt->sync_iu = &rsp_iu;
+
+		init_completion(&evt->comp);
+		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+	}
+
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (rsp_rc != 0) {
+		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+		return -EIO;
+	}
+
+	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+	wait_for_completion(&evt->comp);
+
+	if (rsp_iu.cmd.status) {
+		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+			rsp_code = fc_rsp->data.info.rsp_code;
+
+		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+			    ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+			    rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+			    fc_rsp->scsi_status);
+		rsp_rc = -EIO;
+	} else
+		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_free_event(evt);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return rsp_rc;
+}
+
+/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev:	scsi device to cancel commands
+ * @type:	type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+	struct ibmvfc_host *vhost = shost_priv(sdev->host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct ibmvfc_tmf *tmf;
+	struct ibmvfc_event *evt, *found_evt;
+	union ibmvfc_iu rsp;
+	int rsp_rc = -EBUSY;
+	unsigned long flags;
+	u16 status;
+
+	ENTER;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	found_evt = NULL;
+	list_for_each_entry(evt, &vhost->sent, queue) {
+		if (evt->cmnd && evt->cmnd->device == sdev) {
+			found_evt = evt;
+			break;
+		}
+	}
+
+	if (!found_evt) {
+		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		return 0;
+	}
+
+	if (vhost->state == IBMVFC_ACTIVE) {
+		evt = ibmvfc_get_event(vhost);
+		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+		tmf = &evt->iu.tmf;
+		memset(tmf, 0, sizeof(*tmf));
+		tmf->common.version = 1;
+		tmf->common.opcode = IBMVFC_TMF_MAD;
+		tmf->common.length = sizeof(*tmf);
+		tmf->scsi_id = rport->port_id;
+		int_to_scsilun(sdev->lun, &tmf->lun);
+		tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+		tmf->cancel_key = (unsigned long)sdev->hostdata;
+		tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+
+		evt->sync_iu = &rsp;
+		init_completion(&evt->comp);
+		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+	}
+
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (rsp_rc != 0) {
+		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
+		return -EIO;
+	}
+
+	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+	wait_for_completion(&evt->comp);
+	status = rsp.mad_common.status;
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_free_event(evt);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	if (status != IBMVFC_MAD_SUCCESS) {
+		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+		return -EIO;
+	}
+
+	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+	return 0;
+}
+
+/**
+ * ibmvfc_eh_abort_handler - Abort a command
+ * @cmd:	scsi command to abort
+ *
+ * Returns:
+ *	SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+	struct ibmvfc_event *evt, *pos;
+	int cancel_rc, abort_rc;
+	unsigned long flags;
+
+	ENTER;
+	ibmvfc_wait_while_resetting(vhost);
+	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
+	abort_rc = ibmvfc_abort_task_set(cmd->device);
+
+	if (!cancel_rc && !abort_rc) {
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+			if (evt->cmnd && evt->cmnd->device == cmd->device)
+				ibmvfc_fail_request(evt, DID_ABORT);
+		}
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		LEAVE;
+		return SUCCESS;
+	}
+
+	LEAVE;
+	return FAILED;
+}
+
+/**
+ * ibmvfc_eh_device_reset_handler - Reset a single LUN
+ * @cmd:	scsi command struct
+ *
+ * Returns:
+ *	SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+	struct ibmvfc_event *evt, *pos;
+	int cancel_rc, reset_rc;
+	unsigned long flags;
+
+	ENTER;
+	ibmvfc_wait_while_resetting(vhost);
+	cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
+	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+
+	if (!cancel_rc && !reset_rc) {
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+			if (evt->cmnd && evt->cmnd->device == cmd->device)
+				ibmvfc_fail_request(evt, DID_ABORT);
+		}
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		LEAVE;
+		return SUCCESS;
+	}
+
+	LEAVE;
+	return FAILED;
+}
+
+/**
+ * ibmvfc_dev_cancel_all - Cancel all outstanding commands on a device (device iterator)
+ * @sdev:	scsi device struct
+ * @data:	accumulated return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
+{
+	unsigned long *rc = data;
+	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
+}
+
+/**
+ * ibmvfc_dev_abort_all - Abort the task set on a device (device iterator)
+ * @sdev:	scsi device struct
+ * @data:	accumulated return code
+ *
+ **/
+static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
+{
+	unsigned long *rc = data;
+	*rc |= ibmvfc_abort_task_set(sdev);
+}
+
+/**
+ * ibmvfc_eh_target_reset_handler - Reset the target
+ * @cmd:	scsi command struct
+ *
+ * Returns:
+ *	SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
+{
+	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+	struct scsi_target *starget = scsi_target(cmd->device);
+	struct ibmvfc_event *evt, *pos;
+	int reset_rc;
+	unsigned long cancel_rc = 0;
+	unsigned long flags;
+
+	ENTER;
+	ibmvfc_wait_while_resetting(vhost);
+	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+	reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+
+	if (!cancel_rc && !reset_rc) {
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+				ibmvfc_fail_request(evt, DID_ABORT);
+		}
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		LEAVE;
+		return SUCCESS;
+	}
+
+	LEAVE;
+	return FAILED;
+}
+
+/**
+ * ibmvfc_eh_host_reset_handler - Reset the connection to the server
+ * @cmd:	struct scsi_cmnd having problems
+ *
+ **/
+static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+	int rc;
+	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+
+	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
+	rc = ibmvfc_issue_fc_host_lip(vhost->host);
+	return rc ? FAILED : SUCCESS;
+}
+
+/**
+ * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
+ * @rport:		rport struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
+{
+	struct scsi_target *starget = to_scsi_target(&rport->dev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	struct ibmvfc_event *evt, *pos;
+	unsigned long cancel_rc = 0;
+	unsigned long abort_rc = 0;
+	unsigned long flags;
+
+	ENTER;
+	starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+	starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
+
+	if (!cancel_rc && !abort_rc) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+			if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+				ibmvfc_fail_request(evt, DID_ABORT);
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
+	} else
+		ibmvfc_issue_fc_host_lip(shost);
+
+	scsi_target_unblock(&rport->dev);
+	LEAVE;
+}
+
+static const struct {
+	enum ibmvfc_async_event ae;
+	const char *desc;
+} ae_desc [] = {
+	{ IBMVFC_AE_ELS_PLOGI,		"PLOGI" },
+	{ IBMVFC_AE_ELS_LOGO,		"LOGO" },
+	{ IBMVFC_AE_ELS_PRLO,		"PRLO" },
+	{ IBMVFC_AE_SCN_NPORT,		"N-Port SCN" },
+	{ IBMVFC_AE_SCN_GROUP,		"Group SCN" },
+	{ IBMVFC_AE_SCN_DOMAIN,		"Domain SCN" },
+	{ IBMVFC_AE_SCN_FABRIC,		"Fabric SCN" },
+	{ IBMVFC_AE_LINK_UP,		"Link Up" },
+	{ IBMVFC_AE_LINK_DOWN,		"Link Down" },
+	{ IBMVFC_AE_LINK_DEAD,		"Link Dead" },
+	{ IBMVFC_AE_HALT,			"Halt" },
+	{ IBMVFC_AE_RESUME,		"Resume" },
+	{ IBMVFC_AE_ADAPTER_FAILED,	"Adapter Failed" },
+};
+
+static const char *unknown_ae = "Unknown async";
+
+/**
+ * ibmvfc_get_ae_desc - Get text description for async event
+ * @ae:	async event
+ *
+ **/
+static const char *ibmvfc_get_ae_desc(u64 ae)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
+		if (ae_desc[i].ae == ae)
+			return ae_desc[i].desc;
+
+	return unknown_ae;
+}
+
+/**
+ * ibmvfc_handle_async - Handle an async event from the adapter
+ * @crq:	crq to process
+ * @vhost:	ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
+				struct ibmvfc_host *vhost)
+{
+	const char *desc = ibmvfc_get_ae_desc(crq->event);
+
+	ibmvfc_log(vhost, 2, "%s event received\n", desc);
+
+	switch (crq->event) {
+	case IBMVFC_AE_LINK_UP:
+	case IBMVFC_AE_RESUME:
+		vhost->events_to_log |= IBMVFC_AE_LINKUP;
+		ibmvfc_init_host(vhost);
+		break;
+	case IBMVFC_AE_SCN_FABRIC:
+		vhost->events_to_log |= IBMVFC_AE_RSCN;
+		ibmvfc_init_host(vhost);
+		break;
+	case IBMVFC_AE_SCN_NPORT:
+	case IBMVFC_AE_SCN_GROUP:
+	case IBMVFC_AE_SCN_DOMAIN:
+		vhost->events_to_log |= IBMVFC_AE_RSCN;
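+		/* fall through: SCN events also trigger the host re-init below */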
+	case IBMVFC_AE_ELS_LOGO:
+	case IBMVFC_AE_ELS_PRLO:
+	case IBMVFC_AE_ELS_PLOGI:
+		ibmvfc_reinit_host(vhost);
+		break;
+	case IBMVFC_AE_LINK_DOWN:
+	case IBMVFC_AE_ADAPTER_FAILED:
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+		break;
+	case IBMVFC_AE_LINK_DEAD:
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		break;
+	case IBMVFC_AE_HALT:
+		ibmvfc_link_down(vhost, IBMVFC_HALTED);
+		break;
+	default:
+		dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
+		break;
+	}
+}
+
+/**
+ * ibmvfc_handle_crq - Handles and frees received events in the CRQ
+ * @crq:	Command/Response queue
+ * @vhost:	ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+{
+	long rc;
+	struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
+
+	switch (crq->valid) {
+	case IBMVFC_CRQ_INIT_RSP:
+		switch (crq->format) {
+		case IBMVFC_CRQ_INIT:
+			dev_info(vhost->dev, "Partner initialized\n");
+			/* Send back a response */
+			rc = ibmvfc_send_crq_init_complete(vhost);
+			if (rc == 0)
+				ibmvfc_init_host(vhost);
+			else
+				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
+			break;
+		case IBMVFC_CRQ_INIT_COMPLETE:
+			dev_info(vhost->dev, "Partner initialization complete\n");
+			ibmvfc_init_host(vhost);
+			break;
+		default:
+			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
+		}
+		return;
+	case IBMVFC_CRQ_XPORT_EVENT:
+		vhost->state = IBMVFC_NO_CRQ;
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
+			/* We need to re-setup the interpartition connection */
+			dev_info(vhost->dev, "Re-enabling adapter\n");
+			vhost->client_migrated = 1;
+			ibmvfc_purge_requests(vhost, DID_REQUEUE);
+			if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
+			    (rc = ibmvfc_send_crq_init(vhost))) {
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+				dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
+			} else
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+		} else {
+			dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+
+			ibmvfc_purge_requests(vhost, DID_ERROR);
+			if ((rc = ibmvfc_reset_crq(vhost)) ||
+			    (rc = ibmvfc_send_crq_init(vhost))) {
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+				dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
+			} else
+				ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+		}
+		return;
+	case IBMVFC_CRQ_CMD_RSP:
+		break;
+	default:
+		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
+		return;
+	}
+
+	if (crq->format == IBMVFC_ASYNC_EVENT)
+		return;
+
+	/* The only kind of payload CRQs we should get are responses to
+	 * things we send. Make sure this response is to something we
+	 * actually sent
+	 */
+	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+		dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
+			crq->ioba);
+		return;
+	}
+
+	if (unlikely(atomic_read(&evt->free))) {
+		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
+			crq->ioba);
+		return;
+	}
+
+	del_timer(&evt->timer);
+	list_del(&evt->queue);
+	ibmvfc_trc_end(evt);
+	evt->done(evt);
+}
+
+/**
+ * ibmvfc_scan_finished - Check if the device scan is done.
+ * @shost:	scsi host struct
+ * @time:	current elapsed time
+ *
+ * Returns:
+ *	0 if scan is not done / 1 if scan is done
+ **/
+static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	unsigned long flags;
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	int done = 0;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (time >= (init_timeout * HZ)) {
+		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
+			 "continuing initialization\n", init_timeout);
+		done = 1;
+	}
+
+	if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+		done = 1;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return done;
+}
+
+/**
+ * ibmvfc_slave_alloc - Setup the device's task set value
+ * @sdev:	struct scsi_device device to configure
+ *
+ * Set the device's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ *	0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
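+	/*
+	 * Descriptive note: hostdata becomes this device's cancel key.
+	 * ibmvfc_queuecommand() and the TMF paths copy it into cancel_key so
+	 * cancels can be matched to the device's outstanding commands.
+	 */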
+	spin_lock_irqsave(shost->host_lock, flags);
+	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return 0;
+}
+
+/**
+ * ibmvfc_slave_configure - Configure the device
+ * @sdev:	struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also.
+ *
+ * Returns:
+ *	0
+ **/
+static int ibmvfc_slave_configure(struct scsi_device *sdev)
+{
+	struct Scsi_Host *shost = sdev->host;
+	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (sdev->type == TYPE_DISK)
+		sdev->allow_restart = 1;
+
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+		scsi_activate_tcq(sdev, sdev->queue_depth);
+	} else
+		scsi_deactivate_tcq(sdev, sdev->queue_depth);
+
+	rport->dev_loss_tmo = dev_loss_tmo;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return 0;
+}
+
+/**
+ * ibmvfc_change_queue_depth - Change the device's queue depth
+ * @sdev:	scsi device struct
+ * @qdepth:	depth to set
+ *
+ * Return value:
+ * 	actual depth set
+ **/
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
+		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
+
+	scsi_adjust_queue_depth(sdev, 0, qdepth);
+	return sdev->queue_depth;
+}
+
+/**
+ * ibmvfc_change_queue_type - Change the device's queue type
+ * @sdev:		scsi device struct
+ * @tag_type:	type of tags to use
+ *
+ * Return value:
+ * 	actual queue type set
+ **/
+static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, tag_type);
+
+		if (tag_type)
+			scsi_activate_tcq(sdev, sdev->queue_depth);
+		else
+			scsi_deactivate_tcq(sdev, sdev->queue_depth);
+	} else
+		tag_type = 0;
+
+	return tag_type;
+}
+
+static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
+						 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			vhost->login_buf->resp.partition_name);
+}
+
+static struct device_attribute ibmvfc_host_partition_name = {
+	.attr = {
+		.name = "partition_name",
+		.mode = S_IRUGO,
+	},
+	.show = ibmvfc_show_host_partition_name,
+};
+
+static ssize_t ibmvfc_show_host_device_name(struct device *dev,
+					    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			vhost->login_buf->resp.device_name);
+}
+
+static struct device_attribute ibmvfc_host_device_name = {
+	.attr = {
+		.name = "device_name",
+		.mode = S_IRUGO,
+	},
+	.show = ibmvfc_show_host_device_name,
+};
+
+static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
+					 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			vhost->login_buf->resp.port_loc_code);
+}
+
+static struct device_attribute ibmvfc_host_loc_code = {
+	.attr = {
+		.name = "port_loc_code",
+		.mode = S_IRUGO,
+	},
+	.show = ibmvfc_show_host_loc_code,
+};
+
+static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
+					 struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			vhost->login_buf->resp.drc_name);
+}
+
+static struct device_attribute ibmvfc_host_drc_name = {
+	.attr = {
+		.name = "drc_name",
+		.mode = S_IRUGO,
+	},
+	.show = ibmvfc_show_host_drc_name,
+};
+
+static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
+					     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+}
+
+static struct device_attribute ibmvfc_host_npiv_version = {
+	.attr = {
+		.name = "npiv_version",
+		.mode = S_IRUGO,
+	},
+	.show = ibmvfc_show_host_npiv_version,
+};
+
+/**
+ * ibmvfc_show_log_level - Show the adapter's error logging level
+ * @dev:	class device struct
+ * @buf:	buffer
+ *
+ * Return value:
+ * 	number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_show_log_level(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+	int len;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return len;
+}
+
+/**
+ * ibmvfc_store_log_level - Change the adapter's error logging level
+ * @dev:	class device struct
+ * @buf:	buffer
+ * @count:	buffer size
+ *
+ * Return value:
+ * 	number of bytes consumed from buffer
+ **/
+static ssize_t ibmvfc_store_log_level(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	vhost->log_level = simple_strtoul(buf, NULL, 10);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return strlen(buf);
+}
+
+static struct device_attribute ibmvfc_log_level_attr = {
+	.attr = {
+		.name =		"log_level",
+		.mode =		S_IRUGO | S_IWUSR,
+	},
+	.show = ibmvfc_show_log_level,
+	.store = ibmvfc_store_log_level
+};
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_read_trace - Dump the adapter trace
+ * @kobj:		kobject struct
+ * @bin_attr:	bin_attribute struct
+ * @buf:		buffer
+ * @off:		offset
+ * @count:		buffer size
+ *
+ * Return value:
+ *	number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+				 struct bin_attribute *bin_attr,
+				 char *buf, loff_t off, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ibmvfc_host *vhost = shost_priv(shost);
+	unsigned long flags = 0;
+	int size = IBMVFC_TRACE_SIZE;
+	char *src = (char *)vhost->trace;
+
+	if (off > size)
+		return 0;
+	if (off + count > size) {
+		size -= off;
+		count = size;
+	}
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	memcpy(buf, &src[off], count);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return count;
+}
+
+static struct bin_attribute ibmvfc_trace_attr = {
+	.attr =	{
+		.name = "trace",
+		.mode = S_IRUGO,
+	},
+	.size = 0,
+	.read = ibmvfc_read_trace,
+};
+#endif
+
+static struct device_attribute *ibmvfc_attrs[] = {
+	&ibmvfc_host_partition_name,
+	&ibmvfc_host_device_name,
+	&ibmvfc_host_loc_code,
+	&ibmvfc_host_drc_name,
+	&ibmvfc_host_npiv_version,
+	&ibmvfc_log_level_attr,
+	NULL
+};
+
+static struct scsi_host_template driver_template = {
+	.module = THIS_MODULE,
+	.name = "IBM POWER Virtual FC Adapter",
+	.proc_name = IBMVFC_NAME,
+	.queuecommand = ibmvfc_queuecommand,
+	.eh_abort_handler = ibmvfc_eh_abort_handler,
+	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
+	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
+	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
+	.slave_alloc = ibmvfc_slave_alloc,
+	.slave_configure = ibmvfc_slave_configure,
+	.scan_finished = ibmvfc_scan_finished,
+	.change_queue_depth = ibmvfc_change_queue_depth,
+	.change_queue_type = ibmvfc_change_queue_type,
+	.cmd_per_lun = 16,
+	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
+	.this_id = -1,
+	.sg_tablesize = SG_ALL,
+	.max_sectors = IBMVFC_MAX_SECTORS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = ibmvfc_attrs,
+};
+
+/**
+ * ibmvfc_next_async_crq - Returns the next entry in async queue
+ * @vhost:	ibmvfc host struct
+ *
+ * Returns:
+ *	Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+	struct ibmvfc_async_crq *crq;
+
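+	/*
+	 * Descriptive note: the top bit of the valid byte is set on entries
+	 * that hold a new message; consumers clear valid after processing and
+	 * the cursor wraps around the ring.
+	 */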
+	crq = &async_crq->msgs[async_crq->cur];
+	if (crq->valid & 0x80) {
+		if (++async_crq->cur == async_crq->size)
+			async_crq->cur = 0;
+	} else
+		crq = NULL;
+
+	return crq;
+}
+
+/**
+ * ibmvfc_next_crq - Returns the next entry in message queue
+ * @vhost:	ibmvfc host struct
+ *
+ * Returns:
+ *	Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_crq_queue *queue = &vhost->crq;
+	struct ibmvfc_crq *crq;
+
+	crq = &queue->msgs[queue->cur];
+	if (crq->valid & 0x80) {
+		if (++queue->cur == queue->size)
+			queue->cur = 0;
+	} else
+		crq = NULL;
+
+	return crq;
+}
+
+/**
+ * ibmvfc_interrupt - Interrupt handler
+ * @irq:		number of irq to handle, not used
+ * @dev_instance: ibmvfc_host that received interrupt
+ *
+ * Returns:
+ *	IRQ_HANDLED
+ **/
+static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
+{
+	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
+	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	struct ibmvfc_crq *crq;
+	struct ibmvfc_async_crq *async;
+	unsigned long flags;
+	int done = 0;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	vio_disable_interrupts(to_vio_dev(vhost->dev));
+	while (!done) {
+		/* Pull all the valid messages off the CRQ */
+		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+			ibmvfc_handle_crq(crq, vhost);
+			crq->valid = 0;
+		}
+
+		/* Pull all the valid messages off the async CRQ */
+		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+			ibmvfc_handle_async(async, vhost);
+			async->valid = 0;
+		}
+
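+		/*
+		 * Descriptive note: re-enable interrupts before a final check.
+		 * If a message arrived while they were disabled its interrupt
+		 * may already be lost, so look once more and loop again if
+		 * anything is found.
+		 */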
+		vio_enable_interrupts(vdev);
+		if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+			vio_disable_interrupts(vdev);
+			ibmvfc_handle_crq(crq, vhost);
+			crq->valid = 0;
+		} else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+			vio_disable_interrupts(vdev);
+			ibmvfc_handle_async(async, vhost);
+			async->valid = 0;
+		} else
+			done = 1;
+	}
+
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ibmvfc_init_tgt - Set the next init job step for the target
+ * @tgt:		ibmvfc target struct
+ * @job_step:	job step to perform
+ *
+ **/
+static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
+			    void (*job_step) (struct ibmvfc_target *))
+{
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
+	tgt->job_step = job_step;
+	wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
+ * @tgt:		ibmvfc target struct
+ * @job_step:	initialization job step
+ *
+ **/
+static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+				  void (*job_step) (struct ibmvfc_target *))
+{
+	if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+		wake_up(&tgt->vhost->work_wait_q);
+	} else
+		ibmvfc_init_tgt(tgt, job_step);
+}
+
+/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref:		kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+	kfree(tgt);
+}
+
+/**
+ * ibmvfc_tgt_prli_done - Completion handler for Process Login
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_target *tgt = evt->tgt;
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
+	u32 status = rsp->common.status;
+
+	vhost->discovery_threads--;
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+	switch (status) {
+	case IBMVFC_MAD_SUCCESS:
+		tgt_dbg(tgt, "Process Login succeeded\n");
+		tgt->need_login = 0;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+		break;
+	case IBMVFC_MAD_DRIVER_FAILED:
+		break;
+	case IBMVFC_MAD_CRQ_ERROR:
+		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+		break;
+	case IBMVFC_MAD_FAILED:
+	default:
+		tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error),
+			rsp->status, rsp->error, status);
+		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+		break;
+	};
+
+	kref_put(&tgt->kref, ibmvfc_release_tgt);
+	ibmvfc_free_event(evt);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_prli - Send a process login
+ * @tgt:	ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+{
+	struct ibmvfc_process_login *prli;
+	struct ibmvfc_host *vhost = tgt->vhost;
+	struct ibmvfc_event *evt;
+
+	if (vhost->discovery_threads >= disc_threads)
+		return;
+
+	kref_get(&tgt->kref);
+	evt = ibmvfc_get_event(vhost);
+	vhost->discovery_threads++;
+	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+	evt->tgt = tgt;
+	prli = &evt->iu.prli;
+	memset(prli, 0, sizeof(*prli));
+	prli->common.version = 1;
+	prli->common.opcode = IBMVFC_PROCESS_LOGIN;
+	prli->common.length = sizeof(*prli);
+	prli->scsi_id = tgt->scsi_id;
+
+	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
+	prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
+	prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
+
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+		vhost->discovery_threads--;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
+	} else
+		tgt_dbg(tgt, "Sent process login\n");
+}
+
+/**
+ * ibmvfc_tgt_plogi_done - Completion handler for Port Login
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_target *tgt = evt->tgt;
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
+	u32 status = rsp->common.status;
+
+	vhost->discovery_threads--;
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+	switch (status) {
+	case IBMVFC_MAD_SUCCESS:
+		tgt_dbg(tgt, "Port Login succeeded\n");
+		if (tgt->ids.port_name &&
+		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
+			vhost->reinit = 1;
+			tgt_dbg(tgt, "Port re-init required\n");
+			break;
+		}
+		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+		tgt->ids.port_id = tgt->scsi_id;
+		tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
+		memcpy(&tgt->service_parms, &rsp->service_parms,
+		       sizeof(tgt->service_parms));
+		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+		       sizeof(tgt->service_parms_change));
+		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+		break;
+	case IBMVFC_MAD_DRIVER_FAILED:
+		break;
+	case IBMVFC_MAD_CRQ_ERROR:
+		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+		break;
+	case IBMVFC_MAD_FAILED:
+	default:
+		tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+			ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
+
+		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+		break;
+	};
+
+	kref_put(&tgt->kref, ibmvfc_release_tgt);
+	ibmvfc_free_event(evt);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
+ * @tgt:	ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+{
+	struct ibmvfc_port_login *plogi;
+	struct ibmvfc_host *vhost = tgt->vhost;
+	struct ibmvfc_event *evt;
+
+	if (vhost->discovery_threads >= disc_threads)
+		return;
+
+	kref_get(&tgt->kref);
+	evt = ibmvfc_get_event(vhost);
+	vhost->discovery_threads++;
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+	evt->tgt = tgt;
+	plogi = &evt->iu.plogi;
+	memset(plogi, 0, sizeof(*plogi));
+	plogi->common.version = 1;
+	plogi->common.opcode = IBMVFC_PORT_LOGIN;
+	plogi->common.length = sizeof(*plogi);
+	plogi->scsi_id = tgt->scsi_id;
+
+	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+		vhost->discovery_threads--;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
+	} else
+		tgt_dbg(tgt, "Sent port login\n");
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_target *tgt = evt->tgt;
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
+	u32 status = rsp->common.status;
+
+	vhost->discovery_threads--;
+	ibmvfc_free_event(evt);
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+	switch (status) {
+	case IBMVFC_MAD_SUCCESS:
+		tgt_dbg(tgt, "Implicit Logout succeeded\n");
+		break;
+	case IBMVFC_MAD_DRIVER_FAILED:
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
+		wake_up(&vhost->work_wait_q);
+		return;
+	case IBMVFC_MAD_FAILED:
+	default:
+		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
+		break;
+	};
+
+	if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
+		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
+	else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
+		 tgt->scsi_id != tgt->new_scsi_id)
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+	kref_put(&tgt->kref, ibmvfc_release_tgt);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for the specified target
+ * @tgt:		ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+	struct ibmvfc_implicit_logout *mad;
+	struct ibmvfc_host *vhost = tgt->vhost;
+	struct ibmvfc_event *evt;
+
+	if (vhost->discovery_threads >= disc_threads)
+		return;
+
+	kref_get(&tgt->kref);
+	evt = ibmvfc_get_event(vhost);
+	vhost->discovery_threads++;
+	ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+	evt->tgt = tgt;
+	mad = &evt->iu.implicit_logout;
+	memset(mad, 0, sizeof(*mad));
+	mad->common.version = 1;
+	mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
+	mad->common.length = sizeof(*mad);
+	mad->old_scsi_id = tgt->scsi_id;
+
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+		vhost->discovery_threads--;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
+	} else
+		tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_target *tgt = evt->tgt;
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
+	u32 status = rsp->common.status;
+
+	vhost->discovery_threads--;
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+	switch (status) {
+	case IBMVFC_MAD_SUCCESS:
+		tgt_dbg(tgt, "Query Target succeeded\n");
+		tgt->new_scsi_id = rsp->scsi_id;
+		if (rsp->scsi_id != tgt->scsi_id)
+			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+		break;
+	case IBMVFC_MAD_DRIVER_FAILED:
+		break;
+	case IBMVFC_MAD_CRQ_ERROR:
+		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+		break;
+	case IBMVFC_MAD_FAILED:
+	default:
+		tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+			ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+			ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
+
+		if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
+		    rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
+		    rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
+			ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+		else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+			ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+		break;
+	};
+
+	kref_put(&tgt->kref, ibmvfc_release_tgt);
+	ibmvfc_free_event(evt);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_query_target - Initiate a Query Target for the specified target
+ * @tgt:	ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+{
+	struct ibmvfc_query_tgt *query_tgt;
+	struct ibmvfc_host *vhost = tgt->vhost;
+	struct ibmvfc_event *evt;
+
+	if (vhost->discovery_threads >= disc_threads)
+		return;
+
+	kref_get(&tgt->kref);
+	evt = ibmvfc_get_event(vhost);
+	vhost->discovery_threads++;
+	evt->tgt = tgt;
+	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+	query_tgt = &evt->iu.query_tgt;
+	memset(query_tgt, 0, sizeof(*query_tgt));
+	query_tgt->common.version = 1;
+	query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
+	query_tgt->common.length = sizeof(*query_tgt);
+	query_tgt->wwpn = tgt->ids.port_name;
+
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+		vhost->discovery_threads--;
+		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+		kref_put(&tgt->kref, ibmvfc_release_tgt);
+	} else
+		tgt_dbg(tgt, "Sent Query Target\n");
+}
+
+/**
+ * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
+ * @vhost:		ibmvfc host struct
+ * @scsi_id:	SCSI ID to allocate target for
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+{
+	struct ibmvfc_target *tgt;
+	unsigned long flags;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	list_for_each_entry(tgt, &vhost->targets, queue) {
+		if (tgt->scsi_id == scsi_id) {
+			if (tgt->need_login)
+				ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+			goto unlock_out;
+		}
+	}
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+	if (!tgt) {
+		dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
+			scsi_id);
+		return -ENOMEM;
+	}
+
+	tgt->scsi_id = scsi_id;
+	tgt->new_scsi_id = scsi_id;
+	tgt->vhost = vhost;
+	tgt->need_login = 1;
+	kref_init(&tgt->kref);
+	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	list_add_tail(&tgt->queue, &vhost->targets);
+
+unlock_out:
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return 0;
+}
+
+/**
+ * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
+ * @vhost:		ibmvfc host struct
+ *
+ * Returns:
+ *	0 on success / other on failure
+ **/
+static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
+{
+	int i, rc;
+
+	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
+		rc = ibmvfc_alloc_target(vhost,
+					 vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
+
+	return rc;
+}
+
+/**
+ * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
+	u32 mad_status = rsp->common.status;
+
+	switch (mad_status) {
+	case IBMVFC_MAD_SUCCESS:
+		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
+		vhost->num_targets = rsp->num_written;
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
+		break;
+	case IBMVFC_MAD_FAILED:
+		dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+		ibmvfc_retry_host_init(vhost);
+		break;
+	case IBMVFC_MAD_DRIVER_FAILED:
+		break;
+	default:
+		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		break;
+	}
+
+	ibmvfc_free_event(evt);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_discover_targets - Send Discover Targets MAD
+ * @vhost:	ibmvfc host struct
+ *
+ **/
+static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_discover_targets *mad;
+	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+	mad = &evt->iu.discover_targets;
+	memset(mad, 0, sizeof(*mad));
+	mad->common.version = 1;
+	mad->common.opcode = IBMVFC_DISC_TARGETS;
+	mad->common.length = sizeof(*mad);
+	mad->bufflen = vhost->disc_buf_sz;
+	mad->buffer.va = vhost->disc_buf_dma;
+	mad->buffer.len = vhost->disc_buf_sz;
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+	if (!ibmvfc_send_event(evt, vhost, default_timeout))
+		ibmvfc_dbg(vhost, "Sent discover targets\n");
+	else
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_npiv_login_done - Completion handler for NPIV Login
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
+{
+	struct ibmvfc_host *vhost = evt->vhost;
+	u32 mad_status = evt->xfer_iu->npiv_login.common.status;
+	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
+	unsigned int npiv_max_sectors;
+
+	switch (mad_status) {
+	case IBMVFC_MAD_SUCCESS:
+		ibmvfc_free_event(evt);
+		break;
+	case IBMVFC_MAD_FAILED:
+		dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
+			ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+		if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+			ibmvfc_retry_host_init(vhost);
+		else
+			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		ibmvfc_free_event(evt);
+		return;
+	case IBMVFC_MAD_CRQ_ERROR:
+		ibmvfc_retry_host_init(vhost);
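+		/* fall through - the event is freed below */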
+	case IBMVFC_MAD_DRIVER_FAILED:
+		ibmvfc_free_event(evt);
+		return;
+	default:
+		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		ibmvfc_free_event(evt);
+		return;
+	}
+
+	vhost->client_migrated = 0;
+
+	if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
+		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
+			rsp->flags);
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		wake_up(&vhost->work_wait_q);
+		return;
+	}
+
+	if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
+		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
+			rsp->max_cmds);
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+		wake_up(&vhost->work_wait_q);
+		return;
+	}
+
+	npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
+	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
+		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
+		 rsp->drc_name, npiv_max_sectors);
+
+	fc_host_fabric_name(vhost->host) = rsp->node_name;
+	fc_host_node_name(vhost->host) = rsp->node_name;
+	fc_host_port_name(vhost->host) = rsp->port_name;
+	fc_host_port_id(vhost->host) = rsp->scsi_id;
+	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
+	fc_host_supported_classes(vhost->host) = 0;
+	if (rsp->service_parms.class1_parms[0] & 0x80000000)
+		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
+	if (rsp->service_parms.class2_parms[0] & 0x80000000)
+		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
+	if (rsp->service_parms.class3_parms[0] & 0x80000000)
+		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
+	fc_host_maxframe_size(vhost->host) =
+		rsp->service_parms.common.bb_rcv_sz & 0x0fff;
+
+	vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
+	vhost->host->max_sectors = npiv_max_sectors;
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+	wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_npiv_login - Sends NPIV login
+ * @vhost:	ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_npiv_login_mad *mad;
+	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+	ibmvfc_gather_partition_info(vhost);
+	ibmvfc_set_login_info(vhost);
+	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+
+	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
+	mad = &evt->iu.npiv_login;
+	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
+	mad->common.version = 1;
+	mad->common.opcode = IBMVFC_NPIV_LOGIN;
+	mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
+	mad->buffer.va = vhost->login_buf_dma;
+	mad->buffer.len = sizeof(*vhost->login_buf);
+
+	memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+	vhost->async_crq.cur = 0;
+	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+	if (!ibmvfc_send_event(evt, vhost, default_timeout))
+		ibmvfc_dbg(vhost, "Sent NPIV login\n");
+	else
+		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_dev_init_to_do - Is there target initialization work to do?
+ * @vhost:		ibmvfc host struct
+ *
+ * Returns:
+ *	1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_target *tgt;
+
+	list_for_each_entry(tgt, &vhost->targets, queue) {
+		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
+		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
+ * @vhost:		ibmvfc host struct
+ *
+ * Returns:
+ *	1 if work to do / 0 if not
+ **/
+static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_target *tgt;
+
+	if (kthread_should_stop())
+		return 1;
+	switch (vhost->action) {
+	case IBMVFC_HOST_ACTION_NONE:
+	case IBMVFC_HOST_ACTION_INIT_WAIT:
+		return 0;
+	case IBMVFC_HOST_ACTION_TGT_INIT:
+	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+		if (vhost->discovery_threads == disc_threads)
+			return 0;
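+		/*
+		 * There is work if any target still needs its init step run;
+		 * if inits are only outstanding (INIT_WAIT), wait for them.
+		 */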
+		list_for_each_entry(tgt, &vhost->targets, queue)
+			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
+				return 1;
+		list_for_each_entry(tgt, &vhost->targets, queue)
+			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+				return 0;
+		return 1;
+	case IBMVFC_HOST_ACTION_INIT:
+	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+	case IBMVFC_HOST_ACTION_TGT_ADD:
+	case IBMVFC_HOST_ACTION_TGT_DEL:
+	case IBMVFC_HOST_ACTION_QUERY:
+	default:
+		break;
+	};
+
+	return 1;
+}
+
+/**
+ * ibmvfc_work_to_do - Is there task level work to do?
+ * @vhost:		ibmvfc host struct
+ *
+ * Returns:
+ *	1 if work to do / 0 if not
+ **/
+static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	rc = __ibmvfc_work_to_do(vhost);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return rc;
+}
+
+/**
+ * ibmvfc_log_ae - Log async events if necessary
+ * @vhost:		ibmvfc host struct
+ * @events:		events to log
+ *
+ **/
+static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
+{
+	if (events & IBMVFC_AE_RSCN)
+		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
+	if ((events & IBMVFC_AE_LINKDOWN) &&
+	    vhost->state >= IBMVFC_HALTED)
+		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+	if ((events & IBMVFC_AE_LINKUP) &&
+	    vhost->state == IBMVFC_INITIALIZING)
+		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+}
+
+/**
+ * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
+ * @tgt:		ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
+{
+	struct ibmvfc_host *vhost = tgt->vhost;
+	struct fc_rport *rport;
+	unsigned long flags;
+
+	tgt_dbg(tgt, "Adding rport\n");
+	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	tgt->rport = rport;
+	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+	if (rport) {
+		tgt_dbg(tgt, "rport add succeeded\n");
+		rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
+		rport->supported_classes = 0;
+		if (tgt->service_parms.class1_parms[0] & 0x80000000)
+			rport->supported_classes |= FC_COS_CLASS1;
+		if (tgt->service_parms.class2_parms[0] & 0x80000000)
+			rport->supported_classes |= FC_COS_CLASS2;
+		if (tgt->service_parms.class3_parms[0] & 0x80000000)
+			rport->supported_classes |= FC_COS_CLASS3;
+	} else
+		tgt_dbg(tgt, "rport add failed\n");
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_do_work - Do task level work
+ * @vhost:		ibmvfc host struct
+ *
+ **/
+static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_target *tgt;
+	unsigned long flags;
+	struct fc_rport *rport;
+
+	ibmvfc_log_ae(vhost, vhost->events_to_log);
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	vhost->events_to_log = 0;
+	switch (vhost->action) {
+	case IBMVFC_HOST_ACTION_NONE:
+	case IBMVFC_HOST_ACTION_INIT_WAIT:
+		break;
+	case IBMVFC_HOST_ACTION_INIT:
+		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
+		vhost->job_step(vhost);
+		break;
+	case IBMVFC_HOST_ACTION_QUERY:
+		list_for_each_entry(tgt, &vhost->targets, queue)
+			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
+		break;
+	case IBMVFC_HOST_ACTION_QUERY_TGTS:
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+				tgt->job_step(tgt);
+				break;
+			}
+		}
+
+		if (!ibmvfc_dev_init_to_do(vhost))
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+		break;
+	case IBMVFC_HOST_ACTION_TGT_DEL:
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+				tgt_dbg(tgt, "Deleting rport\n");
+				rport = tgt->rport;
+				tgt->rport = NULL;
+				list_del(&tgt->queue);
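+				/* fc_remote_port_delete() can sleep, so drop the host lock first */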
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				if (rport)
+					fc_remote_port_delete(rport);
+				kref_put(&tgt->kref, ibmvfc_release_tgt);
+				return;
+			}
+		}
+
+		if (vhost->state == IBMVFC_INITIALIZING) {
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+			vhost->job_step = ibmvfc_discover_targets;
+		} else {
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+			spin_unlock_irqrestore(vhost->host->host_lock, flags);
+			scsi_unblock_requests(vhost->host);
+			wake_up(&vhost->init_wait_q);
+			return;
+		}
+		break;
+	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
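+		/* ibmvfc_alloc_targets() allocates with GFP_KERNEL, so drop the host lock around it */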
+		spin_unlock_irqrestore(vhost->host->host_lock, flags);
+		ibmvfc_alloc_targets(vhost);
+		spin_lock_irqsave(vhost->host->host_lock, flags);
+		break;
+	case IBMVFC_HOST_ACTION_TGT_INIT:
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+				tgt->job_step(tgt);
+				break;
+			}
+		}
+
+		if (!ibmvfc_dev_init_to_do(vhost)) {
+			ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+			vhost->init_retries = 0;
+			spin_unlock_irqrestore(vhost->host->host_lock, flags);
+			scsi_unblock_requests(vhost->host);
+			return;
+		}
+		break;
+	case IBMVFC_HOST_ACTION_TGT_ADD:
+		list_for_each_entry(tgt, &vhost->targets, queue) {
+			if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				ibmvfc_tgt_add_rport(tgt);
+				return;
+			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+				tgt_dbg(tgt, "Deleting rport\n");
+				rport = tgt->rport;
+				tgt->rport = NULL;
+				list_del(&tgt->queue);
+				spin_unlock_irqrestore(vhost->host->host_lock, flags);
+				if (rport)
+					fc_remote_port_delete(rport);
+				kref_put(&tgt->kref, ibmvfc_release_tgt);
+				return;
+			}
+		}
+
+		if (vhost->reinit) {
+			vhost->reinit = 0;
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+		} else {
+			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+			wake_up(&vhost->init_wait_q);
+		}
+		break;
+	default:
+		break;
+	};
+
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_work - Do task level work
+ * @data:		ibmvfc host struct
+ *
+ * Returns:
+ *	zero
+ **/
+static int ibmvfc_work(void *data)
+{
+	struct ibmvfc_host *vhost = data;
+	int rc;
+
+	set_user_nice(current, -20);
+
+	while (1) {
+		rc = wait_event_interruptible(vhost->work_wait_q,
+					      ibmvfc_work_to_do(vhost));
+
+		BUG_ON(rc);
+
+		if (kthread_should_stop())
+			break;
+
+		ibmvfc_do_work(vhost);
+	}
+
+	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
+	return 0;
+}
+
+/**
+ * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
+ * @vhost:	ibmvfc host struct
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ *
+ * Return value:
+ *	zero on success / other on failure
+ **/
+static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
+{
+	int rc, retrc = -ENOMEM;
+	struct device *dev = vhost->dev;
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+	ENTER;
+	crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
+
+	if (!crq->msgs)
+		return -ENOMEM;
+
+	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
+	crq->msg_token = dma_map_single(dev, crq->msgs,
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(crq->msg_token))
+		goto map_failed;
+
+	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+					crq->msg_token, PAGE_SIZE);
+
+	if (rc == H_RESOURCE)
+		/* maybe kexecing and resource is busy. try a reset */
+		retrc = rc = ibmvfc_reset_crq(vhost);
+
+	if (rc == H_CLOSED)
+		dev_warn(dev, "Partner adapter not ready\n");
+	else if (rc) {
+		dev_warn(dev, "Error %d opening adapter\n", rc);
+		goto reg_crq_failed;
+	}
+
+	retrc = 0;
+
+	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
+		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
+		retrc = rc;
+		goto req_irq_failed;
+	}
+
+	if ((rc = vio_enable_interrupts(vdev))) {
+		dev_err(dev, "Error %d enabling interrupts\n", rc);
+		retrc = rc;
+		goto req_irq_failed;
+	}
+
+	crq->cur = 0;
+	LEAVE;
+	return retrc;
+
+req_irq_failed:
+	do {
+		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_crq_failed:
+	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+map_failed:
+	free_page((unsigned long)crq->msgs);
+	return retrc;
+}
+
+/**
+ * ibmvfc_free_mem - Free memory for vhost
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+
+	ENTER;
+	mempool_destroy(vhost->tgt_pool);
+	kfree(vhost->trace);
+	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
+			  vhost->disc_buf_dma);
+	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
+			  vhost->login_buf, vhost->login_buf_dma);
+	dma_pool_destroy(vhost->sg_pool);
+	dma_unmap_single(vhost->dev, async_q->msg_token,
+			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+	free_page((unsigned long)async_q->msgs);
+	LEAVE;
+}
+
+/**
+ * ibmvfc_alloc_mem - Allocate memory for vhost
+ * @vhost:	ibmvfc host struct
+ *
+ * Return value:
+ * 	0 on success / non-zero on failure
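+		/*
+		 * Re-enable interrupts, then check both queues once more: a
+		 * message that arrived after the drain above but before the
+		 * enable would otherwise be left unserviced.
+		 */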
+ **/
+static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
+{
+	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+	struct device *dev = vhost->dev;
+
+	ENTER;
+	async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
+			async->valid = 0;
+		dev_err(dev, "Couldn't allocate async queue.\n");
+		goto nomem;
+	}
+
+	async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
+	async_q->msg_token = dma_map_single(dev, async_q->msgs,
+					    async_q->size * sizeof(*async_q->msgs),
+					    DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(async_q->msg_token)) {
+		dev_err(dev, "Failed to map async queue\n");
+		goto free_async_crq;
+	}
+
+	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
+					 SG_ALL * sizeof(struct srp_direct_buf),
+					 sizeof(struct srp_direct_buf), 0);
+
+	if (!vhost->sg_pool) {
+		dev_err(dev, "Failed to allocate sg pool\n");
+		goto unmap_async_crq;
+	}
+
+	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
+					      &vhost->login_buf_dma, GFP_KERNEL);
+
+	if (!vhost->login_buf) {
+		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
+		goto free_sg_pool;
+	}
+
+	vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
+					     &vhost->disc_buf_dma, GFP_KERNEL);
+
+	if (!vhost->disc_buf) {
+		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+		goto free_login_buffer;
+	}
+
+	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
+			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+
+	if (!vhost->trace)
+		goto free_disc_buffer;
+
+	vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
+						      sizeof(struct ibmvfc_target));
+
+	if (!vhost->tgt_pool) {
+		dev_err(dev, "Couldn't allocate target memory pool\n");
+		goto free_trace;
+	}
+
+	LEAVE;
+	return 0;
+
+free_trace:
+	kfree(vhost->trace);
+free_disc_buffer:
+	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
+			  vhost->disc_buf_dma);
+free_login_buffer:
+	dma_free_coherent(dev, sizeof(*vhost->login_buf),
+			  vhost->login_buf, vhost->login_buf_dma);
+free_sg_pool:
+	dma_pool_destroy(vhost->sg_pool);
+unmap_async_crq:
+	dma_unmap_single(dev, async_q->msg_token,
+			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+free_async_crq:
+	free_page((unsigned long)async_q->msgs);
+nomem:
+	LEAVE;
+	return -ENOMEM;
+}
+
+/**
+ * ibmvfc_probe - Adapter hot plug add entry point
+ * @vdev:	vio device struct
+ * @id:	vio device id struct
+ *
+ * Return value:
+ * 	0 on success / non-zero on failure
+ **/
+static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+	struct ibmvfc_host *vhost;
+	struct Scsi_Host *shost;
+	struct device *dev = &vdev->dev;
+	int rc = -ENOMEM;
+
+	ENTER;
+	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
+	if (!shost) {
+		dev_err(dev, "Couldn't allocate host data\n");
+		goto out;
+	}
+
+	shost->transportt = ibmvfc_transport_template;
+	shost->can_queue = max_requests;
+	shost->max_lun = max_lun;
+	shost->max_id = max_targets;
+	shost->max_sectors = IBMVFC_MAX_SECTORS;
+	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
+	shost->unique_id = shost->host_no;
+
+	vhost = shost_priv(shost);
+	INIT_LIST_HEAD(&vhost->sent);
+	INIT_LIST_HEAD(&vhost->free);
+	INIT_LIST_HEAD(&vhost->targets);
+	sprintf(vhost->name, IBMVFC_NAME);
+	vhost->host = shost;
+	vhost->dev = dev;
+	vhost->partition_number = -1;
+	vhost->log_level = log_level;
+	strcpy(vhost->partition_name, "UNKNOWN");
+	init_waitqueue_head(&vhost->work_wait_q);
+	init_waitqueue_head(&vhost->init_wait_q);
+
+	if ((rc = ibmvfc_alloc_mem(vhost)))
+		goto free_scsi_host;
+
+	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
+					 shost->host_no);
+
+	if (IS_ERR(vhost->work_thread)) {
+		dev_err(dev, "Couldn't create kernel thread: %ld\n",
+			PTR_ERR(vhost->work_thread));
+		goto free_host_mem;
+	}
+
+	if ((rc = ibmvfc_init_crq(vhost))) {
+		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+		goto kill_kthread;
+	}
+
+	if ((rc = ibmvfc_init_event_pool(vhost))) {
+		dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
+		goto release_crq;
+	}
+
+	if ((rc = scsi_add_host(shost, dev)))
+		goto release_event_pool;
+
+	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
+					   &ibmvfc_trace_attr))) {
+		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
+		goto remove_shost;
+	}
+
+	dev_set_drvdata(dev, vhost);
+	spin_lock(&ibmvfc_driver_lock);
+	list_add_tail(&vhost->queue, &ibmvfc_head);
+	spin_unlock(&ibmvfc_driver_lock);
+
+	ibmvfc_send_crq_init(vhost);
+	scsi_scan_host(shost);
+	return 0;
+
+remove_shost:
+	scsi_remove_host(shost);
+release_event_pool:
+	ibmvfc_free_event_pool(vhost);
+release_crq:
+	ibmvfc_release_crq_queue(vhost);
+kill_kthread:
+	kthread_stop(vhost->work_thread);
+free_host_mem:
+	ibmvfc_free_mem(vhost);
+free_scsi_host:
+	scsi_host_put(shost);
+out:
+	LEAVE;
+	return rc;
+}
+
+/**
+ * ibmvfc_remove - Adapter hot plug remove entry point
+ * @vdev:	vio device struct
+ *
+ * Return value:
+ * 	0
+ **/
+static int ibmvfc_remove(struct vio_dev *vdev)
+{
+	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+	unsigned long flags;
+
+	ENTER;
+	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+	kthread_stop(vhost->work_thread);
+	fc_remove_host(vhost->host);
+	scsi_remove_host(vhost->host);
+	ibmvfc_release_crq_queue(vhost);
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	ibmvfc_purge_requests(vhost, DID_ERROR);
+	ibmvfc_free_event_pool(vhost);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	ibmvfc_free_mem(vhost);
+	spin_lock(&ibmvfc_driver_lock);
+	list_del(&vhost->queue);
+	spin_unlock(&ibmvfc_driver_lock);
+	scsi_host_put(vhost->host);
+	LEAVE;
+	return 0;
+}
+
+static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+	{"fcp", "IBM,vfc-client"},
+	{ "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+
+static struct vio_driver ibmvfc_driver = {
+	.id_table = ibmvfc_device_table,
+	.probe = ibmvfc_probe,
+	.remove = ibmvfc_remove,
+	.driver = {
+		.name = IBMVFC_NAME,
+		.owner = THIS_MODULE,
+	}
+};
+
+static struct fc_function_template ibmvfc_transport_functions = {
+	.show_host_fabric_name = 1,
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_port_type = 1,
+	.show_host_port_id = 1,
+
+	.get_host_port_state = ibmvfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	.get_host_speed = ibmvfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
+	.terminate_rport_io = ibmvfc_terminate_rport_io,
+
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_node_name = ibmvfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = ibmvfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.get_starget_port_id = ibmvfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+};
+
+/**
+ * ibmvfc_module_init - Initialize the ibmvfc module
+ *
+ * Return value:
+ * 	0 on success / other on failure
+ **/
+static int __init ibmvfc_module_init(void)
+{
+	int rc;
+
+	if (!firmware_has_feature(FW_FEATURE_VIO))
+		return -ENODEV;
+
+	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
+	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+
+	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
+	if (!ibmvfc_transport_template)
+		return -ENOMEM;
+
+	rc = vio_register_driver(&ibmvfc_driver);
+	if (rc)
+		fc_release_transport(ibmvfc_transport_template);
+	return rc;
+}
+
+/**
+ * ibmvfc_module_exit - Teardown the ibmvfc module
+ *
+ * Return value:
+ * 	nothing
+ **/
+static void __exit ibmvfc_module_exit(void)
+{
+	vio_unregister_driver(&ibmvfc_driver);
+	fc_release_transport(ibmvfc_transport_template);
+}
+
+module_init(ibmvfc_module_init);
+module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644
index 000000000000..057f3c01ed61
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -0,0 +1,682 @@
+/*
+ * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _IBMVFC_H
+#define _IBMVFC_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "viosrp.h"
+
+#define IBMVFC_NAME	"ibmvfc"
+#define IBMVFC_DRIVER_VERSION		"1.0.0"
+#define IBMVFC_DRIVER_DATE		"(July 1, 2008)"
+
+#define IBMVFC_DEFAULT_TIMEOUT	15
+#define IBMVFC_INIT_TIMEOUT		30
+#define IBMVFC_MAX_REQUESTS_DEFAULT	100
+
+#define IBMVFC_DEBUG			0
+#define IBMVFC_MAX_TARGETS		1024
+#define IBMVFC_MAX_LUN			0xffffffff
+#define IBMVFC_MAX_SECTORS		0xffffu
+#define IBMVFC_MAX_DISC_THREADS	4
+#define IBMVFC_TGT_MEMPOOL_SZ		64
+#define IBMVFC_MAX_CMDS_PER_LUN	64
+#define IBMVFC_MAX_INIT_RETRIES	3
+#define IBMVFC_DEV_LOSS_TMO		(5 * 60)
+#define IBMVFC_DEFAULT_LOG_LEVEL	2
+#define IBMVFC_MAX_CDB_LEN		16
+
+/*
+ * Ensure we have resources for ERP and initialization:
+ * 1 for ERP
+ * 1 for initialization
+ * 1 for each discovery thread
+ */
+#define IBMVFC_NUM_INTERNAL_REQ	(1 + 1 + disc_threads)
+
+#define IBMVFC_MAD_SUCCESS		0x00
+#define IBMVFC_MAD_NOT_SUPPORTED	0xF1
+#define IBMVFC_MAD_FAILED		0xF7
+#define IBMVFC_MAD_DRIVER_FAILED	0xEE
+#define IBMVFC_MAD_CRQ_ERROR		0xEF
+
+enum ibmvfc_crq_valid {
+	IBMVFC_CRQ_CMD_RSP		= 0x80,
+	IBMVFC_CRQ_INIT_RSP		= 0xC0,
+	IBMVFC_CRQ_XPORT_EVENT		= 0xFF,
+};
+
+enum ibmvfc_crq_format {
+	IBMVFC_CRQ_INIT			= 0x01,
+	IBMVFC_CRQ_INIT_COMPLETE	= 0x02,
+	IBMVFC_PARTITION_MIGRATED	= 0x06,
+};
+
+enum ibmvfc_cmd_status_flags {
+	IBMVFC_FABRIC_MAPPED		= 0x0001,
+	IBMVFC_VIOS_FAILURE		= 0x0002,
+	IBMVFC_FC_FAILURE			= 0x0004,
+	IBMVFC_FC_SCSI_ERROR		= 0x0008,
+	IBMVFC_HW_EVENT_LOGGED		= 0x0010,
+	IBMVFC_VIOS_LOGGED		= 0x0020,
+};
+
+enum ibmvfc_fabric_mapped_errors {
+	IBMVFC_UNABLE_TO_ESTABLISH	= 0x0001,
+	IBMVFC_XPORT_FAULT		= 0x0002,
+	IBMVFC_CMD_TIMEOUT		= 0x0003,
+	IBMVFC_ENETDOWN			= 0x0004,
+	IBMVFC_HW_FAILURE			= 0x0005,
+	IBMVFC_LINK_DOWN_ERR		= 0x0006,
+	IBMVFC_LINK_DEAD_ERR		= 0x0007,
+	IBMVFC_UNABLE_TO_REGISTER	= 0x0008,
+	IBMVFC_XPORT_BUSY			= 0x000A,
+	IBMVFC_XPORT_DEAD			= 0x000B,
+	IBMVFC_CONFIG_ERROR		= 0x000C,
+	IBMVFC_NAME_SERVER_FAIL		= 0x000D,
+	IBMVFC_LINK_HALTED		= 0x000E,
+	IBMVFC_XPORT_GENERAL		= 0x8000,
+};
+
+enum ibmvfc_vios_errors {
+	IBMVFC_CRQ_FAILURE			= 0x0001,
+	IBMVFC_SW_FAILURE				= 0x0002,
+	IBMVFC_INVALID_PARAMETER		= 0x0003,
+	IBMVFC_MISSING_PARAMETER		= 0x0004,
+	IBMVFC_HOST_IO_BUS			= 0x0005,
+	IBMVFC_TRANS_CANCELLED			= 0x0006,
+	IBMVFC_TRANS_CANCELLED_IMPLICIT	= 0x0007,
+	IBMVFC_INSUFFICIENT_RESOURCE		= 0x0008,
+	IBMVFC_COMMAND_FAILED			= 0x8000,
+};
+
+enum ibmvfc_mad_types {
+	IBMVFC_NPIV_LOGIN		= 0x0001,
+	IBMVFC_DISC_TARGETS	= 0x0002,
+	IBMVFC_PORT_LOGIN		= 0x0004,
+	IBMVFC_PROCESS_LOGIN	= 0x0008,
+	IBMVFC_QUERY_TARGET	= 0x0010,
+	IBMVFC_IMPLICIT_LOGOUT	= 0x0040,
+	IBMVFC_TMF_MAD		= 0x0100,
+};
+
+struct ibmvfc_mad_common {
+	u32 version;
+	u32 reserved;
+	u32 opcode;
+	u16 status;
+	u16 length;
+	u64 tag;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_npiv_login_mad {
+	struct ibmvfc_mad_common common;
+	struct srp_direct_buf buffer;
+}__attribute__((packed, aligned (8)));
+
+#define IBMVFC_MAX_NAME 256
+
+struct ibmvfc_npiv_login {
+	u32 ostype;
+#define IBMVFC_OS_LINUX	0x02
+	u32 pad;
+	u64 max_dma_len;
+	u32 max_payload;
+	u32 max_response;
+	u32 partition_num;
+	u32 vfc_frame_version;
+	u16 fcp_version;
+	u16 flags;
+#define IBMVFC_CLIENT_MIGRATED	0x01
+#define IBMVFC_FLUSH_ON_HALT		0x02
+	u32 max_cmds;
+	u64 capabilities;
+#define IBMVFC_CAN_MIGRATE		0x01
+	u64 node_name;
+	struct srp_direct_buf async;
+	u8 partition_name[IBMVFC_MAX_NAME];
+	u8 device_name[IBMVFC_MAX_NAME];
+	u8 drc_name[IBMVFC_MAX_NAME];
+	u64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_common_svc_parms {
+	u16 fcph_version;
+	u16 b2b_credit;
+	u16 features;
+	u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
+	u32 ratov;
+	u32 edtov;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_service_parms {
+	struct ibmvfc_common_svc_parms common;
+	u8 port_name[8];
+	u8 node_name[8];
+	u32 class1_parms[4];
+	u32 class2_parms[4];
+	u32 class3_parms[4];
+	u32 obsolete[4];
+	u32 vendor_version[4];
+	u32 services_avail[2];
+	u32 ext_len;
+	u32 reserved[30];
+	u32 clk_sync_qos[2];
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_npiv_login_resp {
+	u32 version;
+	u16 status;
+	u16 error;
+	u32 flags;
+#define IBMVFC_NATIVE_FC		0x01
+#define IBMVFC_CAN_FLUSH_ON_HALT	0x08
+	u32 reserved;
+	u64 capabilites;
+	u32 max_cmds;
+	u32 scsi_id_sz;
+	u64 max_dma_len;
+	u64 scsi_id;
+	u64 port_name;
+	u64 node_name;
+	u64 link_speed;
+	u8 partition_name[IBMVFC_MAX_NAME];
+	u8 device_name[IBMVFC_MAX_NAME];
+	u8 port_loc_code[IBMVFC_MAX_NAME];
+	u8 drc_name[IBMVFC_MAX_NAME];
+	struct ibmvfc_service_parms service_parms;
+	u64 reserved2;
+}__attribute__((packed, aligned (8)));
+
+union ibmvfc_npiv_login_data {
+	struct ibmvfc_npiv_login login;
+	struct ibmvfc_npiv_login_resp resp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_discover_targets_buf {
+	u32 scsi_id[1];
+#define IBMVFC_DISC_TGT_SCSI_ID_MASK	0x00ffffff
+};
+
+struct ibmvfc_discover_targets {
+	struct ibmvfc_mad_common common;
+	struct srp_direct_buf buffer;
+	u32 flags;
+	u16 status;
+	u16 error;
+	u32 bufflen;
+	u32 num_avail;
+	u32 num_written;
+	u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fc_reason {
+	IBMVFC_INVALID_ELS_CMD_CODE	= 0x01,
+	IBMVFC_INVALID_VERSION		= 0x02,
+	IBMVFC_LOGICAL_ERROR		= 0x03,
+	IBMVFC_INVALID_CT_IU_SIZE	= 0x04,
+	IBMVFC_LOGICAL_BUSY		= 0x05,
+	IBMVFC_PROTOCOL_ERROR		= 0x07,
+	IBMVFC_UNABLE_TO_PERFORM_REQ	= 0x09,
+	IBMVFC_CMD_NOT_SUPPORTED	= 0x0B,
+	IBMVFC_SERVER_NOT_AVAIL		= 0x0D,
+	IBMVFC_CMD_IN_PROGRESS		= 0x0E,
+	IBMVFC_VENDOR_SPECIFIC		= 0xFF,
+};
+
+enum ibmvfc_fc_type {
+	IBMVFC_FABRIC_REJECT	= 0x01,
+	IBMVFC_PORT_REJECT	= 0x02,
+	IBMVFC_LS_REJECT		= 0x03,
+	IBMVFC_FABRIC_BUSY	= 0x04,
+	IBMVFC_PORT_BUSY		= 0x05,
+	IBMVFC_BASIC_REJECT	= 0x06,
+};
+
+enum ibmvfc_gs_explain {
+	IBMVFC_PORT_NAME_NOT_REG	= 0x02,
+};
+
+struct ibmvfc_port_login {
+	struct ibmvfc_mad_common common;
+	u64 scsi_id;
+	u16 reserved;
+	u16 fc_service_class;
+	u32 blksz;
+	u32 hdr_per_blk;
+	u16 status;
+	u16 error;		/* also fc_reason */
+	u16 fc_explain;
+	u16 fc_type;
+	u32 reserved2;
+	struct ibmvfc_service_parms service_parms;
+	struct ibmvfc_service_parms service_parms_change;
+	u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_prli_svc_parms {
+	u8 type;
+#define IBMVFC_SCSI_FCP_TYPE		0x08
+	u8 type_ext;
+	u16 flags;
+#define IBMVFC_PRLI_ORIG_PA_VALID			0x8000
+#define IBMVFC_PRLI_RESP_PA_VALID			0x4000
+#define IBMVFC_PRLI_EST_IMG_PAIR			0x2000
+	u32 orig_pa;
+	u32 resp_pa;
+	u32 service_parms;
+#define IBMVFC_PRLI_TASK_RETRY			0x00000200
+#define IBMVFC_PRLI_RETRY				0x00000100
+#define IBMVFC_PRLI_DATA_OVERLAY			0x00000040
+#define IBMVFC_PRLI_INITIATOR_FUNC			0x00000020
+#define IBMVFC_PRLI_TARGET_FUNC			0x00000010
+#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED	0x00000002
+#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED	0x00000001
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_process_login {
+	struct ibmvfc_mad_common common;
+	u64 scsi_id;
+	struct ibmvfc_prli_svc_parms parms;
+	u8 reserved[48];
+	u16 status;
+	u16 error;			/* also fc_reason */
+	u32 reserved2;
+	u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_query_tgt {
+	struct ibmvfc_mad_common common;
+	u64 wwpn;
+	u64 scsi_id;
+	u16 status;
+	u16 error;
+	u16 fc_explain;
+	u16 fc_type;
+	u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_implicit_logout {
+	struct ibmvfc_mad_common common;
+	u64 old_scsi_id;
+	u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_tmf {
+	struct ibmvfc_mad_common common;
+	u64 scsi_id;
+	struct scsi_lun lun;
+	u32 flags;
+#define IBMVFC_TMF_ABORT_TASK		0x02
+#define IBMVFC_TMF_ABORT_TASK_SET	0x04
+#define IBMVFC_TMF_LUN_RESET		0x10
+#define IBMVFC_TMF_TGT_RESET		0x20
+#define IBMVFC_TMF_LUA_VALID		0x40
+	u32 cancel_key;
+	u32 my_cancel_key;
+#define IBMVFC_TMF_CANCEL_KEY		0x80000000
+	u32 pad;
+	u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fcp_rsp_info_codes {
+	RSP_NO_FAILURE		= 0x00,
+	RSP_TMF_REJECTED		= 0x04,
+	RSP_TMF_FAILED		= 0x05,
+	RSP_TMF_INVALID_LUN	= 0x09,
+};
+
+struct ibmvfc_fcp_rsp_info {
+	u16 reserved;
+	u8 rsp_code;
+	u8 reserved2[4];
+}__attribute__((packed, aligned (2)));
+
+enum ibmvfc_fcp_rsp_flags {
+	FCP_BIDI_RSP			= 0x80,
+	FCP_BIDI_READ_RESID_UNDER	= 0x40,
+	FCP_BIDI_READ_RESID_OVER	= 0x20,
+	FCP_CONF_REQ			= 0x10,
+	FCP_RESID_UNDER			= 0x08,
+	FCP_RESID_OVER			= 0x04,
+	FCP_SNS_LEN_VALID			= 0x02,
+	FCP_RSP_LEN_VALID			= 0x01,
+};
+
+union ibmvfc_fcp_rsp_data {
+	struct ibmvfc_fcp_rsp_info info;
+	u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_fcp_rsp {
+	u64 reserved;
+	u16 retry_delay_timer;
+	u8 flags;
+	u8 scsi_status;
+	u32 fcp_resid;
+	u32 fcp_sense_len;
+	u32 fcp_rsp_len;
+	union ibmvfc_fcp_rsp_data data;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_cmd_flags {
+	IBMVFC_SCATTERLIST	= 0x0001,
+	IBMVFC_NO_MEM_DESC	= 0x0002,
+	IBMVFC_READ			= 0x0004,
+	IBMVFC_WRITE		= 0x0008,
+	IBMVFC_TMF			= 0x0080,
+	IBMVFC_CLASS_3_ERR	= 0x0100,
+};
+
+enum ibmvfc_fc_task_attr {
+	IBMVFC_SIMPLE_TASK	= 0x00,
+	IBMVFC_HEAD_OF_QUEUE	= 0x01,
+	IBMVFC_ORDERED_TASK	= 0x02,
+	IBMVFC_ACA_TASK		= 0x04,
+};
+
+enum ibmvfc_fc_tmf_flags {
+	IBMVFC_ABORT_TASK_SET	= 0x02,
+	IBMVFC_LUN_RESET		= 0x10,
+	IBMVFC_TARGET_RESET	= 0x20,
+};
+
+struct ibmvfc_fcp_cmd_iu {
+	struct scsi_lun lun;
+	u8 crn;
+	u8 pri_task_attr;
+	u8 tmf_flags;
+	u8 add_cdb_len;
+#define IBMVFC_RDDATA		0x02
+#define IBMVFC_WRDATA		0x01
+	u8 cdb[IBMVFC_MAX_CDB_LEN];
+	u32 xfer_len;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_cmd {
+	u64 task_tag;
+	u32 frame_type;
+	u32 payload_len;
+	u32 resp_len;
+	u32 adapter_resid;
+	u16 status;
+	u16 error;
+	u16 flags;
+	u16 response_flags;
+#define IBMVFC_ADAPTER_RESID_VALID	0x01
+	u32 cancel_key;
+	u32 exchange_id;
+	struct srp_direct_buf ext_func;
+	struct srp_direct_buf ioba;
+	struct srp_direct_buf resp;
+	u64 correlation;
+	u64 tgt_scsi_id;
+	u64 tag;
+	u64 reserved3[2];
+	struct ibmvfc_fcp_cmd_iu iu;
+	struct ibmvfc_fcp_rsp rsp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_trace_start_entry {
+	u32 xfer_len;
+}__attribute__((packed));
+
+struct ibmvfc_trace_end_entry {
+	u16 status;
+	u16 error;
+	u8 fcp_rsp_flags;
+	u8 rsp_code;
+	u8 scsi_status;
+	u8 reserved;
+}__attribute__((packed));
+
+struct ibmvfc_trace_entry {
+	struct ibmvfc_event *evt;
+	u32 time;
+	u32 scsi_id;
+	u32 lun;
+	u8 fmt;
+	u8 op_code;
+	u8 tmf_flags;
+	u8 type;
+#define IBMVFC_TRC_START	0x00
+#define IBMVFC_TRC_END		0xff
+	union {
+		struct ibmvfc_trace_start_entry start;
+		struct ibmvfc_trace_end_entry end;
+	} u;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_crq_formats {
+	IBMVFC_CMD_FORMAT		= 0x01,
+	IBMVFC_ASYNC_EVENT	= 0x02,
+	IBMVFC_MAD_FORMAT		= 0x04,
+};
+
+enum ibmvfc_async_event {
+	IBMVFC_AE_ELS_PLOGI		= 0x0001,
+	IBMVFC_AE_ELS_LOGO		= 0x0002,
+	IBMVFC_AE_ELS_PRLO		= 0x0004,
+	IBMVFC_AE_SCN_NPORT		= 0x0008,
+	IBMVFC_AE_SCN_GROUP		= 0x0010,
+	IBMVFC_AE_SCN_DOMAIN		= 0x0020,
+	IBMVFC_AE_SCN_FABRIC		= 0x0040,
+	IBMVFC_AE_LINK_UP			= 0x0080,
+	IBMVFC_AE_LINK_DOWN		= 0x0100,
+	IBMVFC_AE_LINK_DEAD		= 0x0200,
+	IBMVFC_AE_HALT			= 0x0400,
+	IBMVFC_AE_RESUME			= 0x0800,
+	IBMVFC_AE_ADAPTER_FAILED	= 0x1000,
+};
+
+struct ibmvfc_crq {
+	u8 valid;
+	u8 format;
+	u8 reserved[6];
+	u64 ioba;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_crq_queue {
+	struct ibmvfc_crq *msgs;
+	int size, cur;
+	dma_addr_t msg_token;
+};
+
+struct ibmvfc_async_crq {
+	u8 valid;
+	u8 pad[3];
+	u32 pad2;
+	u64 event;
+	u64 scsi_id;
+	u64 wwpn;
+	u64 node_name;
+	u64 reserved;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_async_crq_queue {
+	struct ibmvfc_async_crq *msgs;
+	int size, cur;
+	dma_addr_t msg_token;
+};
+
+union ibmvfc_iu {
+	struct ibmvfc_mad_common mad_common;
+	struct ibmvfc_npiv_login_mad npiv_login;
+	struct ibmvfc_discover_targets discover_targets;
+	struct ibmvfc_port_login plogi;
+	struct ibmvfc_process_login prli;
+	struct ibmvfc_query_tgt query_tgt;
+	struct ibmvfc_implicit_logout implicit_logout;
+	struct ibmvfc_tmf tmf;
+	struct ibmvfc_cmd cmd;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_target_action {
+	IBMVFC_TGT_ACTION_NONE = 0,
+	IBMVFC_TGT_ACTION_INIT,
+	IBMVFC_TGT_ACTION_INIT_WAIT,
+	IBMVFC_TGT_ACTION_ADD_RPORT,
+	IBMVFC_TGT_ACTION_DEL_RPORT,
+};
+
+struct ibmvfc_target {
+	struct list_head queue;
+	struct ibmvfc_host *vhost;
+	u64 scsi_id;
+	u64 new_scsi_id;
+	struct fc_rport *rport;
+	int target_id;
+	enum ibmvfc_target_action action;
+	int need_login;
+	int init_retries;
+	struct ibmvfc_service_parms service_parms;
+	struct ibmvfc_service_parms service_parms_change;
+	struct fc_rport_identifiers ids;
+	void (*job_step) (struct ibmvfc_target *);
+	struct kref kref;
+};
+
+/* a unit of work for the hosting partition */
+struct ibmvfc_event {
+	struct list_head queue;
+	struct ibmvfc_host *vhost;
+	struct ibmvfc_target *tgt;
+	struct scsi_cmnd *cmnd;
+	atomic_t free;
+	union ibmvfc_iu *xfer_iu;
+	void (*done) (struct ibmvfc_event *);
+	struct ibmvfc_crq crq;
+	union ibmvfc_iu iu;
+	union ibmvfc_iu *sync_iu;
+	struct srp_direct_buf *ext_list;
+	dma_addr_t ext_list_token;
+	struct completion comp;
+	struct timer_list timer;
+};
+
+/* a pool of event structs for use */
+struct ibmvfc_event_pool {
+	struct ibmvfc_event *events;
+	u32 size;
+	union ibmvfc_iu *iu_storage;
+	dma_addr_t iu_token;
+};
+
+enum ibmvfc_host_action {
+	IBMVFC_HOST_ACTION_NONE = 0,
+	IBMVFC_HOST_ACTION_INIT,
+	IBMVFC_HOST_ACTION_INIT_WAIT,
+	IBMVFC_HOST_ACTION_QUERY,
+	IBMVFC_HOST_ACTION_QUERY_TGTS,
+	IBMVFC_HOST_ACTION_TGT_DEL,
+	IBMVFC_HOST_ACTION_ALLOC_TGTS,
+	IBMVFC_HOST_ACTION_TGT_INIT,
+	IBMVFC_HOST_ACTION_TGT_ADD,
+};
+
+enum ibmvfc_host_state {
+	IBMVFC_NO_CRQ = 0,
+	IBMVFC_INITIALIZING,
+	IBMVFC_ACTIVE,
+	IBMVFC_HALTED,
+	IBMVFC_LINK_DOWN,
+	IBMVFC_LINK_DEAD,
+	IBMVFC_HOST_OFFLINE,
+};
+
+struct ibmvfc_host {
+	char name[8];
+	struct list_head queue;
+	struct Scsi_Host *host;
+	enum ibmvfc_host_state state;
+	enum ibmvfc_host_action action;
+#define IBMVFC_NUM_TRACE_INDEX_BITS		8
+#define IBMVFC_NUM_TRACE_ENTRIES		(1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_SIZE	(sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
+	struct ibmvfc_trace_entry *trace;
+	u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+	int num_targets;
+	struct list_head targets;
+	struct list_head sent;
+	struct list_head free;
+	struct device *dev;
+	struct ibmvfc_event_pool pool;
+	struct dma_pool *sg_pool;
+	mempool_t *tgt_pool;
+	struct ibmvfc_crq_queue crq;
+	struct ibmvfc_async_crq_queue async_crq;
+	struct ibmvfc_npiv_login login_info;
+	union ibmvfc_npiv_login_data *login_buf;
+	dma_addr_t login_buf_dma;
+	int disc_buf_sz;
+	int log_level;
+	struct ibmvfc_discover_targets_buf *disc_buf;
+	int task_set;
+	int init_retries;
+	int discovery_threads;
+	int client_migrated;
+	int reinit;
+	int events_to_log;
+#define IBMVFC_AE_LINKUP	0x0001
+#define IBMVFC_AE_LINKDOWN	0x0002
+#define IBMVFC_AE_RSCN		0x0004
+	dma_addr_t disc_buf_dma;
+	unsigned int partition_number;
+	char partition_name[97];
+	void (*job_step) (struct ibmvfc_host *);
+	struct task_struct *work_thread;
+	wait_queue_head_t init_wait_q;
+	wait_queue_head_t work_wait_q;
+};
+
+#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
+
+#define tgt_dbg(t, fmt, ...)			\
+	DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+
+#define tgt_err(t, fmt, ...)		\
+	dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
+#define ibmvfc_dbg(vhost, ...) \
+	DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
+
+#define ibmvfc_log(vhost, level, ...) \
+	do { \
+		if (level >= (vhost)->log_level) \
+			dev_err((vhost)->dev, ##__VA_ARGS__); \
+	} while (0)
+
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ibmvfc_create_trace_file(kobj, attr) 0
+#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
+#endif
+
+#endif
-- 
cgit v1.2.3


From 2476b4d0426e1d6d4a42b2f7ae08f668b2cfe510 Mon Sep 17 00:00:00 2001
From: James Bottomley <James.Bottomley@HansenPartnership.com>
Date: Thu, 3 Jul 2008 11:31:55 -0500
Subject: [SCSI] fix locking in host use of blk_plug_device()

scsi_lib.c:scsi_host_queue_ready() plugs the device with incorrect
locking.  It should actually have the queue lock held, but it's
holding the host lock.  Fix this by eliminating the call.
scsi_host_queue_ready() has no need to plug the queue: if it returns
0, control in scsi_request_fn() transfers to not_ready, which acquires
the queue lock and plugs the device if it's at zero depth.
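
For reference, the not_ready path looks roughly like this (a paraphrase of
the handling in scsi_request_fn(), not an exact quote of the tree):

	not_ready:
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, req);
		sdev->device_busy--;
		if (sdev->device_busy == 0)
			blk_plug_device(q);	/* plugging done under the queue lock */
		/* ... */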

Reported-by: Elias Oltmanns <eo@nebensachen.de>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/scsi_lib.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aa8d5de58839..0451903452e6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1328,7 +1328,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 				printk("scsi%d unblocking host at zero depth\n",
 					shost->host_no));
 		} else {
-			blk_plug_device(q);
 			return 0;
 		}
 	}
-- 
cgit v1.2.3


From 453cd0f3ff3bf25d86c96e62d271ba238f06d5ff Mon Sep 17 00:00:00 2001
From: Adrian Bunk <bunk@kernel.org>
Date: Thu, 3 Jul 2008 23:47:33 -0700
Subject: [SCSI] make struct scsi_{host,target}_type static

Make the needlessly global struct scsi_{host,target}_type static.

Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/hosts.c     | 2 +-
 drivers/scsi/scsi_scan.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3690360d7a79..497ca68d1ffc 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
 	kfree(shost);
 }
 
-struct device_type scsi_host_type = {
+static struct device_type scsi_host_type = {
 	.name =		"scsi_host",
 	.release =	scsi_host_dev_release,
 };
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a00eee6f7be9..196fe3af0d5e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
 	put_device(parent);
 }
 
-struct device_type scsi_target_type = {
+static struct device_type scsi_target_type = {
 	.name =		"scsi_target",
 	.release =	scsi_target_dev_release,
 };
-- 
cgit v1.2.3


From d7f305e9a08040649b0800245e67708df58cdb55 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date: Mon, 7 Jul 2008 10:50:25 +1000
Subject: [SCSI] sym53c8xx: Fix bogus sym_que_entry re-implementation of
 container_of

The sym53c8xx driver, for some reason, seems to mostly re-implement
linux/list.h with added bogosity. The main offender is its
implementation of sym_que_entry, which spits warnings with recent
versions of gcc on some 64-bit architectures.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
---
 drivers/scsi/sym53c8xx_2/sym_misc.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'drivers/scsi')

diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
index 0433d5d0caf3..430537183c18 100644
--- a/drivers/scsi/sym53c8xx_2/sym_misc.h
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
 	}
 }
 
-#define sym_que_entry(ptr, type, member) \
-	((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
-
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define sym_insque(new, pos)		__sym_que_add(new, pos, (pos)->flink)
 
-- 
cgit v1.2.3