author     Linus Torvalds <torvalds@linux-foundation.org>  2017-03-02 14:52:05 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-03-02 14:52:05 -0800
commit     821fd6f6cb6500cd04a6c7e8f701f9b311a5c2b3 (patch)
tree       d5b3dc3a6bb92b8842cad096e737f3518d257bbe /drivers
parent     ca4c7d7c2b6b4bfb293c24da1442cf0336c1912a (diff)
parent     c87ba9c49c1fa86261448b09c5f1b2223bf7efd9 (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - enable dual mode (initiator + target) qla2xxx operation. (Quinn +
     Himanshu)

   - add a framework for qla2xxx async fabric discovery. (Quinn +
     Himanshu)

   - enable iscsi PDU DDP completion offload in cxgbit/T6 NICs. (Varun)

   - fix target-core handling of aborted failed commands. (Bart)

   - fix a long-standing target-core NULL pointer dereference with
     active I/O LUN shutdown. (Rob Millner + Bryant + nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (44 commits)
  target: Add counters for ABORT_TASK success + failure
  iscsi-target: Fix early login failure statistics misses
  target: Fix NULL dereference during LUN lookup + active I/O shutdown
  target: Delete tmr from list before processing
  target: Fix handling of aborted failed commands
  uapi: fix linux/target_core_user.h userspace compilation errors
  target: export protocol identifier
  qla2xxx: Fix a warning reported by the "smatch" static checker
  target/iscsi: Fix unsolicited data seq_end_offset calculation
  target/cxgbit: add T6 iSCSI DDP completion feature
  target/cxgbit: Enable DDP for T6 only if data sequence and pdu are in order
  target/cxgbit: Use T6 specific macros to get ETH/IP hdr len
  target/cxgbit: use cxgb4_tp_smt_idx() to get smt idx
  target/iscsi: split iscsit_check_dataout_hdr()
  target: Remove command flag CMD_T_DEV_ACTIVE
  target: Remove command flag CMD_T_BUSY
  target: Move session check from target_put_sess_cmd() into target_release_cmd_kref()
  target: Inline transport_cmd_check_stop()
  target: Remove an overly chatty debug message
  target: Stop execution if CMD_T_STOP has been set
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h  4
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c  23
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  306
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c  11
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h  106
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h  72
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c  726
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  1596
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h  18
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c  167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  317
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  232
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c  48
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  330
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  2356
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  252
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  256
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h  4
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c  30
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_lro.h  5
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_main.c  69
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_target.c  173
-rw-r--r--  drivers/target/iscsi/iscsi_target.c  138
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c  6
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c  6
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h  2
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c  2
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c  16
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c  4
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c  39
-rw-r--r--  drivers/target/target_core_device.c  11
-rw-r--r--  drivers/target/target_core_sbc.c  2
-rw-r--r--  drivers/target/target_core_stat.c  36
-rw-r--r--  drivers/target/target_core_tmr.c  17
-rw-r--r--  drivers/target/target_core_tpg.c  3
-rw-r--r--  drivers/target/target_core_transport.c  164
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c  2
39 files changed, 4739 insertions, 2815 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 5043b64805f0..8098c93cd16e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1364,6 +1364,10 @@ struct cpl_tx_data {
#define TX_FORCE_S 13
#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+#define T6_TX_FORCE_S 20
+#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
+#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
+
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
index e995a1a3840a..a91ad766cef0 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -59,7 +59,7 @@ struct cxgbi_pagepod_hdr {
#define PPOD_PAGES_MAX 4
struct cxgbi_pagepod {
struct cxgbi_pagepod_hdr hdr;
- u64 addr[PPOD_PAGES_MAX + 1];
+ __be64 addr[PPOD_PAGES_MAX + 1];
};
/* ddp tag format
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f201f4099620..f610103994af 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2163,6 +2163,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
+ vha->gnl.ldma);
+
if (vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 40ca75bbcb9d..84c9098cc089 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -13,28 +13,25 @@
/* BSG support for ELS/CT pass through */
void
-qla2x00_bsg_job_done(void *data, void *ptr, int res)
+qla2x00_bsg_job_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ srb_t *sp = ptr;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- sp->free(vha, sp);
+ sp->free(sp);
}
void
-qla2x00_bsg_sp_free(void *data, void *ptr)
+qla2x00_bsg_sp_free(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ srb_t *sp = ptr;
+ struct qla_hw_data *ha = sp->vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_request *bsg_request = bsg_job->request;
-
- struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
@@ -62,7 +59,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
int
@@ -394,7 +391,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -542,7 +539,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -2578,6 +2575,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- sp->free(vha, sp);
+ sp->free(sp);
return 0;
}
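
The qla_bsg.c hunks above are part of the series-wide srb_t callback rework: sp->done() and sp->free() lose their separate host argument because the srb now carries its own scsi_qla_host pointer (the new sp->vha member added in qla_def.h below). A minimal, self-contained sketch of that shape, using mock stand-in types rather than the real qla2xxx structures:

#include <stdio.h>

struct mock_host { const char *name; };		/* stand-in for scsi_qla_host_t */

struct mock_srb {
	struct mock_host *vha;			/* new: the srb carries its host */
	void (*done)(void *sp, int res);	/* was (void *vha, void *sp, int res) */
	void (*free)(void *sp);			/* was (void *vha, void *sp) */
};

static void mock_bsg_job_done(void *ptr, int res)
{
	struct mock_srb *sp = ptr;

	/* callbacks recover the host from the srb instead of an extra argument */
	printf("done on %s, res=%d\n", sp->vha->name, res);
	sp->free(sp);
}

static void mock_sp_free(void *ptr)
{
	struct mock_srb *sp = ptr;

	printf("freeing srb on %s\n", sp->vha->name);
}

int main(void)
{
	struct mock_host vha = { "host0" };
	struct mock_srb sp = { &vha, mock_bsg_job_done, mock_sp_free };

	sp.done(&sp, 0);	/* callers now pass only the srb, as in sp->free(sp) */
	return 0;
}
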
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2f14adfab018..625d438e3cce 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -55,6 +55,8 @@
#include "qla_settings.h"
+#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR)
+
/*
* Data bit definitions
*/
@@ -251,6 +253,14 @@
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
+
+struct name_list_extended {
+ struct get_name_list_extended *l;
+ dma_addr_t ldma;
+ struct list_head fcports; /* protect by sess_list */
+ u32 size;
+ u8 sent;
+};
/*
* Timeout timer counts in seconds
*/
@@ -309,6 +319,17 @@ struct els_logo_payload {
uint8_t wwpn[WWN_SIZE];
};
+struct ct_arg {
+ void *iocb;
+ u16 nport_handle;
+ dma_addr_t req_dma;
+ dma_addr_t rsp_dma;
+ u32 req_size;
+ u32 rsp_size;
+ void *req;
+ void *rsp;
+};
+
/*
* SRB extensions.
*/
@@ -320,6 +341,7 @@ struct srb_iocb {
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
uint16_t data[2];
+ u32 iop[2];
} logio;
struct {
#define ELS_DCMD_TIMEOUT 20
@@ -372,6 +394,16 @@ struct srb_iocb {
__le16 comp_status;
struct completion comp;
} abt;
+ struct ct_arg ctarg;
+ struct {
+ __le16 in_mb[28]; /* fr fw */
+ __le16 out_mb[28]; /* to fw */
+ void *out, *in;
+ dma_addr_t out_dma, in_dma;
+ } mbx;
+ struct {
+ struct imm_ntfy_from_isp *ntfy;
+ } nack;
} u;
struct timer_list timer;
@@ -392,23 +424,31 @@ struct srb_iocb {
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
#define SRB_ELS_DCMD 13
+#define SRB_MB_IOCB 14
+#define SRB_CT_PTHRU_CMD 15
+#define SRB_NACK_PLOGI 16
+#define SRB_NACK_PRLI 17
+#define SRB_NACK_LOGO 18
typedef struct srb {
atomic_t ref_count;
struct fc_port *fcport;
+ struct scsi_qla_host *vha;
uint32_t handle;
uint16_t flags;
uint16_t type;
char *name;
int iocbs;
struct qla_qpair *qpair;
+ u32 gen1; /* scratch */
+ u32 gen2; /* scratch */
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
- void (*done)(void *, void *, int);
- void (*free)(void *, void *);
+ void (*done)(void *, int);
+ void (*free)(void *);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
@@ -1794,6 +1834,7 @@ typedef struct {
#define SS_RESIDUAL_OVER BIT_10
#define SS_SENSE_LEN_VALID BIT_9
#define SS_RESPONSE_INFO_LEN_VALID BIT_8
+#define SS_SCSI_STATUS_BYTE 0xff
#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
#define SS_BUSY_CONDITION BIT_3
@@ -1975,6 +2016,84 @@ struct mbx_entry {
uint8_t port_name[WWN_SIZE];
};
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the Target driver.
+ * This IOCB would have report of events sent by the
+ * initiator, that needs to be handled by the target
+ * driver immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t fw_handle;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ union {
+ struct {
+ uint8_t node_name[8];
+ } plogi; /* PLOGI/ADISC/PDISC */
+ struct {
+ /* PRLI word 3 bit 0-15 */
+ uint16_t wd3_lo;
+ uint8_t resv0[6];
+ } prli;
+ struct {
+ uint8_t port_id[3];
+ uint8_t resv1;
+ uint16_t nport_handle;
+ uint16_t resv2;
+ } req_els;
+ } u;
+ uint8_t port_name[8];
+ uint8_t resv3[3];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
/*
* ISP request and response queue entry sizes
*/
@@ -2022,10 +2141,22 @@ typedef struct {
#define FC4_TYPE_OTHER 0x0
#define FC4_TYPE_UNKNOWN 0xff
+/* mailbox command 4G & above */
+struct mbx_24xx_entry {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_define1;
+ uint8_t entry_status;
+ uint32_t handle;
+ uint16_t mb[28];
+};
+
+#define IOCB_SIZE 64
+
/*
* Fibre channel port type.
*/
- typedef enum {
+typedef enum {
FCT_UNKNOWN,
FCT_RSCN,
FCT_SWITCH,
@@ -2034,6 +2165,74 @@ typedef struct {
FCT_TARGET
} fc_port_type_t;
+enum qla_sess_deletion {
+ QLA_SESS_DELETION_NONE = 0,
+ QLA_SESS_DELETION_IN_PROGRESS,
+ QLA_SESS_DELETED,
+};
+
+enum qlt_plogi_link_t {
+ QLT_PLOGI_LINK_SAME_WWN,
+ QLT_PLOGI_LINK_CONFLICT,
+ QLT_PLOGI_LINK_MAX
+};
+
+struct qlt_plogi_ack_t {
+ struct list_head list;
+ struct imm_ntfy_from_isp iocb;
+ port_id_t id;
+ int ref_count;
+ void *fcport;
+};
+
+struct ct_sns_desc {
+ struct ct_sns_pkt *ct_sns;
+ dma_addr_t ct_sns_dma;
+};
+
+enum discovery_state {
+ DSC_DELETED,
+ DSC_GID_PN,
+ DSC_GNL,
+ DSC_LOGIN_PEND,
+ DSC_LOGIN_FAILED,
+ DSC_GPDB,
+ DSC_GPSC,
+ DSC_UPD_FCPORT,
+ DSC_LOGIN_COMPLETE,
+ DSC_DELETE_PEND,
+};
+
+enum login_state { /* FW control Target side */
+ DSC_LS_LLIOCB_SENT = 2,
+ DSC_LS_PLOGI_PEND,
+ DSC_LS_PLOGI_COMP,
+ DSC_LS_PRLI_PEND,
+ DSC_LS_PRLI_COMP,
+ DSC_LS_PORT_UNAVAIL,
+ DSC_LS_PRLO_PEND = 9,
+ DSC_LS_LOGO_PEND,
+};
+
+enum fcport_mgt_event {
+ FCME_RELOGIN = 1,
+ FCME_RSCN,
+ FCME_GIDPN_DONE,
+ FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
+ FCME_GNL_DONE,
+ FCME_GPSC_DONE,
+ FCME_GPDB_DONE,
+ FCME_GPNID_DONE,
+ FCME_DELETE_DONE,
+};
+
+enum rscn_addr_format {
+ RSCN_PORT_ADDR,
+ RSCN_AREA_ADDR,
+ RSCN_DOM_ADDR,
+ RSCN_FAB_ADDR,
+};
+
/*
* Fibre channel port structure.
*/
@@ -2047,6 +2246,29 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:2;
+ unsigned int local:1;
+ unsigned int logout_on_delete:1;
+ unsigned int logo_ack_needed:1;
+ unsigned int keep_nport_handle:1;
+ unsigned int send_els_logo:1;
+ unsigned int login_pause:1;
+ unsigned int login_succ:1;
+
+ struct fc_port *conflict;
+ unsigned char logout_completed;
+ int generation;
+
+ struct se_session *se_sess;
+ struct kref sess_kref;
+ struct qla_tgt *tgt;
+ unsigned long expires;
+ struct list_head del_list_entry;
+ struct work_struct free_work;
+
+ struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
+
uint16_t tgt_id;
uint16_t old_tgt_id;
@@ -2075,8 +2297,30 @@ typedef struct fc_port {
unsigned long retry_delay_timestamp;
struct qla_tgt_sess *tgt_session;
+ struct ct_sns_desc ct_desc;
+ enum discovery_state disc_state;
+ enum login_state fw_login_state;
+ u32 login_gen, last_login_gen;
+ u32 rscn_gen, last_rscn_gen;
+ u32 chip_reset;
+ struct list_head gnl_entry;
+ struct work_struct del_work;
+ u8 iocb[IOCB_SIZE];
} fc_port_t;
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+struct event_arg {
+ enum fcport_mgt_event event;
+ fc_port_t *fcport;
+ srb_t *sp;
+ port_id_t id;
+ u16 data[2], rc;
+ u8 port_name[WWN_SIZE];
+ u32 iop[2];
+};
+
#include "qla_mr.h"
/*
@@ -2154,6 +2398,10 @@ static const char * const port_state_str[] = {
#define GFT_ID_REQ_SIZE (16 + 4)
#define GFT_ID_RSP_SIZE (16 + 32)
+#define GID_PN_CMD 0x121
+#define GID_PN_REQ_SIZE (16 + 8)
+#define GID_PN_RSP_SIZE (16 + 4)
+
#define RFT_ID_CMD 0x217
#define RFT_ID_REQ_SIZE (16 + 4 + 32)
#define RFT_ID_RSP_SIZE 16
@@ -2479,6 +2727,10 @@ struct ct_sns_req {
uint8_t reserved;
uint8_t port_name[3];
} gff_id;
+
+ struct {
+ uint8_t port_name[8];
+ } gid_pn;
} req;
};
@@ -2558,6 +2810,10 @@ struct ct_sns_rsp {
struct {
uint8_t fc4_features[128];
} gff_id;
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ } gid_pn;
} rsp;
};
@@ -2699,11 +2955,11 @@ struct isp_operations {
uint16_t (*calc_req_entries) (uint16_t);
void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
- void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
- void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
+ void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *);
+ void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
uint32_t);
- uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
+ uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
uint32_t);
@@ -2765,13 +3021,21 @@ enum qla_work_type {
QLA_EVT_AEN,
QLA_EVT_IDC_ACK,
QLA_EVT_ASYNC_LOGIN,
- QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
+ QLA_EVT_GIDPN,
+ QLA_EVT_GPNID,
+ QLA_EVT_GPNID_DONE,
+ QLA_EVT_NEW_SESS,
+ QLA_EVT_GPDB,
+ QLA_EVT_GPSC,
+ QLA_EVT_UPD_FCPORT,
+ QLA_EVT_GNL,
+ QLA_EVT_NACK,
};
@@ -2807,6 +3071,23 @@ struct qla_work_evt {
struct {
srb_t *sp;
} iosb;
+ struct {
+ port_id_t id;
+ } gpnid;
+ struct {
+ port_id_t id;
+ u8 port_name[8];
+ void *pla;
+ } new_sess;
+ struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
+ fc_port_t *fcport;
+ u8 opt;
+ } fcport;
+ struct {
+ fc_port_t *fcport;
+ u8 iocb[IOCB_SIZE];
+ int type;
+ } nack;
} u;
};
@@ -2943,6 +3224,7 @@ struct qla_qpair {
struct qla_hw_data *hw;
struct work_struct q_work;
struct list_head qp_list_elem; /* vha->qp_list */
+ struct scsi_qla_host *vha;
};
/* Place holder for FW buffer parameters */
@@ -2963,7 +3245,6 @@ struct qlt_hw_data {
/* Protected by hw lock */
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
- uint32_t ini_mode_force_reverse:1;
uint32_t node_name_set:1;
dma_addr_t atio_dma; /* Physical address. */
@@ -3115,6 +3396,7 @@ struct qla_hw_data {
#define FLOGI_SP_SUPPORT BIT_13
uint8_t port_no; /* Physical port of adapter */
+ uint8_t exch_starvation;
/* Timeout timers. */
uint8_t loop_down_abort_time; /* port down timer */
@@ -3682,7 +3964,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
-#define SCR_PENDING 21 /* SCR in target mode */
+#define FREE_BIT 21
#define PORT_UPDATE_NEEDED 22
#define FX00_RESET_RECOVERY 23
#define FX00_TARGET_SCAN 24
@@ -3736,7 +4018,9 @@ typedef struct scsi_qla_host {
/* list of commands waiting on workqueue */
struct list_head qla_cmd_list;
struct list_head qla_sess_op_cmd_list;
+ struct list_head unknown_atio_list;
spinlock_t cmd_list_lock;
+ struct delayed_work unknown_atio_work;
/* Counter to detect races between ELS and RSCN events */
atomic_t generation_tick;
@@ -3788,6 +4072,10 @@ typedef struct scsi_qla_host {
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
uint16_t bbcr;
+ struct name_list_extended gnl;
+ /* Count of active session/fcport */
+ int fcport_count;
+ wait_queue_head_t fcport_waitQ;
} scsi_qla_host_t;
struct qla27xx_image_status {
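
The qla_def.h hunks above add enum fcport_mgt_event and struct event_arg; the async completion handlers introduced later in this series (see the qla_gs.c hunks) fill an event_arg and pass it to qla2x00_fcport_event_handler(), which dispatches on ea->event. A simplified, standalone sketch of that dispatch pattern, with mock names rather than the driver's own:

#include <stdio.h>
#include <string.h>

enum mock_fcport_mgt_event {			/* mirrors enum fcport_mgt_event */
	MOCK_FCME_RELOGIN = 1,
	MOCK_FCME_RSCN,
	MOCK_FCME_GIDPN_DONE,
};

struct mock_event_arg {				/* trimmed-down struct event_arg */
	enum mock_fcport_mgt_event event;
	int rc;
	unsigned char port_name[8];
};

/* one central handler dispatches on the event type */
static void mock_fcport_event_handler(const struct mock_event_arg *ea)
{
	switch (ea->event) {
	case MOCK_FCME_GIDPN_DONE:
		printf("GID_PN completed, rc=%d\n", ea->rc);
		break;
	default:
		printf("unhandled event %d\n", (int)ea->event);
		break;
	}
}

int main(void)
{
	struct mock_event_arg ea;

	/* an async completion callback builds the event and hands it off */
	memset(&ea, 0, sizeof(ea));
	ea.event = MOCK_FCME_GIDPN_DONE;
	ea.rc = 0;
	mock_fcport_event_handler(&ea);
	return 0;
}
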
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 34272fde8a5b..b48cce696bac 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -18,7 +18,7 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
seq_printf(s, "%s\n",vha->host_str);
@@ -26,12 +26,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
seq_printf(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ list_for_each_entry(sess, &vha->vp_fcports, list)
seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
- sess->s_id.b.domain,sess->s_id.b.area,
- sess->s_id.b.al_pa, sess->port_name,
- sess->loop_id);
- }
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa, sess->port_name,
+ sess->loop_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 8a2368b32dec..1f808928763b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -72,6 +72,37 @@ struct port_database_24xx {
uint8_t reserved_3[24];
};
+/*
+ * MB 75h returns a list of DB entries similar to port_database_24xx(64B).
+ * However, in this case it returns 1st 40 bytes.
+ */
+struct get_name_list_extended {
+ __le16 flags;
+ u8 current_login_state;
+ u8 last_login_state;
+ u8 hard_address[3];
+ u8 reserved_1;
+ u8 port_id[3];
+ u8 sequence_id;
+ __le16 port_timer;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 receive_data_size;
+ __le16 reserved_2;
+
+ /* PRLI SVC Param are Big endian */
+ u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */
+ u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */
+ u8 port_name[WWN_SIZE];
+ u8 node_name[WWN_SIZE];
+};
+
+/* MB 75h: This is the short version of the database */
+struct get_name_list {
+ u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */
+ __le16 nport_handle;
+ u8 reserved;
+};
+
struct vp_database_24xx {
uint16_t vp_status;
uint8_t options;
@@ -1270,27 +1301,76 @@ struct vp_config_entry_24xx {
};
#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
+enum VP_STATUS {
+ VP_STAT_COMPL,
+ VP_STAT_FAIL,
+ VP_STAT_ID_CHG,
+ VP_STAT_SNS_TO, /* timeout */
+ VP_STAT_SNS_RJT,
+ VP_STAT_SCR_TO, /* timeout */
+ VP_STAT_SCR_RJT,
+};
+
+enum VP_FLAGS {
+ VP_FLAGS_CON_FLOOP = 1,
+ VP_FLAGS_CON_P2P = 2,
+ VP_FLAGS_CON_FABRIC = 3,
+ VP_FLAGS_NAME_VALID = BIT_5,
+};
+
struct vp_rpt_id_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
-
- uint32_t handle; /* System handle. */
-
- uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */
- /* Format 1 -- | VP count |. */
- uint16_t vp_idx; /* Format 0 -- Reserved. */
- /* Format 1 -- VP status and index. */
+ uint32_t resv1;
+ uint8_t vp_acquired;
+ uint8_t vp_setup;
+ uint8_t vp_idx; /* Format 0=reserved */
+ uint8_t vp_status; /* Format 0=reserved */
uint8_t port_id[3];
uint8_t format;
-
- uint8_t vp_idx_map[16];
-
- uint8_t reserved_4[24];
- uint16_t bbcr;
- uint8_t reserved_5[6];
+ union {
+ struct {
+ /* format 0 loop */
+ uint8_t vp_idx_map[16];
+ uint8_t reserved_4[32];
+ } f0;
+ struct {
+ /* format 1 fabric */
+ uint8_t vpstat1_subcode; /* vp_status=1 subcode */
+ uint8_t flags;
+ uint16_t fip_flags;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint16_t bbcr;
+ uint8_t reserved_5[6];
+ } f1;
+ struct { /* format 2: N2N direct connect */
+ uint8_t vpstat1_subcode;
+ uint8_t flags;
+ uint16_t rsv6;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint32_t remote_nport_id;
+ uint32_t reserved_5;
+ } f2;
+ } u;
};
#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index afa0116a163b..b3d6441d1d90 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -73,6 +73,10 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
+ enum qla_work_type);
+extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
+int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
@@ -94,6 +98,13 @@ extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
int, int);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
+void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
+ struct imm_ntfy_from_isp *, int);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
+ void *);
+int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
/*
* Global Data in qla_os.c source file.
@@ -127,6 +138,7 @@ extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
extern int ql2xfwholdabts;
+extern int ql2xmvasynctoatio;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -135,8 +147,6 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
@@ -176,9 +186,13 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla2x00_sp_compl(void *, void *, int);
-extern void qla2xxx_qpair_sp_free_dma(void *, void *);
-extern void qla2xxx_qpair_sp_compl(void *, void *, int);
+extern void qla2x00_sp_compl(void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *);
+extern void qla2xxx_qpair_sp_compl(void *, int);
+extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -201,7 +215,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_free_dma(void *, void *);
+extern void qla2x00_sp_free_dma(void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -302,9 +316,6 @@ extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
-qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
-
-extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
@@ -483,6 +494,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
extern irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id);
+fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
+fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
+fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -574,8 +588,8 @@ extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_gs.c source file.
*/
-extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
-extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
+extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *);
extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
@@ -591,6 +605,23 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
+ struct ct_sns_rsp *, const char *);
+extern void qla2x00_async_iocb_timeout(void *data);
+extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
+int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
+
+extern void qla2x00_free_fcport(fc_port_t *);
+
+extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
+extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
+void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -702,10 +733,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
-extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_free(void *);
extern void qla2x00_sp_timeout(unsigned long);
-extern void qla2x00_bsg_job_done(void *, void *, int);
-extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_bsg_job_done(void *, int);
+extern void qla2x00_bsg_sp_free(void *);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
@@ -803,4 +834,17 @@ extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
+ struct imm_ntfy_from_isp *, int);
+void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
+void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
+ struct fc_port *, enum qlt_plogi_link_t);
+void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
+extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
+ uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
+void qla24xx_delete_sess_fn(struct work_struct *);
+void qlt_unknown_atio_work_fn(struct work_struct *);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ee3df8794806..ab0f873fd6a1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -24,12 +24,12 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
- ms_pkt = ha->ms_iocb;
+ ms_pkt = (ms_iocb_entry_t *)arg->iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
ms_pkt->entry_type = MS_IOCB_TYPE;
@@ -39,15 +39,15 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ms_pkt->cmd_dsd_count = cpu_to_le16(1);
ms_pkt->total_dsd_count = cpu_to_le16(2);
- ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
- ms_pkt->req_bytecount = cpu_to_le32(req_size);
+ ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
+ ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
- ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
+ ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+ ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
vha->qla_stats.control_requests++;
@@ -64,29 +64,29 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
struct qla_hw_data *ha = vha->hw;
struct ct_entry_24xx *ct_pkt;
- ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+ ct_pkt = (struct ct_entry_24xx *)arg->iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
+ ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = cpu_to_le16(1);
ct_pkt->rsp_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
- ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+ ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
+ ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
+ ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+ ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
@@ -117,7 +117,7 @@ qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
return &p->p.req;
}
-static int
+int
qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
struct ct_sns_rsp *ct_rsp, const char *routine)
{
@@ -183,14 +183,21 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_ga_nxt(vha, fcport);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GA_NXT_REQ_SIZE;
+ arg.rsp_size = GA_NXT_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue GA_NXT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
- GA_NXT_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
@@ -269,16 +276,24 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_gid_pt_data *gid_data;
struct qla_hw_data *ha = vha->hw;
uint16_t gid_pt_rsp_size;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gid_pt(vha, list);
gid_data = NULL;
gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
+
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GID_PT_REQ_SIZE;
+ arg.rsp_size = gid_pt_rsp_size;
+ arg.nport_handle = NPH_SNS;
+
/* Issue GID_PT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
- gid_pt_rsp_size);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
@@ -344,15 +359,22 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gpn_id(vha, list);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GPN_ID_REQ_SIZE;
+ arg.rsp_size = GPN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
- GPN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
@@ -406,15 +428,22 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gnn_id(vha, list);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GNN_ID_REQ_SIZE;
+ arg.rsp_size = GNN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
- GNN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
@@ -473,14 +502,21 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RFT_ID_REQ_SIZE;
+ arg.rsp_size = RFT_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RFT_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
- RFT_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
@@ -526,6 +562,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -533,10 +570,16 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RFF_ID_REQ_SIZE;
+ arg.rsp_size = RFF_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RFF_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
- RFF_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
@@ -584,14 +627,21 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RNN_ID_REQ_SIZE;
+ arg.rsp_size = RNN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
- RNN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
@@ -651,6 +701,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -658,10 +709,17 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = 0;
+ arg.rsp_size = RSNN_NN_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RSNN_NN */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
@@ -1103,7 +1161,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-static int
+int
qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
int ret, rval;
@@ -2425,15 +2483,22 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GFPN_ID_REQ_SIZE;
+ arg.rsp_size = GFPN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
- GFPN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
@@ -2471,36 +2536,6 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
-static inline void *
-qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
- uint32_t rsp_size)
-{
- struct ct_entry_24xx *ct_pkt;
- struct qla_hw_data *ha = vha->hw;
- ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
- memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
-
- ct_pkt->entry_type = CT_IOCB_TYPE;
- ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
- ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ct_pkt->cmd_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
- ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
-
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
-
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
- ct_pkt->vp_index = vha->vp_idx;
-
- return ct_pkt;
-}
-
static inline struct ct_sns_req *
qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
@@ -2530,9 +2565,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
int rval;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
+ ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -2543,11 +2579,17 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
if (rval)
return rval;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GPSC_REQ_SIZE;
+ arg.rsp_size = GPSC_RSP_SIZE;
+ arg.nport_handle = vha->mgmt_svr_loop_id;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
- GPSC_RSP_SIZE);
+ ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
@@ -2641,6 +2683,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
uint8_t fcp_scsi_features = 0;
+ struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
@@ -2651,9 +2694,15 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
if (!IS_FWI2_CAPABLE(ha))
continue;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GFF_ID_REQ_SIZE;
+ arg.rsp_size = GFF_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE,
- GFF_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
@@ -2692,3 +2741,538 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
break;
}
}
+
+/* GID_PN completion processing. */
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC login state %d \n",
+ __func__, fcport->port_name, fcport->fw_login_state);
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* PLOGI/PRLI/LOGO came in while cmd was out.*/
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ __func__, fcport->port_name, fcport->last_rscn_gen,
+ fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ return;
+ }
+
+ if (!ea->rc) {
+ if (ea->sp->gen1 == fcport->rscn_gen) {
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ if (fcport->d_id.b24 == ea->id.b24) {
+ /* cable plugged into the same place */
+ switch (vha->host->active_mode) {
+ case MODE_TARGET:
+ /* NOOP. let the other guy login to us.*/
+ break;
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ default:
+ if (atomic_read(&fcport->state) ==
+ FCS_ONLINE)
+ break;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+ }
+ } else { /* fcport->d_id.b24 != ea->id.b24 */
+ fcport->d_id.b24 = ea->id.b24;
+ if (fcport->deleted == QLA_SESS_DELETED) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
+ }
+ } else { /* ea->sp->gen1 != fcport->rscn_gen */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ /* rscn came in while cmd was out */
+ qla24xx_post_gidpn_work(vha, fcport);
+ }
+ } else { /* ea->rc */
+ /* cable pulled */
+ if (ea->sp->gen1 == fcport->rscn_gen) {
+ if (ea->sp->gen2 == fcport->login_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n", __func__,
+ __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ }
+ }
+} /* gidpn_event */
+
+static void qla2x00_async_gidpn_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
+ struct event_arg ea;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.id.b.domain = id[0];
+ ea.id.b.area = id[1];
+ ea.id.b.al_pa = id[2];
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GIDPN_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC ID %3phC \n",
+ sp->name, res, fcport->port_name, id);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GID_PN;
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gidpn";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
+ GID_PN_RSP_SIZE);
+
+ /* GIDPN req */
+ memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
+ WWN_SIZE);
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gidpn_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x206f,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+static void qla24xx_async_gpsc_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport = sp->fcport;
+ struct ct_sns_rsp *ct_rsp;
+ struct event_arg ea;
+
+ ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC \n",
+ sp->name, res, fcport->port_name);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (res == (DID_ERROR << 16)) {
+ /* entry status error */
+ goto done;
+ } else if (res) {
+ if ((ct_rsp->header.reason_code ==
+ CT_REASON_INVALID_COMMAND_CODE) ||
+ (ct_rsp->header.reason_code ==
+ CT_REASON_COMMAND_UNSUPPORTED)) {
+ ql_dbg(ql_dbg_disc, vha, 0x205a,
+ "GPSC command unsupported, disabling "
+ "query.\n");
+ ha->flags.gpsc_supported = 0;
+ res = QLA_SUCCESS;
+ }
+ } else {
+ switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+ case BIT_15:
+ fcport->fp_speed = PORT_SPEED_1GB;
+ break;
+ case BIT_14:
+ fcport->fp_speed = PORT_SPEED_2GB;
+ break;
+ case BIT_13:
+ fcport->fp_speed = PORT_SPEED_4GB;
+ break;
+ case BIT_12:
+ fcport->fp_speed = PORT_SPEED_10GB;
+ break;
+ case BIT_11:
+ fcport->fp_speed = PORT_SPEED_8GB;
+ break;
+ case BIT_10:
+ fcport->fp_speed = PORT_SPEED_16GB;
+ break;
+ case BIT_8:
+ fcport->fp_speed = PORT_SPEED_32GB;
+ break;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+ sp->name,
+ fcport->fabric_port_name,
+ be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ }
+done:
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_GPSC_DONE;
+ ea.rc = res;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpsc";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
+ GPSC_RSP_SIZE);
+
+ /* GPSC req */
+ memcpy(ct_req->req.gpsc.port_name, fcport->port_name,
+ WWN_SIZE);
+
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla24xx_async_gpsc_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->name, fcport->port_name, sp->handle,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
+{
+ struct qla_work_evt *e;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.gpnid.id = *id;
+ return qla2x00_post_work(vha, e);
+}
+
+void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+}
+
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ /* cable moved. just plugged in */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+
+ fcport->rscn_gen++;
+ fcport->d_id = ea->id;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ } else {
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+
+ qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ }
+}
+
+static void qla2x00_async_gpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_rsp *ct_rsp =
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct event_arg ea;
+ struct qla_work_evt *e;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x ID %3phC. %8phC\n",
+ sp->name, res, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+ ea.sp = sp;
+ ea.id.b.domain = ct_req->req.port_id.port_id[0];
+ ea.id.b.area = ct_req->req.port_id.port_id[1];
+ ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
+ ea.rc = res;
+ ea.event = FCME_GPNID_DONE;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ if (!e) {
+ /* please ignore kernel warning. otherwise, we have mem leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
+
+/* Get WWPN with Nport ID. */
+int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ goto done_free_sp;
+ }
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
+
+ /* GPN_ID req */
+ ct_req->req.port_id.port_id[0] = id->b.domain;
+ ct_req->req.port_id.port_id[1] = id->b.area;
+ ct_req->req.port_id.port_id[2] = id->b.al_pa;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+done:
+ return rval;
+}
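The GPN_ID plumbing above follows a strict ownership rule: whichever path fails to hand the SRB off to the work queue must release both CT buffers and the SRB itself, which is why the same free block is repeated in the done callback, in the failed work-allocation branch, and under the done_free_sp label. A minimal, self-contained sketch of that ownership pattern, with plain calloc/free standing in for the coherent DMA allocations and purely hypothetical names (not the driver's API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a CT pass-through request/response pair. */
struct ct_ctx {
	void *req;
	void *rsp;
};

static void ct_ctx_free(struct ct_ctx *ctx)
{
	/* free(NULL) is a no-op, so partially built contexts are safe. */
	free(ctx->req);
	free(ctx->rsp);
	free(ctx);
}

/* Returns 0 on success; on any failure everything is released right here. */
static int ct_ctx_start(size_t req_size, size_t rsp_size)
{
	struct ct_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return -1;

	ctx->req = calloc(1, req_size);
	if (!ctx->req)
		goto err;

	ctx->rsp = calloc(1, rsp_size);
	if (!ctx->rsp)
		goto err;

	/* ... hand ctx off to the async machinery here ... */
	printf("started: req=%zu rsp=%zu bytes\n", req_size, rsp_size);
	ct_ctx_free(ctx);	/* normally done by the completion path */
	return 0;

err:
	ct_ctx_free(ctx);
	return -1;
}

int main(void)
{
	return ct_ctx_start(52, 16) ? 1 : 0;
}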
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 265e1395bdb8..32fb9007f137 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -30,15 +30,15 @@ static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
-static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
- uint16_t *);
-
+static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
+static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
+ struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -47,29 +47,27 @@ qla2x00_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_iocb *iocb;
- fc_port_t *fcport = sp->fcport;
- struct qla_hw_data *ha = fcport->vha->hw;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
unsigned long flags;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = ha->req_q_map[0];
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ req = vha->hw->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(fcport->vha, sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ sp->free(sp);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
void
-qla2x00_sp_free(void *data, void *ptr)
+qla2x00_sp_free(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -94,43 +92,72 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
return tmo;
}
-static void
+void
qla2x00_async_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct event_arg ea;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
- sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~FCF_ASYNC_SENT;
- if (sp->type == SRB_LOGIN_CMD) {
- struct srb_iocb *lio = &sp->u.iocb_cmd;
- qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
+
+ switch (sp->type) {
+ case SRB_LOGIN_CMD:
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
- qla2x00_post_async_login_done_work(fcport->vha, fcport,
- lio->u.logio.data);
- } else if (sp->type == SRB_LOGOUT_CMD) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PLOGI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.sp = sp;
+ qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+ break;
+ case SRB_LOGOUT_CMD:
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
+ break;
+ case SRB_CT_PTHRU_CMD:
+ case SRB_MB_IOCB:
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ break;
}
}
static void
-qla2x00_async_login_sp_done(void *data, void *ptr, int res)
+qla2x00_async_login_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct event_arg ea;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PLOGI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.sp = sp;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ sp->free(sp);
}
int
@@ -139,13 +166,23 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online)
+ goto done;
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ goto done;
- rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->logout_completed = 0;
+
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
@@ -165,29 +202,30 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
}
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", sp->handle, fcport->loop_id,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->login_retry);
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
static void
-qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
+qla2x00_async_logout_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
+ qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ sp->free(sp);
}
int
@@ -198,6 +236,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
int rval;
rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -214,28 +253,30 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->port_name);
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
static void
-qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
+qla2x00_async_adisc_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ sp->free(sp);
}
int
@@ -247,6 +288,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
int rval;
rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -271,15 +313,858 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport, *conflict_fcport;
+ struct get_name_list_extended *e;
+ u16 i, n, found = 0, loop_id;
+ port_id_t id;
+ u64 wwn;
+ u8 opt = 0;
+
+ fcport = ea->fcport;
+
+ if (ea->rc) { /* rval */
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "GNL failed Port login retry %8phN, retry cnt=%d.\n",
+ fcport->port_name, fcport->login_retry);
+ }
+ return;
+ }
+
+ if (fcport->last_rscn_gen != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC rscn gen changed rscn %d|%d \n",
+ __func__, fcport->port_name,
+ fcport->last_rscn_gen, fcport->rscn_gen);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ } else if (fcport->last_login_gen != fcport->login_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC login gen changed login %d|%d \n",
+ __func__, fcport->port_name,
+ fcport->last_login_gen, fcport->login_gen);
+ return;
+ }
+
+ n = ea->data[0] / sizeof(struct get_name_list_extended);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC n %d %02x%02x%02x lid %d \n",
+ __func__, __LINE__, fcport->port_name, n,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, fcport->loop_id);
+
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
+ continue;
+
+ found = 1;
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(e->nport_handle);
+ loop_id = (loop_id & 0x7fff);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ __func__, fcport->port_name,
+ e->current_login_state, fcport->fw_login_state,
+ id.b.domain, id.b.area, id.b.al_pa,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+
+ if ((id.b24 != fcport->d_id.b24) ||
+ ((fcport->loop_id != FC_NO_LOOP_ID) &&
+ (fcport->loop_id != loop_id))) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport, 1);
+ return;
+ }
+
+ fcport->loop_id = loop_id;
+
+ wwn = wwn_to_u64(fcport->port_name);
+ qlt_find_sess_invalidate_other(vha, wwn,
+ id, loop_id, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same loop_id and
+ * nport id. The conflicting fcport needs to finish
+ * cleanup before this fcport can proceed to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ }
+
+ switch (e->current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
+ opt = PDO_FORCE_ADISC;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ break;
+
+ case DSC_LS_PORT_UNAVAIL:
+ default:
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha, fcport);
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ }
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC \n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ }
+ }
+
+ if (!found) {
+ /* fw has no record of this port */
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha, fcport);
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ } else {
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport, 1);
+ }
+
+ if (fcport->loop_id == loop_id) {
+ /* FW already picked this loop id for another fcport */
+ qla2x00_find_new_loop_id(vha, fcport);
+ }
+ }
+ }
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+} /* gnl_event */
+
+static void
+qla24xx_async_gnl_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ unsigned long flags;
+ struct fc_port *fcport = NULL, *tf;
+ u16 i, n = 0, loop_id;
+ struct event_arg ea;
+ struct get_name_list_extended *e;
+ u64 wwn;
+ struct list_head h;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
+ sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
+ sp->u.iocb_cmd.u.mbx.in_mb[2]);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNL_DONE;
+
+ if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
+ sizeof(struct get_name_list_extended)) {
+ n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
+ sizeof(struct get_name_list_extended);
+ ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amount transferred */
+ }
+
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ loop_id = le16_to_cpu(e->nport_handle);
+ /* mask out the reserved bit */
+ loop_id = (loop_id & 0x7fff);
+ set_bit(loop_id, vha->hw->loop_id_map);
+ wwn = wwn_to_u64(e->port_name);
+
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+ __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ e->port_id[0], e->current_login_state, e->last_login_state,
+ (loop_id & 0x7fff));
+ }
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
+
+ INIT_LIST_HEAD(&h);
+ fcport = tf = NULL;
+ if (!list_empty(&vha->gnl.fcports))
+ list_splice_init(&vha->gnl.fcports, &h);
+
+ list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
+ list_del_init(&fcport->gnl_entry);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ ea.fcport = fcport;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *mbx;
+ int rval = QLA_FUNCTION_FAILED;
+ unsigned long flags;
+ u16 *mb;
+
+ if (!vha->flags.online)
+ goto done;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-gnlist WWPN %8phC \n", fcport->port_name);
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GNL;
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+
+ list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ rval = QLA_SUCCESS;
+ goto done;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ sp->type = SRB_MB_IOCB;
+ sp->name = "gnlist";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+ mb = sp->u.iocb_cmd.u.mbx.out_mb;
+ mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mb[1] = BIT_2 | BIT_3;
+ mb[2] = MSW(vha->gnl.ldma);
+ mb[3] = LSW(vha->gnl.ldma);
+ mb[6] = MSW(MSD(vha->gnl.ldma));
+ mb[7] = LSW(MSD(vha->gnl.ldma));
+ mb[8] = vha->gnl.size;
+ mb[9] = vha->vp_idx;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
+
+ sp->done = qla24xx_async_gnl_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - OUT WWPN %8phC hndl %x\n",
+ sp->name, fcport->port_name, sp->handle);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
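qla24xx_async_gnl() and its completion above implement a small coalescing scheme: every interested fcport is queued on vha->gnl.fcports, only the first caller actually issues the mailbox command (guarded by gnl.sent under the session lock), and the completion splices the whole list off under that same lock before notifying each waiter. A self-contained sketch of the same idea, using hypothetical names and a pthread mutex in place of the driver's spinlock:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Hypothetical waiter queued on a shared "name list" request. */
struct waiter {
	int id;
	struct waiter *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *pending;
static int in_flight;

/* Queue a waiter; only the first caller actually issues the command. */
static void request_name_list(struct waiter *w)
{
	pthread_mutex_lock(&lock);
	w->next = pending;
	pending = w;
	if (in_flight) {		/* already sent: just wait for it */
		pthread_mutex_unlock(&lock);
		return;
	}
	in_flight = 1;
	pthread_mutex_unlock(&lock);
	printf("issuing one shared command\n");
}

/* Completion: detach the whole list under the lock, then notify everyone. */
static void name_list_done(void)
{
	struct waiter *head, *w;

	pthread_mutex_lock(&lock);
	head = pending;
	pending = NULL;
	in_flight = 0;
	pthread_mutex_unlock(&lock);

	while ((w = head)) {
		head = w->next;
		printf("notifying waiter %d\n", w->id);
		free(w);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct waiter *w = calloc(1, sizeof(*w));

		w->id = i;
		request_name_list(w);
	}
	name_list_done();
	return 0;
}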
+
+int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+static
+void qla24xx_async_gpdb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t zero = 0;
+ struct port_database_24xx *pd;
+ fc_port_t *fcport = sp->fcport;
+ u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
+ int rval = QLA_SUCCESS;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
+ sp->name, res, fcport->port_name, mb[1], mb[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (res) {
+ rval = res;
+ goto gpd_error_out;
+ }
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not a target, it must be an initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_GPDB_DONE;
+ ea.rc = rval;
+ ea.fcport = fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+ sp->u.iocb_cmd.u.mbx.in_dma);
+
+ sp->free(sp);
+}
+
+static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
+ u8 opt)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ e->u.fcport.opt = opt;
+ return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ srb_t *sp;
+ struct srb_iocb *mbx;
+ int rval = QLA_FUNCTION_FAILED;
+ u16 *mb;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GPDB;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = "gpdb";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ mb = sp->u.iocb_cmd.u.mbx.out_mb;
+ mb[0] = MBC_GET_PORT_DATABASE;
+ mb[1] = fcport->loop_id;
+ mb[2] = MSW(pd_dma);
+ mb[3] = LSW(pd_dma);
+ mb[6] = MSW(MSD(pd_dma));
+ mb[7] = LSW(MSD(pd_dma));
+ mb[9] = vha->vp_idx;
+ mb[10] = opt;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
+ mbx->u.mbx.in = (void *)pd;
+ mbx->u.mbx.in_dma = pd_dma;
+
+ sp->done = qla24xx_async_gpdb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hndl %x opt %x\n",
+ sp->name, fcport->port_name, sp->handle, opt);
+
+ return rval;
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ return rval;
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ int rval = ea->rc;
+ fc_port_t *fcport = ea->fcport;
+ unsigned long flags;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
+ fcport->disc_state, fcport->fw_login_state, rval);
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ __func__, fcport->port_name, fcport->last_rscn_gen,
+ fcport->rscn_gen, fcport->last_login_gen,
+ fcport->login_gen);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ return;
+ }
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->login_gen++;
+ ea->fcport->deleted = 0;
+ ea->fcport->logout_on_delete = 1;
+
+ if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
+ vha->fcport_count++;
+ ea->fcport->login_succ = 1;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw) ||
+ !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_upd_fcport_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+} /* gpdb event */
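Both the GNL and GPDB completion handlers guard against stale results by snapshotting rscn_gen/login_gen into sp->gen1/gen2 when the command is issued and comparing them again on completion. A small, self-contained illustration of that generation check (hypothetical types, not the driver's own):

#include <stdio.h>

struct port {
	unsigned int rscn_gen;
	unsigned int login_gen;
};

struct snapshot {
	unsigned int rscn_gen;
	unsigned int login_gen;
};

/* Decide whether a completed command's result is still worth applying. */
static const char *classify(const struct snapshot *s, const struct port *p)
{
	if (s->login_gen != p->login_gen)
		return "stale: login state changed, drop result";
	if (s->rscn_gen != p->rscn_gen)
		return "stale: RSCN arrived, re-query the name server";
	return "fresh: apply result";
}

int main(void)
{
	struct port p = { .rscn_gen = 4, .login_gen = 7 };
	struct snapshot s = { .rscn_gen = 4, .login_gen = 7 };

	printf("%s\n", classify(&s, &p));
	p.rscn_gen++;			/* an RSCN raced with the command */
	printf("%s\n", classify(&s, &p));
	return 0;
}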
+
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause, fcport->flags,
+ fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
+ fcport->loop_id);
+
+ fcport->login_retry--;
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ return 0;
+
+ /* For pure target mode, login will not be initiated. */
+ if (vha->host->active_mode == MODE_TARGET)
+ return 0;
+
+ if (fcport->flags & FCF_ASYNC_SENT) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return 0;
+ }
+
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_async_gnl(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+ break;
+
+ case DSC_GNL:
+ if (fcport->login_pause) {
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ u8 opt = PDO_FORCE_ADISC;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
+
+ fcport->disc_state = DSC_GPDB;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post login \n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+
+ break;
+
+ case DSC_LOGIN_FAILED:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn \n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_post_gidpn_work(vha, fcport);
+ break;
+
+ case DSC_LOGIN_COMPLETE:
+ /* recheck login state */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb \n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
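qla24xx_fcport_handle_login() is the hub of the new discovery state machine: given the port's disc_state it decides whether to fetch the firmware name list, read the port database, send a PLOGI, or re-query the N_Port ID. A toy, self-contained version of that dispatch, with simplified states and actions (purely illustrative, not the driver's state set):

#include <stdio.h>

enum disc_state { ST_DELETED, ST_GNL, ST_LOGIN_FAILED, ST_LOGIN_COMPLETE };

/* Pick the next discovery step for one port. */
static const char *next_step(enum disc_state s, int have_loop_id, int paused)
{
	switch (s) {
	case ST_DELETED:
		return have_loop_id ? "send PLOGI" : "fetch firmware name list";
	case ST_GNL:
		if (paused)
			return "defer (conflicting session still cleaning up)";
		return "send PLOGI";
	case ST_LOGIN_FAILED:
		return "re-query N_Port ID (GID_PN)";
	case ST_LOGIN_COMPLETE:
		return "re-read port database (GPDB)";
	}
	return "no action";
}

int main(void)
{
	printf("%s\n", next_step(ST_DELETED, 0, 0));
	printf("%s\n", next_step(ST_GNL, 1, 1));
	printf("%s\n", next_step(ST_LOGIN_COMPLETE, 1, 0));
	return 0;
}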
+
+static
+void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
+{
+ fcport->rscn_gen++;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC DS %d LS %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
+
+ if (fcport->flags & FCF_ASYNC_SENT)
+ return;
+
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_LOGIN_COMPLETE:
+ qla24xx_post_gidpn_work(fcport->vha, fcport);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+ u8 *port_name, void *pla)
+{
+ struct qla_work_evt *e;
+ e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.new_sess.id = *id;
+ e->u.new_sess.pla = pla;
+ memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+
+ return qla2x00_post_work(vha, e);
+}
+
+static
+int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
+
+ return 0;
+}
+
+static
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ fcport->login_retry++;
+ return;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause,
+ fcport->deleted, fcport->conflict,
+ fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen,
+ fcport->flags);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ return;
+
+ if (fcport->flags & FCF_ASYNC_SENT) {
+ fcport->login_retry++;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ fcport->login_retry++;
+ return;
+ }
+
+ if (fcport->last_rscn_gen != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_async_gidpn(vha, fcport);
+ return;
+ }
+
+ qla24xx_fcport_handle_login(vha, fcport);
+}
+
+void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport, *f, *tf;
+ uint32_t id = 0, mask, rid;
+ int rc;
+
+ switch (ea->event) {
+ case FCME_RELOGIN:
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return;
+
+ qla24xx_handle_relogin_event(vha, ea);
+ break;
+ case FCME_RSCN:
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return;
+ switch (ea->id.b.rsvd_1) {
+ case RSCN_PORT_ADDR:
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (!fcport) {
+ /* cable moved */
+ rc = qla24xx_post_gpnid_work(vha, &ea->id);
+ if (rc) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "RSCN GPNID work failed %02x%02x%02x\n",
+ ea->id.b.domain, ea->id.b.area,
+ ea->id.b.al_pa);
+ }
+ } else {
+ ea->fcport = fcport;
+ qla24xx_handle_rscn_event(fcport, ea);
+ }
+ break;
+ case RSCN_AREA_ADDR:
+ case RSCN_DOM_ADDR:
+ if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
+ mask = 0xffff00;
+ ql_log(ql_dbg_async, vha, 0xffff,
+ "RSCN: Area 0x%06x was affected\n",
+ ea->id.b24);
+ } else {
+ mask = 0xff0000;
+ ql_log(ql_dbg_async, vha, 0xffff,
+ "RSCN: Domain 0x%06x was affected\n",
+ ea->id.b24);
+ }
+
+ rid = ea->id.b24 & mask;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports,
+ list) {
+ id = f->d_id.b24 & mask;
+ if (rid == id) {
+ ea->fcport = f;
+ qla24xx_handle_rscn_event(f, ea);
+ }
+ }
+ break;
+ case RSCN_FAB_ADDR:
+ default:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "RSCN: Fabric was affected. Addr format %d\n",
+ ea->id.b.rsvd_1);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ }
+ break;
+ case FCME_GIDPN_DONE:
+ qla24xx_handle_gidpn_event(vha, ea);
+ break;
+ case FCME_GNL_DONE:
+ qla24xx_handle_gnl_done_event(vha, ea);
+ break;
+ case FCME_GPSC_DONE:
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ break;
+ case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
+ qla24xx_handle_plogi_done_event(vha, ea);
+ break;
+ case FCME_GPDB_DONE:
+ qla24xx_handle_gpdb_event(vha, ea);
+ break;
+ case FCME_GPNID_DONE:
+ qla24xx_handle_gpnid_event(vha, ea);
+ break;
+ case FCME_DELETE_DONE:
+ qla24xx_handle_delete_done_event(vha, ea);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+}
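In the RSCN branch above, area- and domain-scope notifications are matched against registered ports by masking the 24-bit N_Port ID: 0xffff00 keeps domain plus area, 0xff0000 keeps only the domain. The arithmetic is easy to check in isolation (standalone sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Return nonzero if a 24-bit port ID falls inside an RSCN's scope.
 * Area-scope RSCNs compare the top 16 bits, domain-scope the top 8. */
static int rscn_matches(uint32_t rscn_id, uint32_t port_id, int area_scope)
{
	uint32_t mask = area_scope ? 0xffff00 : 0xff0000;

	return (rscn_id & mask) == (port_id & mask);
}

int main(void)
{
	/* Domain 0x01, area 0x02, AL_PA 0x03. */
	uint32_t port = 0x010203;

	printf("area   0x010200: %d\n", rscn_matches(0x010200, port, 1)); /* 1 */
	printf("area   0x010300: %d\n", rscn_matches(0x010300, port, 1)); /* 0 */
	printf("domain 0x010000: %d\n", rscn_matches(0x010000, port, 0)); /* 1 */
	return 0;
}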
+
static void
qla2x00_tmf_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
tmf->u.tmf.comp_status = CS_TIMEOUT;
@@ -287,10 +1172,11 @@ qla2x00_tmf_iocb_timeout(void *data)
}
static void
-qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+qla2x00_tmf_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
complete(&tmf->u.tmf.comp);
}
@@ -348,7 +1234,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
}
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -356,7 +1242,7 @@ done:
static void
qla24xx_abort_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = CS_TIMEOUT;
@@ -364,9 +1250,9 @@ qla24xx_abort_iocb_timeout(void *data)
}
static void
-qla24xx_abort_sp_done(void *data, void *ptr, int res)
+qla24xx_abort_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
complete(&abt->u.abt.comp);
@@ -375,7 +1261,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
static int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
- scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ scsi_qla_host_t *vha = cmd_sp->vha;
fc_port_t *fcport = cmd_sp->fcport;
struct srb_iocb *abt_iocb;
srb_t *sp;
@@ -408,7 +1294,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
QLA_SUCCESS : QLA_FUNCTION_FAILED;
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -441,59 +1327,65 @@ qla24xx_async_abort_command(srb_t *sp)
return qla24xx_async_abort_cmd(sp);
}
-void
-qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
+static void
+qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
- int rval;
+ port_id_t cid; /* conflict Nport id */
- switch (data[0]) {
+ switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
/*
* Driver must validate login state - If PRLI not complete,
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- rval = qla2x00_get_port_database(vha, fcport, 0);
- if (rval == QLA_NOT_LOGGED_IN) {
- fcport->flags &= ~FCF_ASYNC_SENT;
- fcport->flags |= FCF_LOGIN_NEEDED;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- break;
- }
-
- if (rval != QLA_SUCCESS) {
- qla2x00_post_async_logout_work(vha, fcport, NULL);
- qla2x00_post_async_login_work(vha, fcport, NULL);
- break;
- }
- if (fcport->flags & FCF_FCP2_DEVICE) {
- qla2x00_post_async_adisc_work(vha, fcport, data);
- break;
- }
- qla2x00_update_fcport(vha, fcport);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->chip_reset = vha->hw->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
case MBS_COMMAND_ERROR:
- fcport->flags &= ~FCF_ASYNC_SENT;
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n",
+ __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
+
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->disc_state = DSC_LOGIN_FAILED;
+ if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- break;
- case MBS_PORT_ID_USED:
- fcport->loop_id = data[1];
- qla2x00_post_async_logout_work(vha, fcport, NULL);
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
break;
case MBS_LOOP_ID_USED:
- fcport->loop_id++;
- rval = qla2x00_find_new_loop_id(vha, fcport);
- if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- break;
+ /* data[1] = IO PARAM 1 = nport ID */
+ cid.b.domain = (ea->iop[1] >> 16) & 0xff;
+ cid.b.area = (ea->iop[1] >> 8) & 0xff;
+ cid.b.al_pa = ea->iop[1] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id);
+
+ if (IS_SW_RESV_ADDR(cid)) {
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
+ } else {
+ qla2x00_clear_loop_id(ea->fcport);
}
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla24xx_post_gnl_work(vha, ea->fcport);
+ break;
+ case MBS_PORT_ID_USED:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
+ ea->fcport->d_id.b.al_pa);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
break;
}
return;
@@ -503,10 +1395,9 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- /* Don't re-login in target mode */
- if (!fcport->tgt_session)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
+ fcport->login_gen++;
return;
}
@@ -709,7 +1600,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
- if (qla_ini_mode_enabled(vha))
+ if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
@@ -2088,6 +2979,21 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
__func__, ha->fw_options[2]);
}
+ /* Move PUREX, ABTS RX & RIDA to ATIOQ */
+ if (ql2xmvasynctoatio) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_11;
+ else
+ ha->fw_options[2] &= ~BIT_11;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
+ qla2x00_set_fw_options(vha, ha->fw_options);
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -2968,8 +3874,14 @@ qla2x00_rport_del(void *data)
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport)
+ if (rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phN. rport %p roles %x \n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
+
fc_remote_port_delete(rport);
+ }
}
/**
@@ -2995,9 +3907,42 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
+ flags);
+ fcport->disc_state = DSC_DELETED;
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ fcport->deleted = QLA_SESS_DELETED;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->login_retry = 5;
+ fcport->logout_on_delete = 1;
+
+ if (!fcport->ct_desc.ct_sns) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ kfree(fcport);
+ return NULL;
+ }
+ INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_LIST_HEAD(&fcport->gnl_entry);
+ INIT_LIST_HEAD(&fcport->list);
+
return fcport;
}
+void
+qla2x00_free_fcport(fc_port_t *fcport)
+{
+ if (fcport->ct_desc.ct_sns) {
+ dma_free_coherent(&fcport->vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
+ fcport->ct_desc.ct_sns_dma);
+
+ fcport->ct_desc.ct_sns = NULL;
+ }
+ kfree(fcport);
+}
+
/*
* qla2x00_configure_loop
* Updates Fibre Channel Device Database with what is actually on loop.
@@ -3055,10 +4000,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
-
+ } else if (ha->current_topology == ISP_CFG_NL) {
+ clear_bit(RSCN_UPDATE, &flags);
+ set_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (!vha->flags.online ||
(test_bit(ABORT_ISP_ACTIVE, &flags))) {
-
set_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
}
@@ -3095,7 +4041,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
* Process any ATIO queue entries that came in
* while we weren't online.
*/
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
spin_lock_irqsave(&ha->tgt.atio_lock,
flags);
@@ -3159,6 +4106,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
uint16_t loop_id;
uint8_t domain, area, al_pa;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
found_devs = 0;
new_fcport = NULL;
@@ -3199,7 +4147,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
"Marking port lost loop_id=0x%04x.\n",
fcport->loop_id);
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
}
}
@@ -3230,13 +4178,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (loop_id > LAST_LOCAL_LOOP_ID)
continue;
- memset(new_fcport, 0, sizeof(fc_port_t));
+ memset(new_fcport->port_name, 0, WWN_SIZE);
/* Fill in member data. */
new_fcport->d_id.b.domain = domain;
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -3249,6 +4198,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
continue;
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* Check for matching device in port list. */
found = 0;
fcport = NULL;
@@ -3264,6 +4214,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
+ if (!fcport->login_succ) {
+ vha->fcport_count++;
+ fcport->login_succ = 1;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ }
+
found++;
break;
}
@@ -3274,16 +4230,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
+ if (!fcport->login_succ) {
+ vha->fcport_count++;
+ fcport->login_succ = 1;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ }
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x201c,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
}
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
@@ -3334,6 +4302,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
+/* qla2x00_reg_remote_port is reserved for Initiator Mode only. */
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
@@ -3352,12 +4321,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
- /*
- * Create target mode FC NEXUS in qla_target.c if target mode is
- * enabled..
- */
-
- qlt_fc_port_added(vha, fcport);
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
@@ -3370,6 +4333,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (fcport->port_type == FCT_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phN. rport %p is %s mode \n",
+ __func__, fcport->port_name, rport,
+ (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -3393,25 +4362,44 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
fcport->vha = vha;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n",
+ __func__, fcport->port_name);
+
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
goto reg_port;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
reg_port:
- if (qla_ini_mode_enabled(vha))
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
qla2x00_reg_remote_port(vha, fcport);
- else {
- /*
- * Create target mode FC NEXUS in qla_target.c
- */
- qlt_fc_port_added(vha, fcport);
+ break;
+ case MODE_TARGET:
+ if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+ !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_fc_port_added(vha, fcport);
+ break;
+ case MODE_DUAL:
+ qla2x00_reg_remote_port(vha, fcport);
+ if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+ !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_fc_port_added(vha, fcport);
+ break;
+ default:
+ break;
}
}
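The reg_port switch above is the visible effect of dual mode: in MODE_DUAL a discovered port is both registered with the FC transport on the initiator side and, unless the target is stopping, announced to target-core via qlt_fc_port_added(). Reduced to its skeleton (illustrative only, with hypothetical mode names):

#include <stdio.h>

enum host_mode { MODE_INI, MODE_TGT, MODE_BOTH };

/* In dual mode a discovered port is exposed to both sides of the stack. */
static void register_port(enum host_mode m)
{
	if (m == MODE_INI || m == MODE_BOTH)
		printf("register SCSI remote port\n");
	if (m == MODE_TGT || m == MODE_BOTH)
		printf("create target-core session\n");
}

int main(void)
{
	register_port(MODE_BOTH);
	return 0;
}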
@@ -3430,13 +4418,11 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport, *fcptemp;
- uint16_t next_loopid;
+ fc_port_t *fcport;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
LIST_HEAD(new_fcports);
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int discovery_gen;
/* If FL port exists, then SNS is present */
@@ -3454,7 +4440,19 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
+
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+ rval = qla2x00_send_change_request(vha, 0x3, 0);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x121,
+ "Failed to enable receiving of RSCN requests: 0x%x.\n",
+ rval);
+ }
+
+
do {
+ qla2x00_mgmt_svr_login(vha);
+
/* FDMI support. */
if (ql2xfdmienable &&
test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
@@ -3501,9 +4499,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
-#define QLA_FCPORT_SCAN 1
-#define QLA_FCPORT_FOUND 2
-
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
@@ -3516,174 +4511,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
+ rval = qla2x00_find_all_fabric_devs(vha);
if (rval != QLA_SUCCESS)
break;
-
- /*
- * Logout all previous fabric devices marked lost, except
- * FCP2 devices.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
- continue;
-
- if (fcport->scan_state == QLA_FCPORT_SCAN) {
- if (qla_ini_mode_enabled(base_vha) &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
- }
- } else if (!qla_ini_mode_enabled(base_vha)) {
- /*
- * In target mode, explicitly kill
- * sessions and log out of devices
- * that are gone, so that we don't
- * end up with an initiator using the
- * wrong ACL (if the fabric recycles
- * an FC address and we have a stale
- * session around) and so that we don't
- * report initiators that are no longer
- * on the fabric.
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
- "port gone, logging out/killing session: "
- "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
- "scan_state %d\n",
- fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- qlt_fc_port_deleted(vha, fcport,
- discovery_gen);
- }
- }
- }
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
- /*
- * Scan through our port list and login entries that need to be
- * logged in.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
- continue;
-
- /*
- * If we're not an initiator, skip looking for devices
- * and logging in. There's no reason for us to do it,
- * and it seems to actively cause problems in target
- * mode if we race with the initiator logging into us
- * (we might get the "port ID used" status back from
- * our login command and log out the initiator, which
- * seems to cause havoc).
- */
- if (!qla_ini_mode_enabled(base_vha)) {
- if (fcport->scan_state == QLA_FCPORT_FOUND) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
- "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
- "scan_state %d (initiator mode disabled; skipping "
- "login)\n", fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- }
- continue;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
- }
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
- }
-
- /* Exit if out of loop IDs. */
- if (rval != QLA_SUCCESS) {
- break;
- }
-
- /*
- * Login and add the new devices to our port list.
- */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- /*
- * If we're not an initiator, skip looking for devices
- * and logging in. There's no reason for us to do it,
- * and it seems to actively cause problems in target
- * mode if we race with the initiator logging into us
- * (we might get the "port ID used" status back from
- * our login command and log out the initiator, which
- * seems to cause havoc).
- */
- if (qla_ini_mode_enabled(base_vha)) {
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha,
- fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
-
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport,
- &next_loopid);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
- "new port %8phC state 0x%x flags 0x%x fc4_type "
- "0x%x scan_state %d (initiator mode disabled; "
- "skipping login)\n",
- fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- }
-
- list_move_tail(&fcport->list, &vha->vp_fcports);
- }
} while (0);
- /* Free all new device structures not processed. */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
-
- if (rval) {
+ if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
- }
return (rval);
}
@@ -3702,12 +4537,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* Kernel context.
*/
static int
-qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
- struct list_head *new_fcports)
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
int rval;
uint16_t loop_id;
- fc_port_t *fcport, *new_fcport, *fcptemp;
+ fc_port_t *fcport, *new_fcport;
int found;
sw_info_t *swl;
@@ -3716,6 +4550,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ unsigned long flags;
rval = QLA_SUCCESS;
@@ -3736,9 +4571,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
swl = NULL;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
- } else if (ql2xiidmaenable &&
- qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
- qla2x00_gpsc(vha, swl);
+ } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
+ swl = NULL;
}
/* If other queries succeeded probe for FC-4 type */
@@ -3800,11 +4634,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
ql_log(ql_log_warn, vha, 0x2064,
"SNS scan failed -- assuming "
"zero-entry result.\n");
- list_for_each_entry_safe(fcport, fcptemp,
- new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
rval = QLA_SUCCESS;
break;
}
@@ -3847,6 +4676,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+
/* Locate matching device in database. */
found = 0;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -3869,7 +4700,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
*/
if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
(atomic_read(&fcport->state) == FCS_ONLINE ||
- !qla_ini_mode_enabled(base_vha))) {
+ (vha->host->active_mode == MODE_TARGET))) {
break;
}
@@ -3889,7 +4720,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
* Log it out if still logged in and mark it for
* relogin later.
*/
- if (!qla_ini_mode_enabled(base_vha)) {
+ if (qla_tgt_mode_enabled(base_vha)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
"port changed FC ID, %8phC"
" old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
@@ -3907,25 +4738,19 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->d_id.b24 = new_fcport->d_id.b24;
fcport->flags |= FCF_LOGIN_NEEDED;
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- (fcport->flags & FCF_ASYNC_SENT) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
- }
-
break;
}
- if (found)
+ if (found) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
continue;
+ }
/* If device was not in our fcports list, then add it. */
new_fcport->scan_state = QLA_FCPORT_FOUND;
- list_add_tail(&new_fcport->list, new_fcports);
+ list_add_tail(&new_fcport->list, &vha->vp_fcports);
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Allocate a new replacement fcport. */
nxt_d_id.b24 = new_fcport->d_id.b24;
@@ -3939,8 +4764,44 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
new_fcport->d_id.b24 = nxt_d_id.b24;
}
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
+ /*
+ * Logout all previous fabric devices marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion_lock
+ (fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
return (rval);
}
@@ -3992,64 +4853,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
return (rval);
}
-/*
- * qla2x00_fabric_dev_login
- * Login fabric target device and update FC port database.
- *
- * Input:
- * ha: adapter state pointer.
- * fcport: port structure list pointer.
- * next_loopid: contains value of a new loop ID that can be used
- * by the next login attempt.
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
-static int
-qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
- uint16_t *next_loopid)
-{
- int rval;
- uint8_t opts;
- struct qla_hw_data *ha = vha->hw;
-
- rval = QLA_SUCCESS;
-
- if (IS_ALOGIO_CAPABLE(ha)) {
- if (fcport->flags & FCF_ASYNC_SENT)
- return rval;
- fcport->flags |= FCF_ASYNC_SENT;
- rval = qla2x00_post_async_login_work(vha, fcport, NULL);
- if (!rval)
- return rval;
- }
-
- fcport->flags &= ~FCF_ASYNC_SENT;
- rval = qla2x00_fabric_login(vha, fcport, next_loopid);
- if (rval == QLA_SUCCESS) {
- /* Send an ADISC to FCP2 devices.*/
- opts = 0;
- if (fcport->flags & FCF_FCP2_DEVICE)
- opts |= BIT_1;
- rval = qla2x00_get_port_database(vha, fcport, opts);
- if (rval != QLA_SUCCESS) {
- ha->isp_ops->fabric_logout(vha, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- } else {
- qla2x00_update_fcport(vha, fcport);
- }
- } else {
- /* Retry Login. */
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- }
-
- return (rval);
-}
/*
* qla2x00_fabric_login
@@ -4341,13 +5144,6 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
- /*
- * Release the target mode FC NEXUS in
- * qla_target.c, if target mod is enabled.
- */
- qlt_fc_port_deleted(vha, fcport,
- base_vha->total_fcport_update_gen);
-
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
@@ -4730,6 +5526,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->chip_reset++;
+
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -4784,8 +5582,6 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
}
-
- ha->chip_reset++;
/* memory barrier */
wmb();
}
@@ -4981,7 +5777,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
if (!status) {
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5209,7 +6004,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
- if (!qla_ini_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha)) {
/* Don't enable full login after initial LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Don't enable LIP full login for initiator */
@@ -5400,6 +6195,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
for (chksum = 0; cnt--; wptr++)
chksum += le32_to_cpu(*wptr);
+
if (chksum) {
ql_dbg(ql_dbg_init, vha, 0x018c,
"Checksum validation failed for primary image (0x%x)\n",
@@ -6412,6 +7208,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
vha->flags.process_response_queue = 1;
}
+ /* enable RIDA Format2 */
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
+ icb->firmware_options_3 |= BIT_0;
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -6536,13 +7336,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
__func__, ha->fw_options[2]);
}
- if (!ql2xetsenable)
- goto out;
+ /* Move PUREX, ABTS RX & RIDA to ATIOQ */
+ if (ql2xmvasynctoatio) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_11;
+ else
+ ha->fw_options[2] &= ~BIT_11;
+ }
+
+ if (ql2xetsenable) {
+ /* Enable ETS Burst. */
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ ha->fw_options[2] |= BIT_9;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
- /* Enable ETS Burst. */
- memset(ha->fw_options, 0, sizeof(ha->fw_options));
- ha->fw_options[2] |= BIT_9;
-out:
qla2x00_set_fw_options(vha, ha->fw_options);
}
@@ -6748,6 +7561,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
memset(qpair, 0, sizeof(struct qla_qpair));
qpair->hw = vha->hw;
+ qpair->vha = vha;
/* Assign available que pair id */
mutex_lock(&ha->mq_lock);
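A condensed sketch of the ql2xmvasynctoatio handling added to qla81xx_update_fw_options() above; it only restates the hunk (BIT_11 and the mode helpers are the ones shown in the diff) and is not additional driver code:

	/* Route PUREX/ABTS-RX/RIDA completions to the ATIO queue when requested. */
	if (ql2xmvasynctoatio) {
		if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;	/* deliver them on the ATIOQ */
		else
			ha->fw_options[2] &= ~BIT_11;	/* keep them on the response queue */
	}
	qla2x00_set_fw_options(vha, ha->fw_options);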
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 44e404583c86..66df6cec59da 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -166,8 +166,8 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
/* Don't print state transitions during initial allocation of fcport */
if (old_state && old_state != state) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
- "FCPort state transitioned from %s to %s - "
- "portid=%02x%02x%02x.\n",
+ "FCPort %8phC state transitioned from %s to %s - "
+ "portid=%02x%02x%02x.\n", fcport->port_name,
port_state_str[old_state], port_state_str[state],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -232,6 +232,7 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
+ sp->vha = qpair->vha;
done:
if (!sp)
QLA_QPAIR_MARK_NOT_BUSY(qpair);
@@ -249,20 +250,20 @@ static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
- struct qla_hw_data *ha = vha->hw;
uint8_t bail;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
- sp = mempool_alloc(ha->srb_mempool, flag);
+ sp = mempool_alloc(vha->hw->srb_mempool, flag);
if (!sp)
goto done;
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
+ sp->vha = vha;
done:
if (!sp)
QLA_VHA_MARK_NOT_BUSY(vha);
@@ -270,10 +271,10 @@ done:
}
static inline void
-qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+qla2x00_rel_sp(srb_t *sp)
{
- mempool_free(sp, vha->hw->srb_mempool);
- QLA_VHA_MARK_NOT_BUSY(vha);
+ QLA_VHA_MARK_NOT_BUSY(sp->vha);
+ mempool_free(sp, sp->vha->hw->srb_mempool);
}
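The reworked qla2x00_rel_sp() now derives the host from the srb itself, and the two statements are reordered so that sp is never touched after it has gone back to the mempool. A generic sketch of the same shape; struct obj, owner_mark_not_busy() and obj_pool are illustrative names, not driver symbols:

	/* Sketch: release an object that carries a back-pointer to its owner.
	 * All reads of 'o' happen before it is returned to the pool; after
	 * mempool_free() the memory may be handed out again at any time. */
	static inline void rel_obj(struct obj *o)
	{
		owner_mark_not_busy(o->owner);		/* o is still ours here */
		mempool_free(o, o->owner->obj_pool);	/* last touch of o */
	}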
static inline void
@@ -285,8 +286,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
- if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
- (sp->type == SRB_FXIOCB_DCMD))
+ if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 58e49a3e1de8..535079280288 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -23,7 +23,7 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
cflags = 0;
@@ -210,7 +210,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
@@ -267,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
@@ -324,7 +324,7 @@ qla2x00_start_scsi(srb_t *sp)
struct rsp_que *rsp;
/* Setup device pointers. */
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
cmd = GET_CMD_SP(sp);
@@ -601,7 +601,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
/* Set transfer direction */
@@ -716,7 +716,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1108,7 +1108,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (sp) {
cmd = GET_CMD_SP(sp);
sgl = scsi_prot_sglist(cmd);
- vha = sp->fcport->vha;
+ vha = sp->vha;
} else if (tc) {
vha = tc->vha;
sgl = tc->prot_sg;
@@ -1215,7 +1215,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Update entry type to indicate Command Type CRC_2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
/* No data transfer */
@@ -1225,7 +1225,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1415,7 +1415,7 @@ qla24xx_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
@@ -1492,7 +1492,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1564,7 +1564,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_crc_2 *cmd_pkt;
uint32_t status = 0;
@@ -2214,13 +2214,13 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
uint16_t opts;
@@ -2238,7 +2238,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
static void
@@ -2247,20 +2247,20 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->tgt_session ||
- !sp->fcport->tgt_session->keep_nport_handle)
+ if (!sp->fcport->se_sess ||
+ !sp->fcport->keep_nport_handle)
logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
@@ -2271,7 +2271,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@@ -2281,13 +2281,13 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
@@ -2302,7 +2302,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
static void
@@ -2338,32 +2338,30 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
static void
-qla2x00_els_dcmd_sp_free(void *ptr, void *data)
+qla2x00_els_dcmd_sp_free(void *data)
{
- struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
kfree(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
elsio->u.els_logo.els_logo_pyld,
elsio->u.els_logo.els_logo_pyld_dma);
del_timer(&elsio->timer);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
+ srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
@@ -2386,12 +2384,12 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
}
static void
-qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
+qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io, vha, 0x3072,
"%s hdl=%x, portid=%02x%02x%02x done\n",
@@ -2449,7 +2447,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
- sp->free(vha, sp);
+ sp->free(sp);
return QLA_FUNCTION_FAILED;
}
@@ -2468,7 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(vha, sp);
+ sp->free(sp);
return QLA_FUNCTION_FAILED;
}
@@ -2479,14 +2477,14 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
wait_for_completion(&elsio->u.els_logo.comp);
- sp->free(vha, sp);
+ sp->free(sp);
return rval;
}
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
els_iocb->entry_type = ELS_IOCB_TYPE;
@@ -2518,7 +2516,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_address[1] = 0;
els_iocb->rx_len = 0;
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2534,7 +2532,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
- els_iocb->vp_index = sp->fcport->vha->vp_idx;
+ els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@@ -2565,7 +2563,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2576,7 +2574,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
@@ -2642,7 +2640,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
}
ct_iocb->entry_count = entry_count;
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2653,7 +2651,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
@@ -2665,7 +2663,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- ct_iocb->vp_index = sp->fcport->vha->vp_idx;
+ ct_iocb->vp_index = sp->vha->vp_idx;
ct_iocb->comp_status = cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@@ -2739,7 +2737,7 @@ qla82xx_start_scsi(srb_t *sp)
uint32_t *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
@@ -2901,7 +2899,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2974,7 +2972,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
@@ -3060,7 +3058,7 @@ static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
@@ -3079,19 +3077,69 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
wmb();
}
+static void
+qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
+{
+ int i, sz;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ mbx->handle = sp->handle;
+ sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
+
+ for (i = 0; i < sz; i++)
+ mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+}
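qla2x00_mb_iocb() copies only as many mailbox registers as both the IOCB and the srb can hold. The bounded-copy idiom in isolation, with dst_mb/src_mb as placeholder arrays rather than driver fields:

	/* dst_mb is an __le16[] in the IOCB, src_mb a u16[] in the srb;
	 * copy min(|dst|, |src|) elements so neither side is overrun. */
	size_t i, n = min(ARRAY_SIZE(dst_mb), ARRAY_SIZE(src_mb));

	for (i = 0; i < n; i++)
		dst_mb[i] = cpu_to_le16(src_mb[i]);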
+
+static void
+qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
+{
+ sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
+ qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
+ ct_pkt->handle = sp->handle;
+}
+
+static void qla2x00_send_notify_ack_iocb(srb_t *sp,
+ struct nack_to_isp *nack)
+{
+ struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
+
+ nack->entry_type = NOTIFY_ACK_TYPE;
+ nack->entry_count = 1;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.handle = sp->handle;
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = 0;
+ nack->u.isp24.srr_reject_code = 0;
+ nack->u.isp24.srr_reject_code_expl = 0;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+}
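qla2x00_send_notify_ack_iocb() mirrors the relevant fields of the queued immediate-notify into the NOTIFY ACK IOCB. The matching srb types (SRB_NACK_PLOGI, SRB_NACK_PRLI, SRB_NACK_LOGO) are wired into qla2x00_start_sp() below, and completions return through the NOTIFY_ACK_TYPE case added to the response-queue handler in qla_isr.c, which hands QLA_TGT_SKIP_HANDLE entries to qlt_response_pkt_all_vps() and everything else to qla24xxx_nack_iocb_entry().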
+
int
qla2x00_start_sp(srb_t *sp)
{
int rval;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ scsi_qla_host_t *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
void *pkt;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
- pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
+ pkt = qla2x00_alloc_iocbs(vha, sp);
if (!pkt) {
- ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
+ ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
goto done;
}
@@ -3139,12 +3187,23 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_DCMD:
qla24xx_els_logo_iocb(sp, pkt);
break;
+ case SRB_CT_PTHRU_CMD:
+ qla2x00_ctpthru_cmd_iocb(sp, pkt);
+ break;
+ case SRB_MB_IOCB:
+ qla2x00_mb_iocb(sp, pkt);
+ break;
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ qla2x00_send_notify_ack_iocb(sp, pkt);
+ break;
default:
break;
}
wmb();
- qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
+ qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index edc2264db45b..352cfb6292c6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -561,14 +561,50 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
return ret;
}
-static inline fc_port_t *
+fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
- fc_port_t *fcport;
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
+ if (f->loop_id == loop_id)
+ return f;
+ return NULL;
+}
- list_for_each_entry(fcport, &vha->vp_fcports, list)
- if (fcport->loop_id == loop_id)
- return fcport;
+fc_port_t *
+qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
+{
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
+ if (incl_deleted)
+ return f;
+ else if (f->deleted == 0)
+ return f;
+ }
+ }
+ return NULL;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
+ u8 incl_deleted)
+{
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (f->d_id.b24 == id->b24) {
+ if (incl_deleted)
+ return f;
+ else if (f->deleted == 0)
+ return f;
+ }
+ }
return NULL;
}
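The lookup helpers above walk vha->vp_fcports and match on loop id, WWPN or N_Port ID, optionally skipping ports already flagged deleted. A usage sketch only; the wrapper function and the lock taken around the call are illustrative, not a documented requirement of the helpers:

	/* Sketch: resolve a live fcport by WWPN before queueing more work. */
	static fc_port_t *lookup_live_port(scsi_qla_host_t *vha, u8 *wwpn)
	{
		fc_port_t *fcport;
		unsigned long flags;

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport = qla2x00_find_fcport_by_wwpn(vha, wwpn, 0 /* skip deleted */);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return fcport;
	}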
@@ -934,7 +970,11 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x508a,
"Marking port lost loopid=%04x portid=%06x.\n",
fcport->loop_id, fcport->d_id.b24);
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ if (qla_ini_mode_enabled(vha)) {
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ fcport->logout_on_delete = 0;
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
break;
global_port_update:
@@ -985,9 +1025,6 @@ global_port_update:
qla2x00_mark_all_devices_lost(vha, 1);
- if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
- set_bit(SCR_PENDING, &vha->dpc_flags);
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
@@ -1024,27 +1061,19 @@ global_port_update:
if (qla2x00_is_a_vp_did(vha, rscn_entry))
break;
- /*
- * Search for the rport related to this RSCN entry and mark it
- * as lost.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) != FCS_ONLINE)
- continue;
- if (fcport->d_id.b24 == rscn_entry) {
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- break;
- }
- }
-
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
-
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(RSCN_UPDATE, &vha->dpc_flags);
- qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+ {
+ struct event_arg ea;
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id.b24 = rscn_entry;
+ ea.id.b.rsvd_1 = rscn_entry >> 24;
+ qla2x00_fcport_event_handler(vha, &ea);
+ qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+ }
break;
-
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
ql_dbg(ql_dbg_async, vha, 0x5015,
@@ -1212,7 +1241,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
req->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
- sp->done(ha, sp, DID_OK << 16);
+ sp->done(sp, DID_OK << 16);
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
@@ -1235,7 +1264,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
- "Invalid command index (%x).\n", index);
+ "Invalid command index (%x) type %8ph.\n",
+ index, iocb);
if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
@@ -1343,66 +1373,122 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb7));
logio_done:
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
-qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
- sts_entry_t *pkt, int iocb_type)
+qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mbx_24xx_entry *pkt)
{
- const char func[] = "CT_IOCB";
- const char *type;
+ const char func[] = "MBX-IOCB2";
srb_t *sp;
- struct bsg_job *bsg_job;
- struct fc_bsg_reply *bsg_reply;
- uint16_t comp_status;
+ struct srb_iocb *si;
+ u16 sz, i;
int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- bsg_job = sp->u.bsg_job;
- bsg_reply = bsg_job->reply;
+ si = &sp->u.iocb_cmd;
+ sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
- type = "ct pass-through";
+ for (i = 0; i < sz; i++)
+ si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
- comp_status = le16_to_cpu(pkt->comp_status);
+ res = (si->u.mbx.in_mb[0] & MBS_MASK);
- /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
- * fc payload to the caller
- */
- bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ sp->done(sp, res);
+}
- if (comp_status != CS_COMPLETE) {
- if (comp_status == CS_DATA_UNDERRUN) {
- res = DID_OK << 16;
- bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+static void
+qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct nack_to_isp *pkt)
+{
+ const char func[] = "nack";
+ srb_t *sp;
+ int res = 0;
- ql_log(ql_log_warn, vha, 0x5048,
- "CT pass-through-%s error "
- "comp_status-status=0x%x total_byte = 0x%x.\n",
- type, comp_status,
- bsg_reply->reply_payload_rcv_len);
- } else {
- ql_log(ql_log_warn, vha, 0x5049,
- "CT pass-through-%s error "
- "comp_status-status=0x%x.\n", type, comp_status);
- res = DID_ERROR << 16;
- bsg_reply->reply_payload_rcv_len = 0;
- }
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
- (uint8_t *)pkt, sizeof(*pkt));
- } else {
- res = DID_OK << 16;
- bsg_reply->reply_payload_rcv_len =
- bsg_job->reply_payload.payload_len;
- bsg_job->reply_len = 0;
- }
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
+ res = QLA_FUNCTION_FAILED;
- sp->done(vha, sp, res);
+ sp->done(sp, res);
+}
+
+static void
+qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ sts_entry_t *pkt, int iocb_type)
+{
+ const char func[] = "CT_IOCB";
+ const char *type;
+ srb_t *sp;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
+ uint16_t comp_status;
+ int res = 0;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ switch (sp->type) {
+ case SRB_CT_CMD:
+ bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
+
+ type = "ct pass-through";
+
+ comp_status = le16_to_cpu(pkt->comp_status);
+
+ /*
+ * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
+ le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+
+ ql_log(ql_log_warn, vha, 0x5048,
+ "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
+ type, comp_status,
+ bsg_reply->reply_payload_rcv_len);
+ } else {
+ ql_log(ql_log_warn, vha, 0x5049,
+ "CT pass-through-%s error comp_status=0x%x.\n",
+ type, comp_status);
+ res = DID_ERROR << 16;
+ bsg_reply->reply_payload_rcv_len = 0;
+ }
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
+ (uint8_t *)pkt, sizeof(*pkt));
+ } else {
+ res = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+ break;
+ case SRB_CT_PTHRU_CMD:
+ /*
+ * borrowing sts_entry_24xx.comp_status.
+ * same location as ct_entry_24xx.comp_status
+ */
+ res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->name);
+ break;
+ }
+
+ sp->done(sp, res);
}
static void
@@ -1438,7 +1524,16 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "Driver ELS logo";
ql_dbg(ql_dbg_user, vha, 0x5047,
"Completing %s: (%p) type=%d.\n", type, sp, sp->type);
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
+ return;
+ case SRB_CT_PTHRU_CMD:
+		/*
+		 * borrowing sts_entry_24xx.comp_status.
+		 * same location as ct_entry_24xx.comp_status
+		 */
+ res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->name);
+ sp->done(sp, res);
return;
default:
ql_dbg(ql_dbg_user, vha, 0x503e,
@@ -1496,7 +1591,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply_len = 0;
}
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
static void
@@ -1543,6 +1638,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
+ vha->hw->exch_starvation = 0;
data[0] = MBS_COMMAND_COMPLETE;
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
@@ -1568,6 +1664,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
iop[0] = le32_to_cpu(logio->io_parameter[0]);
iop[1] = le32_to_cpu(logio->io_parameter[1]);
+ lio->u.logio.iop[0] = iop[0];
+ lio->u.logio.iop[1] = iop[1];
switch (iop[0]) {
case LSC_SCODE_PORTID_USED:
data[0] = MBS_PORT_ID_USED;
@@ -1576,6 +1674,21 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Exchange starvation. Resetting RISC\n");
+
+ vha->hw->exch_starvation = 0;
+
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+			/* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1590,7 +1703,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[1]));
logio_done:
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -1640,7 +1753,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
(uint8_t *)sts, sizeof(*sts));
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
/**
@@ -1728,7 +1841,7 @@ static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp, int res)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cp = GET_CMD_SP(sp);
uint32_t track_sense_len;
@@ -1756,7 +1869,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
"Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ sp->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
cp->sense_buffer, sense_len);
@@ -1778,7 +1891,7 @@ struct scsi_dif_tuple {
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t *ap = &sts24->data[12];
uint8_t *ep = &sts24->data[20];
@@ -2043,7 +2156,7 @@ done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
- sp->done(vha, sp, (DID_OK << 6));
+ sp->done(sp, DID_OK << 6);
}
@@ -2076,6 +2189,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
+ uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2336,6 +2450,7 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
+ no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2358,14 +2473,21 @@ check_scsi_status:
break;
}
- ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
- "Port to be marked lost on fcport=%02x%02x%02x, current "
- "port state= %s.\n", fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
- port_state_str[atomic_read(&fcport->state)]);
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+ "Port to be marked lost on fcport=%02x%02x%02x, current "
+ "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ port_state_str[atomic_read(&fcport->state)],
+ comp_status);
+
+ if (no_logout)
+ fcport->logout_on_delete = 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
+
break;
case CS_ABORTED:
@@ -2407,7 +2529,7 @@ out:
resid_len, fw_resid_len, sp, cp);
if (rsp->status_srb == NULL)
- sp->done(ha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2464,7 +2586,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sense_len == 0) {
rsp->status_srb = NULL;
- sp->done(ha, sp, cp->result);
+ sp->done(sp, cp->result);
}
}
@@ -2500,7 +2622,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- sp->done(ha, sp, res);
+ sp->done(sp, res);
return;
}
fatal:
@@ -2558,7 +2680,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
/**
@@ -2629,10 +2751,16 @@ process_err:
}
case ABTS_RESP_24XX:
case CTIO_TYPE7:
- case NOTIFY_ACK_TYPE:
case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
break;
+ case NOTIFY_ACK_TYPE:
+ if (pkt->handle == QLA_TGT_SKIP_HANDLE)
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ else
+ qla24xxx_nack_iocb_entry(vha, rsp->req,
+ (struct nack_to_isp *)pkt);
+ break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@@ -2642,6 +2770,10 @@ process_err:
qla24xx_abort_iocb_entry(vha, rsp->req,
(struct abort_entry_24xx *)pkt);
break;
+ case MBX_IOCB_TYPE:
+ qla24xx_mbx_iocb_entry(vha, rsp->req,
+ (struct mbx_24xx_entry *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2658,8 +2790,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
- } else
+ } else {
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ }
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 67f64db390b0..35079f417417 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1637,94 +1637,6 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
return rval;
}
-/*
- * qla2x00_get_node_name_list
- * Issue get node name list mailbox command, kmalloc()
- * and return the resulting list. Caller must kfree() it!
- *
- * Input:
- * ha = adapter state pointer.
- * out_data = resulting list
- * out_len = length of the resulting list
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
-{
- struct qla_hw_data *ha = vha->hw;
- struct qla_port_24xx_data *list = NULL;
- void *pmap;
- mbx_cmd_t mc;
- dma_addr_t pmap_dma;
- ulong dma_size;
- int rval, left;
-
- left = 1;
- while (left > 0) {
- dma_size = left * sizeof(*list);
- pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
- &pmap_dma, GFP_KERNEL);
- if (!pmap) {
- ql_log(ql_log_warn, vha, 0x113f,
- "%s(%ld): DMA Alloc failed of %ld\n",
- __func__, vha->host_no, dma_size);
- rval = QLA_MEMORY_ALLOC_FAILED;
- goto out;
- }
-
- mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
- mc.mb[1] = BIT_1 | BIT_3;
- mc.mb[2] = MSW(pmap_dma);
- mc.mb[3] = LSW(pmap_dma);
- mc.mb[6] = MSW(MSD(pmap_dma));
- mc.mb[7] = LSW(MSD(pmap_dma));
- mc.mb[8] = dma_size;
- mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
- mc.in_mb = MBX_0|MBX_1;
- mc.tov = 30;
- mc.flags = MBX_DMA_IN;
-
- rval = qla2x00_mailbox_command(vha, &mc);
- if (rval != QLA_SUCCESS) {
- if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
- (mc.mb[1] == 0xA)) {
- left += le16_to_cpu(mc.mb[2]) /
- sizeof(struct qla_port_24xx_data);
- goto restart;
- }
- goto out_free;
- }
-
- left = 0;
-
- list = kmemdup(pmap, dma_size, GFP_KERNEL);
- if (!list) {
- ql_log(ql_log_warn, vha, 0x1140,
- "%s(%ld): failed to allocate node names list "
- "structure.\n", __func__, vha->host_no);
- rval = QLA_MEMORY_ALLOC_FAILED;
- goto out_free;
- }
-
-restart:
- dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
- }
-
- *out_data = list;
- *out_len = dma_size;
-
-out:
- return rval;
-
-out_free:
- dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
- return rval;
-}
/*
* qla2x00_get_port_database
@@ -3687,10 +3599,8 @@ void
qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct vp_rpt_id_entry_24xx *rptid_entry)
{
- uint8_t vp_idx;
- uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *vp;
+ scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
@@ -3701,80 +3611,124 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
if (rptid_entry->format == 0) {
+ /* loop */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
- "VPs acquired %d.\n",
- MSB(le16_to_cpu(rptid_entry->vp_count)),
- LSB(le16_to_cpu(rptid_entry->vp_count)));
+ "VPs acquired %d.\n", rptid_entry->vp_setup,
+ rptid_entry->vp_acquired);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else if (rptid_entry->format == 1) {
- vp_idx = LSB(stat);
+ /* fabric */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
- "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
+ "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
+ rptid_entry->vp_status,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
/* buffer to buffer credit flag */
- vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0;
-
- /* FA-WWN is only for physical port */
- if (!vp_idx) {
- void *wwpn = ha->init_cb->port_name;
+ vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
+
+ if (rptid_entry->vp_idx == 0) {
+ if (rptid_entry->vp_status == VP_STAT_COMPL) {
+ /* FA-WWN is only for physical port */
+ if (qla_ini_mode_enabled(vha) &&
+ ha->flags.fawwpn_enabled &&
+ (rptid_entry->u.f1.flags &
+ VP_FLAGS_NAME_VALID)) {
+ memcpy(vha->port_name,
+ rptid_entry->u.f1.port_name,
+ WWN_SIZE);
+ }
- if (!MSB(stat)) {
- if (rptid_entry->vp_idx_map[1] & BIT_6)
- wwpn = rptid_entry->reserved_4 + 8;
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
- ql_dbg(ql_dbg_mbx, vha, 0x1018,
- "FA-WWN portname %016llx (%x)\n",
- fc_host_port_name(vha->host), MSB(stat));
- }
-
- vp = vha;
- if (vp_idx == 0)
- goto reg_needed;
- if (MSB(stat) != 0 && MSB(stat) != 2) {
- ql_dbg(ql_dbg_mbx, vha, 0x10ba,
- "Could not acquire ID for VP[%d].\n", vp_idx);
- return;
- }
+ if (qla_ini_mode_enabled(vha))
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
+ "FA-WWN portname %016llx (%x)\n",
+ fc_host_port_name(vha->host),
+ rptid_entry->vp_status);
- found = 0;
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
- if (vp_idx == vp->vp_idx) {
- found = 1;
- break;
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ } else {
+ if (rptid_entry->vp_status != VP_STAT_COMPL &&
+ rptid_entry->vp_status != VP_STAT_ID_CHG) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+ "Could not acquire ID for VP[%d].\n",
+ rptid_entry->vp_idx);
+ return;
}
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!found)
- return;
+ found = 0;
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (rptid_entry->vp_idx == vp->vp_idx) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
+ if (!found)
+ return;
- /*
- * Cannot configure here as we are still sitting on the
- * response queue. Handle it in dpc context.
- */
- set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+ vp->d_id.b.domain = rptid_entry->port_id[2];
+ vp->d_id.b.area = rptid_entry->port_id[1];
+ vp->d_id.b.al_pa = rptid_entry->port_id[0];
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vp, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
-reg_needed:
- set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+ /*
+ * Cannot configure here as we are still sitting on the
+ * response queue. Handle it in dpc context.
+ */
+ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+ }
set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ } else if (rptid_entry->format == 2) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
+
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "N2N: Remote WWPN %8phC.\n",
+ rptid_entry->u.f2.port_name);
+
+ /* N2N. direct connect */
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
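The new format == 2 branch pairs with the earlier qla81xx_nvram_config() hunk that enables RIDA Format 2; the two sides of that contract, condensed from the hunks above for reference:

	/* Request side (qla81xx_nvram_config): ask firmware for RIDA Format 2
	 * when target or dual mode is active. */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		icb->firmware_options_3 |= BIT_0;

	/* Report side (qla24xx_report_id_acquisition, format == 2): the entry
	 * carries the local N2N port id plus the remote WWPN in
	 * rptid_entry->u.f2.port_name. */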
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 96c33e292eba..10b742d27e16 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1789,16 +1789,16 @@ qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
}
static void
-qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+qla2x00_fxdisc_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1999,7 +1999,7 @@ done_unmap_req:
dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -2127,7 +2127,7 @@ static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp, int res)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cp = GET_CMD_SP(sp);
uint32_t track_sense_len;
@@ -2162,7 +2162,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
"Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ sp->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
cp->sense_buffer, sense_len);
@@ -2181,7 +2181,7 @@ qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
(sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
tmf->u.tmf.comp_status = cpstatus;
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -2198,7 +2198,7 @@ qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = pkt->tgt_id_sts;
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -2264,7 +2264,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2537,7 +2537,7 @@ check_scsi_status:
par_sense_len, rsp_info_len);
if (rsp->status_srb == NULL)
- sp->done(ha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2614,7 +2614,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sense_len == 0) {
rsp->status_srb = NULL;
- sp->done(ha, sp, cp->result);
+ sp->done(sp, cp->result);
}
}
@@ -2695,7 +2695,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- sp->done(ha, sp, res);
+ sp->done(sp, res);
return;
}
@@ -2997,7 +2997,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt;
- vha = sp->fcport->vha;
+ vha = sp->vha;
req = vha->req;
cmd = GET_CMD_SP(sp);
@@ -3081,7 +3081,7 @@ qlafx00_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_7_fx00 *cmd_pkt;
struct cmd_type_7_fx00 lcmd_pkt;
@@ -3205,7 +3205,7 @@ void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
struct tsk_mgmt_entry_fx00 tm_iocb;
struct scsi_lun llun;
@@ -3232,7 +3232,7 @@ void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
struct abort_iocb_entry_fx00 abt_iocb;
@@ -3346,8 +3346,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(
- sp->fcport->vha->req,
- &lcont_pkt);
+ sp->vha->req, &lcont_pkt);
cur_dsd = (__le32 *)
lcont_pkt.dseg_0_address;
avail_dsds = 5;
@@ -3368,7 +3367,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(
ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3042,
+ sp->vha, 0x3042,
(uint8_t *)&lcont_pkt,
REQUEST_ENTRY_SIZE);
}
@@ -3377,7 +3376,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memcpy_toio((void __iomem *)cont_pkt,
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3043,
+ sp->vha, 0x3043,
(uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
}
}
@@ -3409,8 +3408,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(
- sp->fcport->vha->req,
- &lcont_pkt);
+ sp->vha->req, &lcont_pkt);
cur_dsd = (__le32 *)
lcont_pkt.dseg_0_address;
avail_dsds = 5;
@@ -3431,7 +3429,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
ql_dump_buffer(
ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3045,
+ sp->vha, 0x3045,
(uint8_t *)&lcont_pkt,
REQUEST_ENTRY_SIZE);
}
@@ -3440,7 +3438,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memcpy_toio((void __iomem *)cont_pkt,
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3046,
+ sp->vha, 0x3046,
(uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
}
}
@@ -3452,7 +3450,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
}
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3047,
+ sp->vha, 0x3047,
(uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d01c90c7dd04..bbf1ad956251 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -237,6 +237,13 @@ MODULE_PARM_DESC(ql2xfwholdabts,
"0 (Default) Do not set fw option. "
"1 - Set fw option to hold ABTS.");
+int ql2xmvasynctoatio = 1;
+module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xmvasynctoatio,
+ "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ"
+ "0 (Default). Do not move IOCBs"
+ "1 - Move IOCBs.");
+
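Being declared with module_param(..., int, S_IRUGO|S_IWUSR), the new knob follows the usual module-parameter conventions: it can be set at load time, for example with modprobe qla2xxx ql2xmvasynctoatio=0, and, because of S_IWUSR, changed later through /sys/module/qla2xxx/parameters/ql2xmvasynctoatio. Whether a runtime change takes effect still depends on the firmware options being re-sent by the update-fw-options paths shown in qla_init.c above.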
/*
* SCSI host template entry points
*/
@@ -607,11 +614,11 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
}
void
-qla2x00_sp_free_dma(void *vha, void *ptr)
+qla2x00_sp_free_dma(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct qla_hw_data *ha = sp->vha->hw;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
void *ctx = GET_CMD_CTX_SP(sp);
if (sp->flags & SRB_DMA_VALID) {
@@ -650,20 +657,19 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- qla2x00_rel_sp(sp->fcport->vha, sp);
+ qla2x00_rel_sp(sp);
}
void
-qla2x00_sp_compl(void *data, void *ptr, int res)
+qla2x00_sp_compl(void *ptr, int res)
{
- struct qla_hw_data *ha = (struct qla_hw_data *)data;
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cmd->result = res;
if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ ql_dbg(ql_dbg_io, sp->vha, 0x3015,
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
sp, GET_CMD_SP(sp));
if (ql2xextended_error_logging & ql_dbg_io)
@@ -673,12 +679,12 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
if (!atomic_dec_and_test(&sp->ref_count))
return;
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
cmd->scsi_done(cmd);
}
void
-qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+qla2xxx_qpair_sp_free_dma(void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -724,9 +730,9 @@ qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
}
void
-qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+qla2xxx_qpair_sp_compl(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cmd->result = res;
@@ -742,7 +748,7 @@ qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
if (!atomic_dec_and_test(&sp->ref_count))
return;
- qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ qla2xxx_qpair_sp_free_dma(sp);
cmd->scsi_done(cmd);
}
@@ -863,7 +869,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -952,7 +958,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
return 0;
qc24_host_busy_free_sp:
- qla2xxx_qpair_sp_free_dma(vha, sp);
+ qla2xxx_qpair_sp_free_dma(sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1044,6 +1050,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
return (return_status);
}
+static inline int test_fcport_count(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ int res;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+ "tgt %p, fcport_count=%d\n",
+ vha, vha->fcport_count);
+ res = (vha->fcport_count == 0);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return res;
+}
+
+/*
+ * qla2x00_wait_for_sess_deletion can only be called from remove_one.
+ * it has dependency on UNLOADING flag to stop device discovery
+ */
+static void
+qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+{
+ qla2x00_mark_all_devices_lost(vha, 0);
+
+ wait_event(vha->fcport_waitQ, test_fcport_count(vha));
+}
+
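qla2x00_wait_for_sess_deletion() marks every device lost and then parks on fcport_waitQ until fcport_count drops to zero, so remove_one does not proceed while sessions are still being torn down. The generic shape of that pattern, with struct disc_host and its fields as illustrative stand-ins for the driver types:

	#include <linux/wait.h>

	struct disc_host {
		wait_queue_head_t waitq;
		int port_count;			/* updated under a driver lock */
	};

	static bool no_ports_left(struct disc_host *h)
	{
		return READ_ONCE(h->port_count) == 0;
	}

	/* Caller side: sleep until the last port is gone. */
	static void wait_for_teardown(struct disc_host *h)
	{
		wait_event(h->waitq, no_ports_left(h));
	}

	/* Release side: run once per freed port. */
	static void put_port(struct disc_host *h)
	{
		if (--h->port_count == 0)
			wake_up(&h->waitq);
	}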
/*
* qla2x00_wait_for_hba_ready
* Wait till the HBA is ready before doing driver unload
@@ -1204,7 +1238,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- sp->done(ha, sp, 0);
+ sp->done(sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@@ -1249,7 +1283,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
continue;
if (sp->type != SRB_SCSI_CMD)
continue;
- if (vha->vp_idx != sp->fcport->vha->vp_idx)
+ if (vha->vp_idx != sp->vha->vp_idx)
continue;
match = 0;
cmd = GET_CMD_SP(sp);
@@ -1629,7 +1663,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
spin_lock_irqsave(&ha->hardware_lock, flags);
}
req->outstanding_cmds[cnt] = NULL;
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
}
}
@@ -3124,7 +3158,8 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
- if (qla_ini_mode_enabled(base_vha))
+ if (qla_ini_mode_enabled(base_vha) ||
+ qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
ql_dbg(ql_dbg_init, base_vha, 0x0122,
@@ -3373,21 +3408,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
* resources.
*/
if (!atomic_read(&pdev->enable_cnt)) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+
scsi_host_put(base_vha->host);
kfree(ha);
pci_set_drvdata(pdev, NULL);
return;
}
-
qla2x00_wait_for_hba_ready(base_vha);
- /* if UNLOAD flag is already set, then continue unload,
+ /*
+ * if UNLOAD flag is already set, then continue unload,
* where it was set first.
*/
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
+ dma_free_coherent(&ha->pdev->dev,
+ base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3536,10 +3576,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2xxx_wake_dpc(base_vha);
} else {
int now;
- if (rport)
+ if (rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phN. rport %p roles %x \n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
+ }
qlt_do_generation_tick(vha, &now);
- qlt_fc_port_deleted(vha, fcport, now);
}
}
@@ -3582,7 +3626,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->login_retry = vha->hw->login_retry_count;
ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
fcport->port_name, fcport->loop_id, fcport->login_retry);
}
}
@@ -3605,7 +3649,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
fc_port_t *fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Mark all dev lost\n");
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion_lock(fcport);
+
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4195,10 +4245,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
struct scsi_qla_host *vha = NULL;
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
- if (host == NULL) {
+ if (!host) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
"Failed to allocate host from the scsi layer, aborting.\n");
- goto fail;
+ return NULL;
}
/* Clear our data area */
@@ -4217,9 +4267,22 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
+ INIT_LIST_HEAD(&vha->gnl.fcports);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
+ init_waitqueue_head(&vha->fcport_waitQ);
+
+ vha->gnl.size = sizeof(struct get_name_list_extended) *
+ (ha->max_loop_id + 1);
+ vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
+ vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
+ if (!vha->gnl.l) {
+ ql_log(ql_log_fatal, vha, 0xffff,
+ "Alloc failed for name list.\n");
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
@@ -4228,12 +4291,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
dev_name(&(ha->pdev->dev)));
return vha;
-
-fail:
- return vha;
}
-static struct qla_work_evt *
+struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
@@ -4255,7 +4315,7 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
return e;
}
-static int
+int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
@@ -4316,7 +4376,6 @@ int qla2x00_post_async_##name##_work( \
}
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
-qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
@@ -4369,6 +4428,67 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
+int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
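+
qla24xx_post_upd_fcport_work() above follows the driver's usual deferral shape: allocate an event, attach the fcport, and post it so the heavy-weight qla2x00_update_fcport() runs later in process context (see the QLA_EVT_UPD_FCPORT case further down in qla2x00_do_work()). The driver keeps its own DPC event list rather than a workqueue, but the same allocate/fill/post shape, expressed with the stock workqueue API and hypothetical names, looks roughly like this:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct upd_evt {                        /* hypothetical payload */
        struct work_struct work;
        int port_index;
};

static void upd_evt_fn(struct work_struct *work)
{
        struct upd_evt *e = container_of(work, struct upd_evt, work);

        /* heavy-weight port update would run here, in sleepable context */
        kfree(e);
}

static int post_upd_evt(int port_index) /* callable from atomic context */
{
        struct upd_evt *e = kzalloc(sizeof(*e), GFP_ATOMIC);

        if (!e)
                return -ENOMEM;
        e->port_index = port_index;
        INIT_WORK(&e->work, upd_evt_fn);
        schedule_work(&e->work);
        return 0;
}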
+
+static
+void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ unsigned long flags;
+ fc_port_t *fcport = NULL;
+ struct qlt_plogi_ack_t *pla =
+ (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
+ if (fcport) {
+ fcport->d_id = e->u.new_sess.id;
+ if (pla) {
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
+ /* we took an extra ref_count to prevent PLOGI ACK when
+ * fcport/sess has not been created.
+ */
+ pla->ref_count--;
+ }
+ } else {
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (fcport) {
+ fcport->d_id = e->u.new_sess.id;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+
+ memcpy(fcport->port_name, e->u.new_sess.port_name,
+ WWN_SIZE);
+ list_add_tail(&fcport->list, &vha->vp_fcports);
+
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ if (pla)
+ qlt_plogi_ack_unref(vha, pla);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4395,10 +4515,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_login(vha, e->u.logio.fcport,
e->u.logio.data);
break;
- case QLA_EVT_ASYNC_LOGIN_DONE:
- qla2x00_async_login_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_ASYNC_LOGOUT:
qla2x00_async_logout(vha, e->u.logio.fcport);
break;
@@ -4420,6 +4536,34 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
+ case QLA_EVT_GIDPN:
+ qla24xx_async_gidpn(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GPNID:
+ qla24xx_async_gpnid(vha, &e->u.gpnid.id);
+ break;
+ case QLA_EVT_GPNID_DONE:
+ qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_NEW_SESS:
+ qla24xx_create_new_sess(vha, e);
+ break;
+ case QLA_EVT_GPDB:
+ qla24xx_async_gpdb(vha, e->u.fcport.fcport,
+ e->u.fcport.opt);
+ break;
+ case QLA_EVT_GPSC:
+ qla24xx_async_gpsc(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_UPD_FCPORT:
+ qla2x00_update_fcport(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GNL:
+ qla24xx_async_gnl(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_NACK:
+ qla24xx_do_nack_work(vha, e);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4436,9 +4580,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
{
fc_port_t *fcport;
int status;
- uint16_t next_loopid = 0;
- struct qla_hw_data *ha = vha->hw;
- uint16_t data[2];
+ struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
/*
@@ -4449,77 +4591,38 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
- if (fcport->flags & FCF_FCP2_DEVICE)
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid =
- ha->min_external_loopid;
- status = qla2x00_find_new_loop_id(
- vha, fcport);
- if (status != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
- }
-
- if (IS_ALOGIO_CAPABLE(ha)) {
- fcport->flags |= FCF_ASYNC_SENT;
- data[0] = 0;
- data[1] = QLA_LOGIO_LOGIN_RETRIED;
- status = qla2x00_post_async_login_work(
- vha, fcport, data);
- if (status == QLA_SUCCESS)
- continue;
- /* Attempt a retry. */
- status = 1;
- } else {
- status = qla2x00_fabric_login(vha,
- fcport, &next_loopid);
- if (status == QLA_SUCCESS) {
- int status2;
- uint8_t opts;
-
- opts = 0;
- if (fcport->flags &
- FCF_FCP2_DEVICE)
- opts |= BIT_1;
- status2 =
- qla2x00_get_port_database(
- vha, fcport, opts);
- if (status2 != QLA_SUCCESS)
- status = 1;
- }
- }
- } else
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC DS %d LS %d\n", __func__,
+ fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RELOGIN;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else {
status = qla2x00_local_device_login(vha,
fcport);
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id = fcport->loop_id;
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+ qla2x00_update_fcport(vha, fcport);
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry,
+ fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
-
- ql_dbg(ql_dbg_disc, vha, 0x2003,
- "Port login OK: logged in ID 0x%x.\n",
- fcport->loop_id);
-
- qla2x00_update_fcport(vha, fcport);
-
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- /* retry the login again */
- ql_dbg(ql_dbg_disc, vha, 0x2007,
- "Retrying %d login again loop_id 0x%x.\n",
- fcport->login_retry, fcport->loop_id);
- } else {
- fcport->login_retry = 0;
+ if (fcport->login_retry == 0 &&
+ status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
}
-
- if (fcport->login_retry == 0 && status != QLA_SUCCESS)
- qla2x00_clear_loop_id(fcport);
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -5183,7 +5286,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /* if UNLOAD flag is already set, then continue unload,
+ /*
+ * if UNLOAD flag is already set, then continue unload,
* where it was set first.
*/
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -5192,6 +5296,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
+ qla2x00_wait_for_sess_deletion(base_vha);
+
set_bit(UNLOADING, &base_vha->dpc_flags);
qla2x00_delete_all_vps(ha, base_vha);
@@ -5410,16 +5516,6 @@ qla2x00_do_dpc(void *data)
qla2x00_update_fcports(base_vha);
}
- if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
- int ret;
- ret = qla2x00_send_change_request(base_vha, 0x3, 0);
- if (ret != QLA_SUCCESS)
- ql_log(ql_log_warn, base_vha, 0x121,
- "Failed to enable receiving of RSCN "
- "requests: 0x%x.\n", ret);
- clear_bit(SCR_PENDING, &base_vha->dpc_flags);
- }
-
if (IS_QLAFX00(ha))
goto loop_resync_check;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e4fda84b959e..45f5077684f0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -55,8 +55,17 @@ MODULE_PARM_DESC(qlini_mode,
"disabled on enabling target mode and then on disabling target mode "
"enabled back; "
"\"disabled\" - initiator mode will never be enabled; "
+	"\"dual\" - initiator mode will be enabled, and target mode can be "
+	"activated when ready; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
+static int ql_dm_tgt_ex_pct = 50;
+module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
+ "For Dual Mode (qlini_mode=dual), this parameter determines "
+ "the percentage of exchanges/cmds FW will allocate resources "
+ "for Target mode.");
+
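ql_dm_tgt_ex_pct is a plain percentage split of whatever exchange/command resources the firmware exposes in dual mode; the absolute pool size is firmware-dependent and not shown in this patch. A purely illustrative calculation (hypothetical numbers):

#include <linux/types.h>

/* hypothetical: the real pool size comes from the firmware */
static u32 dual_mode_tgt_exchanges(u32 total_exchanges, u32 tgt_pct)
{
        return total_exchanges * tgt_pct / 100; /* 4096 * 50 / 100 = 2048 */
}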
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
static int temp_sam_status = SAM_STAT_BUSY;
@@ -102,12 +111,10 @@ enum fcp_resp_rsp_codes {
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
-static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
-static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
- struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
@@ -120,6 +127,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *imm, int ha_locked);
+static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
+ fc_port_t *fcport, bool local);
+void qlt_unreg_sess(struct fc_port *sess);
/*
* Global Variables
*/
@@ -140,21 +150,6 @@ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
wmb();
}
-/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-static struct qla_tgt_sess *qlt_find_sess_by_port_name(
- struct qla_tgt *tgt,
- const uint8_t *port_name)
-{
- struct qla_tgt_sess *sess;
-
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
- if (!memcmp(sess->port_name, port_name, WWN_SIZE))
- return sess;
- }
-
- return NULL;
-}
-
 /* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
@@ -229,6 +224,105 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
+
+static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
+ struct atio_from_isp *atio, uint8_t ha_locked)
+{
+ struct qla_tgt_sess_op *u;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "qla_target(%d): dropping unknown ATIO_TYPE7, "
+ "because tgt is being stopped", vha->vp_idx);
+ goto out_term;
+ }
+
+ u = kzalloc(sizeof(*u), GFP_ATOMIC);
+ if (u == NULL) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u));
+		/* This should be harmless; the next retry should succeed. */
+ goto out_term;
+ }
+
+ u->vha = vha;
+ memcpy(&u->atio, atio, sizeof(*atio));
+ INIT_LIST_HEAD(&u->cmd_list);
+
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ schedule_delayed_work(&vha->unknown_atio_work, 1);
+
+out:
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
+ goto out;
+}
+
+static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
+ uint8_t ha_locked)
+{
+ struct qla_tgt_sess_op *u, *t;
+ scsi_qla_host_t *host;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+ uint8_t queued = 0;
+
+ list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
+ if (u->aborted) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Freeing unknown %s %p, because of Abort",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha, NULL, &u->atio,
+ ha_locked, 0);
+ goto abort;
+ }
+
+ host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ if (host != NULL) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Requeuing unknown ATIO_TYPE7 %p", u);
+ qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
+ } else if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Freeing unknown %s %p, because tgt is being stopped",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha, NULL, &u->atio,
+ ha_locked, 0);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "u %p, vha %p, host %p, sched again..", u,
+ vha, host);
+ if (!queued) {
+ queued = 1;
+ schedule_delayed_work(&vha->unknown_atio_work,
+ 1);
+ }
+ continue;
+ }
+
+abort:
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&u->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ kfree(u);
+ }
+}
+
+void qlt_unknown_atio_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(to_delayed_work(work),
+ struct scsi_qla_host, unknown_atio_work);
+
+ qlt_try_to_dequeue_unknown_atios(vha, 0);
+}
+
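qlt_queue_unknown_atio() and qlt_try_to_dequeue_unknown_atios() above park ATIOs whose d_id does not yet resolve to a vport and retry them from a delayed work item until an owner appears, the target stops, or the exchange is aborted. The park-and-retry shape, reduced to stock list/delayed-work primitives with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct parked_frame {                   /* hypothetical qla_tgt_sess_op stand-in */
        struct list_head entry;
        bool aborted;
        /* a copy of the original frame would live here */
};

static LIST_HEAD(parked_list);
static DEFINE_SPINLOCK(parked_lock);
static struct delayed_work retry_work;

static bool try_deliver(struct parked_frame *f)
{
        return false;                   /* stub: real code looks up the owning vport */
}

static void retry_fn(struct work_struct *work)
{
        struct parked_frame *f, *t;
        unsigned long flags;
        bool rearm = false;

        spin_lock_irqsave(&parked_lock, flags);
        list_for_each_entry_safe(f, t, &parked_list, entry) {
                if (f->aborted || try_deliver(f)) {
                        list_del(&f->entry);
                        kfree(f);
                } else {
                        rearm = true;   /* still unowned: try again later */
                }
        }
        spin_unlock_irqrestore(&parked_lock, flags);

        if (rearm)
                schedule_delayed_work(&retry_work, 1);
}

static void parked_init(void)
{
        INIT_DELAYED_WORK(&retry_work, retry_fn);
}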
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint8_t ha_locked)
{
@@ -249,8 +343,14 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.d_id[0],
atio->u.isp24.fcp_hdr.d_id[1],
atio->u.isp24.fcp_hdr.d_id[2]);
+
+
+ qlt_queue_unknown_atio(vha, atio, ha_locked);
break;
}
+ if (unlikely(!list_empty(&vha->unknown_atio_list)))
+ qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
+
qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -278,6 +378,31 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
+ case VP_RPT_ID_IOCB_TYPE:
+ qla24xx_report_id_acquisition(vha,
+ (struct vp_rpt_id_entry_24xx *)atio);
+ break;
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)atio;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, (response_t *)atio);
+ break;
+
+ }
+
+ /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
+
default:
ql_dbg(ql_dbg_tgt, vha, 0xe040,
"qla_target(%d): Received unknown ATIO atio "
@@ -395,22 +520,263 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
/*
* All qlt_plogi_ack_t operations are protected by hardware_lock
*/
+static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct imm_ntfy_from_isp *ntfy, int type)
+{
+ struct qla_work_evt *e;
+ e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.nack.fcport = fcport;
+ e->u.nack.type = type;
+ memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
+ return qla2x00_post_work(vha, e);
+}
+
+static
+void qla2x00_async_nack_sp_done(void *s, int res)
+{
+ struct srb *sp = (struct srb *)s;
+ struct scsi_qla_host *vha = sp->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x %8phC type %d\n",
+ sp->name, res, sp->fcport->port_name, sp->type);
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->chip_reset = vha->hw->chip_reset;
+
+ switch (sp->type) {
+ case SRB_NACK_PLOGI:
+ sp->fcport->login_gen++;
+ sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
+ sp->fcport->logout_on_delete = 1;
+ break;
+
+ case SRB_NACK_PRLI:
+ sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ sp->fcport->deleted = 0;
+
+ if (!sp->fcport->login_succ &&
+ !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
+ sp->fcport->login_succ = 1;
+
+ vha->fcport_count++;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw) ||
+ !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_gpsc_work(vha, sp->fcport);
+ }
+ }
+ break;
+
+ case SRB_NACK_LOGO:
+ sp->fcport->login_gen++;
+ sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
+ break;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct imm_ntfy_from_isp *ntfy, int type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ char *c = NULL;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ switch (type) {
+ case SRB_NACK_PLOGI:
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ c = "PLOGI";
+ break;
+ case SRB_NACK_PRLI:
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ c = "PRLI";
+ break;
+ case SRB_NACK_LOGO:
+ fcport->fw_login_state = DSC_LS_LOGO_PEND;
+ c = "LOGO";
+ break;
+ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ sp->type = type;
+ sp->name = "nack";
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+ sp->u.iocb_cmd.u.nack.ntfy = ntfy;
+
+ sp->done = qla2x00_async_nack_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hndl %x %s\n",
+ sp->name, fcport->port_name, sp->handle, c);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ fc_port_t *t;
+ unsigned long flags;
+
+ switch (e->u.nack.type) {
+ case SRB_NACK_PRLI:
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ t = qlt_create_sess(vha, e->u.nack.fcport, 0);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ if (t) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s create sess success %p", __func__, t);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ /* create sess has an extra kref */
+ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+ break;
+ }
+ qla24xx_async_notify_ack(vha, e->u.nack.fcport,
+ (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type);
+}
+
+void qla24xx_delete_sess_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, del_work);
+ struct qla_hw_data *ha = fcport->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+
+ if (fcport->se_sess) {
+ ha->tgt.tgt_ops->shutdown_sess(fcport);
+ ha->tgt.tgt_ops->put_sess(fcport);
+ } else {
+ qlt_unreg_sess(fcport);
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+/*
+ * Called from qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct fc_port *sess = fcport;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (!sess->se_sess) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ } else {
+ if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get fail sess %8phC \n",
+ __func__, sess->port_name);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %8phC "
+ "(loop ID %d) reappeared\n", vha->vp_idx,
+ sess->local ? "local " : "", sess->port_name, sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
+ fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name, sess->loop_id);
+ sess->local = 0;
+ }
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
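Both qlt_fc_port_added() above and qlt_create_sess() below now guard session reuse with kref_get_unless_zero(), so a session whose reference count has already dropped to zero (teardown in flight) is treated as gone instead of being resurrected. The idiom in isolation, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct sess_obj {                       /* hypothetical */
        struct kref kref;
};

static void sess_obj_release(struct kref *k)
{
        kfree(container_of(k, struct sess_obj, kref));
}

/* Take a reference only if the object is not already being torn down. */
static struct sess_obj *sess_obj_get(struct sess_obj *s)
{
        if (!s || !kref_get_unless_zero(&s->kref))
                return NULL;            /* refcount already hit zero */
        return s;
}

static void sess_obj_put(struct sess_obj *s)
{
        kref_put(&s->kref, sess_obj_release);
}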
/*
* This is a zero-base ref-counting solution, since hardware_lock
* guarantees that ref_count is not modified concurrently.
* Upon successful return content of iocb is undefined
*/
-static qlt_plogi_ack_t *
+static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
struct imm_ntfy_from_isp *iocb)
{
- qlt_plogi_ack_t *pla;
+ struct qlt_plogi_ack_t *pla;
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
qlt_send_term_imm_notif(vha, &pla->iocb, 1);
- pla->iocb = *iocb;
+ memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
return pla;
}
}
@@ -423,50 +789,78 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
return NULL;
}
- pla->iocb = *iocb;
+ memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
pla->id = *id;
list_add_tail(&pla->list, &vha->plogi_ack_list);
return pla;
}
-static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
+void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
+ struct qlt_plogi_ack_t *pla)
{
+ struct imm_ntfy_from_isp *iocb = &pla->iocb;
+ port_id_t port_id;
+ uint16_t loop_id;
+ fc_port_t *fcport = pla->fcport;
+
BUG_ON(!pla->ref_count);
pla->ref_count--;
if (pla->ref_count)
return;
- ql_dbg(ql_dbg_async, vha, 0x5089,
+ ql_dbg(ql_dbg_disc, vha, 0x5089,
"Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
- " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
- pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
- pla->iocb.u.isp24.port_id[0],
- le16_to_cpu(pla->iocb.u.isp24.nport_handle),
- pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
- qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
+ " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
+ iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
+ iocb->u.isp24.port_id[0],
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.exchange_address, iocb->ox_id);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ fcport->loop_id = loop_id;
+ fcport->d_id = port_id;
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
+ fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+ if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
+ fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
+ }
list_del(&pla->list);
kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
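
The reworked qlt_plogi_ack_unref() above shows the last-reference teardown shape this series leans on: when ref_count hits zero it posts the deferred PLOGI NACK, clears every fcport plogi_link back-pointer that may still reference the entry, unlinks it, and frees it. Stripped to the back-pointer-clearing essentials (hypothetical names; outer locking assumed, as in the driver):

#include <linux/list.h>
#include <linux/slab.h>

struct ack {
        struct list_head list;
        int ref_count;          /* protected by an outer lock, as in the driver */
};

struct port {
        struct list_head list;
        struct ack *ack_link;
};

static void ack_unref(struct ack *a, struct list_head *ports)
{
        struct port *p;

        if (--a->ref_count)
                return;

        /* last put: no dangling back-pointers may survive the free */
        list_for_each_entry(p, ports, list)
                if (p->ack_link == a)
                        p->ack_link = NULL;

        list_del(&a->list);
        kfree(a);
}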
-static void
-qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
- struct qla_tgt_sess *sess, qlt_plogi_link_t link)
+void
+qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
+ struct fc_port *sess, enum qlt_plogi_link_t link)
{
+ struct imm_ntfy_from_isp *iocb = &pla->iocb;
/* Inc ref_count first because link might already be pointing at pla */
pla->ref_count++;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
+ "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
+ " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
+ sess, link, sess->port_name,
+ iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+ pla->ref_count, pla, link);
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
- "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
- " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
- pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
- pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
- pla->ref_count);
+ if (link == QLT_PLOGI_LINK_SAME_WWN)
+ pla->fcport = sess;
sess->plogi_link[link] = pla;
}
@@ -519,49 +913,45 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
static void qlt_free_session_done(struct work_struct *work)
{
- struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ struct fc_port *sess = container_of(work, struct fc_port,
free_work);
struct qla_tgt *tgt = sess->tgt;
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- fc_port_t fcport;
+ struct event_arg ea;
+ scsi_qla_host_t *base_vha;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
sess->logout_on_delete, sess->keep_nport_handle,
sess->send_els_logo);
- BUG_ON(!tgt);
- if (sess->send_els_logo) {
- qlt_port_logo_t logo;
- logo.id = sess->s_id;
- logo.cmd_count = 0;
- qlt_send_first_logo(vha, &logo);
- }
+ if (!IS_SW_RESV_ADDR(sess->d_id)) {
+ if (sess->send_els_logo) {
+ qlt_port_logo_t logo;
- if (sess->logout_on_delete) {
- int rc;
+ logo.id = sess->d_id;
+ logo.cmd_count = 0;
+ qlt_send_first_logo(vha, &logo);
+ }
- memset(&fcport, 0, sizeof(fcport));
- fcport.loop_id = sess->loop_id;
- fcport.d_id = sess->s_id;
- memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
- fcport.vha = vha;
- fcport.tgt_session = sess;
-
- rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+ if (sess->logout_on_delete) {
+ int rc;
+
+ rc = qla2x00_post_async_logout_work(vha, sess, NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
/*
@@ -583,29 +973,61 @@ static void qlt_free_session_done(struct work_struct *work)
msleep(100);
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
- "%s: sess %p logout completed\n",
- __func__, sess);
+ ql_dbg(ql_dbg_disc, vha, 0xf087,
+ "%s: sess %p logout completed\n",__func__, sess);
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (sess->logo_ack_needed) {
+ sess->logo_ack_needed = 0;
+ qla24xx_async_notify_ack(vha, sess,
+ (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
+ }
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (sess->se_sess) {
+ sess->se_sess = NULL;
+ if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
+ tgt->sess_count--;
+ }
+
+ sess->disc_state = DSC_DELETED;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->login_retry = vha->hw->login_retry_count;
+
+ if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
+ vha->fcport_count--;
+ sess->login_succ = 0;
+ }
+
+ if (sess->chip_reset != sess->vha->hw->chip_reset)
+ qla2x00_clear_loop_id(sess);
+
+ if (sess->conflict) {
+ sess->conflict->login_pause = 0;
+ sess->conflict = NULL;
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
{
- qlt_plogi_ack_t *own =
+ struct qlt_plogi_ack_t *own =
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
- qlt_plogi_ack_t *con =
+ struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
+ struct imm_ntfy_from_isp *iocb;
if (con) {
+ iocb = &con->iocb;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
- "se_sess %p / sess %p port %8phC is gone,"
- " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
- sess->se_sess, sess, sess->port_name,
- own ? "releasing own PLOGI" :
- "no own PLOGI pending",
- own ? own->ref_count : -1,
- con->iocb.u.isp24.port_name, con->ref_count);
+ "se_sess %p / sess %p port %8phC is gone,"
+ " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" : "no own PLOGI pending",
+ own ? own->ref_count : -1,
+ iocb->u.isp24.port_name, con->ref_count);
qlt_plogi_ack_unref(vha, con);
+ sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
"se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
@@ -615,59 +1037,64 @@ static void qlt_free_session_done(struct work_struct *work)
own ? own->ref_count : -1);
}
- if (own)
+ if (own) {
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
qlt_plogi_ack_unref(vha, own);
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+ }
}
-
- list_del(&sess->sess_list_entry);
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
- "Unregistration of sess %p finished\n", sess);
+ "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+ sess, sess->port_name, vha->fcport_count);
- kfree(sess);
- /*
- * We need to protect against race, when tgt is freed before or
- * inside wake_up()
- */
- tgt->sess_count--;
- if (tgt->sess_count == 0)
+ if (tgt && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
+
+ if (vha->fcport_count == 0)
+ wake_up_all(&vha->fcport_waitQ);
+
+ base_vha = pci_get_drvdata(ha->pdev);
+ if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
+ return;
+
+ if (!tgt || !tgt->tgt_stop) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_DELETE_DONE;
+ ea.fcport = sess;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
}
/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_release_session(struct kref *kref)
+void qlt_unreg_sess(struct fc_port *sess)
{
- struct qla_tgt_sess *sess =
- container_of(kref, struct qla_tgt_sess, sess_kref);
struct scsi_qla_host *vha = sess->vha;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s sess %p for deletion %8phC\n",
+ __func__, sess, sess->port_name);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- if (!list_empty(&sess->del_list_entry))
- list_del_init(&sess->del_list_entry);
+ qla2x00_mark_device_lost(vha, sess, 1, 1);
+
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ sess->disc_state = DSC_DELETE_PEND;
+ sess->last_rscn_gen = sess->rscn_gen;
+ sess->last_login_gen = sess->login_gen;
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
-
-void qlt_put_sess(struct qla_tgt_sess *sess)
-{
- if (!sess)
- return;
-
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
- kref_put(&sess->sess_kref, qlt_release_session);
-}
-EXPORT_SYMBOL(qlt_put_sess);
+EXPORT_SYMBOL(qlt_unreg_sess);
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
uint16_t loop_id;
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
@@ -680,31 +1107,6 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-#if 0 /* FIXME: do we need to choose a session here? */
- if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
- sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
- typeof(*sess), sess_list_entry);
- switch (mcmd) {
- case QLA_TGT_NEXUS_LOSS_SESS:
- mcmd = QLA_TGT_NEXUS_LOSS;
- break;
- case QLA_TGT_ABORT_ALL_SESS:
- mcmd = QLA_TGT_ABORT_ALL;
- break;
- case QLA_TGT_NEXUS_LOSS:
- case QLA_TGT_ABORT_ALL:
- break;
- default:
- ql_dbg(ql_dbg_tgt, vha, 0xe046,
- "qla_target(%d): Not allowed "
- "command %x in %s", vha->vp_idx,
- mcmd, __func__);
- sess = NULL;
- break;
- }
- } else
- sess = NULL;
-#endif
} else {
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
@@ -726,57 +1128,69 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
+static void qla24xx_chk_fcp_state(struct fc_port *sess)
+{
+ if (sess->chip_reset != sess->vha->hw->chip_reset) {
+ sess->logout_on_delete = 0;
+ sess->logo_ack_needed = 0;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ sess->scan_state = 0;
+ }
+}
+
/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+void qlt_schedule_sess_for_deletion(struct fc_port *sess,
bool immediate)
{
struct qla_tgt *tgt = sess->tgt;
- uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
- if (sess->deleted) {
- /* Upgrade to unconditional deletion in case it was temporary */
- if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
- list_del(&sess->del_list_entry);
- else
+ if (sess->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (sess->disc_state == DSC_DELETED) {
+ if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
+ wake_up_all(&tgt->waitQ);
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
+
+ if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
return;
}
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion\n", sess);
+ sess->disc_state = DSC_DELETE_PEND;
- if (immediate) {
- dev_loss_tmo = 0;
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- list_add(&sess->del_list_entry, &tgt->del_sess_list);
- } else {
- sess->deleted = QLA_SESS_DELETION_PENDING;
- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
- }
+ if (sess->deleted == QLA_SESS_DELETED)
+ sess->logout_on_delete = 0;
- sess->expires = jiffies + dev_loss_tmo * HZ;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
- " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
- sess->vha->vp_idx, sess->port_name, sess->loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
- dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
- sess->generation);
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
- if (immediate)
- mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
- else
- schedule_delayed_work(&tgt->sess_del_work,
- sess->expires - jiffies);
+ schedule_work(&sess->del_work);
+}
+
+void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = sess->vha->hw;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ qlt_schedule_sess_for_deletion(sess, 1);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
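
qlt_schedule_sess_for_deletion_lock() follows the usual "_lock" wrapper convention: the inner function documents that ha->tgt.sess_lock is already held, and the wrapper takes the lock for callers that arrive without it. The convention in miniature, with hypothetical names:

#include <linux/spinlock.h>

struct sess { int state; };

static DEFINE_SPINLOCK(sess_lock);

static void sess_retire(struct sess *s)         /* caller holds sess_lock */
{
        s->state = 0;                           /* placeholder for real teardown */
}

static void sess_retire_lock(struct sess *s)    /* takes the lock itself */
{
        unsigned long flags;

        spin_lock_irqsave(&sess_lock, flags);
        sess_retire(s);
        spin_unlock_irqrestore(&sess_lock, flags);
}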
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
+ scsi_qla_host_t *vha = tgt->vha;
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
- qlt_schedule_sess_for_deletion(sess, true);
+ list_for_each_entry(sess, &vha->vp_fcports, list) {
+ if (sess->se_sess)
+ qlt_schedule_sess_for_deletion(sess, 1);
+ }
/* At this point tgt could be already dead */
}
@@ -830,240 +1244,84 @@ out_free_id_list:
return res;
}
-/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_undelete_sess(struct qla_tgt_sess *sess)
-{
- BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
-
- list_del_init(&sess->del_list_entry);
- sess->deleted = 0;
-}
-
-static void qlt_del_sess_work_fn(struct delayed_work *work)
-{
- struct qla_tgt *tgt = container_of(work, struct qla_tgt,
- sess_del_work);
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
- unsigned long flags, elapsed;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- while (!list_empty(&tgt->del_sess_list)) {
- sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
- del_list_entry);
- elapsed = jiffies;
- if (time_after_eq(elapsed, sess->expires)) {
- /* No turning back */
- list_del_init(&sess->del_list_entry);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
- "Timeout: sess %p about to be deleted\n",
- sess);
- if (sess->se_sess)
- ha->tgt.tgt_ops->shutdown_sess(sess);
- qlt_put_sess(sess);
- } else {
- schedule_delayed_work(&tgt->sess_del_work,
- sess->expires - elapsed);
- break;
- }
- }
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-}
-
/*
* Adds an extra ref to allow to drop hw lock after adding sess to the list.
* Caller must put it.
*/
-static struct qla_tgt_sess *qlt_create_sess(
+static struct fc_port *qlt_create_sess(
struct scsi_qla_host *vha,
fc_port_t *fcport,
bool local)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess = fcport;
unsigned long flags;
- /* Check to avoid double sessions */
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
- sess_list_entry) {
- if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
- "Double sess %p found (s_id %x:%x:%x, "
- "loop_id %d), updating to d_id %x:%x:%x, "
- "loop_id %d", sess, sess->s_id.b.domain,
- sess->s_id.b.al_pa, sess->s_id.b.area,
- sess->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.al_pa, fcport->d_id.b.area,
- fcport->loop_id);
-
- /* Cannot undelete at this point */
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock,
- flags);
- return NULL;
- }
-
- if (sess->deleted)
- qlt_undelete_sess(sess);
-
- if (!sess->se_sess) {
- if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
- &sess->port_name[0], sess) < 0) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return NULL;
- }
- }
-
- kref_get(&sess->sess_kref);
- ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
- (fcport->flags & FCF_CONF_COMP_SUPPORTED));
-
- if (sess->local && !local)
- sess->local = 0;
-
- qlt_do_generation_tick(vha, &sess->generation);
-
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ if (vha->vha_tgt.qla_tgt->tgt_stop)
+ return NULL;
- return sess;
+ if (fcport->se_sess) {
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get_unless_zero failed for %8phC\n",
+ __func__, sess->port_name);
+ return NULL;
}
- }
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
- "qla_target(%u): session allocation failed, all commands "
- "from port %8phC will be refused", vha->vp_idx,
- fcport->port_name);
-
- return NULL;
+ return fcport;
}
sess->tgt = vha->vha_tgt.qla_tgt;
- sess->vha = vha;
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
sess->local = local;
- kref_init(&sess->sess_kref);
- INIT_LIST_HEAD(&sess->del_list_entry);
- /* Under normal circumstances we want to logout from firmware when
+ /*
+ * Under normal circumstances we want to logout from firmware when
* session eventually ends and release corresponding nport handle.
* In the exception cases (e.g. when new PLOGI is waiting) corresponding
- * code will adjust these flags as necessary. */
+ * code will adjust these flags as necessary.
+ */
sess->logout_on_delete = 1;
sess->keep_nport_handle = 0;
+ sess->logout_completed = 0;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
- "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
- sess, vha->vha_tgt.qla_tgt);
-
- sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
- BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
- memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
- vha->vha_tgt.qla_tgt->sess_count++;
- qlt_do_generation_tick(vha, &sess->generation);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
- "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
- "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
- vha->vp_idx, local ? "local " : "", fcport->port_name,
- fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
- sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
-
- /*
- * Determine if this fc_port->port_name is allowed to access
- * target mode using explict NodeACLs+MappedLUNs, or using
- * TPG demo mode. If this is successful a target mode FC nexus
- * is created.
- */
if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
&fcport->port_name[0], sess) < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "(%d) %8phC check_initiator_node_acl failed\n",
+ vha->vp_idx, fcport->port_name);
return NULL;
} else {
+ kref_init(&fcport->sess_kref);
/*
- * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
- * access across ->tgt.sess_lock reaquire.
+ * Take an extra reference to ->sess_kref here to handle
+	 * fc_port access across ->tgt.sess_lock reacquire.
*/
- kref_get(&sess->sess_kref);
- }
-
- return sess;
-}
-
-/*
- * Called from qla2x00_reg_remote_port()
- */
-void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
- unsigned long flags;
-
- if (!vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt || (fcport->port_type != FCT_INITIATOR))
- return;
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get_unless_zero failed for %8phC\n",
+ __func__, sess->port_name);
+ return NULL;
+ }
- if (qla_ini_mode_enabled(vha))
- return;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (!IS_SW_RESV_ADDR(sess->d_id))
+ vha->vha_tgt.qla_tgt->sess_count++;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (tgt->tgt_stop) {
+ qlt_do_generation_tick(vha, &sess->generation);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return;
}
- sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- mutex_lock(&vha->vha_tgt.tgt_mutex);
- sess = qlt_create_sess(vha, fcport, false);
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- /* Point of no return */
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return;
- } else {
- kref_get(&sess->sess_kref);
-
- if (sess->deleted) {
- qlt_undelete_sess(sess);
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
- "qla_target(%u): %ssession for port %8phC "
- "(loop ID %d) reappeared\n", vha->vp_idx,
- sess->local ? "local " : "", sess->port_name,
- sess->loop_id);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
+ sess, sess->se_sess, vha->vha_tgt.qla_tgt,
+ vha->vha_tgt.qla_tgt->sess_count);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
- "Reappeared sess %p\n", sess);
- }
- ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
- (fcport->flags & FCF_CONF_COMP_SUPPORTED));
- }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name,
+ fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
- if (sess && sess->local) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
- "qla_target(%u): local session for "
- "port %8phC (loop ID %d) became global\n", vha->vp_idx,
- fcport->port_name, sess->loop_id);
- sess->local = 0;
- }
- qlt_put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return sess;
}
/*
@@ -1074,7 +1332,7 @@ void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess = fcport;
unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
@@ -1088,8 +1346,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
- sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
- if (!sess) {
+ if (!sess->se_sess) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
@@ -1120,12 +1377,12 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
* We need to protect against race, when tgt is freed before or
* inside wake_up()
*/
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
- "tgt %p, empty(sess_list)=%d sess_count=%d\n",
- tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ "tgt %p, sess_count=%d\n",
+ tgt, tgt->sess_count);
res = (tgt->sess_count == 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
}
@@ -1173,8 +1430,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);
- flush_delayed_work(&tgt->sess_del_work);
-
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
@@ -1186,14 +1441,13 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
- "Waiting for tgt %p: list_empty(sess_list)=%d "
- "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
- tgt->sess_count);
+ "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
/* Big hammer */
- if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ if (!ha->flags.host_shutting_down &&
+ (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
qlt_disable_vha(vha);
/* Wait for sessions to clear out (just in case) */
@@ -1320,6 +1574,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
+ nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
@@ -1489,6 +1744,14 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
}
}
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ if (tag == op->atio.u.isp24.exchange_addr) {
+ op->aborted = true;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
cmd->aborted = 1;
@@ -1525,6 +1788,18 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
if (op_key == key && op_lun == lun)
op->aborted = true;
}
+
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ uint32_t op_key;
+ u64 op_lun;
+
+ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ op_lun = scsilun_to_int(
+ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+ if (op_key == key && op_lun == lun)
+ op->aborted = true;
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key;
uint32_t cmd_lun;
@@ -1540,7 +1815,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
- struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+ struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
struct qla_hw_data *ha = vha->hw;
struct se_session *se_sess = sess->se_sess;
@@ -1549,8 +1824,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
u32 lun = 0;
int rc;
bool found_lun = false;
+ unsigned long flags;
- spin_lock(&se_sess->sess_cmd_lock);
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
struct qla_tgt_cmd *cmd =
container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
@@ -1560,7 +1836,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
break;
}
}
- spin_unlock(&se_sess->sess_cmd_lock);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
/* cmd not in LIO lists, look in qla list */
if (!found_lun) {
@@ -1592,8 +1868,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->tmr_func = QLA_TGT_ABTS;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -1613,7 +1890,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
uint32_t tag = abts->exchange_addr_to_abort;
uint8_t s_id[3];
int rc;
@@ -1665,7 +1942,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
return;
}
@@ -1763,10 +2040,23 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
return;
}
- if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
- qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
- 0, 0, 0, 0, 0, 0);
- else {
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+ if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_LOGO ||
+ mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_PRLO ||
+ mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_TPRLO) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "TM response logo %phC status %#x state %#x",
+ mcmd->sess->port_name, mcmd->fc_tm_rsp,
+ mcmd->flags);
+ qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ } else {
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ }
+ } else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
@@ -2182,95 +2472,6 @@ static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
cmd->conf_compl_supported;
}
-#ifdef CONFIG_QLA_TGT_DEBUG_SRR
-/*
- * Original taken from the XFS code
- */
-static unsigned long qlt_srr_random(void)
-{
- static int Inited;
- static unsigned long RandomValue;
- static DEFINE_SPINLOCK(lock);
- /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
- register long rv;
- register long lo;
- register long hi;
- unsigned long flags;
-
- spin_lock_irqsave(&lock, flags);
- if (!Inited) {
- RandomValue = jiffies;
- Inited = 1;
- }
- rv = RandomValue;
- hi = rv / 127773;
- lo = rv % 127773;
- rv = 16807 * lo - 2836 * hi;
- if (rv <= 0)
- rv += 2147483647;
- RandomValue = rv;
- spin_unlock_irqrestore(&lock, flags);
- return rv;
-}
-
-static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
-{
-#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
- if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
- == 50) {
- *xmit_type &= ~QLA_TGT_XMIT_STATUS;
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
- "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
- }
-#endif
- /*
- * It's currently not possible to simulate SRRs for FCP_WRITE without
- * a physical link layer failure, so don't even try here..
- */
- if (cmd->dma_data_direction != DMA_FROM_DEVICE)
- return;
-
- if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
- ((qlt_srr_random() % 100) == 20)) {
- int i, leave = 0;
- unsigned int tot_len = 0;
-
- while (leave == 0)
- leave = qlt_srr_random() % cmd->sg_cnt;
-
- for (i = 0; i < leave; i++)
- tot_len += cmd->sg[i].length;
-
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
- "Cutting cmd %p (tag %d) buffer"
- " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
- " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
- cmd->bufflen, cmd->sg_cnt);
-
- cmd->bufflen = tot_len;
- cmd->sg_cnt = leave;
- }
-
- if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
- unsigned int offset = qlt_srr_random() % cmd->bufflen;
-
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
- "Cutting cmd %p (tag %d) buffer head "
- "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
- cmd->bufflen);
- if (offset == 0)
- *xmit_type &= ~QLA_TGT_XMIT_DATA;
- else if (qlt_set_data_offset(cmd, offset)) {
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
- "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
- }
- }
-}
-#else
-static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
-{}
-#endif
-
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
struct qla_tgt_prm *prm)
{
@@ -2288,7 +2489,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
int i;
if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
- if (prm->cmd->se_cmd.scsi_status != 0) {
+ if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
"Skipping EXPLICIT_CONFORM and "
"CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
@@ -2672,7 +2873,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
int res;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (cmd->sess && cmd->sess->deleted) {
cmd->state = QLA_TGT_STATE_PROCESSED;
if (cmd->sess->logout_completed)
/* no need to terminate. FW already freed exchange. */
@@ -2685,7 +2886,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
spin_unlock_irqrestore(&ha->hardware_lock, flags);
memset(&prm, 0, sizeof(prm));
- qlt_check_srr_debug(cmd, &xmit_type);
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
@@ -2848,7 +3048,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
- (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
+ (cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3296,7 +3496,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
return EIO;
}
cmd->aborted = 1;
- cmd->cmd_flags |= BIT_6;
+ cmd->trc_flags |= TRC_ABORT;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
@@ -3306,7 +3506,7 @@ EXPORT_SYMBOL(qlt_abort_cmd);
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt_sess *sess = cmd->sess;
+ struct fc_port *sess = cmd->sess;
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
"%s: se_cmd[%p] ox_id %04x\n",
@@ -3335,90 +3535,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
}
EXPORT_SYMBOL(qlt_free_cmd);
-/* ha->hardware_lock supposed to be held on entry */
-static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd, void *ctio)
-{
- struct qla_tgt_srr_ctio *sc;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_srr_imm *imm;
-
- tgt->ctio_srr_id++;
- cmd->cmd_flags |= BIT_15;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
- "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
-
- if (!ctio) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
- "qla_target(%d): SRR CTIO, but ctio is NULL\n",
- vha->vp_idx);
- return -EINVAL;
- }
-
- sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
- if (sc != NULL) {
- sc->cmd = cmd;
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- sc->srr_id = tgt->ctio_srr_id;
- list_add_tail(&sc->srr_list_entry,
- &tgt->srr_ctio_list);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
- "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
- if (tgt->imm_srr_id == tgt->ctio_srr_id) {
- int found = 0;
- list_for_each_entry(imm, &tgt->srr_imm_list,
- srr_list_entry) {
- if (imm->srr_id == sc->srr_id) {
- found = 1;
- break;
- }
- }
- if (found) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
- "Scheduling srr work\n");
- schedule_work(&tgt->srr_work);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
- "qla_target(%d): imm_srr_id "
- "== ctio_srr_id (%d), but there is no "
- "corresponding SRR IMM, deleting CTIO "
- "SRR %p\n", vha->vp_idx,
- tgt->ctio_srr_id, sc);
- list_del(&sc->srr_list_entry);
- spin_unlock(&tgt->srr_lock);
-
- kfree(sc);
- return -EINVAL;
- }
- }
- spin_unlock(&tgt->srr_lock);
- } else {
- struct qla_tgt_srr_imm *ti;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
- "qla_target(%d): Unable to allocate SRR CTIO entry\n",
- vha->vp_idx);
- spin_lock(&tgt->srr_lock);
- list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
- srr_list_entry) {
- if (imm->srr_id == tgt->ctio_srr_id) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
- "IMM SRR %p deleted (id %d)\n",
- imm, imm->srr_id);
- list_del(&imm->srr_list_entry);
- qlt_reject_free_srr_imm(vha, imm, 1);
- }
- }
- spin_unlock(&tgt->srr_lock);
-
- return -ENOMEM;
- }
-
- return 0;
-}
-
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -3527,7 +3643,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
dump_stack();
}
- cmd->cmd_flags |= BIT_17;
+ cmd->trc_flags |= TRC_FLUSH;
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3632,20 +3748,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
- qlt_schedule_sess_for_deletion(cmd->sess, true);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, cmd->sess->port_name);
+
+ qlt_schedule_sess_for_deletion_lock(cmd->sess);
}
break;
}
- case CTIO_SRR_RECEIVED:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
- "qla_target(%d): CTIO with SRR_RECEIVED"
- " status %x received (state %x, se_cmd %p)\n",
- vha->vp_idx, status, cmd->state, se_cmd);
- if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
- break;
- else
- return;
-
case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
@@ -3693,7 +3803,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
(!cmd->aborted)) {
- cmd->cmd_flags |= BIT_13;
+ cmd->trc_flags |= TRC_CTIO_ERR;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
}
@@ -3701,7 +3811,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- cmd->cmd_flags |= BIT_12;
+ cmd->trc_flags |= TRC_CTIO_DONE;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
@@ -3711,11 +3821,11 @@ skip_term:
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->aborted) {
- cmd->cmd_flags |= BIT_18;
+ cmd->trc_flags |= TRC_CTIO_ABORTED;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
- cmd->cmd_flags |= BIT_19;
+ cmd->trc_flags |= TRC_CTIO_STRANGE;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3762,7 +3872,7 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
return fcp_task_attr;
}
-static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
uint8_t *);
/*
* Process context for I/O path into tcm_qla2xxx code
@@ -3772,7 +3882,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess = cmd->sess;
+ struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
unsigned long flags;
@@ -3780,7 +3890,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
int ret, fcp_task_attr, data_dir, bidi = 0;
cmd->cmd_in_wq = 0;
- cmd->cmd_flags |= BIT_1;
+ cmd->trc_flags |= TRC_DO_WORK;
if (tgt->tgt_stop)
goto out_term;
@@ -3822,7 +3932,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
@@ -3832,7 +3942,7 @@ out_term:
* cmd has not sent to target yet, so pass NULL as the second
* argument to qlt_send_term_exchange() and free the memory here.
*/
- cmd->cmd_flags |= BIT_2;
+ cmd->trc_flags |= TRC_DO_WORK_ERR;
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
@@ -3841,7 +3951,7 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3859,7 +3969,7 @@ static void qlt_do_work(struct work_struct *work)
}
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
- struct qla_tgt_sess *sess,
+ struct fc_port *sess,
struct atio_from_isp *atio)
{
struct se_session *se_sess = sess->se_sess;
@@ -3883,7 +3993,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
- cmd->cmd_flags = 0;
+ cmd->trc_flags = 0;
cmd->jiffies_at_alloc = get_jiffies_64();
cmd->reset_count = vha->hw->chip_reset;
@@ -3900,7 +4010,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
struct qla_tgt_sess_op, work);
scsi_qla_host_t *vha = op->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
@@ -3941,11 +4051,12 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
if (!cmd) {
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
return;
}
+
/*
* __qlt_do_work() will call qlt_put_sess() to release
* the extra reference taken above by qlt_make_local_sess()
@@ -3953,13 +4064,11 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
__qlt_do_work(cmd);
kfree(op);
return;
-
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
-
}
/* ha->hardware_lock supposed to be held on entry */
@@ -3968,8 +4077,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
@@ -3998,7 +4108,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while old session %p is being deleted\n",
sess);
@@ -4008,24 +4118,32 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ "%s: kref_get fail, %8phC oxid %x \n",
+ __func__, sess->port_name,
+ be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+ return -EFAULT;
+ }
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
- qlt_put_sess(sess);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return -ENOMEM;
}
cmd->cmd_in_wq = 1;
- cmd->cmd_flags |= BIT_0;
+ cmd->trc_flags |= TRC_NEW_CMD;
cmd->se_cmd.cpuid = ha->msix_count ?
ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&cmd->work, qlt_do_work);
if (ha->msix_count) {
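The hunk above swaps the unconditional kref_get() for kref_get_unless_zero(), so a newly arrived command can no longer pin a session whose final reference is already being dropped; the failed grab is treated as a lookup miss and the ATIO is failed with -EFAULT. A minimal sketch of that lookup-then-reference pattern, using hypothetical my_sess_get()/my_sess_put() helpers rather than the driver's fc_port API:

/* Illustrative only: hypothetical my_sess type, not the driver's fc_port. */
#include <linux/kref.h>
#include <linux/slab.h>

struct my_sess {
	struct kref kref;
	/* ... per-session state ... */
};

static void my_sess_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_sess, kref));
}

/* Called with the lookup lock held; returns NULL if the session is dying. */
static struct my_sess *my_sess_get(struct my_sess *sess)
{
	/*
	 * kref_get_unless_zero() fails once the refcount has reached zero,
	 * i.e. teardown has already started on another CPU; treat that as
	 * a lookup miss instead of resurrecting a dying object.
	 */
	if (!sess || !kref_get_unless_zero(&sess->kref))
		return NULL;
	return sess;
}

static void my_sess_put(struct my_sess *sess)
{
	kref_put(&sess->kref, my_sess_release);
}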
@@ -4043,7 +4161,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
}
/* ha->hardware_lock supposed to be held on entry */
-static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags)
{
struct scsi_qla_host *vha = sess->vha;
@@ -4051,7 +4169,6 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
struct qla_tgt_mgmt_cmd *mcmd;
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res;
- uint8_t tmr_func;
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
if (!mcmd) {
@@ -4073,74 +4190,12 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
mcmd->reset_count = vha->hw->chip_reset;
switch (fn) {
- case QLA_TGT_CLEAR_ACA:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
- "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
- tmr_func = TMR_CLEAR_ACA;
- break;
-
- case QLA_TGT_TARGET_RESET:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
- "qla_target(%d): TARGET_RESET received\n",
- sess->vha->vp_idx);
- tmr_func = TMR_TARGET_WARM_RESET;
- break;
-
case QLA_TGT_LUN_RESET:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
- "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
- tmr_func = TMR_LUN_RESET;
- abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- break;
-
- case QLA_TGT_CLEAR_TS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
- "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
- tmr_func = TMR_CLEAR_TASK_SET;
- break;
-
- case QLA_TGT_ABORT_TS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
- "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
- tmr_func = TMR_ABORT_TASK_SET;
- break;
-#if 0
- case QLA_TGT_ABORT_ALL:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
- "qla_target(%d): Doing ABORT_ALL_TASKS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_ABORT_ALL_SESS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
- "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_NEXUS_LOSS_SESS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
- "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_NEXUS_LOSS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
- "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
- tmr_func = 0;
- break;
-#endif
- default:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
- "qla_target(%d): Unknown task mgmt fn 0x%x\n",
- sess->vha->vp_idx, fn);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
- return -ENOSYS;
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+ break;
}
- res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
if (res != 0) {
ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
"qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
@@ -4158,7 +4213,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
uint32_t lun, unpacked_lun;
int fn;
unsigned long flags;
@@ -4183,7 +4238,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
sizeof(struct atio_from_isp));
}
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+ if (sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4191,7 +4246,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
- struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+ struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
@@ -4215,8 +4270,9 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
lun = a->u.isp24.fcp_cmnd.lun;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
le16_to_cpu(iocb->u.isp2x.seq_id));
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
@@ -4234,7 +4290,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
int loop_id;
unsigned long flags;
@@ -4257,22 +4313,20 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
- if (fcport->tgt_session) {
- if (rc != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
- "%s: se_sess %p / sess %p from"
- " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
- " LOGO failed: %#x\n",
- __func__,
- fcport->tgt_session->se_sess,
- fcport->tgt_session,
- fcport->port_name, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, rc);
- }
-
- fcport->tgt_session->logout_completed = 1;
+ if (rc != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+ "%s: se_sess %p / sess %p from"
+ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+ " LOGO failed: %#x\n",
+ __func__,
+ fcport->se_sess,
+ fcport,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, rc);
}
+
+ fcport->logout_completed = 1;
}
/*
@@ -4282,16 +4336,16 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*/
-static struct qla_tgt_sess *
-qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
- port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
+struct fc_port *
+qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
+ port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
- struct qla_tgt_sess *sess = NULL, *other_sess;
+ struct fc_port *sess = NULL, *other_sess;
uint64_t other_wwn;
*conflict_sess = NULL;
- list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+ list_for_each_entry(other_sess, &vha->vp_fcports, list) {
other_wwn = wwn_to_u64(other_sess->port_name);
@@ -4302,9 +4356,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
}
/* find other sess with nport_id collision */
- if (port_id.b24 == other_sess->s_id.b24) {
+ if (port_id.b24 == other_sess->d_id.b24) {
if (loop_id != other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4320,6 +4374,11 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+
other_sess->keep_nport_handle = 1;
*conflict_sess = other_sess;
qlt_schedule_sess_for_deletion(other_sess,
@@ -4329,8 +4388,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
}
/* find other sess with nport handle collision */
- if (loop_id == other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+ if ((loop_id == other_sess->loop_id) &&
+ (loop_id != FC_NO_LOOP_ID)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4358,11 +4418,21 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
spin_lock(&vha->cmd_list_lock);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
if (op_key == key) {
op->aborted = true;
count++;
}
}
+
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ if (op_key == key) {
+ op->aborted = true;
+ count++;
+ }
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
if (cmd_key == key) {
@@ -4383,13 +4453,13 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
uint64_t wwn;
port_id_t port_id;
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- qlt_plogi_ack_t *pla;
+ struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4401,9 +4471,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
- "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
- vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
+ vha->vp_idx, iocb->u.isp24.port_id[2],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+ iocb->u.isp24.status_subcode, loop_id,
+ iocb->u.isp24.port_name);
/* res = 1 means ack at the end of thread
* res = 0 means ack async/later.
@@ -4416,12 +4489,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (wwn) {
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(tgt, wwn,
- port_id, loop_id, &conflict_sess);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
- if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
+ if (IS_SW_RESV_ADDR(port_id)) {
res = 1;
break;
}
@@ -4429,42 +4502,66 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
qlt_send_term_imm_notif(vha, iocb, 1);
-
- res = 0;
break;
}
res = 0;
- if (conflict_sess)
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
+ QLT_PLOGI_LINK_CONFLICT);
+ }
- if (!sess)
+ if (!sess) {
+ pla->ref_count++;
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, pla);
+ res = 0;
break;
+ }
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->s_id.b24 == port_id.b24));
- qlt_schedule_sess_for_deletion(sess, true);
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibily of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion_lock(sess);
+ break;
+ }
+
break;
case ELS_PRLI:
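The comment block carried into the new disc_state switch above describes when the firmware nport handle must be kept across a LOGO because a PLOGI reusing the same handle is still pending. A standalone sketch of that single decision, with illustrative example_* names rather than the real fc_port fields:

/* Illustrative types only; mirrors the keep_nport_handle test above. */
#include <stdbool.h>
#include <stdint.h>

struct example_sess {
	uint16_t loop_id;	/* nport handle currently owned by the session */
	uint32_t d_id_b24;	/* 24-bit port ID (domain/area/al_pa) */
};

/*
 * Returns true when the incoming PLOGI reuses both the session's nport
 * handle and its port ID. In that case the LOGO path must not ask the
 * firmware to free the handle (LCF_FREE_NPORT); otherwise the pending
 * PLOGI could later be ACKed against a handle that was already released.
 */
static bool example_keep_nport_handle(const struct example_sess *sess,
				      uint16_t plogi_loop_id,
				      uint32_t plogi_port_id_b24)
{
	return sess->loop_id == plogi_loop_id &&
	       sess->d_id_b24 == plogi_port_id_b24;
}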
@@ -4472,8 +4569,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (wwn) {
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
- loop_id, &conflict_sess);
+ sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
+ loop_id, &conflict_sess);
spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
@@ -4487,7 +4584,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->deleted) {
+ if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4511,11 +4608,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->local = 0;
sess->loop_id = loop_id;
- sess->s_id = port_id;
+ sess->d_id = port_id;
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
if (wd3_lo & BIT_7)
sess->conf_compl_supported = 1;
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
}
res = 1; /* send notify ack */
@@ -4525,15 +4627,61 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else {
- /* todo: else - create sess here. */
- res = 1; /* send notify ack */
- }
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ }
+ }
break;
+
+ case ELS_TPRLO:
+ if (le16_to_cpu(iocb->u.isp24.flags) &
+ NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
+ loop_id = 0xFFFF;
+ qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
+ res = 1;
+ break;
+ }
+ /* drop through */
case ELS_LOGO:
case ELS_PRLO:
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ if (sess) {
+ sess->login_gen++;
+ sess->fw_login_state = DSC_LS_LOGO_PEND;
+ sess->logo_ack_needed = 1;
+ memcpy(sess->iocb, iocb, IOCB_SIZE);
+ }
+
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: logo %llx res %d sess %p ",
+ __func__, wwn, res, sess);
+ if (res == 0) {
+ /*
+ * cmd went upper layer, look for qlt_xmit_tm_rsp()
+ * for LOGO_ACK & sess delete
+ */
+ BUG_ON(!sess);
+ res = 0;
+ } else {
+ /* cmd did not go to upper layer. */
+ if (sess) {
+ qlt_schedule_sess_for_deletion_lock(sess);
+ res = 0;
+ }
+ /* else logo will be ack */
+ }
break;
case ELS_PDISC:
case ELS_ADISC:
@@ -4544,6 +4692,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
+
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "sess %p lid %d|%d DS %d LS %d\n",
+ sess, sess->loop_id, loop_id,
+ sess->disc_state, sess->fw_login_state);
+ }
+
res = 1; /* send notify ack */
break;
}
@@ -4560,451 +4718,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
return res;
}
-static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
-{
-#if 1
- /*
- * FIXME: Reject non zero SRR relative offset until we can test
- * this code properly.
- */
- pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
- return -1;
-#else
- struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
- size_t first_offset = 0, rem_offset = offset, tmp = 0;
- int i, sg_srr_cnt, bufflen = 0;
-
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
- "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
- "cmd->sg_cnt: %u, direction: %d\n",
- cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
-
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
- "Missing cmd->sg or zero cmd->sg_cnt in"
- " qla_tgt_set_data_offset\n");
- return -EINVAL;
- }
- /*
- * Walk the current cmd->sg list until we locate the new sg_srr_start
- */
- for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
- "sg[%d]: %p page: %p, length: %d, offset: %d\n",
- i, sg, sg_page(sg), sg->length, sg->offset);
-
- if ((sg->length + tmp) > offset) {
- first_offset = rem_offset;
- sg_srr_start = sg;
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
- "Found matching sg[%d], using %p as sg_srr_start, "
- "and using first_offset: %zu\n", i, sg,
- first_offset);
- break;
- }
- tmp += sg->length;
- rem_offset -= sg->length;
- }
-
- if (!sg_srr_start) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
- "Unable to locate sg_srr_start for offset: %u\n", offset);
- return -EINVAL;
- }
- sg_srr_cnt = (cmd->sg_cnt - i);
-
- sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
- if (!sg_srr) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
- "Unable to allocate sgp\n");
- return -ENOMEM;
- }
- sg_init_table(sg_srr, sg_srr_cnt);
- sgp = &sg_srr[0];
- /*
- * Walk the remaining list for sg_srr_start, mapping to the newly
- * allocated sg_srr taking first_offset into account.
- */
- for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
- if (first_offset) {
- sg_set_page(sgp, sg_page(sg),
- (sg->length - first_offset), first_offset);
- first_offset = 0;
- } else {
- sg_set_page(sgp, sg_page(sg), sg->length, 0);
- }
- bufflen += sgp->length;
-
- sgp = sg_next(sgp);
- if (!sgp)
- break;
- }
-
- cmd->sg = sg_srr;
- cmd->sg_cnt = sg_srr_cnt;
- cmd->bufflen = bufflen;
- cmd->offset += offset;
- cmd->free_sg = 1;
-
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
- cmd->sg_cnt);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
- cmd->bufflen);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
- cmd->offset);
-
- if (cmd->sg_cnt < 0)
- BUG();
-
- if (cmd->bufflen < 0)
- BUG();
-
- return 0;
-#endif
-}
-
-static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
- uint32_t srr_rel_offs, int *xmit_type)
-{
- int res = 0, rel_offs;
-
- rel_offs = srr_rel_offs - cmd->offset;
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
- srr_rel_offs, rel_offs);
-
- *xmit_type = QLA_TGT_XMIT_ALL;
-
- if (rel_offs < 0) {
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
- "qla_target(%d): SRR rel_offs (%d) < 0",
- cmd->vha->vp_idx, rel_offs);
- res = -1;
- } else if (rel_offs == cmd->bufflen)
- *xmit_type = QLA_TGT_XMIT_STATUS;
- else if (rel_offs > 0)
- res = qlt_set_data_offset(cmd, rel_offs);
-
- return res;
-}
-
-/* No locks, thread context */
-static void qlt_handle_srr(struct scsi_qla_host *vha,
- struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
-{
- struct imm_ntfy_from_isp *ntfy =
- (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_cmd *cmd = sctio->cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- unsigned long flags;
- int xmit_type = 0, resp = 0;
- uint32_t offset;
- uint16_t srr_ui;
-
- offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
- srr_ui = ntfy->u.isp24.srr_ui;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
- cmd, srr_ui);
-
- switch (srr_ui) {
- case SRR_IU_STATUS:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- xmit_type = QLA_TGT_XMIT_STATUS;
- resp = 1;
- break;
- case SRR_IU_DATA_IN:
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
- "Unable to process SRR_IU_DATA_IN due to"
- " missing cmd->sg, state: %d\n", cmd->state);
- dump_stack();
- goto out_reject;
- }
- if (se_cmd->scsi_status != 0) {
- ql_dbg(ql_dbg_tgt, vha, 0xe02a,
- "Rejecting SRR_IU_DATA_IN with non GOOD "
- "scsi_status\n");
- goto out_reject;
- }
- cmd->bufflen = se_cmd->data_length;
-
- if (qlt_has_data(cmd)) {
- if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
- goto out_reject;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- resp = 1;
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
- "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
- vha->vp_idx, se_cmd->tag,
- cmd->se_cmd.scsi_status);
- goto out_reject;
- }
- break;
- case SRR_IU_DATA_OUT:
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
- "Unable to process SRR_IU_DATA_OUT due to"
- " missing cmd->sg\n");
- dump_stack();
- goto out_reject;
- }
- if (se_cmd->scsi_status != 0) {
- ql_dbg(ql_dbg_tgt, vha, 0xe02b,
- "Rejecting SRR_IU_DATA_OUT"
- " with non GOOD scsi_status\n");
- goto out_reject;
- }
- cmd->bufflen = se_cmd->data_length;
-
- if (qlt_has_data(cmd)) {
- if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
- goto out_reject;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (xmit_type & QLA_TGT_XMIT_DATA) {
- cmd->cmd_flags |= BIT_8;
- qlt_rdy_to_xfer(cmd);
- }
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
- "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
- vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
- goto out_reject;
- }
- break;
- default:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
- "qla_target(%d): Unknown srr_ui value %x",
- vha->vp_idx, srr_ui);
- goto out_reject;
- }
-
- /* Transmit response in case of status and data-in cases */
- if (resp) {
- cmd->cmd_flags |= BIT_7;
- qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
- }
-
- return;
-
-out_reject:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- cmd->state = QLA_TGT_STATE_DATA_IN;
- dump_stack();
- } else {
- cmd->cmd_flags |= BIT_9;
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
- struct qla_tgt_srr_imm *imm, int ha_locked)
-{
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags = 0;
-
-#ifndef __CHECKER__
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
- qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-
-#ifndef __CHECKER__
- if (!ha_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#endif
-
- kfree(imm);
-}
-
-static void qlt_handle_srr_work(struct work_struct *work)
-{
- struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_tgt_srr_ctio *sctio;
- unsigned long flags;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
- tgt);
-
-restart:
- spin_lock_irqsave(&tgt->srr_lock, flags);
- list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
- struct qla_tgt_srr_imm *imm, *i, *ti;
- struct qla_tgt_cmd *cmd;
- struct se_cmd *se_cmd;
-
- imm = NULL;
- list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
- srr_list_entry) {
- if (i->srr_id == sctio->srr_id) {
- list_del(&i->srr_list_entry);
- if (imm) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
- "qla_target(%d): There must be "
- "only one IMM SRR per CTIO SRR "
- "(IMM SRR %p, id %d, CTIO %p\n",
- vha->vp_idx, i, i->srr_id, sctio);
- qlt_reject_free_srr_imm(tgt->vha, i, 0);
- } else
- imm = i;
- }
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
- "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
- sctio->srr_id);
-
- if (imm == NULL) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
- "Not found matching IMM for SRR CTIO (id %d)\n",
- sctio->srr_id);
- continue;
- } else
- list_del(&sctio->srr_list_entry);
-
- spin_unlock_irqrestore(&tgt->srr_lock, flags);
-
- cmd = sctio->cmd;
- /*
- * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
- * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
- * logic..
- */
- cmd->offset = 0;
- if (cmd->free_sg) {
- kfree(cmd->sg);
- cmd->sg = NULL;
- cmd->free_sg = 0;
- }
- se_cmd = &cmd->se_cmd;
-
- cmd->sg_cnt = se_cmd->t_data_nents;
- cmd->sg = se_cmd->t_data_sg;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
- "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
- cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
- se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
-
- qlt_handle_srr(vha, sctio, imm);
-
- kfree(imm);
- kfree(sctio);
- goto restart;
- }
- spin_unlock_irqrestore(&tgt->srr_lock, flags);
-}
-
-/* ha->hardware_lock supposed to be held on entry */
-static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
- struct imm_ntfy_from_isp *iocb)
-{
- struct qla_tgt_srr_imm *imm;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_srr_ctio *sctio;
-
- tgt->imm_srr_id++;
-
- ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
- vha->vp_idx);
-
- imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
- if (imm != NULL) {
- memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
-
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- imm->srr_id = tgt->imm_srr_id;
- list_add_tail(&imm->srr_list_entry,
- &tgt->srr_imm_list);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
- "IMM NTFY SRR %p added (id %d, ui %x)\n",
- imm, imm->srr_id, iocb->u.isp24.srr_ui);
- if (tgt->imm_srr_id == tgt->ctio_srr_id) {
- int found = 0;
- list_for_each_entry(sctio, &tgt->srr_ctio_list,
- srr_list_entry) {
- if (sctio->srr_id == imm->srr_id) {
- found = 1;
- break;
- }
- }
- if (found) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
- "Scheduling srr work\n");
- schedule_work(&tgt->srr_work);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
- "qla_target(%d): imm_srr_id "
- "== ctio_srr_id (%d), but there is no "
- "corresponding SRR CTIO, deleting IMM "
- "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
- imm);
- list_del(&imm->srr_list_entry);
-
- kfree(imm);
-
- spin_unlock(&tgt->srr_lock);
- goto out_reject;
- }
- }
- spin_unlock(&tgt->srr_lock);
- } else {
- struct qla_tgt_srr_ctio *ts;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
- "qla_target(%d): Unable to allocate SRR IMM "
- "entry, SRR request will be rejected\n", vha->vp_idx);
-
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
- srr_list_entry) {
- if (sctio->srr_id == tgt->imm_srr_id) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
- "CTIO SRR %p deleted (id %d)\n",
- sctio, sctio->srr_id);
- list_del(&sctio->srr_list_entry);
- qlt_send_term_exchange(vha, sctio->cmd,
- &sctio->cmd->atio, 1, 0);
- kfree(sctio);
- }
- }
- spin_unlock(&tgt->srr_lock);
- goto out_reject;
- }
-
- return;
-
-out_reject:
- qlt_send_notify_ack(vha, iocb, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-}
-
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -5126,12 +4839,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
if (qlt_24xx_handle_els(vha, iocb) == 0)
send_notify_ack = 0;
break;
-
- case IMM_NTFY_SRR:
- qlt_prepare_srr_imm(vha, iocb);
- send_notify_ack = 0;
- break;
-
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
"qla_target(%d): Received unknown immediate "
@@ -5153,7 +4860,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -5214,7 +4921,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
int tag;
@@ -5756,6 +5463,32 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
+ case MBA_REJECTED_FCP_CMD:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "qla_target(%d): Async event LS_REJECT occurred "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ if (le16_to_cpu(mailbox[3]) == 1) {
+ /* exchange starvation. */
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Exchange starvation-. Resetting RISC\n");
+
+ vha->hw->exch_starvation = 0;
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ break;
+
case MBA_PORT_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
"qla_target(%d): Port update async event %#x "
@@ -5765,14 +5498,14 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
login_code = le16_to_cpu(mailbox[2]);
- if (login_code == 0x4)
+ if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
- else if (login_code == 0x7)
+ vha->hw->exch_starvation = 0;
+ } else if (login_code == 0x7)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
"Async MB 2: Port Logged Out\n");
break;
-
default:
break;
}
@@ -5783,8 +5516,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
uint16_t loop_id)
{
- fc_port_t *fcport;
+ fc_port_t *fcport, *tfcp, *del;
int rc;
+ unsigned long flags;
+ u8 newfcport = 0;
fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
if (!fcport) {
@@ -5806,18 +5541,82 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
return NULL;
}
+ del = NULL;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
+
+ if (tfcp) {
+ tfcp->d_id = fcport->d_id;
+ tfcp->port_type = fcport->port_type;
+ tfcp->supported_classes = fcport->supported_classes;
+ tfcp->flags |= fcport->flags;
+
+ del = fcport;
+ fcport = tfcp;
+ } else {
+ if (vha->hw->current_topology == ISP_CFG_F)
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ list_add_tail(&fcport->list, &vha->vp_fcports);
+ if (!IS_SW_RESV_ADDR(fcport->d_id))
+ vha->fcport_count++;
+ fcport->login_gen++;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->login_succ = 1;
+ newfcport = 1;
+ }
+
+ fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ if (newfcport) {
+ if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name, vha->fcport_count);
+ qla24xx_post_upd_fcport_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name, vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
+ }
+ break;
+
+ case MODE_TARGET:
+ default:
+ break;
+ }
+ if (del)
+ qla2x00_free_fcport(del);
+
return fcport;
}
/* Must be called under tgt_mutex */
-static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
uint8_t *s_id)
{
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
fc_port_t *fcport = NULL;
int rc, global_resets;
uint16_t loop_id = 0;
+ if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
+ /*
+ * This is Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ return NULL;
+ }
+
mutex_lock(&vha->vha_tgt.tgt_mutex);
retry:
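The early return added to qlt_make_local_sess() above relies on the Fibre Channel well-known address layout: a source ID of the form FF:FC:xx identifies a Domain Controller, which does not originate normal SCSI I/O, so the missing-session lookup can simply be dropped instead of logged as an error. A tiny sketch of the same test, with an illustrative helper name:

/* Illustrative helper; same byte test as the check above. */
#include <stdbool.h>
#include <stdint.h>

static bool demo_sid_is_domain_controller(const uint8_t s_id[3])
{
	return s_id[0] == 0xFF && s_id[1] == 0xFC;
}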
@@ -5828,21 +5627,11 @@ retry:
if (rc != 0) {
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- if ((s_id[0] == 0xFF) &&
- (s_id[1] == 0xFC)) {
- /*
- * This is Domain Controller, so it should be
- * OK to drop SCSI commands from it.
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
- "Unable to find initiator with S_ID %x:%x:%x",
- s_id[0], s_id[1], s_id[2]);
- } else
- ql_log(ql_log_info, vha, 0xf071,
- "qla_target(%d): Unable to find "
- "initiator with S_ID %x:%x:%x",
- vha->vp_idx, s_id[0], s_id[1],
- s_id[2]);
+ ql_log(ql_log_info, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
if (rc == -ENOENT) {
qlt_port_logo_t logo;
@@ -5875,7 +5664,6 @@ retry:
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- kfree(fcport);
return sess;
}
@@ -5884,7 +5672,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags = 0, flags2 = 0;
uint32_t be_s_id;
uint8_t s_id[3];
@@ -5911,12 +5699,18 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess)
goto out_term2;
} else {
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
sess = NULL;
goto out_term2;
}
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "%s: kref_get fail %8phC \n",
+ __func__, sess->port_name);
+ sess = NULL;
+ goto out_term2;
+ }
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5928,8 +5722,8 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- qlt_put_sess(sess);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
@@ -5940,7 +5734,8 @@ out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- qlt_put_sess(sess);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
@@ -5950,7 +5745,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
struct atio_from_isp *a = &prm->tm_iocb2;
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags;
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
@@ -5975,12 +5770,18 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
sess = NULL;
goto out_term;
}
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "%s: kref_get fail %8phC\n",
+ __func__, sess->port_name);
+ sess = NULL;
+ goto out_term;
+ }
}
iocb = a;
@@ -5992,13 +5793,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -6075,17 +5876,10 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->sess_list);
INIT_LIST_HEAD(&tgt->del_sess_list);
- INIT_DELAYED_WORK(&tgt->sess_del_work,
- (void (*)(struct work_struct *))qlt_del_sess_work_fn);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
- spin_lock_init(&tgt->srr_lock);
- INIT_LIST_HEAD(&tgt->srr_ctio_list);
- INIT_LIST_HEAD(&tgt->srr_imm_list);
- INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
base_vha->vha_tgt.qla_tgt = tgt;
@@ -6251,29 +6045,25 @@ EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
- struct qla_hw_data *ha = vha->hw;
-
switch (ql2x_ini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
case QLA2XXX_INI_MODE_EXCLUSIVE:
vha->host->active_mode = MODE_TARGET;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode |= MODE_TARGET;
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ vha->host->active_mode = MODE_DUAL;
break;
default:
break;
}
-
- if (ha->tgt.ini_mode_force_reverse)
- qla_reverse_ini_mode(vha);
}
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
- struct qla_hw_data *ha = vha->hw;
-
switch (ql2x_ini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
vha->host->active_mode = MODE_UNKNOWN;
@@ -6282,14 +6072,12 @@ static void qlt_clear_mode(struct scsi_qla_host *vha)
vha->host->active_mode = MODE_INITIATOR;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode &= ~MODE_TARGET;
+ case QLA2XXX_INI_MODE_DUAL:
+ vha->host->active_mode = MODE_INITIATOR;
break;
default:
break;
}
-
- if (ha->tgt.ini_mode_force_reverse)
- qla_reverse_ini_mode(vha);
}
/*
@@ -6377,9 +6165,6 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
- if (!qla_tgt_mode_enabled(vha))
- return;
-
vha->vha_tgt.qla_tgt = NULL;
mutex_init(&vha->vha_tgt.tgt_mutex);
@@ -6405,13 +6190,11 @@ qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- if (qla_ini_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
- else
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ ct_req->req.rff_id.fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
ct_req->req.rff_id.fc4_feature = BIT_1;
- }
+ } else if (qla_dual_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
}
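qlt_rff_id() above fills in the RFF_ID FC-4 Features word for the name server: bit 0 advertises target capability, bit 1 initiator capability, and the new dual mode sets both. A small sketch of that mapping, with illustrative demo_* names standing in for the driver's mode checks:

/* Illustrative mapping only; bit meanings taken from the code above. */
#include <stdint.h>

#define FC4_FEATURE_TARGET	(1u << 0)
#define FC4_FEATURE_INITIATOR	(1u << 1)

enum demo_mode { DEMO_TARGET, DEMO_INITIATOR, DEMO_DUAL };

static uint8_t demo_rff_fc4_feature(enum demo_mode mode)
{
	switch (mode) {
	case DEMO_TARGET:
		return FC4_FEATURE_TARGET;
	case DEMO_INITIATOR:
		return FC4_FEATURE_INITIATOR;
	case DEMO_DUAL:
	default:
		return FC4_FEATURE_TARGET | FC4_FEATURE_INITIATOR;
	}
}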
/*
@@ -6430,7 +6213,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
uint16_t cnt;
struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
- if (!qla_tgt_mode_enabled(vha))
+ if (qla_ini_mode_enabled(vha))
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
@@ -6523,8 +6306,10 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
+ u16 t;
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
/* We save only once */
ha->tgt.saved_exchange_count = nv->exchange_count;
@@ -6537,13 +6322,30 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = cpu_to_le16(0xFFFF);
+ if (qla_tgt_mode_enabled(vha)) {
+ nv->exchange_count = cpu_to_le16(0xFFFF);
+ } else { /* dual */
+ if (ql_dm_tgt_ex_pct > 100) {
+ ql_dm_tgt_ex_pct = 50;
+ } else if (ql_dm_tgt_ex_pct == 100) {
+ /* leave some for FW */
+ ql_dm_tgt_ex_pct = 95;
+ }
+
+ tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
+ tmp = tmp/100;
+ if (tmp > 0xffff)
+ tmp = 0xffff;
+
+ t = tmp & 0xffff;
+ nv->exchange_count = cpu_to_le16(t);
+ }
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+ if (qla_tgt_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
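In dual mode the hunk above no longer claims all 0xFFFF exchanges for target use; it carves out ql_dm_tgt_ex_pct percent of the firmware's exchange control blocks, clamps the percentage into a sane range, and caps the result at the 16-bit NVRAM field. A worked sketch of the arithmetic with made-up inputs (4096 exchanges, 75 percent):

/* Stand-alone sketch of the dual-mode exchange split; inputs are made up. */
#include <stdint.h>
#include <stdio.h>

static uint16_t dual_mode_tgt_exchanges(uint32_t fw_xcb_count, uint32_t tgt_pct)
{
	uint32_t tmp;

	if (tgt_pct > 100)
		tgt_pct = 50;		/* out of range: fall back to an even split */
	else if (tgt_pct == 100)
		tgt_pct = 95;		/* leave some exchanges for the firmware */

	tmp = fw_xcb_count * tgt_pct / 100;
	if (tmp > 0xffff)
		tmp = 0xffff;		/* NVRAM exchange_count is only 16 bits wide */

	return (uint16_t)tmp;
}

int main(void)
{
	/* 4096 firmware exchanges, 75% reserved for target mode -> 3072 */
	printf("%u\n", (unsigned)dual_mode_tgt_exchanges(4096, 75));
	return 0;
}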
@@ -6622,11 +6424,13 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
+ u16 t;
if (!QLA_TGT_MODE_ENABLED())
return;
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
/* We save only once */
ha->tgt.saved_exchange_count = nv->exchange_count;
@@ -6639,13 +6443,29 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = cpu_to_le16(0xFFFF);
+ if (qla_tgt_mode_enabled(vha)) {
+ nv->exchange_count = cpu_to_le16(0xFFFF);
+ } else { /* dual */
+ if (ql_dm_tgt_ex_pct > 100) {
+ ql_dm_tgt_ex_pct = 50;
+ } else if (ql_dm_tgt_ex_pct == 100) {
+ /* leave some for FW */
+ ql_dm_tgt_ex_pct = 95;
+ }
+
+ tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
+ tmp = tmp/100;
+ if (tmp > 0xffff)
+ tmp = 0xffff;
+ t = tmp & 0xffff;
+ nv->exchange_count = cpu_to_le16(t);
+ }
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+ if (qla_tgt_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
@@ -6749,10 +6569,12 @@ void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
{
- if (qla_tgt_mode_enabled(vha))
+ /* enable target mode. Bit5 = 1 => disable */
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
vpmod->options_idx1 &= ~BIT_5;
- /* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+
+ /* Disable ini mode, if requested. bit4 = 1 => disable */
+ if (qla_tgt_mode_enabled(vha))
vpmod->options_idx1 &= ~BIT_4;
}
@@ -6772,6 +6594,11 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
mutex_init(&base_vha->vha_tgt.tgt_mutex);
mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
+
+ INIT_LIST_HEAD(&base_vha->unknown_atio_list);
+ INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
+ qlt_unknown_atio_work_fn);
+
qlt_clear_mode(base_vha);
}
@@ -6906,6 +6733,8 @@ static int __init qlt_parse_ini_mode(void)
ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
else
return false;
@@ -6935,9 +6764,8 @@ int __init qlt_init(void)
}
qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
- sizeof(qlt_plogi_ack_t),
- __alignof__(qlt_plogi_ack_t),
- 0, NULL);
+ sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
+ 0, NULL);
if (!qla_tgt_plogi_cachep) {
ql_log(ql_log_fatal, NULL, 0xe06d,
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 0824a8164a24..a7f90dcaae37 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -45,10 +45,12 @@
#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+#define QLA2XXX_INI_MODE_STR_DUAL "dual"
#define QLA2XXX_INI_MODE_EXCLUSIVE 0
#define QLA2XXX_INI_MODE_DISABLED 1
#define QLA2XXX_INI_MODE_ENABLED 2
+#define QLA2XXX_INI_MODE_DUAL 3
#define QLA2XXX_COMMAND_COUNT_INIT 250
#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
@@ -118,84 +120,6 @@
? le16_to_cpu((iocb)->u.isp2x.target.extended) \
: (uint16_t)(iocb)->u.isp2x.target.id.standard)
-#ifndef IMMED_NOTIFY_TYPE
-#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
-/*
- * ISP queue - immediate notify entry structure definition.
- * This is sent by the ISP to the Target driver.
- * This IOCB would have report of events sent by the
- * initiator, that needs to be handled by the target
- * driver immediately.
- */
-struct imm_ntfy_from_isp {
- uint8_t entry_type; /* Entry type. */
- uint8_t entry_count; /* Entry count. */
- uint8_t sys_define; /* System defined. */
- uint8_t entry_status; /* Entry Status. */
- union {
- struct {
- uint32_t sys_define_2; /* System defined. */
- target_id_t target;
- uint16_t lun;
- uint8_t target_id;
- uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
-#define SRR_IU_DATA_IN 0x1
-#define SRR_IU_DATA_OUT 0x5
-#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
- uint8_t reserved_2[28];
- } isp2x;
- struct {
- uint32_t reserved;
- uint16_t nport_handle;
- uint16_t reserved_2;
- uint16_t flags;
-#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
-#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
- uint8_t status_subcode;
- uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
- union {
- struct {
- uint8_t node_name[8];
- } plogi; /* PLOGI/ADISC/PDISC */
- struct {
- /* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
- uint8_t resv0[6];
- } prli;
- struct {
- uint8_t port_id[3];
- uint8_t resv1;
- uint16_t nport_handle;
- uint16_t resv2;
- } req_els;
- } u;
- uint8_t port_name[8];
- uint8_t resv3[3];
- uint8_t vp_index;
- uint32_t reserved_5;
- uint8_t port_id[3];
- uint8_t reserved_6;
- } isp24;
- } u;
- uint16_t reserved_7;
- uint16_t ox_id;
-} __packed;
-#endif
-
#ifndef NOTIFY_ACK_TYPE
#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
/*
@@ -731,7 +655,7 @@ struct abts_resp_from_24xx_fw {
\********************************************************************/
struct qla_tgt_mgmt_cmd;
-struct qla_tgt_sess;
+struct fc_port;
/*
* This structure provides a template of function calls that the
@@ -744,21 +668,22 @@ struct qla_tgt_func_tmpl {
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
void (*handle_dif_err)(struct qla_tgt_cmd *);
- int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
- void (*free_session)(struct qla_tgt_sess *);
+ void (*free_session)(struct fc_port *);
int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
- struct qla_tgt_sess *);
- void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
- struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ struct fc_port *);
+ void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool);
+ struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
- struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
const uint8_t *);
- void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
- void (*shutdown_sess)(struct qla_tgt_sess *);
+ void (*clear_nacl_from_fcport_map)(struct fc_port *);
+ void (*put_sess)(struct fc_port *);
+ void (*shutdown_sess)(struct fc_port *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -795,6 +720,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -872,12 +799,8 @@ struct qla_tgt {
/* Count of sessions refering qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
- struct list_head sess_list;
-
/* Protected by hardware_lock */
struct list_head del_sess_list;
- struct delayed_work sess_del_work;
spinlock_t sess_work_lock;
struct list_head sess_works_list;
@@ -888,16 +811,7 @@ struct qla_tgt {
int notify_ack_expected;
int abts_resp_expected;
int modify_lun_expected;
-
- int ctio_srr_id;
- int imm_srr_id;
- spinlock_t srr_lock;
- struct list_head srr_ctio_list;
- struct list_head srr_imm_list;
- struct work_struct srr_work;
-
atomic_t tgt_global_resets_count;
-
struct list_head tgt_list_entry;
};
@@ -910,92 +824,32 @@ struct qla_tgt_sess_op {
bool aborted;
};
-enum qla_sess_deletion {
- QLA_SESS_DELETION_NONE = 0,
- QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
- * this one */
- QLA_SESS_DELETION_IN_PROGRESS = 2,
-};
-
-typedef enum {
- QLT_PLOGI_LINK_SAME_WWN,
- QLT_PLOGI_LINK_CONFLICT,
- QLT_PLOGI_LINK_MAX
-} qlt_plogi_link_t;
-
-typedef struct {
- struct list_head list;
- struct imm_ntfy_from_isp iocb;
- port_id_t id;
- int ref_count;
-} qlt_plogi_ack_t;
-
-/*
- * Equivilant to IT Nexus (Initiator-Target)
- */
-struct qla_tgt_sess {
- uint16_t loop_id;
- port_id_t s_id;
-
- unsigned int conf_compl_supported:1;
- unsigned int deleted:2;
- unsigned int local:1;
- unsigned int logout_on_delete:1;
- unsigned int keep_nport_handle:1;
- unsigned int send_els_logo:1;
-
- unsigned char logout_completed;
-
- int generation;
-
- struct se_session *se_sess;
- struct kref sess_kref;
- struct scsi_qla_host *vha;
- struct qla_tgt *tgt;
-
- struct list_head sess_list_entry;
- unsigned long expires;
- struct list_head del_list_entry;
-
- uint8_t port_name[WWN_SIZE];
- struct work_struct free_work;
-
- qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
+enum trace_flags {
+ TRC_NEW_CMD = BIT_0,
+ TRC_DO_WORK = BIT_1,
+ TRC_DO_WORK_ERR = BIT_2,
+ TRC_XFR_RDY = BIT_3,
+ TRC_XMIT_DATA = BIT_4,
+ TRC_XMIT_STATUS = BIT_5,
+ TRC_SRR_RSP = BIT_6,
+ TRC_SRR_XRDY = BIT_7,
+ TRC_SRR_TERM = BIT_8,
+ TRC_SRR_CTIO = BIT_9,
+ TRC_FLUSH = BIT_10,
+ TRC_CTIO_ERR = BIT_11,
+ TRC_CTIO_DONE = BIT_12,
+ TRC_CTIO_ABORTED = BIT_13,
+	TRC_CTIO_STRANGE = BIT_14,
+ TRC_CMD_DONE = BIT_15,
+ TRC_CMD_CHK_STOP = BIT_16,
+ TRC_CMD_FREE = BIT_17,
+ TRC_DATA_IN = BIT_18,
+ TRC_ABORT = BIT_19,
};
-typedef enum {
- /*
- * BIT_0 - Atio Arrival / schedule to work
- * BIT_1 - qlt_do_work
- * BIT_2 - qlt_do work failed
- * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
- * BIT_4 - read respond/tcm_qla2xx_queue_data_in
- * BIT_5 - status respond / tcm_qla2xx_queue_status
- * BIT_6 - tcm request to abort/Term exchange.
- * pre_xmit_response->qlt_send_term_exchange
- * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
- * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
- * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
- * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
-
- * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
- * BIT_13 - Bad completion -
- * qlt_ctio_do_completion --> qlt_term_ctio_exchange
- * BIT_14 - Back end data received/sent.
- * BIT_15 - SRR prepare ctio
- * BIT_16 - complete free
- * BIT_17 - flush - qlt_abort_cmd_on_host_reset
- * BIT_18 - completion w/abort status
- * BIT_19 - completion w/unknown status
- * BIT_20 - tcm_qla2xxx_free_cmd
- */
- CMD_FLAG_DATA_WORK = BIT_11,
- CMD_FLAG_DATA_WORK_FREE = BIT_21,
-} cmd_flags_t;
-
struct qla_tgt_cmd {
struct se_cmd se_cmd;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
int state;
struct work_struct free_work;
struct work_struct work;
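The replaced cmd_flags_t, whose bits were documented only in a comment, becomes the named enum trace_flags above, and the two bits that carried real state (CMD_FLAG_DATA_WORK / CMD_FLAG_DATA_WORK_FREE) move into explicit data_work / data_work_free bitfields in struct qla_tgt_cmd. A minimal userspace sketch of the named-bit tracing idiom follows; the DEMO_* constants loosely mirror a few of the trace_flags values, but the program is illustrative only.

/* Each lifecycle event ORs in one named bit; a decoder prints the history. */
#include <stdio.h>

enum demo_trace {
        DEMO_NEW_CMD     = 1u << 0,
        DEMO_XFR_RDY     = 1u << 3,
        DEMO_XMIT_STATUS = 1u << 5,
        DEMO_CMD_FREE    = 1u << 17,
};

static void demo_decode(unsigned int trc)
{
        if (trc & DEMO_NEW_CMD)
                printf("new-cmd ");
        if (trc & DEMO_XFR_RDY)
                printf("xfer-rdy ");
        if (trc & DEMO_XMIT_STATUS)
                printf("xmit-status ");
        if (trc & DEMO_CMD_FREE)
                printf("freed ");
        printf("\n");
}

int main(void)
{
        unsigned int trc_flags = 0;

        trc_flags |= DEMO_NEW_CMD;      /* command arrived */
        trc_flags |= DEMO_XFR_RDY;      /* write_pending issued */
        trc_flags |= DEMO_XMIT_STATUS;  /* status sent */
        trc_flags |= DEMO_CMD_FREE;     /* released */
        demo_decode(trc_flags);
        return 0;
}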
@@ -1014,6 +868,8 @@ struct qla_tgt_cmd {
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int aborted:1;
+ unsigned int data_work:1;
+ unsigned int data_work_free:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -1038,7 +894,7 @@ struct qla_tgt_cmd {
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
- cmd_flags_t cmd_flags;
+ enum trace_flags trc_flags;
};
struct qla_tgt_sess_work_param {
@@ -1056,9 +912,9 @@ struct qla_tgt_sess_work_param {
};
struct qla_tgt_mgmt_cmd {
- uint8_t tmr_func;
+ uint16_t tmr_func;
uint8_t fc_tm_rsp;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
@@ -1090,18 +946,6 @@ struct qla_tgt_prm {
uint16_t tot_dsds;
};
-struct qla_tgt_srr_imm {
- struct list_head srr_list_entry;
- int srr_id;
- struct imm_ntfy_from_isp imm_ntfy;
-};
-
-struct qla_tgt_srr_ctio {
- struct list_head srr_list_entry;
- int srr_id;
- struct qla_tgt_cmd *cmd;
-};
-
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
@@ -1121,7 +965,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_lport_register(void *, u64, u64, u64,
int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
-void qlt_put_sess(struct qla_tgt_sess *sess);
+extern void qlt_unreg_sess(struct fc_port *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
@@ -1133,24 +977,22 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
- return ha->host->active_mode & MODE_TARGET;
+ return ha->host->active_mode == MODE_TARGET;
}
static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
{
- return ha->host->active_mode & MODE_INITIATOR;
+ return ha->host->active_mode == MODE_INITIATOR;
}
-static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha)
{
- if (ha->host->active_mode & MODE_INITIATOR)
- ha->host->active_mode &= ~MODE_INITIATOR;
- else
- ha->host->active_mode |= MODE_INITIATOR;
+ return (ha->host->active_mode == MODE_DUAL);
}
static inline uint32_t sid_to_key(const uint8_t *s_id)
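With dual (initiator plus target) operation, active_mode is now compared for equality rather than tested bit-wise, qla_reverse_ini_mode is dropped, and a qla_dual_mode_enabled() helper is added; the NPIV registration check later in this patch is relaxed to reject only pure initiator mode, presumably so that dual mode also passes. A small sketch of the exclusive-mode predicates, with invented DEMO_* constants rather than the driver's MODE_* values:

/* Dual mode is its own value, so "target enabled" means "exactly target". */
#include <stdbool.h>
#include <stdio.h>

enum demo_mode { DEMO_INITIATOR, DEMO_TARGET, DEMO_DUAL };

static bool demo_tgt_enabled(enum demo_mode m)  { return m == DEMO_TARGET; }
static bool demo_ini_enabled(enum demo_mode m)  { return m == DEMO_INITIATOR; }
static bool demo_dual_enabled(enum demo_mode m) { return m == DEMO_DUAL; }

int main(void)
{
        enum demo_mode m = DEMO_DUAL;

        /* In dual mode neither exclusive predicate fires. */
        printf("tgt=%d ini=%d dual=%d\n",
               demo_tgt_enabled(m), demo_ini_enabled(m), demo_dual_enabled(m));
        return 0;
}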
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3084983c1287..c2f8c3580880 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -282,10 +282,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
cmd->cmd_in_wq = 0;
- WARN_ON(cmd->cmd_flags & BIT_16);
+ WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
- cmd->cmd_flags |= BIT_16;
+ cmd->trc_flags |= TRC_CMD_FREE;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -299,8 +299,8 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
- BUG_ON(cmd->cmd_flags & BIT_20);
- cmd->cmd_flags |= BIT_20;
+ WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
+ cmd->trc_flags |= TRC_CMD_DONE;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
@@ -315,7 +315,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- cmd->cmd_flags |= BIT_14;
+ cmd->trc_flags |= TRC_CMD_CHK_STOP;
}
return target_put_sess_cmd(se_cmd);
@@ -339,9 +339,26 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_cmd(cmd);
}
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct fc_port *sess = container_of(kref,
+ struct fc_port, sess_kref);
+
+ qlt_unreg_sess(sess);
+}
+
+static void tcm_qla2xxx_put_sess(struct fc_port *sess)
+{
+ if (!sess)
+ return;
+
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
+}
+
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
- struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct fc_port *sess = se_sess->fabric_sess_ptr;
struct scsi_qla_host *vha;
unsigned long flags;
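tcm_qla2xxx now owns the session reference drop: tcm_qla2xxx_put_sess() must run under tgt.sess_lock, and the final kref_put() invokes tcm_qla2xxx_release_session(), which calls the newly exported qlt_unreg_sess(). The sketch below models only the put-with-release-callback idiom in plain userspace C; demo_sess, demo_put and demo_release are invented names, not the kernel kref API.

/* Last put runs the release function, standing in for qlt_unreg_sess(). */
#include <stdio.h>

struct demo_sess {
        int refcount;
};

static void demo_release(struct demo_sess *s)
{
        printf("last reference dropped, unregistering session %p\n", (void *)s);
}

static void demo_put(struct demo_sess *s, void (*release)(struct demo_sess *))
{
        if (--s->refcount == 0)
                release(s);
}

int main(void)
{
        struct demo_sess sess = { .refcount = 2 };

        demo_put(&sess, demo_release);  /* still referenced */
        demo_put(&sess, demo_release);  /* triggers the release callback */
        return 0;
}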
@@ -350,7 +367,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
- qlt_put_sess(sess);
+ tcm_qla2xxx_put_sess(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -377,7 +394,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
cmd->se_cmd.se_cmd_flags);
return 0;
}
- cmd->cmd_flags |= BIT_3;
+ cmd->trc_flags |= TRC_XFR_RDY;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -441,7 +458,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct se_session *se_sess;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
struct se_portal_group *se_tpg;
struct tcm_qla2xxx_tpg *tpg;
@@ -456,7 +473,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
sess = cmd->sess;
if (!sess) {
- pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n");
return -EINVAL;
}
@@ -493,9 +510,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
cmd->cmd_in_wq = 0;
spin_lock_irqsave(&cmd->cmd_lock, flags);
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+ cmd->data_work = 1;
if (cmd->aborted) {
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ cmd->data_work_free = 1;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
tcm_qla2xxx_free_cmd(cmd);
@@ -532,7 +549,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
- cmd->cmd_flags |= BIT_10;
+ cmd->trc_flags |= TRC_DATA_IN;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
@@ -563,13 +580,49 @@ static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
- uint8_t tmr_func, uint32_t tag)
+ uint16_t tmr_func, uint32_t tag)
{
- struct qla_tgt_sess *sess = mcmd->sess;
+ struct fc_port *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
+ int transl_tmr_func = 0;
+
+ switch (tmr_func) {
+ case QLA_TGT_ABTS:
+ pr_debug("%ld: ABTS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK;
+ break;
+ case QLA_TGT_2G_ABORT_TASK:
+ pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK;
+ break;
+ case QLA_TGT_CLEAR_ACA:
+ pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_CLEAR_ACA;
+ break;
+ case QLA_TGT_TARGET_RESET:
+ pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case QLA_TGT_LUN_RESET:
+ pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_LUN_RESET;
+ break;
+ case QLA_TGT_CLEAR_TS:
+ pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case QLA_TGT_ABORT_TS:
+ pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ default:
+ pr_debug("%ld: Unknown task mgmt fn 0x%x\n",
+ sess->vha->host_no, tmr_func);
+ return -ENOSYS;
+ }
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
- tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+ transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
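Here tmr_func widens to uint16_t and the two new codes, QLA_TGT_ABTS and QLA_TGT_2G_ABORT_TASK, both translate to TMR_ABORT_TASK, with anything unrecognized rejected with -ENOSYS before target_submit_tmr() is reached. A compact userspace sketch of this translate-then-submit idiom; the FAB_*/CORE_* constants are invented for the example and do not correspond to real code values.

/* Map a fabric task-management code to a core code, rejecting unknowns. */
#include <stdio.h>

enum fabric_tmf { FAB_ABTS = 0xFFFB, FAB_LUN_RESET = 0x10 };
enum core_tmf   { CORE_UNKNOWN = 0, CORE_ABORT_TASK = 1, CORE_LUN_RESET = 5 };

static enum core_tmf translate_tmf(unsigned int fabric_code)
{
        switch (fabric_code) {
        case FAB_ABTS:          return CORE_ABORT_TASK;
        case FAB_LUN_RESET:     return CORE_LUN_RESET;
        default:                return CORE_UNKNOWN;
        }
}

int main(void)
{
        unsigned int codes[] = { 0xFFFB, 0x10, 0x42 };

        for (unsigned int i = 0; i < 3; i++) {
                enum core_tmf t = translate_tmf(codes[i]);

                if (t == CORE_UNKNOWN)
                        printf("0x%x: rejected before submission\n", codes[i]);
                else
                        printf("0x%x -> core tmf %d\n", codes[i], t);
        }
        return 0;
}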
@@ -591,7 +644,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
return 0;
}
- cmd->cmd_flags |= BIT_4;
+ cmd->trc_flags |= TRC_XMIT_DATA;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -622,11 +675,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- if (cmd->cmd_flags & BIT_5) {
- pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ if (cmd->trc_flags & TRC_XMIT_STATUS) {
+ pr_crit("Multiple calls for status = %p.\n", cmd);
dump_stack();
}
- cmd->cmd_flags |= BIT_5;
+ cmd->trc_flags |= TRC_XMIT_STATUS;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
/*
@@ -682,10 +735,7 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
-
-#define DATA_WORK_NOT_FREE(_flags) \
- (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
- CMD_FLAG_DATA_WORK)
+#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
@@ -697,13 +747,13 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if ((cmd->state == QLA_TGT_STATE_NEW)||
- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
- DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
-
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+ DATA_WORK_NOT_FREE(cmd))) {
+ cmd->data_work_free = 1;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /* Cmd have not reached firmware.
- * Use this trigger to free it. */
+ /*
+	 * cmd has not reached fw; use this trigger to free it.
+ */
tcm_qla2xxx_free_cmd(cmd);
return;
}
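The abort path above and the data-work handler earlier in this file cooperate through the new data_work / data_work_free bits so that an aborted command is freed exactly once, by whichever side claims data_work_free first. A userspace model of that handshake, with invented demo_* names:

/* Two flags decide whether the abort path or the data-work path frees. */
#include <stdbool.h>
#include <stdio.h>

struct demo_cmd {
        bool aborted;
        bool data_work;
        bool data_work_free;
};

static void demo_free(struct demo_cmd *c, const char *who)
{
        (void)c;
        printf("%s frees the command\n", who);
}

static void demo_handle_data_work(struct demo_cmd *c)
{
        c->data_work = true;
        if (c->aborted) {
                c->data_work_free = true;       /* this path claims the free */
                demo_free(c, "data-work path");
        }
}

static void demo_aborted_task(struct demo_cmd *c)
{
        if (c->data_work && !c->data_work_free) {
                c->data_work_free = true;       /* abort path claims the free */
                demo_free(c, "abort path");
        }
}

int main(void)
{
        struct demo_cmd cmd = { .aborted = true };

        demo_handle_data_work(&cmd);    /* worker ran after the abort */
        demo_aborted_task(&cmd);        /* sees data_work_free set: no double free */
        return 0;
}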
@@ -713,11 +763,11 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
- struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+ struct tcm_qla2xxx_nacl *, struct fc_port *);
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
{
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
@@ -756,7 +806,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
-static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
@@ -1141,7 +1191,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
const uint8_t *s_id)
{
@@ -1169,12 +1219,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
se_nacl, se_nacl->initiatorname);
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- if (!nacl->qla_tgt_sess) {
- pr_err("Unable to locate struct qla_tgt_sess\n");
+ if (!nacl->fc_port) {
+ pr_err("Unable to locate struct fc_port\n");
return NULL;
}
- return nacl->qla_tgt_sess;
+ return nacl->fc_port;
}
/*
@@ -1185,7 +1235,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
- struct qla_tgt_sess *qla_tgt_sess,
+ struct fc_port *fc_port,
uint8_t *s_id)
{
u32 key;
@@ -1209,22 +1259,22 @@ static void tcm_qla2xxx_set_sess_by_s_id(
pr_debug("Wiping nonexisting fc_port entry\n");
}
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
return;
}
- if (nacl->qla_tgt_sess) {
+ if (nacl->fc_port) {
if (new_se_nacl == NULL) {
- pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ pr_debug("Clearing existing nacl->fc_port and fc_port entry\n");
btree_remove32(&lport->lport_fcport_map, key);
- nacl->qla_tgt_sess = NULL;
+ nacl->fc_port = NULL;
return;
}
- pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ pr_debug("Replacing existing nacl->fc_port and fc_port entry\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
return;
}
@@ -1234,19 +1284,19 @@ static void tcm_qla2xxx_set_sess_by_s_id(
return;
}
- pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
- pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
- nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+ pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
const uint16_t loop_id)
{
@@ -1274,12 +1324,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- if (!nacl->qla_tgt_sess) {
- pr_err("Unable to locate struct qla_tgt_sess\n");
+ if (!nacl->fc_port) {
+ pr_err("Unable to locate struct fc_port\n");
return NULL;
}
- return nacl->qla_tgt_sess;
+ return nacl->fc_port;
}
/*
@@ -1290,7 +1340,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
- struct qla_tgt_sess *qla_tgt_sess,
+ struct fc_port *fc_port,
uint16_t loop_id)
{
struct se_node_acl *saved_nacl;
@@ -1305,27 +1355,27 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
if (!saved_nacl) {
pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
return;
}
- if (nacl->qla_tgt_sess) {
+ if (nacl->fc_port) {
if (new_se_nacl == NULL) {
- pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = NULL;
- nacl->qla_tgt_sess = NULL;
+ nacl->fc_port = NULL;
return;
}
- pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
return;
}
@@ -1335,29 +1385,29 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
return;
}
- pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
- pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
- nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+ pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Should always be called with qla_hw_data->tgt.sess_lock held.
*/
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
- struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+ struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
{
struct se_session *se_sess = sess->se_sess;
unsigned char be_sid[3];
- be_sid[0] = sess->s_id.b.domain;
- be_sid[1] = sess->s_id.b.area;
- be_sid[2] = sess->s_id.b.al_pa;
+ be_sid[0] = sess->d_id.b.domain;
+ be_sid[1] = sess->d_id.b.area;
+ be_sid[2] = sess->d_id.b.al_pa;
tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
sess, be_sid);
@@ -1365,7 +1415,7 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
sess, sess->loop_id);
}
-static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_free_session(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
@@ -1377,7 +1427,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
se_sess = sess->se_sess;
if (!se_sess) {
- pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ pr_err("struct fc_port->se_sess is NULL\n");
dump_stack();
return;
}
@@ -1404,14 +1454,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl = se_sess->se_node_acl;
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
- struct qla_tgt_sess *qlat_sess = p;
+ struct fc_port *qlat_sess = p;
uint16_t loop_id = qlat_sess->loop_id;
unsigned long flags;
unsigned char be_sid[3];
- be_sid[0] = qlat_sess->s_id.b.domain;
- be_sid[1] = qlat_sess->s_id.b.area;
- be_sid[2] = qlat_sess->s_id.b.al_pa;
+ be_sid[0] = qlat_sess->d_id.b.domain;
+ be_sid[1] = qlat_sess->d_id.b.area;
+ be_sid[2] = qlat_sess->d_id.b.al_pa;
/*
* And now setup se_nacl and session pointers into HW lport internal
@@ -1434,7 +1484,7 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
static int tcm_qla2xxx_check_initiator_node_acl(
scsi_qla_host_t *vha,
unsigned char *fc_wwpn,
- struct qla_tgt_sess *qlat_sess)
+ struct fc_port *qlat_sess)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
@@ -1478,7 +1528,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
return 0;
}
-static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
uint16_t loop_id, bool conf_compl_supported)
{
struct qla_tgt *tgt = sess->tgt;
@@ -1491,11 +1541,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
u32 key;
- if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
+ if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24)
pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
sess, sess->port_name,
- sess->loop_id, loop_id, sess->s_id.b.domain,
- sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
+ sess->loop_id, loop_id, sess->d_id.b.domain,
+ sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain,
s_id.b.area, s_id.b.al_pa);
if (sess->loop_id != loop_id) {
@@ -1515,18 +1565,20 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
sess->loop_id = loop_id;
}
- if (sess->s_id.b24 != s_id.b24) {
- key = (((u32) sess->s_id.b.domain << 16) |
- ((u32) sess->s_id.b.area << 8) |
- ((u32) sess->s_id.b.al_pa));
+ if (sess->d_id.b24 != s_id.b24) {
+ key = (((u32) sess->d_id.b.domain << 16) |
+ ((u32) sess->d_id.b.area << 8) |
+ ((u32) sess->d_id.b.al_pa));
if (btree_lookup32(&lport->lport_fcport_map, key))
- WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
- "Found wrong se_nacl when updating s_id %x:%x:%x\n",
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ WARN(btree_remove32(&lport->lport_fcport_map, key) !=
+ se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa);
else
WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa);
key = (((u32) s_id.b.domain << 16) |
((u32) s_id.b.area << 8) |
@@ -1537,10 +1589,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
s_id.b.domain, s_id.b.area, s_id.b.al_pa);
btree_update32(&lport->lport_fcport_map, key, se_nacl);
} else {
- btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
+ btree_insert32(&lport->lport_fcport_map, key, se_nacl,
+ GFP_ATOMIC);
}
- sess->s_id = s_id;
+ sess->d_id = s_id;
nacl->nport_id = key;
}
@@ -1567,6 +1620,7 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
@@ -1690,7 +1744,7 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
struct fc_vport_identifiers vport_id;
- if (!qla_tgt_mode_enabled(base_vha)) {
+ if (qla_ini_mode_enabled(base_vha)) {
pr_err("qla2xxx base_vha not enabled for target mode\n");
return -EPERM;
}
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index cf8430be183b..071035dfa99a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -20,8 +20,8 @@ struct tcm_qla2xxx_nacl {
u64 nport_wwnn;
/* ASCII formatted WWPN for FC Initiator Nport */
char nport_name[TCM_QLA2XXX_NAMELEN];
- /* Pointer to qla_tgt_sess */
- struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to fc_port */
+ struct fc_port *fc_port;
/* Pointer to TCM FC nexus */
struct se_session *nport_nexus;
};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 2fb1bf1a26c5..37a05185dcbe 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -872,7 +872,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
goto out;
csk->mtu = ndev->mtu;
csk->tx_chan = cxgb4_port_chan(ndev);
- csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
+ cxgb4_port_viid(ndev));
step = cdev->lldi.ntxq /
cdev->lldi.nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
@@ -907,7 +908,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
port_id = cxgb4_port_idx(ndev);
csk->mtu = dst_mtu(dst);
csk->tx_chan = cxgb4_port_chan(ndev);
- csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
+ cxgb4_port_viid(ndev));
step = cdev->lldi.ntxq /
cdev->lldi.nports;
csk->txq_idx = (port_id * step) +
@@ -1066,6 +1068,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
struct sk_buff *skb;
const struct tcphdr *tcph;
struct cpl_t5_pass_accept_rpl *rpl5;
+ struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
unsigned int len = roundup(sizeof(*rpl5), 16);
unsigned int mtu_idx;
u64 opt0;
@@ -1111,6 +1114,9 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+ if (!is_t5(lldi->adapter_type))
+ opt2 |= RX_FC_DISABLE_F;
+
if (req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN_F;
if (req->tcpopt.sack)
@@ -1119,8 +1125,13 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
opt2 |= WND_SCALE_EN_F;
hlen = ntohl(req->hdr_len);
- tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
- IP_HDR_LEN_G(hlen);
+
+ if (is_t5(lldi->adapter_type))
+ tcph = (struct tcphdr *)((u8 *)(req + 1) +
+ ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
+ else
+ tcph = (struct tcphdr *)((u8 *)(req + 1) +
+ T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN_V(1);
@@ -1726,7 +1737,7 @@ static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
}
while (skb) {
- credit += skb->csum;
+ credit += (__force u32)skb->csum;
skb = cxgbit_skcb_tx_wr_next(skb);
}
@@ -1753,6 +1764,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
while (credits) {
struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+ const u32 csum = (__force u32)p->csum;
if (unlikely(!p)) {
pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
@@ -1761,17 +1773,17 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
break;
}
- if (unlikely(credits < p->csum)) {
+ if (unlikely(credits < csum)) {
pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
csk, csk->tid,
credits, csk->wr_cred, csk->wr_una_cred,
- p->csum);
- p->csum -= credits;
+ csum);
+ p->csum = (__force __wsum)(csum - credits);
break;
}
cxgbit_sock_dequeue_wr(csk);
- credits -= p->csum;
+ credits -= csum;
kfree_skb(p);
}
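cxgbit reuses skb->csum as a per-work-request credit counter, which is what the added __force casts annotate, and cxgbit_fw4_ack() releases queued entries while the acknowledged credit count lasts, trimming the head entry when it is only partially covered. A self-contained model of that accounting with made-up credit values:

/* Per-WR credit accounting: release whole entries, trim a partial head. */
#include <stdio.h>

#define NPENDING 3

int main(void)
{
        unsigned int pending[NPENDING] = { 4, 6, 5 };   /* credits per queued WR */
        unsigned int head = 0;
        unsigned int acked = 12;                        /* credits returned by firmware */

        while (acked && head < NPENDING) {
                if (acked < pending[head]) {
                        pending[head] -= acked;         /* partially acknowledged head */
                        acked = 0;
                        break;
                }
                acked -= pending[head];
                printf("released WR of %u credits\n", pending[head]);
                head++;                                 /* dequeue and free the skb */
        }
        printf("head WR still needs %u credits\n",
               head < NPENDING ? pending[head] : 0);
        return 0;
}

With the values above the first two entries are released and the partially acknowledged head keeps 3 outstanding credits, mirroring the pr_warn branch in the hunk.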
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
index 28c11bd1b930..dcaed3a1d23f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_lro.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -31,8 +31,9 @@ enum cxgbit_pducb_flags {
PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */
PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */
PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */
- PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */
- PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */
+ PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */
+ PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */
+ PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */
};
struct cxgbit_lro_pdu_cb {
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 96eedfc49c94..4fd775ace541 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -165,29 +165,24 @@ static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
}
static void
-cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
- struct cxgbit_lro_pdu_cb *pdu_cb)
+cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
+ u32 ddpvld)
{
- unsigned int status = ntohl(cpl->ddpvld);
- pdu_cb->flags |= PDUCBF_RX_STATUS;
- pdu_cb->ddigest = ntohl(cpl->ulp_crc);
- pdu_cb->pdulen = ntohs(cpl->len);
-
- if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
- pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
}
- if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
- pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
}
- if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
- pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
+ pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);
- if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
+ if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
(!(pdu_cb->flags & PDUCBF_RX_DATA))) {
pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
}
@@ -201,13 +196,17 @@ cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
lro_cb->pdu_idx);
struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
- cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);
+ cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));
+
+ pdu_cb->flags |= PDUCBF_RX_STATUS;
+ pdu_cb->ddigest = ntohl(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
if (pdu_cb->flags & PDUCBF_RX_HDR)
pdu_cb->complete = true;
- lro_cb->complete = true;
lro_cb->pdu_totallen += pdu_cb->pdulen;
+ lro_cb->complete = true;
lro_cb->pdu_idx++;
}
@@ -257,7 +256,7 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
cxgbit_skcb_flags(skb) = 0;
lro_cb->complete = false;
- } else {
+ } else if (op == CPL_ISCSI_DATA) {
struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
offset = sizeof(struct cpl_iscsi_data);
@@ -267,6 +266,36 @@ cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
pdu_cb->doffset = lro_cb->offset;
pdu_cb->nr_dfrags = gl->nfrags;
pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
+ lro_cb->complete = false;
+ } else {
+ struct cpl_rx_iscsi_cmp *cpl;
+
+ cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
+ offset = sizeof(struct cpl_rx_iscsi_cmp);
+ pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
+ len = be16_to_cpu(cpl->len);
+ pdu_cb->hdr = gl->va + offset;
+ pdu_cb->hlen = len;
+ pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+ pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
+
+ if (unlikely(gl->nfrags > 1))
+ cxgbit_skcb_flags(skb) = 0;
+
+ cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
+ be32_to_cpu(cpl->ddpvld));
+
+ if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
+ pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
+ pdu_cb->complete = true;
+ } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
+ pdu_cb->complete = true;
+ }
+
+ lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
+ lro_cb->complete = true;
+ lro_cb->pdu_idx++;
}
cxgbit_copy_frags(skb, gl, offset);
@@ -413,6 +442,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
switch (op) {
case CPL_ISCSI_HDR:
case CPL_ISCSI_DATA:
+ case CPL_RX_ISCSI_CMP:
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
@@ -454,12 +484,13 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
if (unlikely(op != *(u8 *)gl->va)) {
pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
gl->va, be64_to_cpu(*rsp),
- be64_to_cpu(*(u64 *)gl->va),
+ get_unaligned_be64(gl->va),
gl->tot_len);
return 0;
}
- if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
+ if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
+ (op == CPL_RX_ISCSI_CMP)) {
if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
napi))
return 0;
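Among the changes above, the opcode sanity check now reads the first 8 bytes of the free-list buffer with get_unaligned_be64() instead of casting gl->va to a u64 pointer, which avoids assuming 8-byte alignment of the buffer. The snippet below shows the byte-wise equivalent of such an unaligned big-endian read; it is an illustration, not the kernel helper.

/* Assemble a big-endian u64 byte by byte; no alignment requirement. */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_get_unaligned_be64(const void *p)
{
        const uint8_t *b = p;
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
                v = (v << 8) | b[i];
        return v;
}

int main(void)
{
        uint8_t buf[9] = { 0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

        /* &buf[1] is deliberately misaligned; the byte-wise read is still safe. */
        printf("0x%016llx\n",
               (unsigned long long)demo_get_unaligned_be64(&buf[1]));
        return 0;
}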
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 8bcb9b71f764..2714e5901d18 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -162,12 +162,14 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
u32 len, u32 credits, u32 compl)
{
struct fw_ofld_tx_data_wr *req;
+ const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
u32 submode = cxgbit_skcb_submode(skb);
u32 wr_ulp_mode = 0;
u32 hdr_size = sizeof(*req);
u32 opcode = FW_OFLD_TX_DATA_WR;
u32 immlen = 0;
- u32 force = TX_FORCE_V(!submode);
+ u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
+ T6_TX_FORCE_F;
if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
opcode = FW_ISCSI_TX_DATA_WR;
@@ -243,7 +245,7 @@ void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
}
__skb_unlink(skb, &csk->txq);
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- skb->csum = credits_needed + flowclen16;
+ skb->csum = (__force __wsum)(credits_needed + flowclen16);
csk->wr_cred -= credits_needed;
csk->wr_una_cred += credits_needed;
@@ -651,26 +653,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
u32 max_npdu, max_iso_npdu;
if (conn->login->leading_connection) {
- param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
- conn->param_list);
- if (!param) {
- pr_err("param not found key %s\n", DATASEQUENCEINORDER);
- return -1;
- }
-
- if (strcmp(param->value, YES))
- return 0;
-
- param = iscsi_find_param_from_key(DATAPDUINORDER,
- conn->param_list);
- if (!param) {
- pr_err("param not found key %s\n", DATAPDUINORDER);
- return -1;
- }
-
- if (strcmp(param->value, YES))
- return 0;
-
param = iscsi_find_param_from_key(MAXBURSTLENGTH,
conn->param_list);
if (!param) {
@@ -681,11 +663,6 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
if (kstrtou32(param->value, 0, &mbl) < 0)
return -1;
} else {
- if (!conn->sess->sess_ops->DataSequenceInOrder)
- return 0;
- if (!conn->sess->sess_ops->DataPDUInOrder)
- return 0;
-
mbl = conn->sess->sess_ops->MaxBurstLength;
}
@@ -704,6 +681,53 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
return 0;
}
+/*
+ * cxgbit_seq_pdu_inorder()
+ * @csk: pointer to cxgbit socket structure
+ *
+ * This function checks whether data sequence and data
+ * pdu are in order.
+ *
+ * Return: returns -1 on error, 0 if data sequence and
+ * data pdu are in order, 1 if data sequence or data pdu
+ * is not in order.
+ */
+static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_param *param;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATASEQUENCEINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 1;
+
+ param = iscsi_find_param_from_key(DATAPDUINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATAPDUINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 1;
+
+ } else {
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return 1;
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ return 1;
+ }
+
+ return 0;
+}
+
static int cxgbit_set_params(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
@@ -730,11 +754,24 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
}
if (!erl) {
+ int ret;
+
+ ret = cxgbit_seq_pdu_inorder(csk);
+ if (ret < 0) {
+ return -1;
+ } else if (ret > 0) {
+ if (is_t5(cdev->lldi.adapter_type))
+ goto enable_ddp;
+ else
+ goto enable_digest;
+ }
+
if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
if (cxgbit_set_iso_npdu(csk))
return -1;
}
+enable_ddp:
if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
if (cxgbit_setup_conn_pgidx(csk,
ppm->tformat.pgsz_idx_dflt))
@@ -743,6 +780,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
}
}
+enable_digest:
if (cxgbit_set_digest(csk))
return -1;
@@ -983,11 +1021,36 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
int rc, sg_nents, sg_off;
bool dcrc_err = false;
- rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
- if (rc < 0)
- return rc;
- else if (!cmd)
- return 0;
+ if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
+ u32 offset = be32_to_cpu(hdr->offset);
+ u32 ddp_data_len;
+ u32 payload_length = ntoh24(hdr->dlength);
+ bool success = false;
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
+ if (!cmd)
+ return 0;
+
+ ddp_data_len = offset - cmd->write_data_done;
+ atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
+
+ cmd->write_data_done = offset;
+ cmd->next_burst_len = ddp_data_len;
+ cmd->data_sn = be32_to_cpu(hdr->datasn);
+
+ rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
+ cmd, payload_length, &success);
+ if (rc < 0)
+ return rc;
+ else if (!success)
+ return 0;
+ } else {
+ rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
+ if (rc < 0)
+ return rc;
+ else if (!cmd)
+ return 0;
+ }
if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
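When the whole Data-Out burst was already placed by T6 DDP (PDUCBF_RX_DDP_CMP), only the final PDU header is processed: the bytes the hardware wrote are inferred as offset minus cmd->write_data_done, accounted to rx_data_octets, and folded into write_data_done, next_burst_len and data_sn before __iscsit_check_dataout_hdr() runs. A worked example of that bookkeeping with invented numbers:

/* Invented values: 8 kB accepted so far, final PDU carries offset 61440. */
#include <stdio.h>

int main(void)
{
        unsigned int write_data_done = 8192;    /* bytes accepted before this burst */
        unsigned int hdr_offset      = 61440;   /* Buffer Offset in the final PDU */

        unsigned int ddp_data_len = hdr_offset - write_data_done;

        printf("bytes placed directly by DDP: %u\n", ddp_data_len);    /* 53248 */

        write_data_done = hdr_offset;           /* cmd->write_data_done = offset */
        printf("write_data_done is now %u\n", write_data_done);
        return 0;
}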
@@ -1351,6 +1414,9 @@ static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
for (i = 0; i < ssi->nr_frags; i++)
put_page(skb_frag_page(&ssi->frags[i]));
ssi->nr_frags = 0;
+ skb->data_len = 0;
+ skb->truesize -= skb->len;
+ skb->len = 0;
}
static void
@@ -1364,39 +1430,42 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
unsigned int len = 0;
if (pdu_cb->flags & PDUCBF_RX_HDR) {
- hpdu_cb->flags = pdu_cb->flags;
+ u8 hfrag_idx = hssi->nr_frags;
+
+ hpdu_cb->flags |= pdu_cb->flags;
hpdu_cb->seq = pdu_cb->seq;
hpdu_cb->hdr = pdu_cb->hdr;
hpdu_cb->hlen = pdu_cb->hlen;
- memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
+ memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
sizeof(skb_frag_t));
- get_page(skb_frag_page(&hssi->frags[0]));
- hssi->nr_frags = 1;
- hpdu_cb->frags = 1;
- hpdu_cb->hfrag_idx = 0;
+ get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+ hssi->nr_frags++;
+ hpdu_cb->frags++;
+ hpdu_cb->hfrag_idx = hfrag_idx;
- len = hssi->frags[0].size;
- hskb->len = len;
- hskb->data_len = len;
- hskb->truesize = len;
+ len = hssi->frags[hfrag_idx].size;
+ hskb->len += len;
+ hskb->data_len += len;
+ hskb->truesize += len;
}
if (pdu_cb->flags & PDUCBF_RX_DATA) {
- u8 hfrag_idx = 1, i;
+ u8 dfrag_idx = hssi->nr_frags, i;
hpdu_cb->flags |= pdu_cb->flags;
+ hpdu_cb->dfrag_idx = dfrag_idx;
len = 0;
- for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
- memcpy(&hssi->frags[hfrag_idx],
+ for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
+ memcpy(&hssi->frags[dfrag_idx],
&ssi->frags[pdu_cb->dfrag_idx + i],
sizeof(skb_frag_t));
- get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+ get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
- len += hssi->frags[hfrag_idx].size;
+ len += hssi->frags[dfrag_idx].size;
hssi->nr_frags++;
hpdu_cb->frags++;
@@ -1405,7 +1474,6 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
hpdu_cb->dlen = pdu_cb->dlen;
hpdu_cb->doffset = hpdu_cb->hlen;
hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
- hpdu_cb->dfrag_idx = 1;
hskb->len += len;
hskb->data_len += len;
hskb->truesize += len;
@@ -1490,10 +1558,15 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
int ret = -1;
- if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
- ret = cxgbit_rx_lro_skb(csk, skb);
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
+ if (is_t5(lldi->adapter_type))
+ ret = cxgbit_rx_lro_skb(csk, skb);
+ else
+ ret = cxgbit_process_lro_skb(csk, skb);
+ }
__kfree_skb(skb);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index da2c73a255de..2285988c209b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1431,36 +1431,17 @@ static void iscsit_do_crypto_hash_buf(
}
int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
- struct iscsi_cmd **out_cmd)
+__iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
+ struct iscsi_cmd *cmd, u32 payload_length,
+ bool *success)
{
- struct iscsi_data *hdr = (struct iscsi_data *)buf;
- struct iscsi_cmd *cmd = NULL;
+ struct iscsi_data *hdr = buf;
struct se_cmd *se_cmd;
- u32 payload_length = ntoh24(hdr->dlength);
int rc;
- if (!payload_length) {
- pr_warn("DataOUT payload is ZERO, ignoring.\n");
- return 0;
- }
-
/* iSCSI write */
atomic_long_add(payload_length, &conn->sess->rx_data_octets);
- if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
- pr_err("DataSegmentLength: %u is greater than"
- " MaxXmitDataSegmentLength: %u\n", payload_length,
- conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
- buf);
- }
-
- cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
- payload_length);
- if (!cmd)
- return 0;
-
pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
@@ -1545,7 +1526,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
}
}
/*
- * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+ * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
* within-command recovery checks before receiving the payload.
*/
rc = iscsit_check_pre_dataout(cmd, buf);
@@ -1553,10 +1534,44 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
return 0;
else if (rc == DATAOUT_CANNOT_RECOVER)
return -1;
-
- *out_cmd = cmd;
+ *success = true;
return 0;
}
+EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
+
+int
+iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
+ struct iscsi_cmd **out_cmd)
+{
+ struct iscsi_data *hdr = buf;
+ struct iscsi_cmd *cmd;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int rc;
+ bool success = false;
+
+ if (!payload_length) {
+ pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
+ return 0;
+ }
+
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+ pr_err_ratelimited("DataSegmentLength: %u is greater than"
+ " MaxXmitDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxXmitDataSegmentLength);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
+ }
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
+ if (!cmd)
+ return 0;
+
+ rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
+
+ if (success)
+ *out_cmd = cmd;
+
+ return rc;
+}
EXPORT_SYMBOL(iscsit_check_dataout_hdr);
static int
@@ -1920,6 +1935,28 @@ out:
return ret;
}
+static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
+{
+ switch (iscsi_tmf) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ return TMR_LUN_RESET;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ return TMR_TARGET_WARM_RESET;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ return TMR_TARGET_COLD_RESET;
+ default:
+ return TMR_UNKNOWN;
+ }
+}
+
int
iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unsigned char *buf)
@@ -1929,7 +1966,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
bool sess_ref = false;
- u8 function;
+ u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
@@ -1975,54 +2012,27 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-
- u8 tcm_function;
- int ret;
-
transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
target_get_sess_cmd(&cmd->se_cmd, true);
sess_ref = true;
-
- switch (function) {
- case ISCSI_TM_FUNC_ABORT_TASK:
- tcm_function = TMR_ABORT_TASK;
- break;
- case ISCSI_TM_FUNC_ABORT_TASK_SET:
- tcm_function = TMR_ABORT_TASK_SET;
- break;
- case ISCSI_TM_FUNC_CLEAR_ACA:
- tcm_function = TMR_CLEAR_ACA;
- break;
- case ISCSI_TM_FUNC_CLEAR_TASK_SET:
- tcm_function = TMR_CLEAR_TASK_SET;
- break;
- case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
- tcm_function = TMR_LUN_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_WARM_RESET:
- tcm_function = TMR_TARGET_WARM_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_COLD_RESET:
- tcm_function = TMR_TARGET_COLD_RESET;
- break;
- default:
+ tcm_function = iscsit_convert_tmf(function);
+ if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
-
- ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
- tcm_function, GFP_KERNEL);
- if (ret < 0)
- return iscsit_add_reject_cmd(cmd,
+ }
+ ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
+ GFP_KERNEL);
+ if (ret < 0)
+ return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
- cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
- }
+ cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
@@ -4136,7 +4146,7 @@ int iscsit_close_connection(
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
- * for realligence.
+ * for reallegiance.
*
* During normal operation clear the out of order commands (but
* do not free the struct iscsi_ooo_cmdsn's) and release all
@@ -4144,7 +4154,7 @@ int iscsit_close_connection(
*/
if (atomic_read(&conn->connection_recovery)) {
iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
- iscsit_prepare_cmds_for_realligance(conn);
+ iscsit_prepare_cmds_for_reallegiance(conn);
} else {
iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b54e72c7ab0f..a8bcbc43b047 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
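For unsolicited data the sequence end is now simply min(data_length, FirstBurstLength) from the start of the buffer; the old expression added write_data_done on top, so once immediate data had been counted the computed end could overshoot the first-burst boundary. A worked example with invented values:

/* 4 kB immediate data already counted, 16 kB write, 8 kB FirstBurstLength. */
#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int write_data_done = 4096;
        unsigned int data_length     = 16384;
        unsigned int first_burst_len = 8192;

        unsigned int old_end = write_data_done + min_u32(data_length, first_burst_len);
        unsigned int new_end = min_u32(data_length, first_burst_len);

        printf("old seq_end_offset = %u\n", old_end);   /* 12288, past the burst */
        printf("new seq_end_offset = %u\n", new_end);   /* 8192 */
        return 0;
}

Here old_end comes out to 12288 while new_end is 8192, which matches the negotiated FirstBurstLength.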
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index faf9ae014b30..8df9c90f3db3 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -312,7 +312,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
return 0;
}
-int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *conn)
{
u32 cmd_count = 0;
struct iscsi_cmd *cmd, *cmd_tmp;
@@ -347,7 +347,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
- pr_debug("Not performing realligence on"
+ pr_debug("Not performing reallegiance on"
" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
@@ -382,7 +382,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd_count++;
pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
- " realligence.\n", cmd->iscsi_opcode,
+ " reallegiance.\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
conn->cid);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 7965f1e86506..634d01e13652 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -19,7 +19,7 @@ extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
struct iscsi_session *);
extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
-extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_reallegiance(struct iscsi_conn *);
extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index eab274d17b5c..746b97f8e4f7 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -223,7 +223,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
return 0;
pr_debug("%s iSCSI Session SID %u is still active for %s,"
- " preforming session reinstatement.\n", (sessiontype) ?
+ " performing session reinstatement.\n", (sessiontype) ?
"Discovery" : "Normal", sess->sid,
sess->sess_ops->InitiatorName);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 46388c9e08da..5269e9ef031c 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1249,16 +1249,16 @@ int iscsi_target_start_negotiation(
{
int ret;
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
- write_unlock_bh(&sk->sk_callback_lock);
- }
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
- ret = iscsi_target_do_login(conn, login);
- if (ret < 0) {
+ ret = iscsi_target_do_login(conn, login);
+ if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work);
iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 3d637055c36f..cb231c907d51 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -440,14 +440,14 @@ static int iscsit_task_reassign_complete(
break;
default:
pr_err("Illegal iSCSI Opcode 0x%02x during"
- " command realligence\n", cmd->iscsi_opcode);
+ " command reallegiance\n", cmd->iscsi_opcode);
return -1;
}
if (ret != 0)
return ret;
- pr_debug("Completed connection realligence for Opcode: 0x%02x,"
+ pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
cmd->init_task_tag, conn->cid);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index b5a1b4ccba12..cc5958882431 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -417,6 +417,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
return NULL;
}
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
struct iscsi_cmd *iscsit_find_cmd_from_ttt(
struct iscsi_conn *conn,
@@ -1377,33 +1378,6 @@ int tx_data(
return iscsit_do_tx_data(conn, &c);
}
-static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
-{
- switch (x->ss_family) {
- case AF_INET: {
- struct sockaddr_in *sinx = (struct sockaddr_in *)x;
- struct sockaddr_in *siny = (struct sockaddr_in *)y;
- if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
- return false;
- if (sinx->sin_port != siny->sin_port)
- return false;
- break;
- }
- case AF_INET6: {
- struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
- struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
- if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
- return false;
- if (sinx->sin6_port != siny->sin6_port)
- return false;
- break;
- }
- default:
- return false;
- }
- return true;
-}
-
void iscsit_collect_login_stats(
struct iscsi_conn *conn,
u8 status_class,
@@ -1420,13 +1394,6 @@ void iscsit_collect_login_stats(
ls = &tiqn->login_stats;
spin_lock(&ls->lock);
- if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
- ((get_jiffies_64() - ls->last_fail_time) < 10)) {
- /* We already have the failure info for this login */
- spin_unlock(&ls->lock);
- return;
- }
-
if (status_class == ISCSI_STATUS_CLS_SUCCESS)
ls->accepts++;
else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
@@ -1471,10 +1438,10 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
struct iscsi_portal_group *tpg;
- if (!conn || !conn->sess)
+ if (!conn)
return NULL;
- tpg = conn->sess->tpg;
+ tpg = conn->tpg;
if (!tpg)
return NULL;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 26929c44d703..c754ae33bf7b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -78,12 +78,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
&deve->read_bytes);
se_lun = rcu_dereference(deve->se_lun);
+
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+ se_lun = NULL;
+ goto out_unlock;
+ }
+
se_cmd->se_lun = rcu_dereference(deve->se_lun);
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
-
- percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
@@ -97,6 +101,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
goto ref_dev;
}
}
+out_unlock:
rcu_read_unlock();
if (!se_lun) {
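The LUN reference is now taken with percpu_ref_tryget_live(), and a failure is treated exactly like a missing LUN entry instead of unconditionally grabbing a reference that may already be on its way down during LUN shutdown. The userspace model below illustrates only the tryget-live idea; demo_lun and friends are invented names, not the percpu_ref API.

/* Hand out a reference only while the object is live. */
#include <stdbool.h>
#include <stdio.h>

struct demo_lun {
        bool dying;
        int refs;
};

static bool demo_tryget_live(struct demo_lun *lun)
{
        if (lun->dying)
                return false;
        lun->refs++;
        return true;
}

static struct demo_lun *demo_lookup(struct demo_lun *lun)
{
        if (!demo_tryget_live(lun))
                return NULL;            /* caller sees "no such LUN" */
        return lun;
}

int main(void)
{
        struct demo_lun lun = { .dying = false };

        printf("before shutdown: %s\n", demo_lookup(&lun) ? "got ref" : "no LUN");
        lun.dying = true;               /* LUN shutdown in progress */
        printf("during shutdown: %s\n", demo_lookup(&lun) ? "got ref" : "no LUN");
        return 0;
}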
@@ -163,7 +168,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, unpacked_lun);
if (deve) {
- se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
se_cmd->se_lun = rcu_dereference(deve->se_lun);
se_lun = rcu_dereference(deve->se_lun);
se_cmd->pr_res_key = deve->pr_res_key;
@@ -816,6 +820,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_ref_comp);
+ init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index df7b6e95c019..68d8aef7ab78 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -604,7 +604,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
- cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
__target_execute_cmd(cmd, false);
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 1a39033d2bff..8038255b21e8 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -158,12 +158,28 @@ static ssize_t target_stat_tgt_resets_show(struct config_item *item,
atomic_long_read(&to_stat_tgt_dev(item)->num_resets));
}
+static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete));
+}
+
+static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task));
+}
+
CONFIGFS_ATTR_RO(target_stat_tgt_, inst);
CONFIGFS_ATTR_RO(target_stat_tgt_, indx);
CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus);
CONFIGFS_ATTR_RO(target_stat_tgt_, status);
CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus);
CONFIGFS_ATTR_RO(target_stat_tgt_, resets);
+CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete);
+CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task);
static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
&target_stat_tgt_attr_inst,
@@ -172,6 +188,8 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
&target_stat_tgt_attr_status,
&target_stat_tgt_attr_non_access_lus,
&target_stat_tgt_attr_resets,
+ &target_stat_tgt_attr_aborts_complete,
+ &target_stat_tgt_attr_aborts_no_task,
NULL,
};
@@ -795,16 +813,34 @@ static ssize_t target_stat_transport_dev_name_show(struct config_item *item,
return ret;
}
+static ssize_t target_stat_transport_proto_id_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id);
+ rcu_read_unlock();
+ return ret;
+}
+
CONFIGFS_ATTR_RO(target_stat_transport_, inst);
CONFIGFS_ATTR_RO(target_stat_transport_, device);
CONFIGFS_ATTR_RO(target_stat_transport_, indx);
CONFIGFS_ATTR_RO(target_stat_transport_, dev_name);
+CONFIGFS_ATTR_RO(target_stat_transport_, proto_id);
static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
&target_stat_transport_attr_inst,
&target_stat_transport_attr_device,
&target_stat_transport_attr_indx,
&target_stat_transport_attr_dev_name,
+ &target_stat_transport_attr_proto_id,
NULL,
};
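The new aborts_complete, aborts_no_task and proto_id attributes become read-only files under configfs via CONFIGFS_ATTR_RO(), which pairs each *_show() helper with a struct configfs_attribute. Roughly (simplified from include/linux/configfs.h, so treat the exact fields as an approximation), the first of the new lines expands to something like:

static struct configfs_attribute target_stat_tgt_attr_aborts_complete = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "aborts_complete",
	.ca_mode	= S_IRUGO,
	.show		= target_stat_tgt_aborts_complete_show,
};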
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 4f229e711e1c..dce1e1b47316 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -175,10 +175,9 @@ void core_tmr_abort_task(
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- if (!__target_check_io_state(se_cmd, se_sess, 0)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- goto out;
- }
+ if (!__target_check_io_state(se_cmd, se_sess, 0))
+ continue;
+
list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -191,14 +190,15 @@ void core_tmr_abort_task(
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
tmr->response = TMR_FUNCTION_COMPLETE;
+ atomic_long_inc(&dev->aborts_complete);
return;
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-out:
printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
tmr->ref_task_tag);
tmr->response = TMR_TASK_DOES_NOT_EXIST;
+ atomic_long_inc(&dev->aborts_no_task);
}
static void core_tmr_drain_tmr_list(
@@ -217,13 +217,8 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
- /*
- * Allow the received TMR to return with FUNCTION_COMPLETE.
- */
- if (tmr_p == tmr)
- continue;
-
cmd = tmr_p->task_cmd;
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 4a8b180c478b..c0dbfa016575 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -445,7 +445,7 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
- complete(&lun->lun_ref_comp);
+ complete(&lun->lun_shutdown_comp);
}
/* Does not change se_wwn->priv. */
@@ -572,6 +572,7 @@ struct se_lun *core_tpg_alloc_lun(
lun->lun_link_magic = SE_LUN_LINK_MAGIC;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_ref_comp);
+ init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_deve_list);
INIT_LIST_HEAD(&lun->lun_dev_link);
atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
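With this hunk the LUN carries two completions: lun_shutdown_comp is now signalled from the percpu_ref release callback, while lun_ref_comp is left for the kill-confirm callback added in the target_core_transport.c hunk further down. A sketch of the setup side under those assumptions, with hypothetical my_lun names:

#include <linux/completion.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct my_lun {
	struct percpu_ref	ref;
	struct completion	ref_comp;	/* kill confirmed, ref now atomic */
	struct completion	shutdown_comp;	/* last reference dropped */
};

static void my_lun_release(struct percpu_ref *ref)
{
	struct my_lun *lun = container_of(ref, struct my_lun, ref);

	complete(&lun->shutdown_comp);
}

static int my_lun_init(struct my_lun *lun)
{
	init_completion(&lun->ref_comp);
	init_completion(&lun->shutdown_comp);
	return percpu_ref_init(&lun->ref, my_lun_release, 0, GFP_KERNEL);
}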
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 437591bc7c08..434d9d693989 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -593,9 +593,6 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
if (!dev)
return;
- if (cmd->transport_state & CMD_T_BUSY)
- return;
-
spin_lock_irqsave(&dev->execute_task_lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
@@ -604,24 +601,18 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
- bool write_pending)
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
unsigned long flags;
- if (remove_from_lists) {
- target_remove_from_state_list(cmd);
+ target_remove_from_state_list(cmd);
- /*
- * Clear struct se_cmd->se_lun before the handoff to FE.
- */
- cmd->se_lun = NULL;
- }
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (write_pending)
- cmd->t_state = TRANSPORT_WRITE_PENDING;
-
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -635,31 +626,18 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
complete_all(&cmd->t_transport_stop_comp);
return 1;
}
-
cmd->transport_state &= ~CMD_T_ACTIVE;
- if (remove_from_lists) {
- /*
- * Some fabric modules like tcm_loop can release
- * their internally allocated I/O reference now and
- * struct se_cmd now.
- *
- * Fabric modules are expected to return '1' here if the
- * se_cmd being passed is released at this point,
- * or zero if not being released.
- */
- if (cmd->se_tfo->check_stop_free != NULL) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return cmd->se_tfo->check_stop_free(cmd);
- }
- }
-
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
-}
-static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
-{
- return transport_cmd_check_stop(cmd, true, false);
+ /*
+ * Some fabric modules like tcm_loop can release their internally
+ * allocated I/O reference and struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the se_cmd being
+ * passed is released at this point, or zero if not being released.
+ */
+ return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
+ : 0;
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
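The tail of transport_cmd_check_stop_to_fabric() now simply forwards to the fabric's optional ->check_stop_free() callback, whose contract is to return 1 only if the se_cmd was released. A hypothetical fabric-side callback honouring that contract (assuming the usual declarations from target/target_core_fabric.h, not any specific driver) can be as small as:

/* Hypothetical fabric callback: drop this path's reference and report
 * whether that released the command; target_put_sess_cmd() returns 1 only
 * when the kref hit zero. */
static int my_fabric_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}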
@@ -733,7 +711,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~CMD_T_BUSY;
if (dev && dev->transport->transport_complete) {
dev->transport->transport_complete(cmd,
@@ -1246,7 +1223,6 @@ void transport_init_se_cmd(
init_completion(&cmd->cmd_wait_comp);
spin_lock_init(&cmd->t_state_lock);
kref_init(&cmd->cmd_kref);
- cmd->transport_state = CMD_T_DEV_ACTIVE;
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
@@ -1671,6 +1647,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0, post_ret = 0;
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
@@ -1801,7 +1780,7 @@ void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
return;
err:
spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, ret);
@@ -1829,7 +1808,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
sectors, 0, cmd->t_prot_sg, 0);
if (unlikely(cmd->pi_err)) {
spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, cmd->pi_err);
return -1;
@@ -1918,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
- cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
if (target_write_prot_action(cmd))
@@ -1926,7 +1905,7 @@ void target_execute_cmd(struct se_cmd *cmd)
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+ cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
return;
}
@@ -1979,8 +1958,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
- dev->dev_cur_ordered_id);
} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
@@ -2387,6 +2364,7 @@ EXPORT_SYMBOL(target_alloc_sgl);
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret = 0;
bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
@@ -2452,7 +2430,24 @@ transport_generic_new_cmd(struct se_cmd *cmd)
target_execute_cmd(cmd);
return 0;
}
- transport_cmd_check_stop(cmd, false, true);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ */
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ complete_all(&cmd->t_transport_stop_comp);
+ return 0;
+ }
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
ret = cmd->se_tfo->write_pending(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
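The block inlined above is one half of the CMD_T_STOP handshake: the executing context notices the flag, skips the fabric callout and signals t_transport_stop_comp with complete_all(). The other half, roughly what transport_wait_for_tasks() does, sets the flag and sleeps on the completion. A self-contained sketch of that waiting side, with hypothetical names:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_T_STOP	(1 << 2)

struct my_cmd_stop {
	spinlock_t		state_lock;
	u32			state;
	struct completion	stop_comp;
};

static void my_stop_and_wait(struct my_cmd_stop *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->state_lock, flags);
	cmd->state |= MY_T_STOP;
	spin_unlock_irqrestore(&cmd->state_lock, flags);

	wait_for_completion(&cmd->stop_comp);	/* signalled by complete_all() */
}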
@@ -2595,39 +2590,38 @@ static void target_release_cmd_kref(struct kref *kref)
unsigned long flags;
bool fabric_stop;
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess) {
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
- (se_cmd->transport_state & CMD_T_ABORTED);
- spin_unlock(&se_cmd->t_state_lock);
+ spin_lock(&se_cmd->t_state_lock);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+ (se_cmd->transport_state & CMD_T_ABORTED);
+ spin_unlock(&se_cmd->t_state_lock);
- if (se_cmd->cmd_wait_set || fabric_stop) {
+ if (se_cmd->cmd_wait_set || fabric_stop) {
+ list_del_init(&se_cmd->se_cmd_list);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
+ complete(&se_cmd->cmd_wait_comp);
+ return;
+ }
list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- complete(&se_cmd->cmd_wait_comp);
- return;
}
- list_del_init(&se_cmd->se_cmd_list);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
}
-/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
- * @se_cmd: command descriptor to drop
+/**
+ * target_put_sess_cmd - decrease the command reference count
+ * @se_cmd: command to drop a reference from
+ *
+ * Returns 1 if and only if this target_put_sess_cmd() call caused the
+ * refcount to drop to zero. Returns zero otherwise.
*/
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
- struct se_session *se_sess = se_cmd->se_sess;
-
- if (!se_sess) {
- target_free_cmd_mem(se_cmd);
- se_cmd->se_tfo->release_cmd(se_cmd);
- return 1;
- }
return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
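target_put_sess_cmd() is now a thin kref_put() wrapper, and the return contract documented above follows directly from kref semantics: kref_put() returns 1 only when this put dropped the last reference and the release callback ran. A minimal sketch of that pattern with hypothetical my_cmd2 names:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_cmd2 {
	struct kref kref;
};

static void my_cmd2_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_cmd2, kref));
}

static int my_cmd2_put(struct my_cmd2 *cmd)
{
	return kref_put(&cmd->kref, my_cmd2_release);	/* 1 == released */
}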
@@ -2706,10 +2700,39 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
+static void target_lun_confirm(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_ref_comp);
+}
+
void transport_clear_lun_ref(struct se_lun *lun)
{
- percpu_ref_kill(&lun->lun_ref);
+ /*
+ * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop
+ * the initial reference and schedule confirm kill to be
+ * executed after one full RCU grace period has completed.
+ */
+ percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm);
+ /*
+ * The first completion waits for percpu_ref_switch_to_atomic_rcu()
+ * to call target_lun_confirm after lun->lun_ref has been marked
+ * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t
+ * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref
+ * fails for all new incoming I/O.
+ */
wait_for_completion(&lun->lun_ref_comp);
+ /*
+ * The second completion waits for percpu_ref_put_many() to
+ * invoke ->release() after lun->lun_ref has switched to
+ * atomic_t mode, and lun->lun_ref.count has reached zero.
+ *
+ * At this point all target-core lun->lun_ref references have
+ * been dropped via transport_lun_remove_cmd(), and it's safe
+ * to proceed with the remaining LUN shutdown.
+ */
+ wait_for_completion(&lun->lun_shutdown_comp);
}
static bool
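The two waits above only return once every in-flight command has dropped its lun_ref, so they pair with a tryget/put on the per-I/O path: percpu_ref_tryget_live() at lookup time (see the target_core_device.c hunk earlier) and percpu_ref_put() on completion, whose final invocation after percpu_ref_kill_and_confirm() fires ->release(). A hypothetical sketch of that pairing:

#include <linux/errno.h>
#include <linux/percpu-refcount.h>

static int my_submit(struct percpu_ref *lun_ref)
{
	if (!percpu_ref_tryget_live(lun_ref))
		return -ENODEV;		/* LUN already being shut down */
	/* ... queue the command ... */
	return 0;
}

static void my_complete(struct percpu_ref *lun_ref)
{
	percpu_ref_put(lun_ref);	/* last put after kill invokes ->release() */
}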
@@ -2765,11 +2788,8 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
}
/**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd: command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
+ * @cmd: command to wait on
*/
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 9af7842b8178..ec372860106f 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -83,14 +83,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
- struct fc_lport *lport;
struct ft_sess *sess;
if (!cmd)
return;
sess = cmd->sess;
fp = cmd->req_frame;
- lport = fr_dev(fp);
if (fr_seq(fp))
fc_seq_release(fr_seq(fp));
fc_frame_free(fp);