 drivers/target/iscsi/iscsi_target.c       |  11
 drivers/target/loopback/tcm_loop.c        |  23
 drivers/target/target_core_alua.c         |  11
 drivers/target/target_core_alua.h         |   4
 drivers/target/target_core_cdb.c          | 216
 drivers/target/target_core_cdb.h          |  14
 drivers/target/target_core_device.c       |  14
 drivers/target/target_core_pr.c           | 349
 drivers/target/target_core_pr.h           |   7
 drivers/target/target_core_pscsi.c        |   2
 drivers/target/target_core_tmr.c          |  23
 drivers/target/target_core_transport.c    | 392
 drivers/target/tcm_fc/tcm_fc.h            |   2
 drivers/target/tcm_fc/tfc_cmd.c           |   3
 include/target/target_core_base.h         |  13
 include/target/target_core_device.h       |   2
 include/target/target_core_fabric_ops.h   |  11
 include/target/target_core_transport.h    |   7
 18 files changed, 629 insertions(+), 475 deletions(-)
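
Before the per-file hunks, a short illustration of the pattern this change converges on may help: the old transport_emulate_cdb()/transport_emulate_control_cdb() opcode switch at execution time is replaced by a per-command execute_task function pointer that transport_generic_cmd_sequencer() fills in once, and that __transport_execute_tasks() later invokes, with each emulation handler completing its own task (task->task_scsi_status = GOOD; transport_complete_task(task, 1)). The following is a minimal, self-contained C sketch of that dispatch shape only; the struct, field, and function names are simplified stand-ins, not the real se_cmd/se_task kernel API.

/*
 * Standalone sketch (not kernel code): the sequencer picks an emulation
 * handler once, stores it on the command, and the execution path simply
 * calls it instead of switching on the opcode again.
 */
#include <stdio.h>
#include <stdlib.h>

enum { GOOD = 0, UNKNOWN_OPCODE = -1 };

struct task;

struct cmd {
	unsigned char cdb0;                 /* first CDB byte (opcode) */
	int (*execute_task)(struct task *); /* set once by the sequencer */
};

struct task {
	struct cmd *cmd;
	int scsi_status;
};

/* Each emulation handler completes the task itself on success. */
static int emulate_inquiry(struct task *t)
{
	printf("emulating INQUIRY\n");
	t->scsi_status = GOOD;
	return 0;
}

static int emulate_noop(struct task *t)
{
	t->scsi_status = GOOD;
	return 0;
}

/* Sequencer: decide the handler up front, once per command. */
static int sequence_cmd(struct cmd *c)
{
	switch (c->cdb0) {
	case 0x12: c->execute_task = emulate_inquiry; break; /* INQUIRY */
	case 0x00: c->execute_task = emulate_noop; break;    /* TEST UNIT READY */
	default:   return UNKNOWN_OPCODE;  /* reject unhandled opcodes early */
	}
	return 0;
}

/* Execution path: no opcode switch, just the stored handler. */
static int execute_task(struct task *t)
{
	return t->cmd->execute_task(t);
}

int main(void)
{
	struct cmd c = { .cdb0 = 0x12 };
	struct task t = { .cmd = &c };

	if (sequence_cmd(&c) < 0 || execute_task(&t) < 0)
		return EXIT_FAILURE;
	printf("task completed, status %d\n", t.scsi_status);
	return 0;
}

In the patch itself this is what lets unsupported opcodes be rejected at sequencing time and lets each handler complete its own task, which is why the SCF_EMULATE_CDB_ASYNC flag and the completion logic at the bottom of the old switch disappear below.
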
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 4d01768fcd90..1bf057ed9931 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1079,7 +1079,9 @@ attach_cmd: */ if (!cmd->immediate_data) { cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); - if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) + if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) + return 0; + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) return iscsit_add_reject_from_cmd( ISCSI_REASON_PROTOCOL_ERROR, 1, 0, buf, cmd); @@ -1819,17 +1821,16 @@ attach: int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) out_of_order_cmdsn = 1; - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { + else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) return 0; - } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */ + else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) return iscsit_add_reject_from_cmd( ISCSI_REASON_PROTOCOL_ERROR, 1, 0, buf, cmd); - } } iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); - if (out_of_order_cmdsn) + if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) return 0; /* * Found the referenced task, send to transport for processing. diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index b15d8cbf630b..3df1c9b8ae6b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) sgl_bidi = sdb->table.sgl; sgl_bidi_count = sdb->table.nents; } + /* + * Because some userspace code via scsi-generic do not memset their + * associated read buffers, go ahead and do that here for type + * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently + * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB + * by target core in transport_generic_allocate_tasks() -> + * transport_generic_cmd_sequencer(). + */ + if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB && + se_cmd->data_direction == DMA_FROM_DEVICE) { + struct scatterlist *sg = scsi_sglist(sc); + unsigned char *buf = kmap(sg_page(sg)) + sg->offset; + + if (buf != NULL) { + memset(buf, 0, sg->length); + kunmap(sg_page(sg)); + } + } /* Tell the core about our preallocated memory */ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), @@ -187,7 +205,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) /* * Called from struct target_core_fabric_ops->check_stop_free() */ -static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) +static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) { /* * Do not release struct se_cmd's containing a valid TMR @@ -195,12 +213,13 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) * with transport_generic_free_cmd(). 
*/ if (se_cmd->se_tmr_req) - return; + return 0; /* * Release the struct se_cmd, which will make a callback to release * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() */ transport_generic_free_cmd(se_cmd, 0); + return 1; } static void tcm_loop_release_cmd(struct se_cmd *se_cmd) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 8f4447749c71..2739b93983a2 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -58,8 +58,9 @@ struct t10_alua_lu_gp *default_lu_gp; * * See spc4r17 section 6.27 */ -int core_emulate_report_target_port_groups(struct se_cmd *cmd) +int target_emulate_report_target_port_groups(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; @@ -164,6 +165,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) transport_kunmap_first_data_page(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } @@ -172,8 +175,9 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) * * See spc4r17 section 6.35 */ -int core_emulate_set_target_port_groups(struct se_cmd *cmd) +int target_emulate_set_target_port_groups(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_port *port, *l_port = cmd->se_lun->lun_sep; @@ -341,7 +345,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) out: transport_kunmap_first_data_page(cmd); - + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index c86f97a081ed..c5b4ecd3e745 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -66,8 +66,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_cache; extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -extern int core_emulate_report_target_port_groups(struct se_cmd *); -extern int core_emulate_set_target_port_groups(struct se_cmd *); +extern int target_emulate_report_target_port_groups(struct se_task *); +extern int target_emulate_set_target_port_groups(struct se_task *); extern int core_alua_check_nonop_delay(struct se_cmd *); extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *, struct se_device *, struct se_port *, diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 38535eb13929..683ba02b8247 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -32,6 +32,7 @@ #include <target/target_core_transport.h> #include <target/target_core_fabric_ops.h> #include "target_core_ua.h" +#include "target_core_cdb.h" static void target_fill_alua_data(struct se_port *port, unsigned char *buf) @@ -679,16 +680,18 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) return 0; } -static int -target_emulate_inquiry(struct se_cmd *cmd) +int target_emulate_inquiry(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned char *cdb = cmd->t_task_cdb; int p, ret; - if (!(cdb[1] & 0x1)) - return target_emulate_inquiry_std(cmd); + if (!(cdb[1] & 0x1)) { + ret = target_emulate_inquiry_std(cmd); + goto out; + } /* * Make sure we at least have 4 bytes of INQUIRY response @@ -707,22 +710,30 @@ 
target_emulate_inquiry(struct se_cmd *cmd) buf[0] = dev->transport->get_device_type(dev); - for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) + for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) { if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; ret = evpd_handlers[p].emulate(cmd, buf); - transport_kunmap_first_data_page(cmd); - return ret; + goto out_unmap; } + } - transport_kunmap_first_data_page(cmd); pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); - return -EINVAL; + ret = -EINVAL; + +out_unmap: + transport_kunmap_first_data_page(cmd); +out: + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return ret; } -static int -target_emulate_readcapacity(struct se_cmd *cmd) +int target_emulate_readcapacity(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); @@ -751,12 +762,14 @@ target_emulate_readcapacity(struct se_cmd *cmd) transport_kunmap_first_data_page(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } -static int -target_emulate_readcapacity_16(struct se_cmd *cmd) +int target_emulate_readcapacity_16(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -784,6 +797,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) transport_kunmap_first_data_page(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } @@ -922,14 +937,15 @@ target_modesense_dpofua(unsigned char *buf, int type) } } -static int -target_emulate_modesense(struct se_cmd *cmd, int ten) +int target_emulate_modesense(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; unsigned char *rbuf; int type = dev->transport->get_device_type(dev); - int offset = (ten) ? 8 : 4; + int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10); + int offset = ten ? 8 : 4; int length = 0; unsigned char buf[SE_MODE_PAGE_BUF]; @@ -995,12 +1011,14 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) memcpy(rbuf, buf, offset); transport_kunmap_first_data_page(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } -static int -target_emulate_request_sense(struct se_cmd *cmd) +int target_emulate_request_sense(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; unsigned char *cdb = cmd->t_task_cdb; unsigned char *buf; u8 ua_asc = 0, ua_ascq = 0; @@ -1059,7 +1077,8 @@ target_emulate_request_sense(struct se_cmd *cmd) end: transport_kunmap_first_data_page(cmd); - + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } @@ -1067,8 +1086,7 @@ end: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. 
* Note this is not used for TCM/pSCSI passthrough */ -static int -target_emulate_unmap(struct se_task *task) +int target_emulate_unmap(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; @@ -1079,6 +1097,12 @@ target_emulate_unmap(struct se_task *task) int ret = 0, offset; unsigned short dl, bd_dl; + if (!dev->transport->do_discard) { + pr_err("UNMAP emulation not supported for: %s\n", + dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + /* First UNMAP block descriptor starts at 8 byte offset */ offset = 8; size -= 8; @@ -1110,7 +1134,10 @@ target_emulate_unmap(struct se_task *task) err: transport_kunmap_first_data_page(cmd); - + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } return ret; } @@ -1118,14 +1145,28 @@ err: * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support. * Note this is not used for TCM/pSCSI passthrough */ -static int -target_emulate_write_same(struct se_task *task, u32 num_blocks) +int target_emulate_write_same(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; sector_t range; sector_t lba = cmd->t_task_lba; + u32 num_blocks; int ret; + + if (!dev->transport->do_discard) { + pr_err("WRITE_SAME emulation not supported" + " for: %s\n", dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + + if (cmd->t_task_cdb[0] == WRITE_SAME) + num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]); + else if (cmd->t_task_cdb[0] == WRITE_SAME_16) + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); + else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */ + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); + /* * Use the explicit range when non zero is supplied, otherwise calculate * the remaining range based on ->get_blocks() - starting LBA. 
@@ -1144,127 +1185,30 @@ target_emulate_write_same(struct se_task *task, u32 num_blocks) return ret; } + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); return 0; } -int -transport_emulate_control_cdb(struct se_task *task) +int target_emulate_synchronize_cache(struct se_task *task) { - struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_dev; - unsigned short service_action; - int ret = 0; + struct se_device *dev = task->task_se_cmd->se_dev; - switch (cmd->t_task_cdb[0]) { - case INQUIRY: - ret = target_emulate_inquiry(cmd); - break; - case READ_CAPACITY: - ret = target_emulate_readcapacity(cmd); - break; - case MODE_SENSE: - ret = target_emulate_modesense(cmd, 0); - break; - case MODE_SENSE_10: - ret = target_emulate_modesense(cmd, 1); - break; - case SERVICE_ACTION_IN: - switch (cmd->t_task_cdb[1] & 0x1f) { - case SAI_READ_CAPACITY_16: - ret = target_emulate_readcapacity_16(cmd); - break; - default: - pr_err("Unsupported SA: 0x%02x\n", - cmd->t_task_cdb[1] & 0x1f); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - break; - case REQUEST_SENSE: - ret = target_emulate_request_sense(cmd); - break; - case UNMAP: - if (!dev->transport->do_discard) { - pr_err("UNMAP emulation not supported for: %s\n", - dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - ret = target_emulate_unmap(task); - break; - case WRITE_SAME: - if (!dev->transport->do_discard) { - pr_err("WRITE_SAME emulation not supported" - " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - ret = target_emulate_write_same(task, - get_unaligned_be16(&cmd->t_task_cdb[7])); - break; - case WRITE_SAME_16: - if (!dev->transport->do_discard) { - pr_err("WRITE_SAME_16 emulation not supported" - " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - ret = target_emulate_write_same(task, - get_unaligned_be32(&cmd->t_task_cdb[10])); - break; - case VARIABLE_LENGTH_CMD: - service_action = - get_unaligned_be16(&cmd->t_task_cdb[8]); - switch (service_action) { - case WRITE_SAME_32: - if (!dev->transport->do_discard) { - pr_err("WRITE_SAME_32 SA emulation not" - " supported for: %s\n", - dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - ret = target_emulate_write_same(task, - get_unaligned_be32(&cmd->t_task_cdb[28])); - break; - default: - pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" - " 0x%02x\n", service_action); - break; - } - break; - case SYNCHRONIZE_CACHE: - case 0x91: /* SYNCHRONIZE_CACHE_16: */ - if (!dev->transport->do_sync_cache) { - pr_err("SYNCHRONIZE_CACHE emulation not supported" - " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - } - dev->transport->do_sync_cache(task); - break; - case ALLOW_MEDIUM_REMOVAL: - case ERASE: - case REZERO_UNIT: - case SEEK_10: - case SPACE: - case START_STOP: - case TEST_UNIT_READY: - case VERIFY: - case WRITE_FILEMARKS: - break; - default: - pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n", - cmd->t_task_cdb[0], dev->transport->name); + if (!dev->transport->do_sync_cache) { + pr_err("SYNCHRONIZE_CACHE emulation not supported" + " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } - if (ret < 0) - return ret; - /* - * Handle the successful completion here unless a caller - * has explictly requested an asychronous completion. 
- */ - if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } + dev->transport->do_sync_cache(task); + return 0; +} - return PYX_TRANSPORT_SENT_TO_TRANSPORT; +int target_emulate_noop(struct se_task *task) +{ + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + return 0; } /* diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h new file mode 100644 index 000000000000..ad6b1e393001 --- /dev/null +++ b/drivers/target/target_core_cdb.h @@ -0,0 +1,14 @@ +#ifndef TARGET_CORE_CDB_H +#define TARGET_CORE_CDB_H + +int target_emulate_inquiry(struct se_task *task); +int target_emulate_readcapacity(struct se_task *task); +int target_emulate_readcapacity_16(struct se_task *task); +int target_emulate_modesense(struct se_task *task); +int target_emulate_request_sense(struct se_task *task); +int target_emulate_unmap(struct se_task *task); +int target_emulate_write_same(struct se_task *task); +int target_emulate_synchronize_cache(struct se_task *task); +int target_emulate_noop(struct se_task *task); + +#endif /* TARGET_CORE_CDB_H */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index f870c3bcfd82..28d2c808c56b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -651,23 +651,15 @@ void core_dev_unexport( lun->lun_se_dev = NULL; } -int transport_core_report_lun_response(struct se_cmd *se_cmd) +int target_report_luns(struct se_task *se_task) { + struct se_cmd *se_cmd = se_task->task_se_cmd; struct se_dev_entry *deve; struct se_lun *se_lun; struct se_session *se_sess = se_cmd->se_sess; - struct se_task *se_task; unsigned char *buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) - break; - - if (!se_task) { - pr_err("Unable to locate struct se_task for struct se_cmd\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - buf = transport_kmap_first_data_page(se_cmd); /* @@ -713,6 +705,8 @@ done: buf[2] = ((lun_count >> 8) & 0xff); buf[3] = (lun_count & 0xff); + se_task->task_scsi_status = GOOD; + transport_complete_task(se_task, 1); return PYX_TRANSPORT_SENT_TO_TRANSPORT; } diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 0c4f783f924c..5a4ebfc3a54f 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -116,114 +116,21 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) return ret; } -static int core_scsi2_reservation_release(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_session *sess = cmd->se_sess; - struct se_portal_group *tpg = sess->se_tpg; - - if (!sess || !tpg) - return 0; - - spin_lock(&dev->dev_reservation_lock); - if (!dev->dev_reserved_node_acl || !sess) { - spin_unlock(&dev->dev_reservation_lock); - return 0; - } - - if (dev->dev_reserved_node_acl != sess->se_node_acl) { - spin_unlock(&dev->dev_reservation_lock); - return 0; - } - dev->dev_reserved_node_acl = NULL; - dev->dev_flags &= ~DF_SPC2_RESERVATIONS; - if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { - dev->dev_res_bin_isid = 0; - dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; - } - pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" - " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), - cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, - sess->se_node_acl->initiatorname); - spin_unlock(&dev->dev_reservation_lock); - - return 0; -} - 
-static int core_scsi2_reservation_reserve(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct se_session *sess = cmd->se_sess; - struct se_portal_group *tpg = sess->se_tpg; - - if ((cmd->t_task_cdb[1] & 0x01) && - (cmd->t_task_cdb[1] & 0x02)) { - pr_err("LongIO and Obselete Bits set, returning" - " ILLEGAL_REQUEST\n"); - return PYX_TRANSPORT_ILLEGAL_REQUEST; - } - /* - * This is currently the case for target_core_mod passthrough struct se_cmd - * ops - */ - if (!sess || !tpg) - return 0; - - spin_lock(&dev->dev_reservation_lock); - if (dev->dev_reserved_node_acl && - (dev->dev_reserved_node_acl != sess->se_node_acl)) { - pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", - tpg->se_tpg_tfo->get_fabric_name()); - pr_err("Original reserver LUN: %u %s\n", - cmd->se_lun->unpacked_lun, - dev->dev_reserved_node_acl->initiatorname); - pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" - " from %s \n", cmd->se_lun->unpacked_lun, - cmd->se_deve->mapped_lun, - sess->se_node_acl->initiatorname); - spin_unlock(&dev->dev_reservation_lock); - return PYX_TRANSPORT_RESERVATION_CONFLICT; - } - - dev->dev_reserved_node_acl = sess->se_node_acl; - dev->dev_flags |= DF_SPC2_RESERVATIONS; - if (sess->sess_bin_isid != 0) { - dev->dev_res_bin_isid = sess->sess_bin_isid; - dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; - } - pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" - " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), - cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, - sess->se_node_acl->initiatorname); - spin_unlock(&dev->dev_reservation_lock); - - return 0; -} - static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *, struct se_node_acl *, struct se_session *); static void core_scsi3_put_pr_reg(struct t10_pr_registration *); -/* - * Setup in target_core_transport.c:transport_generic_cmd_sequencer() - * and called via struct se_cmd->transport_emulate_cdb() in TCM processing - * thread context. 
- */ -int core_scsi2_emulate_crh(struct se_cmd *cmd) +static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret) { struct se_session *se_sess = cmd->se_sess; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; struct t10_reservation *pr_tmpl = &su_dev->t10_pr; - unsigned char *cdb = &cmd->t_task_cdb[0]; int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; - if (!se_sess) - return 0; - if (!crh) - goto after_crh; + return false; pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); @@ -251,14 +158,16 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) */ if (pr_reg->pr_res_holder) { core_scsi3_put_pr_reg(pr_reg); - return 0; + *ret = 0; + return false; } if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) || (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) || (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { core_scsi3_put_pr_reg(pr_reg); - return 0; + *ret = 0; + return true; } core_scsi3_put_pr_reg(pr_reg); conflict = 1; @@ -282,18 +191,118 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) pr_err("Received legacy SPC-2 RESERVE/RELEASE" " while active SPC-3 registrations exist," " returning RESERVATION_CONFLICT\n"); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + *ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + return true; } -after_crh: - if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10)) - return core_scsi2_reservation_reserve(cmd); - else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10)) - return core_scsi2_reservation_release(cmd); - else - return PYX_TRANSPORT_INVALID_CDB_FIELD; + return false; +} + +int target_scsi2_reservation_release(struct se_task *task) +{ + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg = sess->se_tpg; + int ret = 0; + + if (!sess || !tpg) + goto out; + if (target_check_scsi2_reservation_conflict(cmd, &ret)) + goto out; + + ret = 0; + spin_lock(&dev->dev_reservation_lock); + if (!dev->dev_reserved_node_acl || !sess) + goto out_unlock; + + if (dev->dev_reserved_node_acl != sess->se_node_acl) + goto out_unlock; + + dev->dev_reserved_node_acl = NULL; + dev->dev_flags &= ~DF_SPC2_RESERVATIONS; + if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) { + dev->dev_res_bin_isid = 0; + dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; + } + pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" + " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + +out_unlock: + spin_unlock(&dev->dev_reservation_lock); +out: + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return ret; +} + +int target_scsi2_reservation_reserve(struct se_task *task) +{ + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_dev; + struct se_session *sess = cmd->se_sess; + struct se_portal_group *tpg = sess->se_tpg; + int ret = 0; + + if ((cmd->t_task_cdb[1] & 0x01) && + (cmd->t_task_cdb[1] & 0x02)) { + pr_err("LongIO and Obselete Bits set, returning" + " ILLEGAL_REQUEST\n"); + ret = PYX_TRANSPORT_ILLEGAL_REQUEST; + goto out; + } + /* + * This is currently the case for target_core_mod passthrough struct se_cmd + * ops + */ + if (!sess || !tpg) + goto out; + if (target_check_scsi2_reservation_conflict(cmd, &ret)) + goto out; + + ret = 0; + 
spin_lock(&dev->dev_reservation_lock); + if (dev->dev_reserved_node_acl && + (dev->dev_reserved_node_acl != sess->se_node_acl)) { + pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", + tpg->se_tpg_tfo->get_fabric_name()); + pr_err("Original reserver LUN: %u %s\n", + cmd->se_lun->unpacked_lun, + dev->dev_reserved_node_acl->initiatorname); + pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" + " from %s \n", cmd->se_lun->unpacked_lun, + cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + goto out_unlock; + } + + dev->dev_reserved_node_acl = sess->se_node_acl; + dev->dev_flags |= DF_SPC2_RESERVATIONS; + if (sess->sess_bin_isid != 0) { + dev->dev_res_bin_isid = sess->sess_bin_isid; + dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; + } + pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" + " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, + sess->se_node_acl->initiatorname); + +out_unlock: + spin_unlock(&dev->dev_reservation_lock); +out: + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return ret; } + /* * Begin SPC-3/SPC-4 Persistent Reservations emulation support * @@ -418,12 +427,12 @@ static int core_scsi3_pr_seq_non_holder( break; case RELEASE: case RELEASE_10: - /* Handled by CRH=1 in core_scsi2_emulate_crh() */ + /* Handled by CRH=1 in target_scsi2_reservation_release() */ ret = 0; break; case RESERVE: case RESERVE_10: - /* Handled by CRH=1 in core_scsi2_emulate_crh() */ + /* Handled by CRH=1 in target_scsi2_reservation_reserve() */ ret = 0; break; case TEST_UNIT_READY: @@ -3739,12 +3748,33 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) /* * See spc4r17 section 6.14 Table 170 */ -static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) +int target_scsi3_emulate_pr_out(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; + unsigned char *cdb = &cmd->t_task_cdb[0]; unsigned char *buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; + int ret; + + /* + * Following spc2r20 5.5.1 Reservations overview: + * + * If a logical unit has been reserved by any RESERVE command and is + * still reserved by any initiator, all PERSISTENT RESERVE IN and all + * PERSISTENT RESERVE OUT commands shall conflict regardless of + * initiator or service action and shall terminate with a RESERVATION + * CONFLICT status. + */ + if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { + pr_err("Received PERSISTENT_RESERVE CDB while legacy" + " SPC-2 reservation is held, returning" + " RESERVATION_CONFLICT\n"); + ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + goto out; + } + /* * FIXME: A NULL struct se_session pointer means an this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. 
@@ -3755,7 +3785,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) if (cmd->data_length < 24) { pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; } /* * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB) @@ -3788,8 +3819,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) /* * SPEC_I_PT=1 is only valid for Service action: REGISTER */ - if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } + /* * From spc4r17 section 6.14: * @@ -3803,7 +3837,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) (cmd->data_length != 24)) { pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; } /* * (core_scsi3_emulate_pro_* function parameters @@ -3812,35 +3847,47 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) */ switch (sa) { case PRO_REGISTER: - return core_scsi3_emulate_pro_register(cmd, + ret = core_scsi3_emulate_pro_register(cmd, res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0); + break; case PRO_RESERVE: - return core_scsi3_emulate_pro_reserve(cmd, - type, scope, res_key); + ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key); + break; case PRO_RELEASE: - return core_scsi3_emulate_pro_release(cmd, - type, scope, res_key); + ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key); + break; case PRO_CLEAR: - return core_scsi3_emulate_pro_clear(cmd, res_key); + ret = core_scsi3_emulate_pro_clear(cmd, res_key); + break; case PRO_PREEMPT: - return core_scsi3_emulate_pro_preempt(cmd, type, scope, + ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, res_key, sa_res_key, 0); + break; case PRO_PREEMPT_AND_ABORT: - return core_scsi3_emulate_pro_preempt(cmd, type, scope, + ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, res_key, sa_res_key, 1); + break; case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: - return core_scsi3_emulate_pro_register(cmd, + ret = core_scsi3_emulate_pro_register(cmd, 0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1); + break; case PRO_REGISTER_AND_MOVE: - return core_scsi3_emulate_pro_register_and_move(cmd, res_key, + ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key, sa_res_key, aptpl, unreg); + break; default: pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + break; } - return PYX_TRANSPORT_INVALID_CDB_FIELD; +out: + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return ret; } /* @@ -4190,29 +4237,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return 0; } -static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) +int target_scsi3_emulate_pr_in(struct se_task *task) { - switch (cdb[1] & 0x1f) { - case PRI_READ_KEYS: - return core_scsi3_pri_read_keys(cmd); - case PRI_READ_RESERVATION: - return core_scsi3_pri_read_reservation(cmd); - case PRI_REPORT_CAPABILITIES: - return core_scsi3_pri_report_capabilities(cmd); - case PRI_READ_FULL_STATUS: - return 
core_scsi3_pri_read_full_status(cmd); - default: - pr_err("Unknown PERSISTENT_RESERVE_IN service" - " action: 0x%02x\n", cdb[1] & 0x1f); - return PYX_TRANSPORT_INVALID_CDB_FIELD; - } - -} + struct se_cmd *cmd = task->task_se_cmd; + int ret; -int core_scsi3_emulate_pr(struct se_cmd *cmd) -{ - unsigned char *cdb = &cmd->t_task_cdb[0]; - struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: * @@ -4222,16 +4251,38 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd) * initiator or service action and shall terminate with a RESERVATION * CONFLICT status. */ - if (dev->dev_flags & DF_SPC2_RESERVATIONS) { + if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) { pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; } - return (cdb[0] == PERSISTENT_RESERVE_OUT) ? - core_scsi3_emulate_pr_out(cmd, cdb) : - core_scsi3_emulate_pr_in(cmd, cdb); + switch (cmd->t_task_cdb[1] & 0x1f) { + case PRI_READ_KEYS: + ret = core_scsi3_pri_read_keys(cmd); + break; + case PRI_READ_RESERVATION: + ret = core_scsi3_pri_read_reservation(cmd); + break; + case PRI_REPORT_CAPABILITIES: + ret = core_scsi3_pri_report_capabilities(cmd); + break; + case PRI_READ_FULL_STATUS: + ret = core_scsi3_pri_read_full_status(cmd); + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_IN service" + " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); + ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + break; + } + + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } + return ret; } static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type) diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index c8f47d064584..b97f6940dd05 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -47,7 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache; extern int core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); -extern int core_scsi2_emulate_crh(struct se_cmd *); +extern int target_scsi2_reservation_release(struct se_task *task); +extern int target_scsi2_reservation_reserve(struct se_task *task); extern int core_scsi3_alloc_aptpl_registration( struct t10_reservation *, u64, unsigned char *, unsigned char *, u32, @@ -61,7 +62,9 @@ extern void core_scsi3_free_all_registrations(struct se_device *); extern unsigned char *core_scsi3_pr_dump_type(int); extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *, struct se_cmd *); -extern int core_scsi3_emulate_pr(struct se_cmd *); + +extern int target_scsi3_emulate_pr_in(struct se_task *task); +extern int target_scsi3_emulate_pr_out(struct se_task *task); extern int core_setup_reservations(struct se_device *, int); #endif /* TARGET_CORE_PR_H */ diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index dad671dee9e9..f941b6232614 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1091,7 +1091,7 @@ static int pscsi_do_task(struct se_task *task) req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); - if (!req) { + if (IS_ERR(req)) { pr_err("pSCSI: blk_make_request() failed\n"); goto fail; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 570b144a1edb..1d2aaba3f372 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -118,7 +118,7 @@ static void core_tmr_drain_tmr_list( /* * Allow the 
received TMR to return with FUNCTION_COMPLETE. */ - if (tmr && (tmr_p == tmr)) + if (tmr_p == tmr) continue; cmd = tmr_p->task_cmd; @@ -147,19 +147,18 @@ static void core_tmr_drain_tmr_list( } spin_unlock(&cmd->t_state_lock); - list_move_tail(&tmr->tmr_list, &drain_tmr_list); + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); } spin_unlock_irqrestore(&dev->se_tmr_lock, flags); - while (!list_empty(&drain_tmr_list)) { - tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); - list_del(&tmr->tmr_list); + list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) { + list_del_init(&tmr_p->tmr_list); cmd = tmr_p->task_cmd; pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," " Response: 0x%02x, t_state: %d\n", - (preempt_and_abort_list) ? "Preempt" : "", tmr, - tmr->function, tmr->response, cmd->t_state); + (preempt_and_abort_list) ? "Preempt" : "", tmr_p, + tmr_p->function, tmr_p->response, cmd->t_state); transport_cmd_finish_abort(cmd, 1); } @@ -330,16 +329,6 @@ static void core_tmr_drain_cmd_list( */ if (prout_cmd == cmd) continue; - /* - * Skip direct processing of TRANSPORT_FREE_CMD_INTR for - * HW target mode fabrics. - */ - spin_lock(&cmd->t_state_lock); - if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { - spin_unlock(&cmd->t_state_lock); - continue; - } - spin_unlock(&cmd->t_state_lock); atomic_set(&cmd->t_transport_queue_active, 0); atomic_dec(&qobj->queue_cnt); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d75255804481..f603b12485bd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -52,6 +52,7 @@ #include <target/target_core_configfs.h> #include "target_core_alua.h" +#include "target_core_cdb.h" #include "target_core_hba.h" #include "target_core_pr.h" #include "target_core_ua.h" @@ -268,6 +269,9 @@ struct se_session *transport_init_session(void) } INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); + INIT_LIST_HEAD(&se_sess->sess_cmd_list); + INIT_LIST_HEAD(&se_sess->sess_wait_list); + spin_lock_init(&se_sess->sess_cmd_lock); return se_sess; } @@ -514,13 +518,16 @@ static int transport_cmd_check_stop( * Some fabric modules like tcm_loop can release * their internally allocated I/O reference now and * struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the + * se_cmd being passed is released at this point, + * or zero if not being released. */ if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( &cmd->t_state_lock, flags); - cmd->se_tfo->check_stop_free(cmd); - return 1; + return cmd->se_tfo->check_stop_free(cmd); } } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -730,6 +737,10 @@ void transport_complete_task(struct se_task *task, int success) complete(&task->task_stop_comp); return; } + + if (!success) + cmd->t_tasks_failed = 1; + /* * Decrement the outstanding t_task_cdbs_left count. 
The last * struct se_task from struct se_cmd will complete itself into the @@ -740,7 +751,7 @@ void transport_complete_task(struct se_task *task, int success) return; } - if (!success || cmd->t_tasks_failed) { + if (cmd->t_tasks_failed) { if (!task->task_error_status) { task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; @@ -908,7 +919,7 @@ void transport_remove_task_from_execute_queue( } /* - * Handle QUEUE_FULL / -EAGAIN status + * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status */ static void target_qf_do_work(struct work_struct *work) @@ -1498,11 +1509,12 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_queue_node); - + INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->t_task_list); init_completion(&cmd->transport_lun_fe_stop_comp); init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); + init_completion(&cmd->cmd_wait_comp); spin_lock_init(&cmd->t_state_lock); atomic_set(&cmd->transport_dev_active, 1); @@ -1645,9 +1657,7 @@ int transport_handle_cdb_direct( * and call transport_generic_request_failure() if necessary.. */ ret = transport_generic_new_cmd(cmd); - if (ret == -EAGAIN) - return 0; - else if (ret < 0) { + if (ret < 0) { cmd->transport_error_status = ret; transport_generic_request_failure(cmd, 0, (cmd->data_direction != DMA_TO_DEVICE)); @@ -1717,13 +1727,6 @@ int transport_generic_handle_tmr( } EXPORT_SYMBOL(transport_generic_handle_tmr); -void transport_generic_free_cmd_intr( - struct se_cmd *cmd) -{ - transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false); -} -EXPORT_SYMBOL(transport_generic_free_cmd_intr); - /* * If the task is active, request it to be stopped and sleep until it * has completed. @@ -1886,7 +1889,7 @@ static void transport_generic_request_failure( ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; goto check_stop; case PYX_TRANSPORT_USE_SENSE_REASON: @@ -1913,7 +1916,7 @@ static void transport_generic_request_failure( else { ret = transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason, 0); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; } @@ -2153,62 +2156,20 @@ check_depth: atomic_set(&cmd->t_transport_sent, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - /* - * The struct se_cmd->transport_emulate_cdb() function pointer is used - * to grab REPORT_LUNS and other CDBs we want to handle before they hit the - * struct se_subsystem_api->do_task() caller below. - */ - if (cmd->transport_emulate_cdb) { - error = cmd->transport_emulate_cdb(cmd); - if (error != 0) { - cmd->transport_error_status = error; - spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - atomic_set(&cmd->t_transport_sent, 0); - transport_stop_tasks_for_cmd(cmd); - atomic_inc(&dev->depth_left); - transport_generic_request_failure(cmd, 0, 1); - goto check_depth; - } - /* - * Handle the successful completion for transport_emulate_cdb() - * for synchronous operation, following SCF_EMULATE_CDB_ASYNC - * Otherwise the caller is expected to complete the task with - * proper status. 
- */ - if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { - cmd->scsi_status = SAM_STAT_GOOD; - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - } - } else { - /* - * Currently for all virtual TCM plugins including IBLOCK, FILEIO and - * RAMDISK we use the internal transport_emulate_control_cdb() logic - * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK - * LUN emulation code. - * - * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we - * call ->do_task() directly and let the underlying TCM subsystem plugin - * code handle the CDB emulation. - */ - if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) && - (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) - error = transport_emulate_control_cdb(task); - else - error = dev->transport->do_task(task); - if (error != 0) { - cmd->transport_error_status = error; - spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_ACTIVE; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - atomic_set(&cmd->t_transport_sent, 0); - transport_stop_tasks_for_cmd(cmd); - atomic_inc(&dev->depth_left); - transport_generic_request_failure(cmd, 0, 1); - } + if (cmd->execute_task) + error = cmd->execute_task(task); + else + error = dev->transport->do_task(task); + if (error != 0) { + cmd->transport_error_status = error; + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags &= ~TF_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_sent, 0); + transport_stop_tasks_for_cmd(cmd); + atomic_inc(&dev->depth_left); + transport_generic_request_failure(cmd, 0, 1); } goto check_depth; @@ -2642,6 +2603,13 @@ static int transport_generic_cmd_sequencer( */ } + /* + * If we operate in passthrough mode we skip most CDB emulation and + * instead hand the commands down to the physical SCSI device. + */ + passthrough = + (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); + switch (cdb[0]) { case READ_6: sectors = transport_get_sectors_6(cdb, cmd, §or_ret); @@ -2721,9 +2689,12 @@ static int transport_generic_cmd_sequencer( cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - if (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV) + /* + * Do now allow BIDI commands for passthrough mode. + */ + if (passthrough) goto out_unsupported_cdb; + /* * Setup BIDI XOR callback to be run after I/O completion. */ @@ -2732,13 +2703,6 @@ static int transport_generic_cmd_sequencer( break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); - /* - * Determine if this is TCM/PSCSI device and we should disable - * internal emulation for this CDB. - */ - passthrough = (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - switch (service_action) { case XDWRITEREAD_32: sectors = transport_get_sectors_32(cdb, cmd, §or_ret); @@ -2752,8 +2716,12 @@ static int transport_generic_cmd_sequencer( cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; + /* + * Do now allow BIDI commands for passthrough mode. + */ if (passthrough) goto out_unsupported_cdb; + /* * Setup BIDI XOR callback to be run during after I/O * completion. 
@@ -2779,7 +2747,8 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[10], dev) < 0) goto out_invalid_cdb_field; - + if (!passthrough) + cmd->execute_task = target_emulate_write_same; break; default: pr_err("VARIABLE_LENGTH_CMD service action" @@ -2793,12 +2762,10 @@ static int transport_generic_cmd_sequencer( /* * Check for emulated MI_REPORT_TARGET_PGS. */ - if (cdb[1] == MI_REPORT_TARGET_PGS) { - cmd->transport_emulate_cdb = - (su_dev->t10_alua.alua_type == - SPC3_ALUA_EMULATED) ? - core_emulate_report_target_port_groups : - NULL; + if (cdb[1] == MI_REPORT_TARGET_PGS && + su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { + cmd->execute_task = + target_emulate_report_target_port_groups; } size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; @@ -2819,8 +2786,15 @@ static int transport_generic_cmd_sequencer( case MODE_SENSE: size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_modesense; break; case MODE_SENSE_10: + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_modesense; + break; case GPCMD_READ_BUFFER_CAPACITY: case GPCMD_SEND_OPC: case LOG_SELECT: @@ -2840,11 +2814,14 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case PERSISTENT_RESERVE_IN: + if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) + cmd->execute_task = target_scsi3_emulate_pr_in; + size = (cdb[7] << 8) + cdb[8]; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + break; case PERSISTENT_RESERVE_OUT: - cmd->transport_emulate_cdb = - (su_dev->t10_pr.res_type == - SPC3_PERSISTENT_RESERVATIONS) ? - core_scsi3_emulate_pr : NULL; + if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) + cmd->execute_task = target_scsi3_emulate_pr_out; size = (cdb[7] << 8) + cdb[8]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; @@ -2863,12 +2840,10 @@ static int transport_generic_cmd_sequencer( * * Check for emulated MO_SET_TARGET_PGS. */ - if (cdb[1] == MO_SET_TARGET_PGS) { - cmd->transport_emulate_cdb = - (su_dev->t10_alua.alua_type == - SPC3_ALUA_EMULATED) ? 
- core_emulate_set_target_port_groups : - NULL; + if (cdb[1] == MO_SET_TARGET_PGS && + su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { + cmd->execute_task = + target_emulate_set_target_port_groups; } size = (cdb[6] << 24) | (cdb[7] << 16) | @@ -2888,6 +2863,8 @@ static int transport_generic_cmd_sequencer( if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_inquiry; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; @@ -2896,6 +2873,8 @@ static int transport_generic_cmd_sequencer( case READ_CAPACITY: size = READ_CAP_LEN; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_readcapacity; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: @@ -2904,6 +2883,21 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case SERVICE_ACTION_IN: + switch (cmd->t_task_cdb[1] & 0x1f) { + case SAI_READ_CAPACITY_16: + if (!passthrough) + cmd->execute_task = + target_emulate_readcapacity_16; + break; + default: + if (passthrough) + break; + + pr_err("Unsupported SA: 0x%02x\n", + cmd->t_task_cdb[1] & 0x1f); + goto out_unsupported_cdb; + } + /*FALLTHROUGH*/ case ACCESS_CONTROL_IN: case ACCESS_CONTROL_OUT: case EXTENDED_COPY: @@ -2934,6 +2928,8 @@ static int transport_generic_cmd_sequencer( case REQUEST_SENSE: size = cdb[4]; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_request_sense; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; @@ -2961,10 +2957,8 @@ static int transport_generic_cmd_sequencer( * is running in SPC_PASSTHROUGH, and wants reservations * emulation disabled. */ - cmd->transport_emulate_cdb = - (su_dev->t10_pr.res_type != - SPC_PASSTHROUGH) ? - core_scsi2_emulate_crh : NULL; + if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) + cmd->execute_task = target_scsi2_reservation_reserve; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case RELEASE: @@ -2978,10 +2972,8 @@ static int transport_generic_cmd_sequencer( else size = cmd->data_length; - cmd->transport_emulate_cdb = - (su_dev->t10_pr.res_type != - SPC_PASSTHROUGH) ? 
- core_scsi2_emulate_crh : NULL; + if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) + cmd->execute_task = target_scsi2_reservation_release; cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case SYNCHRONIZE_CACHE: @@ -3002,16 +2994,9 @@ static int transport_generic_cmd_sequencer( size = transport_get_size(sectors, cdb, cmd); cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; - /* - * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb() - */ - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (passthrough) break; - /* - * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation - * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks() - */ - cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; + /* * Check to ensure that LBA + Range does not exceed past end of * device for IBLOCK and FILEIO ->do_sync_cache() backend calls @@ -3020,10 +3005,13 @@ static int transport_generic_cmd_sequencer( if (transport_cmd_get_valid_sectors(cmd) < 0) goto out_invalid_cdb_field; } + cmd->execute_task = target_emulate_synchronize_cache; break; case UNMAP: size = get_unaligned_be16(&cdb[7]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_unmap; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, §or_ret); @@ -3042,6 +3030,8 @@ static int transport_generic_cmd_sequencer( if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_invalid_cdb_field; + if (!passthrough) + cmd->execute_task = target_emulate_write_same; break; case WRITE_SAME: sectors = transport_get_sectors_10(cdb, cmd, §or_ret); @@ -3063,26 +3053,31 @@ static int transport_generic_cmd_sequencer( */ if (target_check_write_same_discard(&cdb[1], dev) < 0) goto out_invalid_cdb_field; + if (!passthrough) + cmd->execute_task = target_emulate_write_same; break; case ALLOW_MEDIUM_REMOVAL: - case GPCMD_CLOSE_TRACK: case ERASE: - case INITIALIZE_ELEMENT_STATUS: - case GPCMD_LOAD_UNLOAD: case REZERO_UNIT: case SEEK_10: - case GPCMD_SET_SPEED: case SPACE: case START_STOP: case TEST_UNIT_READY: case VERIFY: case WRITE_FILEMARKS: + cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; + if (!passthrough) + cmd->execute_task = target_emulate_noop; + break; + case GPCMD_CLOSE_TRACK: + case INITIALIZE_ELEMENT_STATUS: + case GPCMD_LOAD_UNLOAD: + case GPCMD_SET_SPEED: case MOVE_MEDIUM: cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; break; case REPORT_LUNS: - cmd->transport_emulate_cdb = - transport_core_report_lun_response; + cmd->execute_task = target_report_luns; size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS @@ -3134,6 +3129,11 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } + /* reject any command that we don't have a handler for */ + if (!(passthrough || cmd->execute_task || + (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + goto out_unsupported_cdb; + /* Let's limit control cdbs to a page, for simplicity's sake. 
*/ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && size > PAGE_SIZE) @@ -3308,7 +3308,7 @@ static void target_complete_ok_work(struct work_struct *work) if (cmd->scsi_status) { ret = transport_send_check_condition_and_sense( cmd, reason, 1); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; transport_lun_remove_cmd(cmd); @@ -3333,7 +3333,7 @@ static void target_complete_ok_work(struct work_struct *work) spin_unlock(&cmd->se_lun->lun_sep_lock); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; break; case DMA_TO_DEVICE: @@ -3354,14 +3354,14 @@ static void target_complete_ok_work(struct work_struct *work) } spin_unlock(&cmd->se_lun->lun_sep_lock); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; break; default: @@ -3890,7 +3890,10 @@ EXPORT_SYMBOL(transport_generic_process_write); static void transport_write_pending_qf(struct se_cmd *cmd) { - if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { + int ret; + + ret = cmd->se_tfo->write_pending(cmd); + if (ret == -EAGAIN || ret == -ENOMEM) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); transport_handle_queue_full(cmd, cmd->se_dev); @@ -3920,7 +3923,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) * frontend know that WRITE buffers are ready. */ ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; else if (ret < 0) return ret; @@ -3931,7 +3934,7 @@ queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); cmd->t_state = TRANSPORT_COMPLETE_QF_WP; transport_handle_queue_full(cmd, cmd->se_dev); - return ret; + return 0; } /** @@ -3949,6 +3952,14 @@ void transport_release_cmd(struct se_cmd *cmd) core_tmr_release_req(cmd->se_tmr_req); if (cmd->t_task_cdb != cmd->__t_task_cdb) kfree(cmd->t_task_cdb); + /* + * Check if target_wait_for_sess_cmds() is expecting to + * release se_cmd directly here.. 
+ */ + if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd) + if (cmd->se_tfo->check_release_cmd(cmd) != 0) + return; + cmd->se_tfo->release_cmd(cmd); } EXPORT_SYMBOL(transport_release_cmd); @@ -3976,6 +3987,114 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) } EXPORT_SYMBOL(transport_generic_free_cmd); +/* target_get_sess_cmd - Add command to active ->sess_cmd_list + * @se_sess: session to reference + * @se_cmd: command descriptor to add + */ +void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); + se_cmd->check_release = 1; + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); +} +EXPORT_SYMBOL(target_get_sess_cmd); + +/* target_put_sess_cmd - Check for active I/O shutdown or list delete + * @se_sess: session to reference + * @se_cmd: command descriptor to drop + */ +int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + if (list_empty(&se_cmd->se_cmd_list)) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + WARN_ON(1); + return 0; + } + + if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + complete(&se_cmd->cmd_wait_comp); + return 1; + } + list_del(&se_cmd->se_cmd_list); + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + return 0; +} +EXPORT_SYMBOL(target_put_sess_cmd); + +/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list + * @se_sess: session to split + */ +void target_splice_sess_cmd_list(struct se_session *se_sess) +{ + struct se_cmd *se_cmd; + unsigned long flags; + + WARN_ON(!list_empty(&se_sess->sess_wait_list)); + INIT_LIST_HEAD(&se_sess->sess_wait_list); + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + se_sess->sess_tearing_down = 1; + + list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); + + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) + se_cmd->cmd_wait_set = 1; + + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); +} +EXPORT_SYMBOL(target_splice_sess_cmd_list); + +/* target_wait_for_sess_cmds - Wait for outstanding descriptors + * @se_sess: session to wait for active I/O + * @wait_for_tasks: Make extra transport_wait_for_tasks call + */ +void target_wait_for_sess_cmds( + struct se_session *se_sess, + int wait_for_tasks) +{ + struct se_cmd *se_cmd, *tmp_cmd; + bool rc = false; + + list_for_each_entry_safe(se_cmd, tmp_cmd, + &se_sess->sess_wait_list, se_cmd_list) { + list_del(&se_cmd->se_cmd_list); + + pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" + " %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + + if (wait_for_tasks) { + pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + + rc = transport_wait_for_tasks(se_cmd); + + pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + } + + if (!rc) { + wait_for_completion(&se_cmd->cmd_wait_comp); + pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" + " fabric state: %d\n", se_cmd, se_cmd->t_state, + se_cmd->se_tfo->get_cmd_state(se_cmd)); + } + + se_cmd->se_tfo->release_cmd(se_cmd); + } +} 
@@ -4152,14 +4271,14 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
  * Called from frontend fabric context to wait for storage engine
  * to pause and/or release frontend generated struct se_cmd.
  */
-void transport_wait_for_tasks(struct se_cmd *cmd)
+bool transport_wait_for_tasks(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
+		return false;
 	}
 	/*
 	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
@@ -4167,7 +4286,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
 	 */
 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
+		return false;
 	}
 	/*
 	 * If we are already stopped due to an external event (ie: LUN shutdown)
@@ -4210,7 +4329,7 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
 	if (!atomic_read(&cmd->t_transport_active) ||
 	     atomic_read(&cmd->t_transport_aborted)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
+		return false;
 	}
 
 	atomic_set(&cmd->t_transport_stop, 1);
@@ -4235,6 +4354,8 @@ void transport_wait_for_tasks(struct se_cmd *cmd)
 		cmd->se_tfo->get_task_tag(cmd));
 
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return true;
 }
 EXPORT_SYMBOL(transport_wait_for_tasks);
 
@@ -4583,9 +4704,7 @@ get_cmd:
 				break;
 			}
 			ret = transport_generic_new_cmd(cmd);
-			if (ret == -EAGAIN)
-				break;
-			else if (ret < 0) {
+			if (ret < 0) {
 				cmd->transport_error_status = ret;
 				transport_generic_request_failure(cmd, 0,
 					(cmd->data_direction !=
@@ -4595,9 +4714,6 @@ get_cmd:
 		case TRANSPORT_PROCESS_WRITE:
 			transport_generic_process_write(cmd);
 			break;
-		case TRANSPORT_FREE_CMD_INTR:
-			transport_generic_free_cmd(cmd, 0);
-			break;
		case TRANSPORT_PROCESS_TMR:
 			transport_generic_do_tmr(cmd);
 			break;
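Two related simplifications land in the hunks above: transport_wait_for_tasks() now reports whether it actually waited, and the TRANSPORT_FREE_CMD_INTR deferral state disappears from the processing thread. As a hedged illustration only (invented function name, process context assumed, not code from this patch), a fabric path that used to defer the free can now do it directly:

#include <linux/kernel.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>

/*
 * Sketch: drop a descriptor directly instead of queueing a
 * TRANSPORT_FREE_CMD_INTR state, and use the new bool return to learn
 * whether a quiesce was actually performed.
 */
static void example_drop_cmd(struct se_cmd *se_cmd)
{
	bool waited = transport_wait_for_tasks(se_cmd);

	pr_debug("example: se_cmd %p %s quiesced\n", se_cmd,
		 waited ? "was" : "was not");

	/* Was: transport_generic_free_cmd_intr(se_cmd); */
	transport_generic_free_cmd(se_cmd, 0);
}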
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 3749d8b4b423..e05c55100ec6 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -156,7 +156,7 @@ int ft_lport_notify(struct notifier_block *, unsigned long, void *);
 /*
  * IO methods.
  */
-void ft_check_stop_free(struct se_cmd *);
+int ft_check_stop_free(struct se_cmd *);
 void ft_release_cmd(struct se_cmd *);
 int ft_queue_status(struct se_cmd *);
 int ft_queue_data_in(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 6195026cc7b0..4fac37c4c615 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -112,9 +112,10 @@ void ft_release_cmd(struct se_cmd *se_cmd)
 	ft_free_cmd(cmd);
 }
 
-void ft_check_stop_free(struct se_cmd *se_cmd)
+int ft_check_stop_free(struct se_cmd *se_cmd)
 {
 	transport_generic_free_cmd(se_cmd, 0);
+	return 1;
 }
 
 /*
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 35aa786f93da..7f5fed3c89e1 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -89,7 +89,6 @@ enum transport_state_table {
 	TRANSPORT_PROCESS_TMR = 9,
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_NEW_CMD_MAP = 16,
-	TRANSPORT_FREE_CMD_INTR = 17,
 	TRANSPORT_COMPLETE_QF_WP = 18,
 	TRANSPORT_COMPLETE_QF_OK = 19,
 };
@@ -115,7 +114,6 @@ enum se_cmd_flags_table {
 	SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
 	SCF_UNUSED = 0x00100000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
-	SCF_EMULATE_CDB_ASYNC = 0x01000000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -426,6 +424,9 @@ struct se_cmd {
 	enum transport_state_table t_state;
 	/* Transport specific error status */
 	int			transport_error_status;
+	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
+	int			check_release:1;
+	int			cmd_wait_set:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
 	u32			se_ordered_id;
@@ -452,8 +453,10 @@ struct se_cmd {
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
 	struct list_head	se_queue_node;
+	struct list_head	se_cmd_list;
+	struct completion	cmd_wait_comp;
 	struct target_core_fabric_ops *se_tfo;
-	int (*transport_emulate_cdb)(struct se_cmd *);
+	int (*execute_task)(struct se_task *);
 	void (*transport_complete_callback)(struct se_cmd *);
 	unsigned char		*t_task_cdb;
@@ -559,12 +562,16 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
+	int			sess_tearing_down:1;
 	u64			sess_bin_isid;
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group	*se_tpg;
 	void			*fabric_sess_ptr;
 	struct list_head	sess_list;
 	struct list_head	sess_acl_list;
+	struct list_head	sess_cmd_list;
+	struct list_head	sess_wait_list;
+	spinlock_t		sess_cmd_lock;
 } ____cacheline_aligned;
 
 struct se_device;
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index 46571912086c..2be31ff8763b 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -17,7 +17,7 @@ extern int core_dev_export(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
 extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
-extern int transport_core_report_lun_response(struct se_cmd *);
+extern int target_report_luns(struct se_task *);
 extern void se_release_device_for_hba(struct se_device *);
 extern void se_release_vpd_for_dev(struct se_device *);
 extern void se_clear_dev_ports(struct se_device *);
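The target_core_base.h and target_core_device.h hunks above replace the per-command transport_emulate_cdb() hook with a per-task execute_task() hook, which is why target_report_luns() now takes a struct se_task *. A hedged sketch of the shape such a handler takes under the new hook (the INQUIRY name and the elided payload code are illustrative, not this patch's code):

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>

/*
 * Sketch: an emulation handler is invoked per struct se_task, reaches
 * the owning command via task->task_se_cmd, and completes the task
 * itself on success.
 */
static int example_emulate_inquiry(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	int ret = 0;

	pr_debug("example INQUIRY emulation for se_cmd: %p\n", cmd);
	/* ... fill the command's data buffer with the INQUIRY payload ... */

	if (!ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return ret;
}

/*
 * The CDB decoder would then assign it while parsing the CDB, e.g.:
 *	cmd->execute_task = example_emulate_inquiry;
 */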
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index 126c675f4f14..0256825f923d 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -46,9 +46,16 @@ struct target_core_fabric_ops {
 	int (*new_cmd_map)(struct se_cmd *);
 	/*
 	 * Optional to release struct se_cmd and fabric dependent allocated
-	 * I/O descriptor in transport_cmd_check_stop()
+	 * I/O descriptor in transport_cmd_check_stop().
+	 *
+	 * Returning 1 will signal a descriptor has been released.
+	 * Returning 0 will signal a descriptor has not been released.
 	 */
-	void (*check_stop_free)(struct se_cmd *);
+	int (*check_stop_free)(struct se_cmd *);
+	/*
+	 * Optional check for active I/O shutdown
+	 */
+	int (*check_release_cmd)(struct se_cmd *);
 	void (*release_cmd)(struct se_cmd *);
 	/*
 	 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index a037a1a6fbba..c16e9431dd01 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -160,17 +160,20 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
 extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
-extern void transport_generic_free_cmd_intr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32);
 extern int transport_clear_lun_from_sessions(struct se_lun *);
-extern void transport_wait_for_tasks(struct se_cmd *);
+extern bool transport_wait_for_tasks(struct se_cmd *);
 extern int transport_check_aborted_status(struct se_cmd *, int);
 extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
 extern void transport_send_task_abort(struct se_cmd *);
 extern void transport_release_cmd(struct se_cmd *);
 extern void transport_generic_free_cmd(struct se_cmd *, int);
+extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
+extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
+extern void target_splice_sess_cmd_list(struct se_session *);
+extern void target_wait_for_sess_cmds(struct se_session *, int);
 extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
 extern void transport_do_task_sg_chain(struct se_cmd *);
 extern void transport_generic_process_write(struct se_cmd *);
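Taken together with the target_core_fabric_ops changes above, a fabric module would wire the reworked callbacks roughly as follows. This is a hedged sketch only: the example_* symbols are invented, a real driver fills in its remaining mandatory ops, and only the target core calls shown in this diff are assumed to exist.

#include <target/target_core_base.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_transport.h>

/* Hypothetical fabric descriptor free; a real driver releases its own cmd here */
static void example_release_cmd(struct se_cmd *se_cmd)
{
}

/*
 * ->check_stop_free: returning 1 tells transport_cmd_check_stop() that
 * the descriptor has already been released on this path.
 */
static int example_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}

/*
 * ->check_release_cmd: lets transport_release_cmd() hand descriptors that
 * complete during session teardown back to target_wait_for_sess_cmds(),
 * which then performs the final ->release_cmd() itself.
 */
static int example_check_release_cmd(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static struct target_core_fabric_ops example_fabric_ops = {
	/* ... remaining mandatory callbacks elided ... */
	.check_stop_free	= example_check_stop_free,
	.check_release_cmd	= example_check_release_cmd,
	.release_cmd		= example_release_cmd,
};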