Diffstat (limited to 'drivers/target')
24 files changed, 492 insertions, 278 deletions
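Editor's note: the headline change in this series splits target_submit_cmd_map_sgls() into target_init_cmd(), target_submit_prep() and target_submit() (plus a queued variant, target_queue_submission()). A minimal sketch of the resulting fabric-driver flow follows; the my_fabric_queue_cmd() wrapper and its arguments are hypothetical, while the target_* calls are the ones introduced in the diff below:

/*
 * Sketch only: direct submission from process context, assuming
 * target_core_fabric.h. The wrapper name and arguments are made up.
 */
static void my_fabric_queue_cmd(struct se_cmd *se_cmd,
				struct se_session *se_sess,
				unsigned char *cdb, unsigned char *sense,
				u64 unpacked_lun, u32 data_length,
				int data_dir)
{
	/* Take the cmd_kref; this only fails for drivers that use
	 * target_stop_session, so most callers may ignore the result. */
	if (target_init_cmd(se_cmd, se_sess, sense, unpacked_lun,
			    data_length, TCM_SIMPLE_TAG, data_dir, 0))
		return;

	/* Set up CDB, LUN and SGLs; on failure target core completes
	 * the command via queue_status, so just back out here. */
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;

	/* Must be process context; atomic callers use
	 * target_queue_submission() instead. */
	target_submit(se_cmd);
}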
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d0e7ed8f28cc..cf7f0465dd63 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1154,10 +1154,10 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, be32_to_cpu(hdr->data_length), - cmd->data_direction, sam_task_attr, - cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun)); + __target_init_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, be32_to_cpu(hdr->data_length), + cmd->data_direction, sam_task_attr, + cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun)); pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, @@ -1166,7 +1166,8 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, target_get_sess_cmd(&cmd->se_cmd, true); - cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb); + cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb, + GFP_KERNEL); if (cmd->sense_reason) { if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { return iscsit_add_reject_cmd(cmd, @@ -2013,10 +2014,10 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, buf); } - transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, 0, DMA_NONE, - TCM_SIMPLE_TAG, cmd->sense_buffer + 2, - scsilun_to_int(&hdr->lun)); + __target_init_cmd(&cmd->se_cmd, &iscsi_ops, + conn->sess->se_sess, 0, DMA_NONE, + TCM_SIMPLE_TAG, cmd->sense_buffer + 2, + scsilun_to_int(&hdr->lun)); target_get_sess_cmd(&cmd->se_cmd, true); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0fa1d57b26fa..f4a24fa5058e 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -161,14 +161,13 @@ static struct se_tpg_np *lio_target_call_addnptotpg( char *str, *str2, *ip_str, *port_str; struct sockaddr_storage sockaddr = { }; int ret; - char buf[MAX_PORTAL_LEN + 1]; + char buf[MAX_PORTAL_LEN + 1] = { }; if (strlen(name) > MAX_PORTAL_LEN) { pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n", (int)strlen(name), MAX_PORTAL_LEN); return ERR_PTR(-EOVERFLOW); } - memset(buf, 0, MAX_PORTAL_LEN + 1); snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); str = strstr(buf, "["); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8b40f10976ff..151e2949bb75 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -28,7 +28,6 @@ #include "iscsi_target_auth.h" #define MAX_LOGIN_PDUS 7 -#define TEXT_LEN 4096 void convert_null_to_semi(char *buf, int len) { diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index 35e75a3569c9..cce3a827059e 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -28,7 +28,6 @@ /* Instance Attributes Table */ #define ISCSI_INST_NUM_NODES 1 #define ISCSI_INST_DESCR "Storage Engine Target" -#define ISCSI_INST_LAST_FAILURE_TYPE 0 #define ISCSI_DISCONTINUITY_TIME 0 #define ISCSI_NODE_INDEX 1 diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 9468b017b4a7..6dd5810e2af1 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c 
@@ -28,23 +28,6 @@ #include "iscsi_target_util.h" #include "iscsi_target.h" -#define PRINT_BUFF(buff, len) \ -{ \ - int zzz; \ - \ - pr_debug("%d:\n", __LINE__); \ - for (zzz = 0; zzz < len; zzz++) { \ - if (zzz % 16 == 0) { \ - if (zzz) \ - pr_debug("\n"); \ - pr_debug("%4i: ", zzz); \ - } \ - pr_debug("%02x ", (unsigned char) (buff)[zzz]); \ - } \ - if ((len + 1) % 16) \ - pr_debug("\n"); \ -} - extern struct list_head g_tiqn_list; extern spinlock_t tiqn_lock; diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index badba437e5f9..2687fd7d45db 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -39,7 +39,6 @@ #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev) -static struct workqueue_struct *tcm_loop_workqueue; static struct kmem_cache *tcm_loop_cmd_cache; static int tcm_loop_hba_no_cnt; @@ -67,8 +66,12 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); + struct scsi_cmnd *sc = tl_cmd->sc; - kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); + else + sc->scsi_done(sc); } static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) @@ -102,10 +105,8 @@ static struct device_driver tcm_loop_driverfs = { */ static struct device *tcm_loop_primary; -static void tcm_loop_submission_work(struct work_struct *work) +static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd) { - struct tcm_loop_cmd *tl_cmd = - container_of(work, struct tcm_loop_cmd, work); struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd; struct scsi_cmnd *sc = tl_cmd->sc; struct tcm_loop_nexus *tl_nexus; @@ -113,7 +114,6 @@ static void tcm_loop_submission_work(struct work_struct *work) struct tcm_loop_tpg *tl_tpg; struct scatterlist *sgl_bidi = NULL; u32 sgl_bidi_count = 0, transfer_length; - int rc; tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; @@ -151,21 +151,20 @@ static void tcm_loop_submission_work(struct work_struct *work) } se_cmd->tag = tl_cmd->sc_cmd_tag; - rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, - &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - transfer_length, TCM_SIMPLE_TAG, - sc->sc_data_direction, 0, - scsi_sglist(sc), scsi_sg_count(sc), - sgl_bidi, sgl_bidi_count, - scsi_prot_sglist(sc), scsi_prot_sg_count(sc)); - if (rc < 0) { - set_host_byte(sc, DID_NO_CONNECT); - goto out_done; - } + target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0], + tl_cmd->sc->device->lun, transfer_length, + TCM_SIMPLE_TAG, sc->sc_data_direction, 0); + + if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc), + scsi_sg_count(sc), sgl_bidi, sgl_bidi_count, + scsi_prot_sglist(sc), scsi_prot_sg_count(sc), + GFP_ATOMIC)) + return; + + target_queue_submission(se_cmd); return; out_done: - kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); sc->scsi_done(sc); } @@ -175,24 +174,18 @@ out_done: */ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) { - struct tcm_loop_cmd *tl_cmd; + struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc); pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n", __func__, sc->device->host->host_no, sc->device->id, sc->device->channel, sc->device->lun, sc->cmnd[0], scsi_bufflen(sc)); - tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); - if (!tl_cmd) { - set_host_byte(sc, DID_ERROR); - 
sc->scsi_done(sc); - return 0; - } - + memset(tl_cmd, 0, sizeof(*tl_cmd)); tl_cmd->sc = sc; tl_cmd->sc_cmd_tag = sc->request->tag; - INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); - queue_work(tcm_loop_workqueue, &tl_cmd->work); + + tcm_loop_target_queue_cmd(tl_cmd); return 0; } @@ -320,6 +313,7 @@ static struct scsi_host_template tcm_loop_driver_template = { .dma_boundary = PAGE_SIZE - 1, .module = THIS_MODULE, .track_queue_depth = 1, + .cmd_size = sizeof(struct tcm_loop_cmd), }; static int tcm_loop_driver_probe(struct device *dev) @@ -580,7 +574,6 @@ static int tcm_loop_queue_data_or_status(const char *func, if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) scsi_set_resid(sc, se_cmd->residual_count); - sc->scsi_done(sc); return 0; } @@ -1164,17 +1157,13 @@ static int __init tcm_loop_fabric_init(void) { int ret = -ENOMEM; - tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0); - if (!tcm_loop_workqueue) - goto out; - tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache", sizeof(struct tcm_loop_cmd), __alignof__(struct tcm_loop_cmd), 0, NULL); if (!tcm_loop_cmd_cache) { pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n"); - goto out_destroy_workqueue; + goto out; } ret = tcm_loop_alloc_core_bus(); @@ -1191,8 +1180,6 @@ out_release_core_bus: tcm_loop_release_core_bus(); out_destroy_cache: kmem_cache_destroy(tcm_loop_cmd_cache); -out_destroy_workqueue: - destroy_workqueue(tcm_loop_workqueue); out: return ret; } @@ -1202,7 +1189,6 @@ static void __exit tcm_loop_fabric_exit(void) target_unregister_template(&loop_ops); tcm_loop_release_core_bus(); kmem_cache_destroy(tcm_loop_cmd_cache); - destroy_workqueue(tcm_loop_workqueue); } MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module"); diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index d3110909a213..437663b3905c 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -16,7 +16,6 @@ struct tcm_loop_cmd { struct scsi_cmnd *sc; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tl_se_cmd; - struct work_struct work; struct completion tmr_done; /* Sense buffer that will be mapped into outgoing status */ unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 2a6165febd3b..ce84f93c183a 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1218,11 +1218,9 @@ static void sbp_handle_command(struct sbp_target_request *req) /* only used for printk until we do TMRs */ req->se_cmd.tag = req->orb_pointer; - if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, - req->sense_buf, unpacked_lun, data_length, - TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF)) - goto err; - + target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, + req->sense_buf, unpacked_lun, data_length, + TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF); return; err: diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index f04352285155..9cb1ca8421c8 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1494,7 +1494,7 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, { struct t10_wwn *t10_wwn = to_t10_wwn(item); struct se_device *dev = t10_wwn->t10_dev; - unsigned char buf[INQUIRY_VPD_SERIAL_LEN]; + unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { }; /* * If Linux/SCSI 
subsystem_api_t plugin got a VPD Unit Serial @@ -1536,7 +1536,6 @@ static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item, * Also, strip any newline added from the userspace * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial */ - memset(buf, 0, INQUIRY_VPD_SERIAL_LEN); snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page); snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN, "%s", strstrip(buf)); @@ -1556,11 +1555,9 @@ static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item, { struct t10_wwn *t10_wwn = to_t10_wwn(item); struct t10_vpd *vpd; - unsigned char buf[VPD_TMP_BUF_SIZE]; + unsigned char buf[VPD_TMP_BUF_SIZE] = { }; ssize_t len = 0; - memset(buf, 0, VPD_TMP_BUF_SIZE); - spin_lock(&t10_wwn->t10_vpd_lock); list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { if (!vpd->protocol_identifier_set) @@ -1663,9 +1660,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev, { struct se_node_acl *se_nacl; struct t10_pr_registration *pr_reg; - char i_buf[PR_REG_ISID_ID_LEN]; - - memset(i_buf, 0, PR_REG_ISID_ID_LEN); + char i_buf[PR_REG_ISID_ID_LEN] = { }; pr_reg = dev->dev_pr_res_holder; if (!pr_reg) @@ -2286,7 +2281,7 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, struct se_hba *hba = dev->se_hba; struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; - unsigned char buf[LU_GROUP_NAME_BUF]; + unsigned char buf[LU_GROUP_NAME_BUF] = { }; int move = 0; lu_gp_mem = dev->dev_alua_lu_gp_mem; @@ -2297,7 +2292,6 @@ static ssize_t target_dev_alua_lu_gp_store(struct config_item *item, pr_err("ALUA LU Group Alias too large!\n"); return -EINVAL; } - memset(buf, 0, LU_GROUP_NAME_BUF); memcpy(buf, page, count); /* * Any ALUA logical unit alias besides "NULL" means we will be @@ -2615,9 +2609,7 @@ static ssize_t target_lu_gp_members_show(struct config_item *item, char *page) struct se_hba *hba; struct t10_alua_lu_gp_member *lu_gp_mem; ssize_t len = 0, cur_len; - unsigned char buf[LU_GROUP_NAME_BUF]; - - memset(buf, 0, LU_GROUP_NAME_BUF); + unsigned char buf[LU_GROUP_NAME_BUF] = { }; spin_lock(&lu_gp->lu_gp_lock); list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) { @@ -3020,9 +3012,7 @@ static ssize_t target_tg_pt_gp_members_show(struct config_item *item, struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item); struct se_lun *lun; ssize_t len = 0, cur_len; - unsigned char buf[TG_PT_GROUP_NAME_BUF]; - - memset(buf, 0, TG_PT_GROUP_NAME_BUF); + unsigned char buf[TG_PT_GROUP_NAME_BUF] = { }; spin_lock(&tg_pt_gp->tg_pt_gp_lock); list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list, @@ -3409,11 +3399,10 @@ static struct config_group *target_core_call_addhbatotarget( { char *se_plugin_str, *str, *str2; struct se_hba *hba; - char buf[TARGET_CORE_NAME_MAX_LEN]; + char buf[TARGET_CORE_NAME_MAX_LEN] = { }; unsigned long plugin_dep_id = 0; int ret; - memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { pr_err("Passed *name strlen(): %d exceeds" " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7787c527aad3..74d3a4896588 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -735,8 +735,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->queue_cnt = nr_cpu_ids; for (i = 0; i < dev->queue_cnt; i++) { - INIT_LIST_HEAD(&dev->queues[i].state_list); - 
spin_lock_init(&dev->queues[i].lock); + struct se_device_queue *q; + + q = &dev->queues[i]; + INIT_LIST_HEAD(&q->state_list); + spin_lock_init(&q->lock); + + init_llist_head(&q->sq.cmd_list); + INIT_WORK(&q->sq.work, target_queued_submit_work); } dev->se_hba = hba; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index ee85602213f7..fc7edc04ee09 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -892,6 +892,7 @@ static void target_fabric_release_wwn(struct config_item *item) struct target_fabric_configfs *tf = wwn->wwn_tf; configfs_remove_default_groups(&wwn->fabric_stat_group); + configfs_remove_default_groups(&wwn->param_group); tf->tf_ops->fabric_drop_wwn(wwn); } @@ -918,6 +919,57 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); /* End of tfc_wwn_fabric_stats_cit */ +static ssize_t +target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item, + char *page) +{ + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, + param_group); + return sprintf(page, "%d\n", + wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ? + SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity); +} + +static ssize_t +target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, + param_group); + int compl_val; + + if (kstrtoint(page, 0, &compl_val)) + return -EINVAL; + + switch (compl_val) { + case SE_COMPL_AFFINITY_CPUID: + wwn->cmd_compl_affinity = compl_val; + break; + case SE_COMPL_AFFINITY_CURR_CPU: + wwn->cmd_compl_affinity = WORK_CPU_UNBOUND; + break; + default: + if (compl_val < 0 || compl_val >= nr_cpu_ids || + !cpu_online(compl_val)) { + pr_err("Command completion value must be between %d and %d or an online CPU.\n", + SE_COMPL_AFFINITY_CPUID, + SE_COMPL_AFFINITY_CURR_CPU); + return -EINVAL; + } + wwn->cmd_compl_affinity = compl_val; + } + + return count; +} +CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity); + +static struct configfs_attribute *target_fabric_wwn_param_attrs[] = { + &target_fabric_wwn_attr_cmd_completion_affinity, + NULL, +}; + +TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs); + /* Start of tfc_wwn_cit */ static struct config_group *target_fabric_make_wwn( @@ -937,6 +989,7 @@ static struct config_group *target_fabric_make_wwn( if (!wwn || IS_ERR(wwn)) return ERR_PTR(-EINVAL); + wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID; wwn->wwn_tf = tf; config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit); @@ -945,6 +998,10 @@ static struct config_group *target_fabric_make_wwn( &tf->tf_wwn_fabric_stats_cit); configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group); + config_group_init_type_name(&wwn->param_group, "param", + &tf->tf_wwn_param_cit); + configfs_add_default_group(&wwn->param_group, &wwn->wwn_group); + if (tf->tf_ops->add_wwn_groups) tf->tf_ops->add_wwn_groups(wwn); return &wwn->wwn_group; @@ -974,6 +1031,7 @@ int target_fabric_setup_cits(struct target_fabric_configfs *tf) target_fabric_setup_discovery_cit(tf); target_fabric_setup_wwn_cit(tf); target_fabric_setup_wwn_fabric_stats_cit(tf); + target_fabric_setup_wwn_param_cit(tf); target_fabric_setup_tpg_cit(tf); target_fabric_setup_tpg_base_cit(tf); target_fabric_setup_tpg_port_cit(tf); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 5a66854def95..ef4a8e189fba 100644 --- 
a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -498,6 +498,7 @@ fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, prot_length = nolb * se_dev->prot_length; + memset(buf, 0xff, bufsize); for (prot = 0; prot < prot_length;) { sector_t len = min_t(sector_t, bufsize, prot_length - prot); ssize_t ret = kernel_write(prot_fd, buf, len, &pos); @@ -523,7 +524,6 @@ fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) pr_err("Unable to allocate FILEIO prot buf\n"); return -ENOMEM; } - memset(buf, 0xff, PAGE_SIZE); rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE); @@ -882,7 +882,6 @@ static int fd_format_prot(struct se_device *dev) (unsigned long long)(dev->transport->get_blocks(dev) + 1) * dev->prot_length); - memset(buf, 0xff, unit_size); ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1, buf, unit_size); vfree(buf); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index ee3d52061281..d6fdd1c61f90 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -61,9 +61,18 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam return NULL; } + ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug), + GFP_KERNEL); + if (!ib_dev->ibd_plug) + goto free_dev; + pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); return &ib_dev->dev; + +free_dev: + kfree(ib_dev); + return NULL; } static int iblock_configure_device(struct se_device *dev) @@ -171,6 +180,7 @@ static void iblock_dev_call_rcu(struct rcu_head *p) struct se_device *dev = container_of(p, struct se_device, rcu_head); struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + kfree(ib_dev->ibd_plug); kfree(ib_dev); } @@ -188,6 +198,33 @@ static void iblock_destroy_device(struct se_device *dev) bioset_exit(&ib_dev->ibd_bio_set); } +static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev); + struct iblock_dev_plug *ib_dev_plug; + + /* + * Each se_device has a per cpu work this can be run from. We + * shouldn't have multiple threads on the same cpu calling this + * at the same time.
+ */ + ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()]; + if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags)) + return NULL; + + blk_start_plug(&ib_dev_plug->blk_plug); + return &ib_dev_plug->se_plug; +} + +static void iblock_unplug_device(struct se_dev_plug *se_plug) +{ + struct iblock_dev_plug *ib_dev_plug = container_of(se_plug, + struct iblock_dev_plug, se_plug); + + blk_finish_plug(&ib_dev_plug->blk_plug); + clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags); +} + static unsigned long long iblock_emulate_read_cap_with_block_size( struct se_device *dev, struct block_device *bd, @@ -304,9 +341,8 @@ static void iblock_bio_done(struct bio *bio) iblock_complete_cmd(cmd); } -static struct bio * -iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, - int op_flags) +static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, + unsigned int opf) { struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev); struct bio *bio; @@ -326,7 +362,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, bio->bi_private = cmd; bio->bi_end_io = &iblock_bio_done; bio->bi_iter.bi_sector = lba; - bio_set_op_attrs(bio, op, op_flags); + bio->bi_opf = opf; return bio; } @@ -335,7 +371,10 @@ static void iblock_submit_bios(struct bio_list *list) { struct blk_plug plug; struct bio *bio; - + /* + * The block layer handles nested plugs, so just plug/unplug to handle + * fabric drivers that didn't support batching and multi bio cmds. + */ blk_start_plug(&plug); while ((bio = bio_list_pop(list))) submit_bio(bio); @@ -477,7 +516,7 @@ iblock_execute_write_same(struct se_cmd *cmd) goto fail; cmd->priv = ibr; - bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0); + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE); if (!bio) goto fail_free_ibr; @@ -490,8 +529,7 @@ iblock_execute_write_same(struct se_cmd *cmd) while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) != sg->length) { - bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, - 0); + bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE); if (!bio) goto fail_put_bios; @@ -685,9 +723,11 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; + unsigned int opf; unsigned bio_cnt; - int i, rc, op, op_flags = 0; + int i, rc; struct sg_mapping_iter prot_miter; + unsigned int miter_dir; if (data_direction == DMA_TO_DEVICE) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); @@ -696,15 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * Force writethrough using REQ_FUA if a volatile write cache * is not enabled, or if initiator set the Force Unit Access bit. 
*/ - op = REQ_OP_WRITE; + opf = REQ_OP_WRITE; + miter_dir = SG_MITER_TO_SG; if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { if (cmd->se_cmd_flags & SCF_FUA) - op_flags = REQ_FUA; + opf |= REQ_FUA; else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) - op_flags = REQ_FUA; + opf |= REQ_FUA; } } else { - op = REQ_OP_READ; + opf = REQ_OP_READ; + miter_dir = SG_MITER_FROM_SG; } ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); @@ -718,7 +760,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, return 0; } - bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags); + bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf); if (!bio) goto fail_free_ibr; @@ -730,8 +772,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (cmd->prot_type && dev->dev_attrib.pi_prot_type) sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents, - op == REQ_OP_READ ? SG_MITER_FROM_SG : - SG_MITER_TO_SG); + miter_dir); for_each_sg(sgl, sg, sgl_nents, i) { /* @@ -752,8 +793,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bio_cnt = 0; } - bio = iblock_get_bio(cmd, block_lba, sg_num, op, - op_flags); + bio = iblock_get_bio(cmd, block_lba, sg_num, opf); if (!bio) goto fail_put_bios; @@ -813,7 +853,8 @@ static unsigned int iblock_get_lbppbe(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct block_device *bd = ib_dev->ibd_bd; - int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + unsigned int logs_per_phys = + bdev_physical_block_size(bd) / bdev_logical_block_size(bd); return ilog2(logs_per_phys); } @@ -867,6 +908,8 @@ static const struct target_backend_ops iblock_ops = { .configure_device = iblock_configure_device, .destroy_device = iblock_destroy_device, .free_device = iblock_free_device, + .plug_device = iblock_plug_device, + .unplug_device = iblock_unplug_device, .parse_cdb = iblock_parse_cdb, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index cefc641145b3..8c55375d2f75 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -4,6 +4,7 @@ #include <linux/atomic.h> #include <linux/refcount.h> +#include <linux/blkdev.h> #include <target/target_core_base.h> #define IBLOCK_VERSION "4.0" @@ -17,6 +18,14 @@ struct iblock_req { #define IBDF_HAS_UDEV_PATH 0x01 +#define IBD_PLUGF_PLUGGED 0x01 + +struct iblock_dev_plug { + struct se_dev_plug se_plug; + struct blk_plug blk_plug; + unsigned long flags; +}; + struct iblock_dev { struct se_device dev; unsigned char ibd_udev_path[SE_UDEV_PATH_LEN]; @@ -24,6 +33,7 @@ struct iblock_dev { struct bio_set ibd_bio_set; struct block_device *ibd_bd; bool ibd_readonly; + struct iblock_dev_plug *ibd_plug; } ____cacheline_aligned; #endif /* TARGET_CORE_IBLOCK_H */ diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index e7b3c6e5d574..a343bcfa2180 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -34,6 +34,7 @@ struct target_fabric_configfs { struct config_item_type tf_discovery_cit; struct config_item_type tf_wwn_cit; struct config_item_type tf_wwn_fabric_stats_cit; + struct config_item_type tf_wwn_param_cit; struct config_item_type tf_tpg_cit; struct config_item_type tf_tpg_base_cit; struct config_item_type tf_tpg_lun_cit; @@ -153,6 +154,7 @@ void 
target_qf_do_work(struct work_struct *work); bool target_check_wce(struct se_device *dev); bool target_check_fua(struct se_device *dev); void __target_execute_cmd(struct se_cmd *, bool); +void target_queued_submit_work(struct work_struct *work); /* target_core_stat.c */ void target_stat_setup_dev_default_groups(struct se_device *); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d4cc43afe05b..d61dc166bc5f 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -896,9 +896,8 @@ static void core_scsi3_aptpl_reserve( struct se_node_acl *node_acl, struct t10_pr_registration *pr_reg) { - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; - memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); spin_lock(&dev->dev_reservation_lock); @@ -928,12 +927,10 @@ static int __core_scsi3_check_aptpl_registration( { struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &dev->t10_pr; - unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; - unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; + unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { }; + unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { }; u16 tpgt; - memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN); - memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN); /* * Copy Initiator Port information from struct se_node_acl */ @@ -1023,9 +1020,8 @@ static void __core_scsi3_dump_registration( enum register_type register_type) { struct se_portal_group *se_tpg = nacl->se_tpg; - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; - memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" @@ -1204,10 +1200,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( struct se_session *sess) { struct se_portal_group *tpg = nacl->se_tpg; - unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; + unsigned char buf[PR_REG_ISID_LEN] = { }; + unsigned char *isid_ptr = NULL; if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { - memset(&buf[0], 0, PR_REG_ISID_LEN); tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], PR_REG_ISID_LEN); isid_ptr = &buf[0]; @@ -1285,11 +1281,10 @@ static void __core_scsi3_free_registration( struct t10_reservation *pr_tmpl = &dev->t10_pr; struct se_node_acl *nacl = pr_reg->pr_reg_nacl; struct se_dev_entry *deve; - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; lockdep_assert_held(&pr_tmpl->registration_lock); - memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); if (!list_empty(&pr_reg->pr_reg_list)) @@ -2059,7 +2054,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &dev->t10_pr; - unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; + unsigned char isid_buf[PR_REG_ISID_LEN] = { }; + unsigned char *isid_ptr = NULL; sense_reason_t ret = TCM_NO_SENSE; int pr_holder = 0, type; @@ -2070,7 +2066,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, se_tpg = se_sess->se_tpg; if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { - memset(&isid_buf[0], 0, PR_REG_ISID_LEN); se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], PR_REG_ISID_LEN); isid_ptr = &isid_buf[0]; @@ -2282,11 +2277,9 @@ 
core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key) struct se_lun *se_lun = cmd->se_lun; struct t10_pr_registration *pr_reg, *pr_res_holder; struct t10_reservation *pr_tmpl = &dev->t10_pr; - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; sense_reason_t ret; - memset(i_buf, 0, PR_REG_ISID_ID_LEN); - if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2457,12 +2450,11 @@ static void __core_scsi3_complete_pro_release( int unreg) { const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; int pr_res_type = 0, pr_res_scope = 0; lockdep_assert_held(&dev->dev_reservation_lock); - memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* * Go ahead and release the current PR reservation holder. @@ -2768,11 +2760,10 @@ static void __core_scsi3_complete_pro_preempt( { struct se_node_acl *nacl = pr_reg->pr_reg_nacl; const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; - char i_buf[PR_REG_ISID_ID_LEN]; + char i_buf[PR_REG_ISID_ID_LEN] = { }; lockdep_assert_held(&dev->dev_reservation_lock); - memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* * Do an implicit RELEASE of the existing reservation. @@ -3158,7 +3149,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; const unsigned char *initiator_str; - char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { }; u32 tid_len, tmp_tid_len; int new_reg = 0, type, scope, matching_iname; sense_reason_t ret; @@ -3170,7 +3161,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - memset(i_buf, 0, PR_REG_ISID_ID_LEN); se_tpg = se_sess->se_tpg; tf_ops = se_tpg->se_tpg_tfo; /* diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 9ee797b8cb7e..1c9aeab93477 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -34,8 +34,6 @@ #include "target_core_internal.h" #include "target_core_pscsi.h" -#define ISPRINT(a) ((a >= ' ') && (a <= '~')) - static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev) { return container_of(dev, struct pscsi_dev_virt, dev); @@ -620,8 +618,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, unsigned char *buf; buf = transport_kmap_data_sg(cmd); - if (!buf) + if (!buf) { ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */ + } if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index f7c527a826fd..7b07e557dc8d 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -448,7 +448,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents, sense_reason_t ret; unsigned int offset; size_t rc; - int i; + int sg_cnt; buf = kzalloc(cmp_len, GFP_KERNEL); if (!buf) { @@ -467,7 +467,7 @@ compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents, */ offset = 0; ret = TCM_NO_SENSE; - for_each_sg(read_sgl, sg, read_nents, i) { + for_each_sg(read_sgl, sg, read_nents, sg_cnt) { unsigned int len = min(sg->length, cmp_len); unsigned char *addr = 
kmap_atomic(sg_page(sg)); diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 237309db4b33..62d15bcc3d93 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -31,9 +31,6 @@ #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) #endif -#define NONE "None" -#define ISPRINT(a) ((a >= ' ') && (a <= '~')) - #define SCSI_LU_INDEX 1 #define LU_COUNT 1 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 7347285471fa..e7fcbc09f9db 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -124,6 +124,8 @@ void core_tmr_abort_task( int i; for (i = 0; i < dev->queue_cnt; i++) { + flush_work(&dev->queues[i].sq.work); + spin_lock_irqsave(&dev->queues[i].lock, flags); list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list, state_list) { @@ -302,6 +304,8 @@ static void core_tmr_drain_state_list( * in the Control Mode Page. */ for (i = 0; i < dev->queue_cnt; i++) { + flush_work(&dev->queues[i].sq.work); + spin_lock_irqsave(&dev->queues[i].lock, flags); list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list, state_list) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5ecb9f18a53d..8fbfe75c5744 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -41,6 +41,7 @@ #include <trace/events/target.h> static struct workqueue_struct *target_completion_wq; +static struct workqueue_struct *target_submission_wq; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_ua_cache; struct kmem_cache *t10_pr_reg_cache; @@ -129,8 +130,15 @@ int init_se_kmem_caches(void) if (!target_completion_wq) goto out_free_lba_map_mem_cache; + target_submission_wq = alloc_workqueue("target_submission", + WQ_MEM_RECLAIM, 0); + if (!target_submission_wq) + goto out_free_completion_wq; + return 0; +out_free_completion_wq: + destroy_workqueue(target_completion_wq); out_free_lba_map_mem_cache: kmem_cache_destroy(t10_alua_lba_map_mem_cache); out_free_lba_map_cache: @@ -153,6 +161,7 @@ out: void release_se_kmem_caches(void) { + destroy_workqueue(target_submission_wq); destroy_workqueue(target_completion_wq); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); @@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd) /* May be called from interrupt context so must not sleep. */ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { - int success; + struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; + int success, cpu; unsigned long flags; if (target_cmd_interrupted(cmd)) @@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) INIT_WORK(&cmd->work, success ? target_complete_ok_work : target_complete_failure_work); - queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); + + if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) + cpu = cmd->cpuid; + else + cpu = wwn->cmd_compl_affinity; + + queue_work_on(cpu, target_completion_wq, &cmd->work); } EXPORT_SYMBOL(target_complete_cmd); @@ -1304,7 +1320,7 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, * Compare the data buffer size from the CDB with the data buffer limit from the transport * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. * - * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd(). 
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). * * Return: TCM_NO_SENSE */ @@ -1371,7 +1387,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) * * Preserves the value of @cmd->tag. */ -void transport_init_se_cmd( +void __target_init_cmd( struct se_cmd *cmd, const struct target_core_fabric_ops *tfo, struct se_session *se_sess, @@ -1382,7 +1398,6 @@ void transport_init_se_cmd( { INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); - INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->t_transport_stop_comp); cmd->free_compl = NULL; @@ -1391,6 +1406,7 @@ void transport_init_se_cmd( INIT_WORK(&cmd->work, NULL); kref_init(&cmd->cmd_kref); + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; cmd->se_tfo = tfo; cmd->se_sess = se_sess; cmd->data_length = data_length; @@ -1404,7 +1420,7 @@ void transport_init_se_cmd( cmd->state_active = false; } -EXPORT_SYMBOL(transport_init_se_cmd); +EXPORT_SYMBOL(__target_init_cmd); static sense_reason_t transport_check_alloc_task_attr(struct se_cmd *cmd) @@ -1428,11 +1444,10 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) } sense_reason_t -target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb) +target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) { sense_reason_t ret; - cmd->t_task_cdb = &cmd->__t_task_cdb[0]; /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb) * setup the pointer from __t_task_cdb to t_task_cdb. */ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { - cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), - GFP_KERNEL); + cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", @@ -1573,46 +1587,31 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, } /** - * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized - * se_cmd + use pre-allocated SGL memory. - * - * @se_cmd: command descriptor to submit + * target_init_cmd - initialize se_cmd + * @se_cmd: command descriptor to init * @se_sess: associated se_sess for endpoint - * @cdb: pointer to SCSI CDB * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables - * @sgl: struct scatterlist memory for unidirectional mapping - * @sgl_count: scatterlist count for unidirectional mapping - * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping - * @sgl_bidi_count: scatterlist count for bidirectional READ mapping - * @sgl_prot: struct scatterlist memory protection information - * @sgl_prot_count: scatterlist count for protection information * * Task tags are supported if the caller has set @se_cmd->tag. * - * Returns non zero to signal active I/O shutdown failure. All other - * setup exceptions will be returned as a SCSI CHECK_CONDITION response, - * but still return zero here. + * Returns: + * - less than zero to signal active I/O shutdown failure. + * - zero on success. * - * This may only be called from process context, and also currently - * assumes internal allocation of fabric payload buffer by target-core. 
+ * If the fabric driver calls target_stop_session, then it must check the + * return code and handle failures. This will never fail for other drivers, + * and the return code can be ignored. */ -int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, - unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, - u32 data_length, int task_attr, int data_dir, int flags, - struct scatterlist *sgl, u32 sgl_count, - struct scatterlist *sgl_bidi, u32 sgl_bidi_count, - struct scatterlist *sgl_prot, u32 sgl_prot_count) +int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *sense, u64 unpacked_lun, + u32 data_length, int task_attr, int data_dir, int flags) { struct se_portal_group *se_tpg; - sense_reason_t rc; - int ret; - - might_sleep(); se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); @@ -1621,52 +1620,71 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess if (flags & TARGET_SCF_USE_CPUID) se_cmd->se_cmd_flags |= SCF_USE_CPUID; /* + * Signal bidirectional data payloads to target-core + */ + if (flags & TARGET_SCF_BIDI_OP) + se_cmd->se_cmd_flags |= SCF_BIDI; + + if (flags & TARGET_SCF_UNKNOWN_SIZE) + se_cmd->unknown_data_length = 1; + /* * Initialize se_cmd for target operation. From this point * exceptions are handled by sending exception status via * target_core_fabric_ops->queue_status() callback */ - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, - data_length, data_dir, task_attr, sense, - unpacked_lun); + __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, + data_dir, task_attr, sense, unpacked_lun); - if (flags & TARGET_SCF_UNKNOWN_SIZE) - se_cmd->unknown_data_length = 1; /* * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second * kref_put() to happen during fabric packet acknowledgement. */ - ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); - if (ret) - return ret; - /* - * Signal bidirectional data payloads to target-core - */ - if (flags & TARGET_SCF_BIDI_OP) - se_cmd->se_cmd_flags |= SCF_BIDI; + return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); +} +EXPORT_SYMBOL_GPL(target_init_cmd); - rc = target_cmd_init_cdb(se_cmd, cdb); - if (rc) { - transport_send_check_condition_and_sense(se_cmd, rc, 0); - target_put_sess_cmd(se_cmd); - return 0; - } + +/** + * target_submit_prep - prepare cmd for submission + * @se_cmd: command descriptor to prep + * @cdb: pointer to SCSI CDB + * @sgl: struct scatterlist memory for unidirectional mapping + * @sgl_count: scatterlist count for unidirectional mapping + * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping + * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory protection information + * @sgl_prot_count: scatterlist count for protection information + * @gfp: gfp allocation type + * + * Returns: + * - less than zero to signal failure. + * - zero on success. + * + * If failure is returned, lio will use the caller's queue_status to complete + * the cmd.
+ */ +int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, + struct scatterlist *sgl, u32 sgl_count, + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count, + gfp_t gfp) +{ + sense_reason_t rc; + + rc = target_cmd_init_cdb(se_cmd, cdb, gfp); + if (rc) + goto send_cc_direct; /* * Locate se_lun pointer and attach it to struct se_cmd */ rc = transport_lookup_cmd_lun(se_cmd); - if (rc) { - transport_send_check_condition_and_sense(se_cmd, rc, 0); - target_put_sess_cmd(se_cmd); - return 0; - } + if (rc) + goto send_cc_direct; rc = target_cmd_parse_cdb(se_cmd); - if (rc != 0) { - transport_generic_request_failure(se_cmd, rc); - return 0; - } + if (rc != 0) + goto generic_fail; /* * Save pointers for SGLs containing protection information, @@ -1686,6 +1704,41 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess if (sgl_count != 0) { BUG_ON(!sgl); + rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, + sgl_bidi, sgl_bidi_count); + if (rc != 0) + goto generic_fail; + } + + return 0; + +send_cc_direct: + transport_send_check_condition_and_sense(se_cmd, rc, 0); + target_put_sess_cmd(se_cmd); + return -EIO; + +generic_fail: + transport_generic_request_failure(se_cmd, rc); + return -EIO; +} +EXPORT_SYMBOL_GPL(target_submit_prep); + +/** + * target_submit - perform final initialization and submit cmd to LIO core + * @se_cmd: command descriptor to submit + * + * target_submit_prep must have been called on the cmd, and this must be + * called from process context. + */ +void target_submit(struct se_cmd *se_cmd) +{ + struct scatterlist *sgl = se_cmd->t_data_sg; + unsigned char *buf = NULL; + + might_sleep(); + + if (se_cmd->t_data_nents != 0) { + BUG_ON(!sgl); /* * A work-around for tcm_loop as some userspace code via * scsi-generic do not memset their associated read buffers, @@ -1696,8 +1749,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && se_cmd->data_direction == DMA_FROM_DEVICE) { - unsigned char *buf = NULL; - if (sgl) buf = kmap(sg_page(sgl)) + sgl->offset; @@ -1707,12 +1758,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess } } - rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, - sgl_bidi, sgl_bidi_count); - if (rc != 0) { - transport_generic_request_failure(se_cmd, rc); - return 0; - } } /* @@ -1722,9 +1767,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess core_alua_check_nonop_delay(se_cmd); transport_handle_cdb_direct(se_cmd); - return 0; } -EXPORT_SYMBOL(target_submit_cmd_map_sgls); +EXPORT_SYMBOL_GPL(target_submit); /** * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd @@ -1741,25 +1785,109 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls); * * Task tags are supported if the caller has set @se_cmd->tag. * - * Returns non zero to signal active I/O shutdown failure. All other - * setup exceptions will be returned as a SCSI CHECK_CONDITION response, - * but still return zero here. - * * This may only be called from process context, and also currently * assumes internal allocation of fabric payload buffer by target-core. * * It also assumes internal target core SGL memory allocation. + * + * This function must only be used by drivers that do their own + * sync during shutdown and do not use target_stop_session.
If there + * is a failure this function will call into the fabric driver's + * queue_status with a CHECK_CONDITION. */ -int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, +void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags) { - return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, - unpacked_lun, data_length, task_attr, data_dir, - flags, NULL, 0, NULL, 0, NULL, 0); + int rc; + + rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length, + task_attr, data_dir, flags); + WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n"); + if (rc) + return; + + if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, + GFP_KERNEL)) + return; + + target_submit(se_cmd); } EXPORT_SYMBOL(target_submit_cmd); + +static struct se_dev_plug *target_plug_device(struct se_device *se_dev) +{ + struct se_dev_plug *se_plug; + + if (!se_dev->transport->plug_device) + return NULL; + + se_plug = se_dev->transport->plug_device(se_dev); + if (!se_plug) + return NULL; + + se_plug->se_dev = se_dev; + /* + * We have a ref to the lun at this point, but the cmds could + * complete before we unplug, so grab a ref to the se_device so we + * can call back into the backend. + */ + config_group_get(&se_dev->dev_group); + return se_plug; +} + +static void target_unplug_device(struct se_dev_plug *se_plug) +{ + struct se_device *se_dev = se_plug->se_dev; + + se_dev->transport->unplug_device(se_plug); + config_group_put(&se_dev->dev_group); +} + +void target_queued_submit_work(struct work_struct *work) +{ + struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); + struct se_cmd *se_cmd, *next_cmd; + struct se_dev_plug *se_plug = NULL; + struct se_device *se_dev = NULL; + struct llist_node *cmd_list; + + cmd_list = llist_del_all(&sq->cmd_list); + if (!cmd_list) + /* Previous call took what we were queued to submit */ + return; + + cmd_list = llist_reverse_order(cmd_list); + llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) { + if (!se_dev) { + se_dev = se_cmd->se_dev; + se_plug = target_plug_device(se_dev); + } + + target_submit(se_cmd); + } + + if (se_plug) + target_unplug_device(se_plug); +} + +/** + * target_queue_submission - queue the cmd to run on the LIO workqueue + * @se_cmd: command descriptor to submit + */ +void target_queue_submission(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + int cpu = se_cmd->cpuid; + struct se_cmd_queue *sq; + + sq = &se_dev->queues[cpu].sq; + llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); + queue_work_on(cpu, target_submission_wq, &sq->work); +} +EXPORT_SYMBOL_GPL(target_queue_submission); + static void target_complete_tmr_failure(struct work_struct *work) { struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); @@ -1799,8 +1927,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, - 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun); + __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, + 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun); /* * FIXME: Currently expect caller to handle se_cmd->se_tmr_req * allocation failure. @@ -2778,9 +2906,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) * invocations before se_cmd descriptor release. 
*/ if (ack_kref) { - if (!kref_get_unless_zero(&se_cmd->cmd_kref)) - return -EINVAL; - + kref_get(&se_cmd->cmd_kref); se_cmd->se_cmd_flags |= SCF_ACK_KREF; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index bf73cd5f4b04..bdfc057f000c 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -8,13 +8,12 @@ #include <linux/spinlock.h> #include <linux/module.h> -#include <linux/idr.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/parser.h> #include <linux/vmalloc.h> #include <linux/uio_driver.h> -#include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/stringify.h> #include <linux/bitops.h> #include <linux/highmem.h> @@ -111,6 +110,7 @@ struct tcmu_dev { struct kref kref; struct se_device se_dev; + struct se_dev_plug se_plug; char *name; struct se_hba *hba; @@ -119,6 +119,7 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BROKEN 1 #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 +#define TCM_DEV_BIT_PLUGGED 4 unsigned long flags; struct uio_info uio_info; @@ -143,9 +144,9 @@ struct tcmu_dev { uint32_t dbi_max; uint32_t dbi_thresh; unsigned long *data_bitmap; - struct radix_tree_root data_blocks; + struct xarray data_blocks; - struct idr commands; + struct xarray commands; struct timer_list cmd_timer; unsigned int cmd_time_out; @@ -500,13 +501,13 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, int prev_dbi, int *iov_cnt) { struct page *page; - int ret, dbi; + int dbi; dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); if (dbi == udev->dbi_thresh) return -1; - page = radix_tree_lookup(&udev->data_blocks, dbi); + page = xa_load(&udev->data_blocks, dbi); if (!page) { if (atomic_add_return(1, &global_db_count) > tcmu_global_max_blocks) @@ -517,8 +518,7 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, if (!page) goto err_alloc; - ret = radix_tree_insert(&udev->data_blocks, dbi, page); - if (ret) + if (xa_store(&udev->data_blocks, dbi, page, GFP_NOIO)) goto err_insert; } @@ -557,7 +557,7 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, static inline struct page * tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) { - return radix_tree_lookup(&udev->data_blocks, dbi); + return xa_load(&udev->data_blocks, dbi); } static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) @@ -959,6 +959,25 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) return cmd_head; } +static void tcmu_unplug_device(struct se_dev_plug *se_plug) +{ + struct se_device *se_dev = se_plug->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags); + uio_event_notify(&udev->uio_info); +} + +static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev) +{ + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) + return &udev->se_plug; + + return NULL; +} + /** * queue_cmd_ring - queue cmd to ring or internally * @tcmu_cmd: cmd to queue @@ -977,8 +996,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) struct tcmu_mailbox *mb = udev->mb_addr; struct tcmu_cmd_entry *entry; struct iovec *iov; - int iov_cnt, iov_bidi_cnt, cmd_id; - uint32_t cmd_head; + int iov_cnt, iov_bidi_cnt; + uint32_t cmd_id, cmd_head; uint64_t cdb_off; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE; @@ -1031,8 +1050,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, 
sense_reason_t *scsi_err) */ goto free_and_queue; - cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); - if (cmd_id < 0) { + if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), + GFP_NOWAIT) < 0) { pr_err("tcmu: Could not allocate cmd id.\n"); tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); @@ -1086,8 +1105,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); - /* TODO: only if FLUSH and FUA? */ - uio_event_notify(&udev->uio_info); + if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) + uio_event_notify(&udev->uio_info); return 0; @@ -1253,7 +1272,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); - tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL); + tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO); if (!tmr) goto unlock; @@ -1415,7 +1434,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) } WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); - cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); + cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); if (!cmd) { pr_err("cmd_id %u not found, ring is broken\n", entry->hdr.cmd_id); @@ -1433,7 +1452,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) free_space = tcmu_run_tmr_queue(udev); if (atomic_read(&global_db_count) > tcmu_global_max_blocks && - idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { + xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { /* * Allocated blocks exceeded global block limit, currently no * more pending or waiting commands so try to reclaim blocks. 
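Editor's note: the tcmu hunks around here replace the commands IDR and the data_blocks radix tree with XArrays. A minimal sketch of the ID-allocation pattern involved, using a hypothetical example_commands array; DEFINE_XARRAY_ALLOC1 gives the XA_FLAGS_ALLOC1 behavior set via xa_init_flags() below, so IDs start at 1 like the old idr_alloc(..., 1, USHRT_MAX, ...):

#include <linux/xarray.h>

/* Hypothetical stand-in for udev->commands. */
static DEFINE_XARRAY_ALLOC1(example_commands);

static int example_track_cmd(void *cmd, u32 *cmd_id)
{
	/* Same limits and gfp as the xa_alloc() call in queue_cmd_ring(). */
	return xa_alloc(&example_commands, cmd_id, cmd,
			XA_LIMIT(1, 0xffff), GFP_NOWAIT);
}

static void *example_complete_cmd(u32 cmd_id)
{
	/* Replaces idr_remove() in the completion path. */
	return xa_erase(&example_commands, cmd_id);
}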
@@ -1556,12 +1575,12 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&udev->qfull_queue); INIT_LIST_HEAD(&udev->tmr_queue); INIT_LIST_HEAD(&udev->inflight_queue); - idr_init(&udev->commands); + xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); - INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); + xa_init(&udev->data_blocks); return &udev->se_dev; } @@ -1585,19 +1604,19 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) return -EINVAL; } -static void tcmu_blocks_release(struct radix_tree_root *blocks, - int start, int end) +static void tcmu_blocks_release(struct xarray *blocks, unsigned long first, + unsigned long last) { - int i; + XA_STATE(xas, blocks, first); struct page *page; - for (i = start; i < end; i++) { - page = radix_tree_delete(blocks, i); - if (page) { - __free_page(page); - atomic_dec(&global_db_count); - } + xas_lock(&xas); + xas_for_each(&xas, page, last) { + xas_store(&xas, NULL); + __free_page(page); + atomic_dec(&global_db_count); } + xas_unlock(&xas); } static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) @@ -1616,7 +1635,7 @@ static void tcmu_dev_kref_release(struct kref *kref) struct se_device *dev = &udev->se_dev; struct tcmu_cmd *cmd; bool all_expired = true; - int i; + unsigned long i; vfree(udev->mb_addr); udev->mb_addr = NULL; @@ -1628,7 +1647,7 @@ static void tcmu_dev_kref_release(struct kref *kref) /* Upper layer should drain all requests before calling this */ mutex_lock(&udev->cmdr_lock); - idr_for_each_entry(&udev->commands, cmd, i) { + xa_for_each(&udev->commands, i, cmd) { if (tcmu_check_and_free_pending_cmd(cmd) != 0) all_expired = false; } @@ -1636,10 +1655,10 @@ static void tcmu_dev_kref_release(struct kref *kref) tcmu_remove_all_queued_tmr(udev); if (!list_empty(&udev->qfull_queue)) all_expired = false; - idr_destroy(&udev->commands); + xa_destroy(&udev->commands); WARN_ON(!all_expired); - tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); + tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max); bitmap_free(udev->data_bitmap); mutex_unlock(&udev->cmdr_lock); @@ -2226,16 +2245,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) { struct tcmu_mailbox *mb; struct tcmu_cmd *cmd; - int i; + unsigned long i; mutex_lock(&udev->cmdr_lock); - idr_for_each_entry(&udev->commands, cmd, i) { + xa_for_each(&udev->commands, i, cmd) { pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", cmd->cmd_id, udev->name, test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); - idr_remove(&udev->commands, i); + xa_erase(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { WARN_ON(!cmd->se_cmd); list_del_init(&cmd->queue_entry); @@ -2863,6 +2882,8 @@ static struct target_backend_ops tcmu_ops = { .configure_device = tcmu_configure_device, .destroy_device = tcmu_destroy_device, .free_device = tcmu_free_device, + .unplug_device = tcmu_unplug_device, + .plug_device = tcmu_plug_device, .parse_cdb = tcmu_parse_cdb, .tmr_notify = tcmu_tmr_notify, .set_configfs_dev_params = tcmu_set_configfs_dev_params, @@ -2923,7 +2944,7 @@ static void find_free_blocks(void) unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); /* Release the block pages */ - tcmu_blocks_release(&udev->data_blocks, start, end); + tcmu_blocks_release(&udev->data_blocks, start, end - 1); mutex_unlock(&udev->cmdr_lock); total_freed += end - start; diff --git 
a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 66d6f1d06f21..d31ed071cb08 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -554,7 +554,7 @@ static int target_xcopy_setup_pt_cmd( } cmd->se_cmd_flags |= SCF_SE_LUN_CMD; - if (target_cmd_init_cdb(cmd, cdb)) + if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL)) return -EINVAL; cmd->tag = 0; @@ -615,8 +615,8 @@ static int target_xcopy_read_source( pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", (unsigned long long)src_lba, src_sectors, length); - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, - DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); + __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, + DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0], remote_port); @@ -660,8 +660,8 @@ static int target_xcopy_write_destination( pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", (unsigned long long)dst_lba, dst_sectors, length); - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, - DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); + __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length, + DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0], remote_port); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 768f250680d9..410b723f9d79 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -543,16 +543,22 @@ static void ft_send_work(struct work_struct *work) fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd); cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid; + /* * Use a single se_cmd->cmd_kref as we expect to release se_cmd * directly from ft_check_stop_free callback in response path. */ - if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, - &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), - ntohl(fcp->fc_dl), task_attr, data_dir, - TARGET_SCF_ACK_KREF)) + if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess, + &cmd->ft_sense_buffer[0], + scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), + task_attr, data_dir, TARGET_SCF_ACK_KREF)) goto err; + if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0, + NULL, 0, GFP_KERNEL)) + return; + + target_submit(&cmd->se_cmd); pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd); return;
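Editor's note: the tcm_fc conversion above keeps the direct target_submit() path, while tcm_loop (earlier in this diff) defers to the new per-CPU submission workqueue instead. A minimal sketch of that queued variant from an atomic ->queuecommand-style context, with hypothetical driver locals; GFP_ATOMIC matters here because target_cmd_init_cdb() now allocates with the caller's gfp:

/* Sketch only: queued submission from atomic context, as tcm_loop does. */
static int my_queuecommand_sketch(struct se_cmd *se_cmd,
				  struct se_session *se_sess,
				  struct scsi_cmnd *sc, unsigned char *sense)
{
	/* Return value is safe to ignore for drivers that never call
	 * target_stop_session (see the target_init_cmd kernel-doc). */
	target_init_cmd(se_cmd, se_sess, sense, sc->device->lun,
			scsi_bufflen(sc), TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0);

	if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
			       scsi_sg_count(sc), NULL, 0, NULL, 0,
			       GFP_ATOMIC))
		return 0;	/* core already sent CHECK_CONDITION */

	/* Adds the cmd to dev->queues[cpu].sq; target_queued_submit_work()
	 * then submits the batch inside plug_device()/unplug_device(). */
	target_queue_submission(se_cmd);
	return 0;
}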