From e7b7af6e98b390c1f7d5d208d18cb7d8b7beb0f1 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 14 Nov 2014 12:54:36 -0800 Subject: target: Fix target_core_register_fabric() for built-in fabric modules If we try to create a fabric directory in configfs for one of the default hard-coded fabric modules (iscsi and loopback), and that fabric is actually built into the kernel, then the operation will spuriously fail because request_module() (for the code that's already linked into the kernel) fails. Fix this by running the autoprobing code only if an initial target_core_get_fabric() fails. Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 75 ++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 33 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 79f9296a08ae..41498b696641 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -126,48 +126,57 @@ static struct config_group *target_core_register_fabric( pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); - /* - * Below are some hardcoded request_module() calls to automatically - * local fabric modules when the following is called: - * - * mkdir -p /sys/kernel/config/target/$MODULE_NAME - * - * Note that this does not limit which TCM fabric module can be - * registered, but simply provids auto loading logic for modules with - * mkdir(2) system calls with known TCM fabric modules. - */ - if (!strncmp(name, "iscsi", 5)) { + + tf = target_core_get_fabric(name); + if (!tf) { + pr_err("target_core_register_fabric() trying autoload for %s\n", + name); + /* - * Automatically load the LIO Target fabric module when the - * following is called: + * Below are some hardcoded request_module() calls to automatically + * local fabric modules when the following is called: * - * mkdir -p $CONFIGFS/target/iscsi - */ - ret = request_module("iscsi_target_mod"); - if (ret < 0) { - pr_err("request_module() failed for" - " iscsi_target_mod.ko: %d\n", ret); - return ERR_PTR(-EINVAL); - } - } else if (!strncmp(name, "loopback", 8)) { - /* - * Automatically load the tcm_loop fabric module when the - * following is called: + * mkdir -p /sys/kernel/config/target/$MODULE_NAME * - * mkdir -p $CONFIGFS/target/loopback + * Note that this does not limit which TCM fabric module can be + * registered, but simply provids auto loading logic for modules with + * mkdir(2) system calls with known TCM fabric modules. 
*/ - ret = request_module("tcm_loop"); - if (ret < 0) { - pr_err("request_module() failed for" - " tcm_loop.ko: %d\n", ret); - return ERR_PTR(-EINVAL); + + if (!strncmp(name, "iscsi", 5)) { + /* + * Automatically load the LIO Target fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/iscsi + */ + ret = request_module("iscsi_target_mod"); + if (ret < 0) { + pr_err("request_module() failed for" + " iscsi_target_mod.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } + } else if (!strncmp(name, "loopback", 8)) { + /* + * Automatically load the tcm_loop fabric module when the + * following is called: + * + * mkdir -p $CONFIGFS/target/loopback + */ + ret = request_module("tcm_loop"); + if (ret < 0) { + pr_err("request_module() failed for" + " tcm_loop.ko: %d\n", ret); + return ERR_PTR(-EINVAL); + } } + + tf = target_core_get_fabric(name); } - tf = target_core_get_fabric(name); if (!tf) { pr_err("target_core_get_fabric() failed for %s\n", - name); + name); return ERR_PTR(-EINVAL); } pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" -- cgit v1.2.3 From 2ed37f6c9b69092afbbf2a990336f8cbd303947d Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 21 Nov 2014 10:25:45 +0100 Subject: target: Deletion of unnecessary checks before the function call "module_put" The module_put() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_transport.c | 3 +-- drivers/target/target_core_hba.c | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c index 882728fac30c..08217d62fb0d 100644 --- a/drivers/target/iscsi/iscsi_target_transport.c +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type) void iscsit_put_transport(struct iscsit_transport *t) { - if (t->owner) - module_put(t->owner); + module_put(t->owner); } int iscsit_register_transport(struct iscsit_transport *t) diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index a25051a37dd7..e6e496ff9546 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -137,8 +137,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) return hba; out_module_put: - if (hba->transport->owner) - module_put(hba->transport->owner); + module_put(hba->transport->owner); hba->transport = NULL; out_free_hba: kfree(hba); @@ -159,8 +158,7 @@ core_delete_hba(struct se_hba *hba) pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" " Core\n", hba->hba_id); - if (hba->transport->owner) - module_put(hba->transport->owner); + module_put(hba->transport->owner); hba->transport = NULL; kfree(hba); -- cgit v1.2.3 From 73112edca9be1d7c37d0b94348c82c3742c3ef58 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 13:59:20 -0800 Subject: target: Move dev_cit to struct se_subsystem_api This patch adds initial support for dev_cit as external config_item_type. This includes a new struct target_backend_cits to hold the external CITs within struct se_subsystem_api, and target_core_setup_sub_cits() to be used by backend drivers ahead of transport_subsystem_register(). 
It adds a TB_CIT_SETUP() helper following target_core_fabric_configfs.c to perform the config_item_type assignments. Also, drop left-over target_core_dev_cit from target_core_configfs.c code and update comments. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 33 ++++++++++++++++++++++++--------- drivers/target/target_core_hba.c | 1 + include/target/target_core_backend.h | 9 +++++++++ 3 files changed, 34 insertions(+), 9 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 41498b696641..f7b2186ba57b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -50,6 +50,19 @@ #include "target_core_rd.h" #include "target_core_xcopy.h" +#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ +static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \ +{ \ + struct target_backend_cits *tbc = &sa->tb_cits; \ + struct config_item_type *cit = &tbc->tb_##_name##_cit; \ + \ + cit->ct_item_ops = _item_ops; \ + cit->ct_group_ops = _group_ops; \ + cit->ct_attrs = _attrs; \ + cit->ct_owner = sa->owner; \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ +} + extern struct t10_alua_lu_gp *default_lu_gp; static LIST_HEAD(g_tf_list); @@ -1470,7 +1483,7 @@ static struct config_item_type target_core_dev_pr_cit = { /* End functions for struct config_item_type target_core_dev_pr_cit */ -/* Start functions for struct config_item_type target_core_dev_cit */ +/* Start functions for struct config_item_type tb_dev_cit */ static ssize_t target_core_show_dev_info(void *p, char *page) { @@ -1934,7 +1947,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = { .store = target_core_store_dev_lba_map, }; -static struct configfs_attribute *lio_core_dev_attrs[] = { +static struct configfs_attribute *target_core_dev_attrs[] = { &target_core_attr_dev_info.attr, &target_core_attr_dev_control.attr, &target_core_attr_dev_alias.attr, @@ -1993,13 +2006,9 @@ static struct configfs_item_operations target_core_dev_item_ops = { .store_attribute = target_core_dev_store, }; -static struct config_item_type target_core_dev_cit = { - .ct_item_ops = &target_core_dev_item_ops, - .ct_attrs = lio_core_dev_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs); -/* End functions for struct config_item_type target_core_dev_cit */ +/* End functions for struct config_item_type tb_dev_cit */ /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ @@ -2815,7 +2824,7 @@ static struct config_group *target_core_make_subdev( if (!dev_cg->default_groups) goto out_free_device; - config_group_init_type_name(dev_cg, name, &target_core_dev_cit); + config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit); config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", &target_core_dev_attrib_cit); config_group_init_type_name(&dev->dev_pr_group, "pr", @@ -3119,6 +3128,12 @@ static struct config_item_type target_core_cit = { /* Stop functions for struct config_item_type target_core_hba_cit */ +void target_core_setup_sub_cits(struct se_subsystem_api *sa) +{ + target_core_setup_dev_cit(sa); +} +EXPORT_SYMBOL(target_core_setup_sub_cits); + static int __init target_core_init_configfs(void) { struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index e6e496ff9546..ff95f95dcd13 100644 --- 
a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "target_core_internal.h" diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 9adc1bca1178..7b84436541b4 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -5,6 +5,10 @@ #define TRANSPORT_PLUGIN_VHBA_PDEV 2 #define TRANSPORT_PLUGIN_VHBA_VDEV 3 +struct target_backend_cits { + struct config_item_type tb_dev_cit; +}; + struct se_subsystem_api { struct list_head sub_api_list; @@ -44,6 +48,8 @@ struct se_subsystem_api { int (*init_prot)(struct se_device *); int (*format_prot)(struct se_device *); void (*free_prot)(struct se_device *); + + struct target_backend_cits tb_cits; }; struct sbc_ops { @@ -96,4 +102,7 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, void array_free(void *array, int n); +/* From target_core_configfs.c to setup default backend config_item_types */ +void target_core_setup_sub_cits(struct se_subsystem_api *); + #endif /* TARGET_CORE_BACKEND_H */ -- cgit v1.2.3 From f79a897efa6a95d698c4064e48e0a5afddc0ad9f Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 14:51:14 -0800 Subject: target: Move dev_attrib_cit to struct se_subsystem_api This patch adds support for dev_attrib_cit as an external config_item_type using TB_CIT_SETUP() helper macro, and sets both ct_item_ops + ct_attr following existing code. It updates target_core_make_subdev() + target_core_setup_dev_attrib_cit() + struct target_backend_cits, and drops left-over target_core_dev_attrib_cit from target_core_configfs.c code and update comments. This patch introduces no functional change from existing code. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 14 ++++++-------- include/target/target_core_backend.h | 1 + 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index f7b2186ba57b..78ed857cc7b9 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -584,7 +584,7 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister); // Stop functions called by external Target Fabrics Modules //############################################################################*/ -/* Start functions for struct config_item_type target_core_dev_attrib_cit */ +/* Start functions for struct config_item_type tb_dev_attrib_cit */ #define DEF_DEV_ATTRIB_SHOW(_name) \ static ssize_t target_core_dev_show_attr_##_name( \ @@ -767,13 +767,10 @@ static struct configfs_item_operations target_core_dev_attrib_ops = { .store_attribute = target_core_dev_attrib_attr_store, }; -static struct config_item_type target_core_dev_attrib_cit = { - .ct_item_ops = &target_core_dev_attrib_ops, - .ct_attrs = target_core_dev_attrib_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, + target_core_dev_attrib_attrs); -/* End functions for struct config_item_type target_core_dev_attrib_cit */ +/* End functions for struct config_item_type tb_dev_attrib_cit */ /* Start functions for struct config_item_type target_core_dev_wwn_cit */ @@ -2826,7 +2823,7 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit); config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", - &target_core_dev_attrib_cit); + &t->tb_cits.tb_dev_attrib_cit); 
config_group_init_type_name(&dev->dev_pr_group, "pr", &target_core_dev_pr_cit); config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", @@ -3131,6 +3128,7 @@ static struct config_item_type target_core_cit = { void target_core_setup_sub_cits(struct se_subsystem_api *sa) { target_core_setup_dev_cit(sa); + target_core_setup_dev_attrib_cit(sa); } EXPORT_SYMBOL(target_core_setup_sub_cits); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 7b84436541b4..735f85672ec6 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -7,6 +7,7 @@ struct target_backend_cits { struct config_item_type tb_dev_cit; + struct config_item_type tb_dev_attrib_cit; }; struct se_subsystem_api { -- cgit v1.2.3 From 91e2e39ba89cd627beddafea2a760684adcec89d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 14:57:01 -0800 Subject: target: Move dev_pr_cit to struct se_subsystem_api This patch adds support for dev_pr_cit as an external config_item_type using TB_CIT_SETUP() helper macro, and sets both ct_item_ops + ct_attr following existing code. It updates target_core_make_subdev() + target_core_setup_dev_attrib_cit() + struct target_backend_cits, and drops left-over target_core_dev_pr_cit from target_core_configfs.c code and update comments. This patch introduces no functional change from existing code. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 13 +++++-------- include/target/target_core_backend.h | 1 + 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 78ed857cc7b9..49d51ba40fca 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1011,7 +1011,7 @@ static struct config_item_type target_core_dev_wwn_cit = { /* End functions for struct config_item_type target_core_dev_wwn_cit */ -/* Start functions for struct config_item_type target_core_dev_pr_cit */ +/* Start functions for struct config_item_type tb_dev_pr_cit */ CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); #define SE_DEV_PR_ATTR(_name, _mode) \ @@ -1472,13 +1472,9 @@ static struct configfs_item_operations target_core_dev_pr_ops = { .store_attribute = target_core_dev_pr_attr_store, }; -static struct config_item_type target_core_dev_pr_cit = { - .ct_item_ops = &target_core_dev_pr_ops, - .ct_attrs = target_core_dev_pr_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs); -/* End functions for struct config_item_type target_core_dev_pr_cit */ +/* End functions for struct config_item_type tb_dev_pr_cit */ /* Start functions for struct config_item_type tb_dev_cit */ @@ -2825,7 +2821,7 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", &t->tb_cits.tb_dev_attrib_cit); config_group_init_type_name(&dev->dev_pr_group, "pr", - &target_core_dev_pr_cit); + &t->tb_cits.tb_dev_pr_cit); config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", &target_core_dev_wwn_cit); config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, @@ -3129,6 +3125,7 @@ void target_core_setup_sub_cits(struct se_subsystem_api *sa) { target_core_setup_dev_cit(sa); target_core_setup_dev_attrib_cit(sa); + target_core_setup_dev_pr_cit(sa); } EXPORT_SYMBOL(target_core_setup_sub_cits); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h 
index 735f85672ec6..a47e78989b6f 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -8,6 +8,7 @@ struct target_backend_cits { struct config_item_type tb_dev_cit; struct config_item_type tb_dev_attrib_cit; + struct config_item_type tb_dev_pr_cit; }; struct se_subsystem_api { -- cgit v1.2.3 From f8d389c6f6074aa382963ae13f9d77d99ec3e92d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 15:01:12 -0800 Subject: target: Move dev_wwn_cit to struct se_subsystem_api This patch adds support for dev_wwn_cit as an external config_item_type using TB_CIT_SETUP() helper macro, and sets both ct_item_ops + ct_attr following existing code. It updates target_core_make_subdev() + target_core_setup_dev_attrib_cit() + struct target_backend_cits, and drops left-over target_core_dev_wwn_cit from target_core_configfs.c code and update comments. This patch introduces no functional change from existing code. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 13 +++++-------- include/target/target_core_backend.h | 1 + 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 49d51ba40fca..43d180e03931 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -772,7 +772,7 @@ TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, /* End functions for struct config_item_type tb_dev_attrib_cit */ -/* Start functions for struct config_item_type target_core_dev_wwn_cit */ +/* Start functions for struct config_item_type tb_dev_wwn_cit */ CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); #define SE_DEV_WWN_ATTR(_name, _mode) \ @@ -1003,13 +1003,9 @@ static struct configfs_item_operations target_core_dev_wwn_ops = { .store_attribute = target_core_dev_wwn_attr_store, }; -static struct config_item_type target_core_dev_wwn_cit = { - .ct_item_ops = &target_core_dev_wwn_ops, - .ct_attrs = target_core_dev_wwn_attrs, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs); -/* End functions for struct config_item_type target_core_dev_wwn_cit */ +/* End functions for struct config_item_type tb_dev_wwn_cit */ /* Start functions for struct config_item_type tb_dev_pr_cit */ @@ -2823,7 +2819,7 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(&dev->dev_pr_group, "pr", &t->tb_cits.tb_dev_pr_cit); config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", - &target_core_dev_wwn_cit); + &t->tb_cits.tb_dev_wwn_cit); config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, "alua", &target_core_alua_tg_pt_gps_cit); config_group_init_type_name(&dev->dev_stat_grps.stat_group, @@ -3126,6 +3122,7 @@ void target_core_setup_sub_cits(struct se_subsystem_api *sa) target_core_setup_dev_cit(sa); target_core_setup_dev_attrib_cit(sa); target_core_setup_dev_pr_cit(sa); + target_core_setup_dev_wwn_cit(sa); } EXPORT_SYMBOL(target_core_setup_sub_cits); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index a47e78989b6f..68fad7814982 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -9,6 +9,7 @@ struct target_backend_cits { struct config_item_type tb_dev_cit; struct config_item_type tb_dev_attrib_cit; struct config_item_type tb_dev_pr_cit; + struct config_item_type tb_dev_wwn_cit; }; struct se_subsystem_api { -- cgit v1.2.3 From 
72aca57bd3fef535972b64c9cd76a088107c589c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 15:06:23 -0800 Subject: target: Move dev_alua_tg_pt_gps_cit to struct se_subsystem_api This patch adds support for dev_alua_tg_pt_gps_cit as an external config_item_type using TB_CIT_SETUP() helper macro, and sets only ct_group_ops following existing code. It updates target_core_make_subdev() + target_core_setup_dev_attrib_cit() + struct target_backend_cits, and drops left-over target_core_dev_alua_tg_pt_gps_cit from target_core_configfs.c code and update comments. This patch introduces no functional change from existing code. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 12 +++++------- include/target/target_core_backend.h | 1 + 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 43d180e03931..0cf41ad54bbf 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2677,7 +2677,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = { /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ -/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ +/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */ static struct config_group *target_core_alua_create_tg_pt_gp( struct config_group *group, @@ -2728,12 +2728,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { .drop_item = &target_core_alua_drop_tg_pt_gp, }; -static struct config_item_type target_core_alua_tg_pt_gps_cit = { - .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL); -/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ +/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */ /* Start functions for struct config_item_type target_core_alua_cit */ @@ -2821,7 +2818,7 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", &t->tb_cits.tb_dev_wwn_cit); config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, - "alua", &target_core_alua_tg_pt_gps_cit); + "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit); config_group_init_type_name(&dev->dev_stat_grps.stat_group, "statistics", &target_core_stat_cit); @@ -3123,6 +3120,7 @@ void target_core_setup_sub_cits(struct se_subsystem_api *sa) target_core_setup_dev_attrib_cit(sa); target_core_setup_dev_pr_cit(sa); target_core_setup_dev_wwn_cit(sa); + target_core_setup_dev_alua_tg_pt_gps_cit(sa); } EXPORT_SYMBOL(target_core_setup_sub_cits); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 68fad7814982..157d173f28fb 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -10,6 +10,7 @@ struct target_backend_cits { struct config_item_type tb_dev_attrib_cit; struct config_item_type tb_dev_pr_cit; struct config_item_type tb_dev_wwn_cit; + struct config_item_type tb_dev_alua_tg_pt_gps_cit; }; struct se_subsystem_api { -- cgit v1.2.3 From d23ab570bcb1de0256f1dcea0b8ad3af8e534e40 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 15:09:32 -0800 Subject: target: Move dev_stat_cit to struct se_subsystem_api This patch adds support for dev_stat_cit as an external config_item_type using 
TB_CIT_SETUP() helper macro, and sets only ct_group_ops following existing code. It updates target_core_make_subdev() + target_core_setup_dev_attrib_cit() + struct target_backend_cits, and drops left-over target_core_dev_stat_cit from target_core_configfs.c code and update comments. This patch introduces no functional change from existing code. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 12 +++++------- include/target/target_core_backend.h | 1 + 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 0cf41ad54bbf..2903d8931625 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2748,7 +2748,7 @@ static struct config_item_type target_core_alua_cit = { /* End functions for struct config_item_type target_core_alua_cit */ -/* Start functions for struct config_item_type target_core_stat_cit */ +/* Start functions for struct config_item_type tb_dev_stat_cit */ static struct config_group *target_core_stat_mkdir( struct config_group *group, @@ -2769,12 +2769,9 @@ static struct configfs_group_operations target_core_stat_group_ops = { .drop_item = &target_core_stat_rmdir, }; -static struct config_item_type target_core_stat_cit = { - .ct_group_ops = &target_core_stat_group_ops, - .ct_owner = THIS_MODULE, -}; +TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL); -/* End functions for struct config_item_type target_core_stat_cit */ +/* End functions for struct config_item_type tb_dev_stat_cit */ /* Start functions for struct config_item_type target_core_hba_cit */ @@ -2820,7 +2817,7 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit); config_group_init_type_name(&dev->dev_stat_grps.stat_group, - "statistics", &target_core_stat_cit); + "statistics", &t->tb_cits.tb_dev_stat_cit); dev_cg->default_groups[0] = &dev->dev_attrib.da_group; dev_cg->default_groups[1] = &dev->dev_pr_group; @@ -3121,6 +3118,7 @@ void target_core_setup_sub_cits(struct se_subsystem_api *sa) target_core_setup_dev_pr_cit(sa); target_core_setup_dev_wwn_cit(sa); target_core_setup_dev_alua_tg_pt_gps_cit(sa); + target_core_setup_dev_stat_cit(sa); } EXPORT_SYMBOL(target_core_setup_sub_cits); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 157d173f28fb..83a3726eabc5 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -11,6 +11,7 @@ struct target_backend_cits { struct config_item_type tb_dev_pr_cit; struct config_item_type tb_dev_wwn_cit; struct config_item_type tb_dev_alua_tg_pt_gps_cit; + struct config_item_type tb_dev_stat_cit; }; struct se_subsystem_api { -- cgit v1.2.3 From 7a23f890b7c11b63dfc2a6c7ae1f0a631ed84865 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 18:57:27 -0800 Subject: target: Add target_core_backend_configfs.h helper macros This patch adds a number of configfs e-attr macros following what existing target_core_configfs.c code does for internal target_backend_dev_attrib setup, and similar to how target fabric drivers allow for external config_item_type + cit->ct_attrs. assignment. This is useful for backend drivers like PSCSI who need to only expose a small subset of device attributes, while still retaining a default list of attributes for other backend drivers like IBLOCK, FILEIO, RAMDISK, and TCMU. 
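A usage sketch of the macros added below (illustrative only: the backend name "foo" is hypothetical, and the expansion is paraphrased from the macro bodies in this patch):

	DEF_TB_DEV_ATTRIB(foo, queue_depth);
	TB_DEV_ATTR(foo, queue_depth, S_IRUGO | S_IWUSR);

generates, approximately:

	static ssize_t foo_dev_show_attr_queue_depth(
		struct se_dev_attrib *da, char *page)
	{
		return snprintf(page, PAGE_SIZE, "%u\n",
				(u32)da->da_dev->dev_attrib.queue_depth);
	}

	static ssize_t foo_dev_store_attr_queue_depth(
		struct se_dev_attrib *da, const char *page, size_t count)
	{
		unsigned long val;
		int ret;

		ret = kstrtoul(page, 0, &val);
		if (ret < 0)
			return -EINVAL;
		ret = se_dev_set_queue_depth(da->da_dev, (u32)val);
		return (!ret) ? count : -EINVAL;
	}

	/* configfs attribute wired to the two handlers above */
	static struct target_backend_dev_attrib_attribute foo_dev_attrib_queue_depth =
		__CONFIGFS_EATTR(queue_depth, S_IRUGO | S_IWUSR,
				 foo_dev_show_attr_queue_depth,
				 foo_dev_store_attr_queue_depth);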
Signed-off-by: Nicholas Bellinger --- include/target/target_core_backend_configfs.h | 53 +++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 include/target/target_core_backend_configfs.h diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h new file mode 100644 index 000000000000..f91935b5ef24 --- /dev/null +++ b/include/target/target_core_backend_configfs.h @@ -0,0 +1,53 @@ +#ifndef TARGET_CORE_BACKEND_CONFIGFS_H +#define TARGET_CORE_BACKEND_CONFIGFS_H + +#include + +#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \ +static ssize_t _backend##_dev_show_attr_##_name( \ + struct se_dev_attrib *da, \ + char *page) \ +{ \ + return snprintf(page, PAGE_SIZE, "%u\n", \ + (u32)da->da_dev->dev_attrib._name); \ +} + +#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \ +static ssize_t _backend##_dev_store_attr_##_name( \ + struct se_dev_attrib *da, \ + const char *page, \ + size_t count) \ +{ \ + unsigned long val; \ + int ret; \ + \ + ret = kstrtoul(page, 0, &val); \ + if (ret < 0) { \ + pr_err("kstrtoul() failed with ret: %d\n", ret); \ + return -EINVAL; \ + } \ + ret = se_dev_set_##_name(da->da_dev, (u32)val); \ + \ + return (!ret) ? count : -EINVAL; \ +} + +#define DEF_TB_DEV_ATTRIB(_backend, _name) \ +DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \ +DEF_TB_DEV_ATTRIB_STORE(_backend, _name); + +#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \ +DEF_TB_DEV_ATTRIB_SHOW(_backend, name); + +CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib); +#define TB_DEV_ATTR(_backend, _name, _mode) \ +static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ + __CONFIGFS_EATTR(_name, _mode, \ + _backend##_dev_show_attr_##_name, \ + _backend##_dev_store_attr_##_name); + +#define TB_DEV_ATTR_RO(_backend, _name) \ +static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \ + __CONFIGFS_EATTR_RO(_name, \ + _backend##_dev_show_attr_##_name); + +#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */ -- cgit v1.2.3 From d30cd1238c2f89662c82d5d2c4686971a6dc3693 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 19:15:48 -0800 Subject: target: Add EXPORT_SYMBOL for existing se_dev_set_* Now that target_core_backend_configfs.h macros will be using these se_dev_set attribute functions externally to allow backend drivers to populate different attributes, go ahead and add EXPORT_SYMBOL() for the existing default set of 30 device attributes. Also update target_core_backend.h with proper function prototypes. 
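Why the exports matter, sketched for a modular backend (the "foo" symbols are hypothetical): the store handlers generated by the macros above call these setters directly, so a backend built as a module needs them in the exported symbol table, otherwise loading would fail with an unresolved-symbol error.

	/* illustrative caller inside foo.ko; se_dev_set_block_size() is
	 * resolvable at module load time only because of the
	 * EXPORT_SYMBOL() entries added below */
	static int foo_apply_block_size(struct se_device *dev, u32 val)
	{
		int ret = se_dev_set_block_size(dev, val);
		if (ret < 0)
			return ret;	/* setter rejected the value */
		return 0;
	}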
Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_device.c | 26 ++++++++++++++++++++++++++ drivers/target/target_core_iblock.c | 1 + drivers/target/target_core_internal.h | 28 ---------------------------- include/target/target_core_backend.h | 29 +++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 28 deletions(-) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index c45f9e907e44..953fff79bc14 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count( dev, dev->dev_attrib.max_unmap_lba_count); return 0; } +EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count); int se_dev_set_max_unmap_block_desc_count( struct se_device *dev, @@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count( dev, dev->dev_attrib.max_unmap_block_desc_count); return 0; } +EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count); int se_dev_set_unmap_granularity( struct se_device *dev, @@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity( dev, dev->dev_attrib.unmap_granularity); return 0; } +EXPORT_SYMBOL(se_dev_set_unmap_granularity); int se_dev_set_unmap_granularity_alignment( struct se_device *dev, @@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment( dev, dev->dev_attrib.unmap_granularity_alignment); return 0; } +EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment); int se_dev_set_max_write_same_len( struct se_device *dev, @@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len( dev, dev->dev_attrib.max_write_same_len); return 0; } +EXPORT_SYMBOL(se_dev_set_max_write_same_len); static void dev_set_t10_wwn_model_alias(struct se_device *dev) { @@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_model_alias); int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { @@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_dpo); int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { @@ -771,6 +778,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) dev, dev->dev_attrib.emulate_fua_write); return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_fua_write); int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { @@ -786,6 +794,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_fua_read); int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { @@ -809,6 +818,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) dev, dev->dev_attrib.emulate_write_cache); return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_write_cache); int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) { @@ -829,6 +839,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl); int se_dev_set_emulate_tas(struct se_device *dev, int flag) { @@ -849,6 +860,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_tas); int se_dev_set_emulate_tpu(struct se_device *dev, int flag) { @@ -870,6 +882,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) dev, flag); return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_tpu); int se_dev_set_emulate_tpws(struct se_device *dev, int flag) { @@ -891,6 +904,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) dev, flag); return 
0; } +EXPORT_SYMBOL(se_dev_set_emulate_tpws); int se_dev_set_emulate_caw(struct se_device *dev, int flag) { @@ -904,6 +918,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_caw); int se_dev_set_emulate_3pc(struct se_device *dev, int flag) { @@ -917,6 +932,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_3pc); int se_dev_set_pi_prot_type(struct se_device *dev, int flag) { @@ -970,6 +986,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_pi_prot_type); int se_dev_set_pi_prot_format(struct se_device *dev, int flag) { @@ -1005,6 +1022,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag) return 0; } +EXPORT_SYMBOL(se_dev_set_pi_prot_format); int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { @@ -1017,6 +1035,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); return 0; } +EXPORT_SYMBOL(se_dev_set_enforce_pr_isids); int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) { @@ -1034,6 +1053,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); return 0; } +EXPORT_SYMBOL(se_dev_set_force_pr_aptpl); int se_dev_set_is_nonrot(struct se_device *dev, int flag) { @@ -1046,6 +1066,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag) dev, flag); return 0; } +EXPORT_SYMBOL(se_dev_set_is_nonrot); int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) { @@ -1058,6 +1079,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); return 0; } +EXPORT_SYMBOL(se_dev_set_emulate_rest_reord); /* * Note, this can only be called on unexported SE Device Object. 
@@ -1101,6 +1123,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) dev, queue_depth); return 0; } +EXPORT_SYMBOL(se_dev_set_queue_depth); int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) { @@ -1155,6 +1178,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) dev, fabric_max_sectors); return 0; } +EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { @@ -1181,6 +1205,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, optimal_sectors); return 0; } +EXPORT_SYMBOL(se_dev_set_optimal_sectors); int se_dev_set_block_size(struct se_device *dev, u32 block_size) { @@ -1218,6 +1243,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) return 0; } +EXPORT_SYMBOL(se_dev_set_block_size); struct se_lun *core_dev_add_lun( struct se_portal_group *tpg, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e6b857c6b3f..5e69d36af105 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -858,6 +858,7 @@ static bool iblock_get_write_cache(struct se_device *dev) return q->flush_flags & REQ_FLUSH; } + static struct se_subsystem_api iblock_template = { .name = "iblock", .inquiry_prod = "IBLOCK", diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index e31f42f369ff..60381db90026 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *, struct se_lun *); void core_dev_unexport(struct se_device *, struct se_portal_group *, struct se_lun *); -int se_dev_set_task_timeout(struct se_device *, u32); -int se_dev_set_max_unmap_lba_count(struct se_device *, u32); -int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); -int se_dev_set_unmap_granularity(struct se_device *, u32); -int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); -int se_dev_set_max_write_same_len(struct se_device *, u32); -int se_dev_set_emulate_model_alias(struct se_device *, int); -int se_dev_set_emulate_dpo(struct se_device *, int); -int se_dev_set_emulate_fua_write(struct se_device *, int); -int se_dev_set_emulate_fua_read(struct se_device *, int); -int se_dev_set_emulate_write_cache(struct se_device *, int); -int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); -int se_dev_set_emulate_tas(struct se_device *, int); -int se_dev_set_emulate_tpu(struct se_device *, int); -int se_dev_set_emulate_tpws(struct se_device *, int); -int se_dev_set_emulate_caw(struct se_device *, int); -int se_dev_set_emulate_3pc(struct se_device *, int); -int se_dev_set_pi_prot_type(struct se_device *, int); -int se_dev_set_pi_prot_format(struct se_device *, int); -int se_dev_set_enforce_pr_isids(struct se_device *, int); -int se_dev_set_force_pr_aptpl(struct se_device *, int); -int se_dev_set_is_nonrot(struct se_device *, int); -int se_dev_set_emulate_rest_reord(struct se_device *dev, int); -int se_dev_set_queue_depth(struct se_device *, u32); -int se_dev_set_max_sectors(struct se_device *, u32); -int se_dev_set_fabric_max_sectors(struct se_device *, u32); -int se_dev_set_optimal_sectors(struct se_device *, u32); -int se_dev_set_block_size(struct se_device *, u32); struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); void core_dev_del_lun(struct se_portal_group *, 
struct se_lun *); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 83a3726eabc5..430cfaf92285 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -110,4 +110,33 @@ void array_free(void *array, int n); /* From target_core_configfs.c to setup default backend config_item_types */ void target_core_setup_sub_cits(struct se_subsystem_api *); +/* attribute helpers from target_core_device.c for backend drivers */ +int se_dev_set_max_unmap_lba_count(struct se_device *, u32); +int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32); +int se_dev_set_unmap_granularity(struct se_device *, u32); +int se_dev_set_unmap_granularity_alignment(struct se_device *, u32); +int se_dev_set_max_write_same_len(struct se_device *, u32); +int se_dev_set_emulate_model_alias(struct se_device *, int); +int se_dev_set_emulate_dpo(struct se_device *, int); +int se_dev_set_emulate_fua_write(struct se_device *, int); +int se_dev_set_emulate_fua_read(struct se_device *, int); +int se_dev_set_emulate_write_cache(struct se_device *, int); +int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int); +int se_dev_set_emulate_tas(struct se_device *, int); +int se_dev_set_emulate_tpu(struct se_device *, int); +int se_dev_set_emulate_tpws(struct se_device *, int); +int se_dev_set_emulate_caw(struct se_device *, int); +int se_dev_set_emulate_3pc(struct se_device *, int); +int se_dev_set_pi_prot_type(struct se_device *, int); +int se_dev_set_pi_prot_format(struct se_device *, int); +int se_dev_set_enforce_pr_isids(struct se_device *, int); +int se_dev_set_force_pr_aptpl(struct se_device *, int); +int se_dev_set_is_nonrot(struct se_device *, int); +int se_dev_set_emulate_rest_reord(struct se_device *dev, int); +int se_dev_set_queue_depth(struct se_device *, u32); +int se_dev_set_max_sectors(struct se_device *, u32); +int se_dev_set_fabric_max_sectors(struct se_device *, u32); +int se_dev_set_optimal_sectors(struct se_device *, u32); +int se_dev_set_block_size(struct se_device *, u32); + #endif /* TARGET_CORE_BACKEND_H */ -- cgit v1.2.3 From e6c39f703a878b8d9427ca7b4dc8f97874d12ae6 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 19:49:36 -0800 Subject: target: Add DEF_TB_DEFAULT_ATTRIBS macro for virtual device attrs This helper macro adds the default set of 30 device attributes for virtual devices from existing target_core_configfs.c code, and moves the definitions into a single macro to create the structs necessary for backend drivers. It allows them to populate their own external struct configfs_attribute for se_subsystem_api->tb_cits.tb_dev_attrib_cit.ct_attrs. 
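The backend conversions that follow all consume this macro the same way; distilled into a sketch (hypothetical backend "foo"):

	DEF_TB_DEFAULT_ATTRIBS(foo);

	static struct configfs_attribute *foo_backend_dev_attrs[] = {
		&foo_dev_attrib_emulate_model_alias.attr,
		/* ... the remaining generated attributes, 30 in total ... */
		&foo_dev_attrib_max_write_same_len.attr,
		NULL,
	};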
Signed-off-by: Nicholas Bellinger --- include/target/target_core_backend_configfs.h | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h index f91935b5ef24..3247d7530107 100644 --- a/include/target/target_core_backend_configfs.h +++ b/include/target/target_core_backend_configfs.h @@ -50,4 +50,71 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name __CONFIGFS_EATTR_RO(_name, \ _backend##_dev_show_attr_##_name); +/* + * Default list of target backend device attributes as defined by + * struct se_dev_attrib + */ + +#define DEF_TB_DEFAULT_ATTRIBS(_backend) \ + DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \ + TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \ + TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \ + TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \ + TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \ + TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \ + TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \ + TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \ + TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \ + TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \ + TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \ + TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \ + TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \ + TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \ + DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \ + TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \ + TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \ + TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \ + TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \ + TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \ + TB_DEV_ATTR_RO(_backend, hw_block_size); \ + DEF_TB_DEV_ATTRIB(_backend, block_size); \ + TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ + TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ + DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ + TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ + TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ + TB_DEV_ATTR_RO(_backend, hw_queue_depth); \ + DEF_TB_DEV_ATTRIB(_backend, queue_depth); \ + TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \ + TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | 
S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \ + TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \ + TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \ + TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \ + DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \ + TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR); + #endif /* TARGET_CORE_BACKEND_CONFIGFS_H */ -- cgit v1.2.3 From 5645cba011ddaf7dc46a636e5657bb764e11a472 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 27 Nov 2014 19:53:28 -0800 Subject: target/iblock: Convert to external iblock_backend_dev_attrs This patch converts IBLOCK to use an external set of device attributes, and utilizes target_core_backend_configfs.h macros to generate a default set of configfs extended-attr handlers. It calls target_core_setup_sub_cits() to setup the initial config_item_type based on existing target_core_configfs.c defaults, and using configfs_attribute generated by DEF_TB_DEFAULT_ATTRIBS(iblock) populates iblock_backend_dev_attrs[] It introduces no functional change for existing IBLOCK device attributes. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_iblock.c | 41 +++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 5e69d36af105..3efff94fbd97 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -41,6 +41,7 @@ #include #include +#include #include "target_core_iblock.h" @@ -858,6 +859,41 @@ static bool iblock_get_write_cache(struct se_device *dev) return q->flush_flags & REQ_FLUSH; } +DEF_TB_DEFAULT_ATTRIBS(iblock); + +static struct configfs_attribute *iblock_backend_dev_attrs[] = { + &iblock_dev_attrib_emulate_model_alias.attr, + &iblock_dev_attrib_emulate_dpo.attr, + &iblock_dev_attrib_emulate_fua_write.attr, + &iblock_dev_attrib_emulate_fua_read.attr, + &iblock_dev_attrib_emulate_write_cache.attr, + &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr, + &iblock_dev_attrib_emulate_tas.attr, + &iblock_dev_attrib_emulate_tpu.attr, + &iblock_dev_attrib_emulate_tpws.attr, + &iblock_dev_attrib_emulate_caw.attr, + &iblock_dev_attrib_emulate_3pc.attr, + &iblock_dev_attrib_pi_prot_type.attr, + &iblock_dev_attrib_hw_pi_prot_type.attr, + &iblock_dev_attrib_pi_prot_format.attr, + &iblock_dev_attrib_enforce_pr_isids.attr, + &iblock_dev_attrib_is_nonrot.attr, + &iblock_dev_attrib_emulate_rest_reord.attr, + &iblock_dev_attrib_force_pr_aptpl.attr, + &iblock_dev_attrib_hw_block_size.attr, + &iblock_dev_attrib_block_size.attr, + &iblock_dev_attrib_hw_max_sectors.attr, + &iblock_dev_attrib_fabric_max_sectors.attr, + &iblock_dev_attrib_optimal_sectors.attr, + &iblock_dev_attrib_hw_queue_depth.attr, + &iblock_dev_attrib_queue_depth.attr, + &iblock_dev_attrib_max_unmap_lba_count.attr, + &iblock_dev_attrib_max_unmap_block_desc_count.attr, + &iblock_dev_attrib_unmap_granularity.attr, + &iblock_dev_attrib_unmap_granularity_alignment.attr, + &iblock_dev_attrib_max_write_same_len.attr, + NULL, +}; static struct se_subsystem_api iblock_template = { .name = "iblock", @@ -884,6 +920,11 @@ static struct se_subsystem_api iblock_template = { static int __init iblock_module_init(void) { + struct target_backend_cits *tbc = &iblock_template.tb_cits; + + target_core_setup_sub_cits(&iblock_template); + 
tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs; + return transport_subsystem_register(&iblock_template); } -- cgit v1.2.3 From b23204970af2ce59c160f87f42baeacac33becb4 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 28 Nov 2014 04:56:30 +0000 Subject: target/file: Convert to external fileio_backend_dev_attrs This patch converts FILEIO to use an external set of device attributes, and utilizes target_core_backend_configfs.h macros to generate a default set of configfs extended-attr handlers. It calls target_core_setup_sub_cits() to setup the initial config_item_type based on existing target_core_configfs.c defaults, and using configfs_attribute generated by DEF_TB_DEFAULT_ATTRIBS(fileio) populates fileio_backend_dev_attrs[] It introduces no functional change for existing FILEIO device attributes. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_file.c | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 72c83d98662b..c2aea099ea4a 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -37,6 +37,7 @@ #include #include +#include #include "target_core_file.h" @@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd) return sbc_parse_cdb(cmd, &fd_sbc_ops); } +DEF_TB_DEFAULT_ATTRIBS(fileio); + +static struct configfs_attribute *fileio_backend_dev_attrs[] = { + &fileio_dev_attrib_emulate_model_alias.attr, + &fileio_dev_attrib_emulate_dpo.attr, + &fileio_dev_attrib_emulate_fua_write.attr, + &fileio_dev_attrib_emulate_fua_read.attr, + &fileio_dev_attrib_emulate_write_cache.attr, + &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr, + &fileio_dev_attrib_emulate_tas.attr, + &fileio_dev_attrib_emulate_tpu.attr, + &fileio_dev_attrib_emulate_tpws.attr, + &fileio_dev_attrib_emulate_caw.attr, + &fileio_dev_attrib_emulate_3pc.attr, + &fileio_dev_attrib_pi_prot_type.attr, + &fileio_dev_attrib_hw_pi_prot_type.attr, + &fileio_dev_attrib_pi_prot_format.attr, + &fileio_dev_attrib_enforce_pr_isids.attr, + &fileio_dev_attrib_is_nonrot.attr, + &fileio_dev_attrib_emulate_rest_reord.attr, + &fileio_dev_attrib_force_pr_aptpl.attr, + &fileio_dev_attrib_hw_block_size.attr, + &fileio_dev_attrib_block_size.attr, + &fileio_dev_attrib_hw_max_sectors.attr, + &fileio_dev_attrib_fabric_max_sectors.attr, + &fileio_dev_attrib_optimal_sectors.attr, + &fileio_dev_attrib_hw_queue_depth.attr, + &fileio_dev_attrib_queue_depth.attr, + &fileio_dev_attrib_max_unmap_lba_count.attr, + &fileio_dev_attrib_max_unmap_block_desc_count.attr, + &fileio_dev_attrib_unmap_granularity.attr, + &fileio_dev_attrib_unmap_granularity_alignment.attr, + &fileio_dev_attrib_max_write_same_len.attr, + NULL, +}; + static struct se_subsystem_api fileio_template = { .name = "fileio", .inquiry_prod = "FILEIO", @@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = { static int __init fileio_module_init(void) { + struct target_backend_cits *tbc = &fileio_template.tb_cits; + + target_core_setup_sub_cits(&fileio_template); + tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs; + return transport_subsystem_register(&fileio_template); } -- cgit v1.2.3 From 03a627457e8440fe09e173dcc7d59a7c0a202d03 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 28 Nov 2014 05:06:10 +0000 Subject: target/rd: Convert to external rd_mcp_backend_dev_attrs This patch converts RAMDISK to use an external set of device attributes, and utilizes target_core_backend_configfs.h macros 
to generate a default set of configfs extended-attr handlers. It calls target_core_setup_sub_cits() to setup the initial config_item_type based on existing target_core_configfs.c defaults, and using configfs_attribute generated by DEF_TB_DEFAULT_ATTRIBS(rc_mcp) populates rd_mcp_backend_dev_attrs[] It introduces no functional change for existing RD_MCP device attributes. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_rd.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index b920db3388cd..60ebd170a561 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -34,6 +34,7 @@ #include #include +#include #include "target_core_rd.h" @@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd) return sbc_parse_cdb(cmd, &rd_sbc_ops); } +DEF_TB_DEFAULT_ATTRIBS(rd_mcp); + +static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { + &rd_mcp_dev_attrib_emulate_model_alias.attr, + &rd_mcp_dev_attrib_emulate_dpo.attr, + &rd_mcp_dev_attrib_emulate_fua_write.attr, + &rd_mcp_dev_attrib_emulate_fua_read.attr, + &rd_mcp_dev_attrib_emulate_write_cache.attr, + &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr, + &rd_mcp_dev_attrib_emulate_tas.attr, + &rd_mcp_dev_attrib_emulate_tpu.attr, + &rd_mcp_dev_attrib_emulate_tpws.attr, + &rd_mcp_dev_attrib_emulate_caw.attr, + &rd_mcp_dev_attrib_emulate_3pc.attr, + &rd_mcp_dev_attrib_pi_prot_type.attr, + &rd_mcp_dev_attrib_hw_pi_prot_type.attr, + &rd_mcp_dev_attrib_pi_prot_format.attr, + &rd_mcp_dev_attrib_enforce_pr_isids.attr, + &rd_mcp_dev_attrib_is_nonrot.attr, + &rd_mcp_dev_attrib_emulate_rest_reord.attr, + &rd_mcp_dev_attrib_force_pr_aptpl.attr, + &rd_mcp_dev_attrib_hw_block_size.attr, + &rd_mcp_dev_attrib_block_size.attr, + &rd_mcp_dev_attrib_hw_max_sectors.attr, + &rd_mcp_dev_attrib_fabric_max_sectors.attr, + &rd_mcp_dev_attrib_optimal_sectors.attr, + &rd_mcp_dev_attrib_hw_queue_depth.attr, + &rd_mcp_dev_attrib_queue_depth.attr, + &rd_mcp_dev_attrib_max_unmap_lba_count.attr, + &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr, + &rd_mcp_dev_attrib_unmap_granularity.attr, + &rd_mcp_dev_attrib_unmap_granularity_alignment.attr, + &rd_mcp_dev_attrib_max_write_same_len.attr, + NULL, +}; + static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .inquiry_prod = "RAMDISK-MCP", @@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = { int __init rd_module_init(void) { + struct target_backend_cits *tbc = &rd_mcp_template.tb_cits; int ret; + target_core_setup_sub_cits(&rd_mcp_template); + tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs; + ret = transport_subsystem_register(&rd_mcp_template); if (ret < 0) { return ret; -- cgit v1.2.3 From e9f720d63b476c9dc2a936c186878f1795b8b4c5 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 28 Nov 2014 05:11:24 +0000 Subject: target/user: Convert to external tcmu_backend_dev_attrs This patch converts TCM-USER to use an external set of device attributes, and utilizes target_core_backend_configfs.h macros to generate a default set of configfs extended-attr handlers. It calls target_core_setup_sub_cits() to setup the initial config_item_type based on existing target_core_configfs.c defaults, and using configfs_attribute generated by DEF_TB_DEFAULT_ATTRIBS(tcmu) populates tcmu_backend_dev_attrs[] It introduces no function change for existing TCMU device attributes. 
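The init-time wiring is also common to these conversions; a minimal sketch (hypothetical "foo" backend), with target_core_setup_sub_cits() running ahead of transport_subsystem_register() as this series requires:

	static int __init foo_module_init(void)
	{
		struct target_backend_cits *tbc = &foo_template.tb_cits;

		/* populate default config_item_types from target core */
		target_core_setup_sub_cits(&foo_template);
		/* override the attrib cit with this backend's own list */
		tbc->tb_dev_attrib_cit.ct_attrs = foo_backend_dev_attrs;
		/* only then register the backend */
		return transport_subsystem_register(&foo_template);
	}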
Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_user.c | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9a1b314f6482..8bfa61c9693d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -28,6 +28,8 @@ #include #include #include +#include + #include /* @@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd) return ret; } +DEF_TB_DEFAULT_ATTRIBS(tcmu); + +static struct configfs_attribute *tcmu_backend_dev_attrs[] = { + &tcmu_dev_attrib_emulate_model_alias.attr, + &tcmu_dev_attrib_emulate_dpo.attr, + &tcmu_dev_attrib_emulate_fua_write.attr, + &tcmu_dev_attrib_emulate_fua_read.attr, + &tcmu_dev_attrib_emulate_write_cache.attr, + &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, + &tcmu_dev_attrib_emulate_tas.attr, + &tcmu_dev_attrib_emulate_tpu.attr, + &tcmu_dev_attrib_emulate_tpws.attr, + &tcmu_dev_attrib_emulate_caw.attr, + &tcmu_dev_attrib_emulate_3pc.attr, + &tcmu_dev_attrib_pi_prot_type.attr, + &tcmu_dev_attrib_hw_pi_prot_type.attr, + &tcmu_dev_attrib_pi_prot_format.attr, + &tcmu_dev_attrib_enforce_pr_isids.attr, + &tcmu_dev_attrib_is_nonrot.attr, + &tcmu_dev_attrib_emulate_rest_reord.attr, + &tcmu_dev_attrib_force_pr_aptpl.attr, + &tcmu_dev_attrib_hw_block_size.attr, + &tcmu_dev_attrib_block_size.attr, + &tcmu_dev_attrib_hw_max_sectors.attr, + &tcmu_dev_attrib_fabric_max_sectors.attr, + &tcmu_dev_attrib_optimal_sectors.attr, + &tcmu_dev_attrib_hw_queue_depth.attr, + &tcmu_dev_attrib_queue_depth.attr, + &tcmu_dev_attrib_max_unmap_lba_count.attr, + &tcmu_dev_attrib_max_unmap_block_desc_count.attr, + &tcmu_dev_attrib_unmap_granularity.attr, + &tcmu_dev_attrib_unmap_granularity_alignment.attr, + &tcmu_dev_attrib_max_write_same_len.attr, + NULL, +}; + static struct se_subsystem_api tcmu_template = { .name = "user", .inquiry_prod = "USER", @@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = { static int __init tcmu_module_init(void) { + struct target_backend_cits *tbc = &tcmu_template.tb_cits; int ret; BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); @@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void) goto out_unreg_device; } + target_core_setup_sub_cits(&tcmu_template); + tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs; + ret = transport_subsystem_register(&tcmu_template); if (ret) goto out_unreg_genl; -- cgit v1.2.3 From 6cfb546bfce644bd4fb6662a5049a73406f655c9 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 28 Nov 2014 05:26:18 +0000 Subject: target/pscsi: Convert to external pscsi_backend_dev_attrs This patch converts PSCSI to use an external set of device attributes, and utilizes target_core_backend_configfs.h macros to generate a default set of configfs extended-attr handlers. It calls target_core_setup_sub_cits() to setup the initial config_item_type based on existing target_core_configfs.c defaults, and using configfs_attribute for hw_pi_prot_type, hw_block_size, hw_max_sectors, hw_queue_depth populates pscsi_backend_dev_attrs[] Only these four hw_* read-only device attributes are exports for PSCSI. 
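Note that a backend may expose any subset this way: define only the attributes it supports and terminate the list with NULL. A sketch of the read-only pattern PSCSI uses below (the "foo" backend name is hypothetical):

	DEF_TB_DEV_ATTRIB_RO(foo, hw_block_size);
	TB_DEV_ATTR_RO(foo, hw_block_size);

	static struct configfs_attribute *foo_backend_dev_attrs[] = {
		&foo_dev_attrib_hw_block_size.attr,
		NULL,
	};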
Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 7c8291f0bbbc..74873e42cf7e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -44,6 +44,7 @@ #include #include +#include #include "target_core_alua.h" #include "target_core_pscsi.h" @@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate) kfree(pt); } +DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type); +TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type); + +DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size); +TB_DEV_ATTR_RO(pscsi, hw_block_size); + +DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors); +TB_DEV_ATTR_RO(pscsi, hw_max_sectors); + +DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth); +TB_DEV_ATTR_RO(pscsi, hw_queue_depth); + +static struct configfs_attribute *pscsi_backend_dev_attrs[] = { + &pscsi_dev_attrib_hw_pi_prot_type.attr, + &pscsi_dev_attrib_hw_block_size.attr, + &pscsi_dev_attrib_hw_max_sectors.attr, + &pscsi_dev_attrib_hw_queue_depth.attr, + NULL, +}; + static struct se_subsystem_api pscsi_template = { .name = "pscsi", .owner = THIS_MODULE, @@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = { static int __init pscsi_module_init(void) { + struct target_backend_cits *tbc = &pscsi_template.tb_cits; + + target_core_setup_sub_cits(&pscsi_template); + tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs; + return transport_subsystem_register(&pscsi_template); } -- cgit v1.2.3 From 43cf208cbe2080ff0212bd3527d915395b6129f6 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 28 Nov 2014 05:34:39 +0000 Subject: target: Drop left-over internal dev attribute code Now that backend drivers are populating their own device attributes, go ahead and remove left-over definitions + internal attribute list of device attributes from target_core_configfs.c code Also update TB_CIT_SETUP(dev_attrib,...) to signal ct_attr = NULL. Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 176 +--------------------------------- 1 file changed, 1 insertion(+), 175 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2903d8931625..75d89adfccc0 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -586,189 +586,15 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister); /* Start functions for struct config_item_type tb_dev_attrib_cit */ -#define DEF_DEV_ATTRIB_SHOW(_name) \ -static ssize_t target_core_dev_show_attr_##_name( \ - struct se_dev_attrib *da, \ - char *page) \ -{ \ - return snprintf(page, PAGE_SIZE, "%u\n", \ - (u32)da->da_dev->dev_attrib._name); \ -} - -#define DEF_DEV_ATTRIB_STORE(_name) \ -static ssize_t target_core_dev_store_attr_##_name( \ - struct se_dev_attrib *da, \ - const char *page, \ - size_t count) \ -{ \ - unsigned long val; \ - int ret; \ - \ - ret = kstrtoul(page, 0, &val); \ - if (ret < 0) { \ - pr_err("kstrtoul() failed with" \ - " ret: %d\n", ret); \ - return -EINVAL; \ - } \ - ret = se_dev_set_##_name(da->da_dev, (u32)val); \ - \ - return (!ret) ? 
count : -EINVAL; \ -} - -#define DEF_DEV_ATTRIB(_name) \ -DEF_DEV_ATTRIB_SHOW(_name); \ -DEF_DEV_ATTRIB_STORE(_name); - -#define DEF_DEV_ATTRIB_RO(_name) \ -DEF_DEV_ATTRIB_SHOW(_name); - CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); -#define SE_DEV_ATTR(_name, _mode) \ -static struct target_core_dev_attrib_attribute \ - target_core_dev_attrib_##_name = \ - __CONFIGFS_EATTR(_name, _mode, \ - target_core_dev_show_attr_##_name, \ - target_core_dev_store_attr_##_name); - -#define SE_DEV_ATTR_RO(_name); \ -static struct target_core_dev_attrib_attribute \ - target_core_dev_attrib_##_name = \ - __CONFIGFS_EATTR_RO(_name, \ - target_core_dev_show_attr_##_name); - -DEF_DEV_ATTRIB(emulate_model_alias); -SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_dpo); -SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_fua_write); -SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_fua_read); -SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_write_cache); -SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl); -SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_tas); -SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_tpu); -SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_tpws); -SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_caw); -SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_3pc); -SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(pi_prot_type); -SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB_RO(hw_pi_prot_type); -SE_DEV_ATTR_RO(hw_pi_prot_type); - -DEF_DEV_ATTRIB(pi_prot_format); -SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(enforce_pr_isids); -SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(is_nonrot); -SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(emulate_rest_reord); -SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(force_pr_aptpl); -SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB_RO(hw_block_size); -SE_DEV_ATTR_RO(hw_block_size); - -DEF_DEV_ATTRIB(block_size); -SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB_RO(hw_max_sectors); -SE_DEV_ATTR_RO(hw_max_sectors); - -DEF_DEV_ATTRIB(fabric_max_sectors); -SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(optimal_sectors); -SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB_RO(hw_queue_depth); -SE_DEV_ATTR_RO(hw_queue_depth); - -DEF_DEV_ATTRIB(queue_depth); -SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(max_unmap_lba_count); -SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(max_unmap_block_desc_count); -SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(unmap_granularity); -SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(unmap_granularity_alignment); -SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR); - -DEF_DEV_ATTRIB(max_write_same_len); -SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR); - CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); -static struct configfs_attribute *target_core_dev_attrib_attrs[] = { - &target_core_dev_attrib_emulate_model_alias.attr, - &target_core_dev_attrib_emulate_dpo.attr, - &target_core_dev_attrib_emulate_fua_write.attr, - 
	&target_core_dev_attrib_emulate_fua_read.attr,
-	&target_core_dev_attrib_emulate_write_cache.attr,
-	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
-	&target_core_dev_attrib_emulate_tas.attr,
-	&target_core_dev_attrib_emulate_tpu.attr,
-	&target_core_dev_attrib_emulate_tpws.attr,
-	&target_core_dev_attrib_emulate_caw.attr,
-	&target_core_dev_attrib_emulate_3pc.attr,
-	&target_core_dev_attrib_pi_prot_type.attr,
-	&target_core_dev_attrib_hw_pi_prot_type.attr,
-	&target_core_dev_attrib_pi_prot_format.attr,
-	&target_core_dev_attrib_enforce_pr_isids.attr,
-	&target_core_dev_attrib_force_pr_aptpl.attr,
-	&target_core_dev_attrib_is_nonrot.attr,
-	&target_core_dev_attrib_emulate_rest_reord.attr,
-	&target_core_dev_attrib_hw_block_size.attr,
-	&target_core_dev_attrib_block_size.attr,
-	&target_core_dev_attrib_hw_max_sectors.attr,
-	&target_core_dev_attrib_fabric_max_sectors.attr,
-	&target_core_dev_attrib_optimal_sectors.attr,
-	&target_core_dev_attrib_hw_queue_depth.attr,
-	&target_core_dev_attrib_queue_depth.attr,
-	&target_core_dev_attrib_max_unmap_lba_count.attr,
-	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
-	&target_core_dev_attrib_unmap_granularity.attr,
-	&target_core_dev_attrib_unmap_granularity_alignment.attr,
-	&target_core_dev_attrib_max_write_same_len.attr,
-	NULL,
-};
-
 static struct configfs_item_operations target_core_dev_attrib_ops = {
 	.show_attribute		= target_core_dev_attrib_attr_show,
 	.store_attribute	= target_core_dev_attrib_attr_store,
 };
 
-TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL,
-	     target_core_dev_attrib_attrs);
+TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
 
 /* End functions for struct config_item_type tb_dev_attrib_cit */
-- cgit v1.2.3 

From 4b2f57e5ced40e91cbf8886d7dc40a9474d2f5c0 Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger
Date: Fri, 28 Nov 2014 05:39:12 +0000
Subject: target: Drop left-over PHBA_PDEV set attr checks

Now that PSCSI is only exposing four hw_* read-only device attributes, go
ahead and drop the left-over legacy PHBA_PDEV checks in various
se_dev_set_* code, since it is now only used by virtual devices.
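[Every guard being removed has the same shape; the emulate_write_cache
case in the hunks below, for instance, deletes:

	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}

With pSCSI now registering only its four read-only hw_* attributes, the
writable se_dev_set_* setters can no longer be reached for a
TRANSPORT_PLUGIN_PHBA_PDEV device, so these guards are dead code.]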
Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_device.c | 64 ++++++------------------------------- 1 file changed, 10 insertions(+), 54 deletions(-) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 953fff79bc14..7653cfb027a2 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -767,12 +767,6 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) pr_err("Illegal value %d\n", flag); return -EINVAL; } - - if (flag && - dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - pr_err("emulate_fua_write not supported for pSCSI\n"); - return -EINVAL; - } dev->dev_attrib.emulate_fua_write = flag; pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", dev, dev->dev_attrib.emulate_fua_write); @@ -802,11 +796,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (flag && - dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - pr_err("emulate_write_cache not supported for pSCSI\n"); - return -EINVAL; - } if (flag && dev->transport->get_write_cache) { pr_err("emulate_write_cache not supported for this device\n"); @@ -1098,26 +1087,15 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) return -EINVAL; } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (queue_depth > dev->dev_attrib.queue_depth) { if (queue_depth > dev->dev_attrib.hw_queue_depth) { - pr_err("dev[%p]: Passed queue_depth: %u" - " exceeds TCM/SE_Device TCQ: %u\n", - dev, queue_depth, + pr_err("dev[%p]: Passed queue_depth:" + " %u exceeds TCM/SE_Device MAX" + " TCQ: %u\n", dev, queue_depth, dev->dev_attrib.hw_queue_depth); return -EINVAL; } - } else { - if (queue_depth > dev->dev_attrib.queue_depth) { - if (queue_depth > dev->dev_attrib.hw_queue_depth) { - pr_err("dev[%p]: Passed queue_depth:" - " %u exceeds TCM/SE_Device MAX" - " TCQ: %u\n", dev, queue_depth, - dev->dev_attrib.hw_queue_depth); - return -EINVAL; - } - } } - dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, queue_depth); @@ -1146,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) DA_STATUS_MAX_SECTORS_MIN); return -EINVAL; } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u" - " greater than TCM/SE_Device max_sectors:" - " %u\n", dev, fabric_max_sectors, - dev->dev_attrib.hw_max_sectors); - return -EINVAL; - } - } else { - if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u" - " greater than DA_STATUS_MAX_SECTORS_MAX:" - " %u\n", dev, fabric_max_sectors, - DA_STATUS_MAX_SECTORS_MAX); - return -EINVAL; - } + if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { + pr_err("dev[%p]: Passed fabric_max_sectors: %u" + " greater than DA_STATUS_MAX_SECTORS_MAX:" + " %u\n", dev, fabric_max_sectors, + DA_STATUS_MAX_SECTORS_MAX); + return -EINVAL; } /* * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() @@ -1188,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, dev->export_count); return -EINVAL; } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - pr_err("dev[%p]: Passed optimal_sectors cannot be" - " changed for TCM/pSCSI\n", dev); - 
return -EINVAL; - } if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { pr_err("dev[%p]: Passed optimal_sectors %u cannot be" " greater than fabric_max_sectors: %u\n", dev, @@ -1226,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) return -EINVAL; } - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - pr_err("dev[%p]: Not allowed to change block_size for" - " Physical Device, use for Linux/SCSI to change" - " block_size for underlying hardware\n", dev); - return -EINVAL; - } - dev->dev_attrib.block_size = block_size; pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); -- cgit v1.2.3 From 506787a2c7daed45f0a213674ca706cbc83a9089 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 26 Nov 2014 14:58:57 +0100 Subject: tcm_loop: Fix wrong I_T nexus association tcm_loop has the I_T nexus associated with the HBA. This causes commands to become misdirected if the HBA has more than one target portal group; any command is then being sent to the first target portal group instead of the correct one. The nexus needs to be associated with the target portal group instead. Signed-off-by: Hannes Reinecke Cc: # 3.0+ Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 66 +++++++++++++------------------------- drivers/target/loopback/tcm_loop.h | 7 +--- 2 files changed, 24 insertions(+), 49 deletions(-) diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index ab3ab27d49b7..0be83e788df2 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -190,7 +190,7 @@ static void tcm_loop_submission_work(struct work_struct *work) set_host_byte(sc, DID_TRANSPORT_DISRUPTED); goto out_done; } - tl_nexus = tl_hba->tl_nexus; + tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" " does not exist\n"); @@ -270,16 +270,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) * to struct scsi_device */ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, - struct tcm_loop_nexus *tl_nexus, int lun, int task, enum tcm_tmreq_table tmr) { struct se_cmd *se_cmd = NULL; struct se_session *se_sess; struct se_portal_group *se_tpg; + struct tcm_loop_nexus *tl_nexus; struct tcm_loop_cmd *tl_cmd = NULL; struct tcm_loop_tmr *tl_tmr = NULL; int ret = TMR_FUNCTION_FAILED, rc; + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return ret; + } + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { pr_err("Unable to allocate memory for tl_cmd\n"); @@ -295,7 +305,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, se_cmd = &tl_cmd->tl_se_cmd; se_tpg = &tl_tpg->tl_se_tpg; - se_sess = tl_nexus->se_sess; + se_sess = tl_tpg->tl_nexus->se_sess; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ @@ -340,7 +350,6 @@ release: static int tcm_loop_abort_task(struct scsi_cmnd *sc) { struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tpg *tl_tpg; int ret = FAILED; @@ -348,21 +357,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - 
" active I_T Nexus\n"); - return FAILED; - } - - /* - * Locate the tl_tpg pointer from TargetID in sc->device->id - */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, sc->request->tag, TMR_ABORT_TASK); return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; } @@ -374,7 +370,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) static int tcm_loop_device_reset(struct scsi_cmnd *sc) { struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tpg *tl_tpg; int ret = FAILED; @@ -382,20 +377,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - /* - * Locate the tl_tpg pointer from TargetID in sc->device->id - */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET); return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; } @@ -1005,8 +989,8 @@ static int tcm_loop_make_nexus( struct tcm_loop_nexus *tl_nexus; int ret = -ENOMEM; - if (tl_tpg->tl_hba->tl_nexus) { - pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); + if (tl_tpg->tl_nexus) { + pr_debug("tl_tpg->tl_nexus already exists\n"); return -EEXIST; } se_tpg = &tl_tpg->tl_se_tpg; @@ -1041,7 +1025,7 @@ static int tcm_loop_make_nexus( */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); - tl_tpg->tl_hba->tl_nexus = tl_nexus; + tl_tpg->tl_nexus = tl_nexus; pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), name); @@ -1057,12 +1041,8 @@ static int tcm_loop_drop_nexus( { struct se_session *se_sess; struct tcm_loop_nexus *tl_nexus; - struct tcm_loop_hba *tl_hba = tpg->tl_hba; - if (!tl_hba) - return -ENODEV; - - tl_nexus = tl_hba->tl_nexus; + tl_nexus = tpg->tl_nexus; if (!tl_nexus) return -ENODEV; @@ -1078,13 +1058,13 @@ static int tcm_loop_drop_nexus( } pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), tl_nexus->se_sess->se_node_acl->initiatorname); /* * Release the SCSI I_T Nexus to the emulated SAS Target Port */ transport_deregister_session(tl_nexus->se_sess); - tpg->tl_hba->tl_nexus = NULL; + tpg->tl_nexus = NULL; kfree(tl_nexus); return 0; } @@ -1100,7 +1080,7 @@ static ssize_t tcm_loop_tpg_show_nexus( struct tcm_loop_nexus *tl_nexus; ssize_t ret; - tl_nexus = tl_tpg->tl_hba->tl_nexus; + tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) return -ENODEV; diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 54c59d0b6608..6ae49f272ba6 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -27,11 +27,6 @@ struct tcm_loop_tmr { }; struct tcm_loop_nexus { - int it_nexus_active; - /* - * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h - */ - struct scsi_host *sh; /* * Pointer to TCM session for I_T Nexus */ @@ -51,6 +46,7 @@ struct tcm_loop_tpg { atomic_t tl_tpg_port_count; struct se_portal_group tl_se_tpg; struct tcm_loop_hba *tl_hba; + 
struct tcm_loop_nexus *tl_nexus; }; struct tcm_loop_hba { @@ -59,7 +55,6 @@ struct tcm_loop_hba { struct se_hba_s *se_hba; struct se_lun *tl_hba_lun; struct se_port *tl_hba_lun_sep; - struct tcm_loop_nexus *tl_nexus; struct device dev; struct Scsi_Host *sh; struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; -- cgit v1.2.3 From 7f2c53bb9512bd6c6c55ce0eed27f5c5da8b7ec2 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Sun, 30 Nov 2014 12:00:11 +0100 Subject: iscsi-target: fix error path in iscsi_target_init_module() In iscsi_target_init_module() unwind transport protocol registration in case that iscsit_load_discovery_tpg() failed. Signed-off-by: Lino Sanfilippo Reviewed-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 73e58d22e325..f8ec32298906 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void) return ret; r2t_out: + iscsit_unregister_transport(&iscsi_target_transport); kmem_cache_destroy(lio_r2t_cache); ooo_out: kmem_cache_destroy(lio_ooo_cache); -- cgit v1.2.3 From 13ba564c956d8584d661aeb6ff7c9fa161b84f12 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 30 Nov 2014 19:14:12 +0100 Subject: target: remove unneeded array Delete a local array that is only used to be initialized by memset. A semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ identifier x; type T; @@ { ... when any -T x[...]; <+... when != x - memset(x,...); ...+> } // Signed-off-by: Julia Lawall Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pr.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 9f93b8234095..f91b6a1b846e 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1429,14 +1429,12 @@ core_scsi3_decode_spec_i_port( struct target_core_fabric_ops *tmp_tf_ops; unsigned char *buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; - char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; u32 tpdl, tid_len = 0; int dest_local_nexus; u32 dest_rtpi = 0; - memset(dest_iport, 0, 64); - local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; /* * Allocate a struct pr_transport_id_holder and setup the @@ -3059,7 +3057,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char *buf; unsigned char *initiator_str; - char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; + char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; int new_reg = 0, type, scope, matching_iname; sense_reason_t ret; @@ -3071,7 +3069,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - memset(dest_iport, 0, 64); memset(i_buf, 0, PR_REG_ISID_ID_LEN); se_tpg = se_sess->se_tpg; tf_ops = se_tpg->se_tpg_tfo; -- cgit v1.2.3 From 954f23722b5753305be490330cf2680b7a25f4a3 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:17 +0200 Subject: iscsi,iser-target: Initiate termination only once Since commit 0fc4ea701fcf ("Target/iser: Don't put isert_conn inside disconnected handler") we 
put the conn kref in isert_wait_conn, so we need .wait_conn to be invoked also in the error path. Introduce call to isert_conn_terminate (called under lock) which transitions the connection state to TERMINATING and calls rdma_disconnect. If the state is already teminating, just bail out back (temination started). Also, make sure to destroy the connection when getting a connect error event if didn't get to connected (state UP). Same for the handling of REJECTED and UNREACHABLE cma events. Squashed: iscsi-target: Add call to wait_conn in establishment error flow Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 84 +++++++++++++++++++------------ drivers/infiniband/ulp/isert/ib_isert.h | 1 - drivers/target/iscsi/iscsi_target_login.c | 3 ++ 3 files changed, 54 insertions(+), 34 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 10641b7816f4..2746bb857e57 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -777,6 +777,33 @@ isert_put_conn(struct isert_conn *isert_conn) kref_put(&isert_conn->conn_kref, isert_release_conn_kref); } +/** + * isert_conn_terminate() - Initiate connection termination + * @isert_conn: isert connection struct + * + * Notes: + * In case the connection state is UP, move state + * to TEMINATING and start teardown sequence (rdma_disconnect). + * + * This routine must be called with conn_mutex held. Thus it is + * safe to call multiple times. + */ +static void +isert_conn_terminate(struct isert_conn *isert_conn) +{ + int err; + + if (isert_conn->state == ISER_CONN_UP) { + isert_conn->state = ISER_CONN_TERMINATING; + pr_info("Terminating conn %p state %d\n", + isert_conn, isert_conn->state); + err = rdma_disconnect(isert_conn->conn_cm_id); + if (err) + pr_warn("Failed rdma_disconnect isert_conn %p\n", + isert_conn); + } +} + static void isert_disconnect_work(struct work_struct *work) { @@ -785,33 +812,15 @@ isert_disconnect_work(struct work_struct *work) pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->state == ISER_CONN_UP) - isert_conn->state = ISER_CONN_TERMINATING; - - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - mutex_unlock(&isert_conn->conn_mutex); - goto wake_up; - } - if (!isert_conn->conn_cm_id) { - mutex_unlock(&isert_conn->conn_mutex); - isert_put_conn(isert_conn); - return; - } - - if (isert_conn->disconnect) { - /* Send DREQ/DREP towards our initiator */ - rdma_disconnect(isert_conn->conn_cm_id); - } - + isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); -wake_up: + pr_info("conn %p completing conn_wait\n", isert_conn); complete(&isert_conn->conn_wait); } static int -isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) +isert_disconnected_handler(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn; @@ -824,18 +833,24 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) isert_conn = (struct isert_conn *)cma_id->context; - isert_conn->disconnect = disconnect; INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); schedule_work(&isert_conn->conn_logout_work); return 0; } +static void +isert_connect_error(struct rdma_cm_id *cma_id) +{ + struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + + isert_put_conn(isert_conn); +} 
+ static int isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - bool disconnect = false; pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", event->event, event->status, cma_id->context, cma_id); @@ -853,11 +868,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ - disconnect = true; case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ - ret = isert_disconnected_handler(cma_id, disconnect); + ret = isert_disconnected_handler(cma_id); break; + case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ case RDMA_CM_EVENT_CONNECT_ERROR: + isert_connect_error(cma_id); + break; default: pr_err("Unhandled RDMA CMA event: %d\n", event->event); break; @@ -2046,7 +2064,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn) msleep(3000); mutex_lock(&isert_conn->conn_mutex); - isert_conn->state = ISER_CONN_DOWN; + isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); iscsit_cause_connection_reinstatement(isert_conn->conn, 0); @@ -3219,10 +3237,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) pr_debug("isert_wait_conn: Starting \n"); mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->conn_cm_id && !isert_conn->disconnect) { - pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); - rdma_disconnect(isert_conn->conn_cm_id); - } /* * Only wait for conn_wait_comp_err if the isert_conn made it * into full feature phase.. @@ -3231,13 +3245,17 @@ static void isert_wait_conn(struct iscsi_conn *conn) mutex_unlock(&isert_conn->conn_mutex); return; } - if (isert_conn->state == ISER_CONN_UP) - isert_conn->state = ISER_CONN_TERMINATING; + isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); wait_for_completion(&isert_conn->conn_wait_comp_err); - wait_for_completion(&isert_conn->conn_wait); + + mutex_lock(&isert_conn->conn_mutex); + isert_conn->state = ISER_CONN_DOWN; + mutex_unlock(&isert_conn->conn_mutex); + + pr_info("Destroying conn %p\n", isert_conn); isert_put_conn(isert_conn); } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 04f51f7bf614..a2e926452f76 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -150,7 +150,6 @@ struct isert_conn { #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; - bool disconnect; }; #define ISERT_MAX_CQ 64 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 480f2e0ecc11..05ad5c7128f2 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1204,6 +1204,9 @@ old_sess_out: conn->sock = NULL; } + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + if (conn->conn_transport->iscsit_free_conn) conn->conn_transport->iscsit_free_conn(conn); -- cgit v1.2.3 From 128e9cc84566a84146baea2335b3824288eed817 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:20 +0200 Subject: iser-target: Fix flush + disconnect completion handling ISER_CONN_UP state is not sufficient to know if we should wait for completion of flush errors and disconnected_handler event. 
Instead, split it to 2 states: - ISER_CONN_UP: Got to CM connected phase, This state indicates that we need to wait for a CM disconnect event before going to teardown. - ISER_CONN_FULL_FEATURE: Got to full feature phase after we posted login response, This state indicates that we posted recv buffers and we need to wait for flush completions before going to teardown. Also avoid deffering disconnected handler to a work, and handle it within disconnected handler. More work here is needed to handle DEVICE_REMOVAL event correctly (cleanup all resources). Squashed: iser-target: Don't deffer disconnected handler to a work Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 52 +++++++++++++++++++-------------- drivers/infiniband/ulp/isert/ib_isert.h | 2 +- 2 files changed, 31 insertions(+), 23 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2746bb857e57..bbc86110b477 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -756,6 +756,9 @@ isert_connected_handler(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->context; + pr_info("conn %p\n", isert_conn); + + isert_conn->state = ISER_CONN_UP; kref_get(&isert_conn->conn_kref); } @@ -782,8 +785,9 @@ isert_put_conn(struct isert_conn *isert_conn) * @isert_conn: isert connection struct * * Notes: - * In case the connection state is UP, move state + * In case the connection state is FULL_FEATURE, move state * to TEMINATING and start teardown sequence (rdma_disconnect). + * In case the connection state is UP, complete flush as well. * * This routine must be called with conn_mutex held. Thus it is * safe to call multiple times. @@ -793,32 +797,31 @@ isert_conn_terminate(struct isert_conn *isert_conn) { int err; - if (isert_conn->state == ISER_CONN_UP) { - isert_conn->state = ISER_CONN_TERMINATING; + switch (isert_conn->state) { + case ISER_CONN_TERMINATING: + break; + case ISER_CONN_UP: + /* + * No flush completions will occur as we didn't + * get to ISER_CONN_FULL_FEATURE yet, complete + * to allow teardown progress. 
+ */ + complete(&isert_conn->conn_wait_comp_err); + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ pr_info("Terminating conn %p state %d\n", isert_conn, isert_conn->state); + isert_conn->state = ISER_CONN_TERMINATING; err = rdma_disconnect(isert_conn->conn_cm_id); if (err) pr_warn("Failed rdma_disconnect isert_conn %p\n", isert_conn); + break; + default: + pr_warn("conn %p teminating in state %d\n", + isert_conn, isert_conn->state); } } -static void -isert_disconnect_work(struct work_struct *work) -{ - struct isert_conn *isert_conn = container_of(work, - struct isert_conn, conn_logout_work); - - pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - mutex_lock(&isert_conn->conn_mutex); - isert_conn_terminate(isert_conn); - mutex_unlock(&isert_conn->conn_mutex); - - pr_info("conn %p completing conn_wait\n", isert_conn); - complete(&isert_conn->conn_wait); -} - static int isert_disconnected_handler(struct rdma_cm_id *cma_id) { @@ -833,8 +836,12 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id) isert_conn = (struct isert_conn *)cma_id->context; - INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); - schedule_work(&isert_conn->conn_logout_work); + mutex_lock(&isert_conn->conn_mutex); + isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); + + pr_info("conn %p completing conn_wait\n", isert_conn); + complete(&isert_conn->conn_wait); return 0; } @@ -1009,7 +1016,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. */ mutex_lock(&isert_conn->conn_mutex); - if (coalesce && isert_conn->state == ISER_CONN_UP && + if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE && ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { tx_desc->llnode_active = true; llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); @@ -1110,7 +1117,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, if (ret) return ret; - isert_conn->state = ISER_CONN_UP; + /* Now we are in FULL_FEATURE phase */ + isert_conn->state = ISER_CONN_FULL_FEATURE; goto post_send; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index a2e926452f76..dd4e0bf9563a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -23,6 +23,7 @@ enum iser_ib_op_code { enum iser_conn_state { ISER_CONN_INIT, ISER_CONN_UP, + ISER_CONN_FULL_FEATURE, ISER_CONN_TERMINATING, ISER_CONN_DOWN, }; @@ -138,7 +139,6 @@ struct isert_conn { struct ib_mr *conn_mr; struct ib_qp *conn_qp; struct isert_device *conn_device; - struct work_struct conn_logout_work; struct mutex conn_mutex; struct completion conn_wait; struct completion conn_wait_comp_err; -- cgit v1.2.3 From 2371e5da8cfe91443339b54444dec6254fdd6dfc Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:21 +0200 Subject: iser-target: Parallelize CM connection establishment There is no point in accepting a new CM request only when we are completely done with the last iscsi login. Instead we accept immediately, this will also cause the CM connection to reach connected state and the initiator is allowed to send the first login. We mark that we got the initial login and let iscsi layer pick it up when it gets there. 
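[The resulting handoff between the receive completion path and the iscsi
login thread looks roughly like this, condensed from the hunks below with
conn_mutex locking and error handling omitted:

/* isert_rx_completion(): stash the login PDU and mark its arrival */
isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
complete(&isert_conn->login_req_comp);

/* isert_get_login_rx(): wait for that mark, then consume the PDU */
ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
if (ret)
	return ret;
reinit_completion(&isert_conn->login_req_comp);
/* only the first login request is consumed here; later ones (CHAP)
 * are handed off directly from the rx completion path */
isert_rx_login_req(isert_conn);
]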
This reduces the parallel login sequence by a factor of more then 4 (and more for multi-login) and also prevents the initiator (who does all logins in parallel) from giving up on login timeout expiration. In order to support multiple login requests sequence (CHAP) we call isert_rx_login_req from isert_rx_completion insead of letting isert_get_login_rx call it. Squashed: iser-target: Use kref_get_unless_zero in connected_handler iser-target: Acquire conn_mutex when changing connection state iser-target: Reject connect request in failure path Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 84 +++++++++++++++++++++++---------- drivers/infiniband/ulp/isert/ib_isert.h | 2 + 2 files changed, 62 insertions(+), 24 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index bbc86110b477..a0fd77bf8f88 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -54,6 +54,10 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr); static int isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); +static int +isert_rdma_post_recvl(struct isert_conn *isert_conn); +static int +isert_rdma_accept(struct isert_conn *isert_conn); static void isert_qp_event_callback(struct ib_event *e, void *context) @@ -590,6 +594,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->state = ISER_CONN_INIT; INIT_LIST_HEAD(&isert_conn->conn_accept_node); init_completion(&isert_conn->conn_login_comp); + init_completion(&isert_conn->login_req_comp); init_completion(&isert_conn->conn_wait); init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); @@ -681,6 +686,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) if (ret) goto out_conn_dev; + ret = isert_rdma_post_recvl(isert_conn); + if (ret) + goto out_conn_dev; + + ret = isert_rdma_accept(isert_conn); + if (ret) + goto out_conn_dev; + mutex_lock(&isert_np->np_accept_mutex); list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); mutex_unlock(&isert_np->np_accept_mutex); @@ -705,6 +718,7 @@ out_login_buf: kfree(isert_conn->login_buf); out: kfree(isert_conn); + rdma_reject(cma_id, NULL, 0); return ret; } @@ -758,8 +772,15 @@ isert_connected_handler(struct rdma_cm_id *cma_id) pr_info("conn %p\n", isert_conn); - isert_conn->state = ISER_CONN_UP; - kref_get(&isert_conn->conn_kref); + if (!kref_get_unless_zero(&isert_conn->conn_kref)) { + pr_warn("conn %p connect_release is running\n", isert_conn); + return; + } + + mutex_lock(&isert_conn->conn_mutex); + if (isert_conn->state != ISER_CONN_FULL_FEATURE) + isert_conn->state = ISER_CONN_UP; + mutex_unlock(&isert_conn->conn_mutex); } static void @@ -1118,7 +1139,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, return ret; /* Now we are in FULL_FEATURE phase */ + mutex_lock(&isert_conn->conn_mutex); isert_conn->state = ISER_CONN_FULL_FEATURE; + mutex_unlock(&isert_conn->conn_mutex); goto post_send; } @@ -1135,18 +1158,17 @@ post_send: } static void -isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, - struct isert_conn *isert_conn) +isert_rx_login_req(struct isert_conn *isert_conn) { + struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; + int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = 
conn->conn_login; int size; - if (!login) { - pr_err("conn->conn_login is NULL\n"); - dump_stack(); - return; - } + pr_info("conn %p\n", isert_conn); + + WARN_ON_ONCE(!login); if (login->first_request) { struct iscsi_login_req *login_req = @@ -1509,11 +1531,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, hdr->opcode, hdr->itt, hdr->flags, (int)(xfer_len - ISER_HEADERS_LEN)); - if ((char *)desc == isert_conn->login_req_buf) - isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN, - isert_conn); - else + if ((char *)desc == isert_conn->login_req_buf) { + isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; + if (isert_conn->conn) { + struct iscsi_login *login = isert_conn->conn->conn_login; + + if (login && !login->first_request) + isert_rx_login_req(isert_conn); + } + mutex_lock(&isert_conn->conn_mutex); + complete(&isert_conn->login_req_comp); + mutex_unlock(&isert_conn->conn_mutex); + } else { isert_rx_do_work(desc, isert_conn); + } ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); @@ -3120,7 +3151,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) struct isert_conn *isert_conn = (struct isert_conn *)conn->context; int ret; - pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); + pr_info("before login_req comp conn: %p\n", isert_conn); + ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); + if (ret) { + pr_err("isert_conn %p interrupted before got login req\n", + isert_conn); + return ret; + } + reinit_completion(&isert_conn->login_req_comp); + /* * For login requests after the first PDU, isert_rx_login_req() will * kick schedule_delayed_work(&conn->login_work) as the packet is @@ -3130,11 +3169,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) if (!login->first_request) return 0; + isert_rx_login_req(isert_conn); + + pr_info("before conn_login_comp conn: %p\n", conn); ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); if (ret) return ret; - pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); + pr_info("processing login->req: %p\n", login->req); + return 0; } @@ -3212,17 +3255,10 @@ accept_wait: isert_conn->conn = conn; max_accept = 0; - ret = isert_rdma_post_recvl(isert_conn); - if (ret) - return ret; - - ret = isert_rdma_accept(isert_conn); - if (ret) - return ret; - isert_set_conn_info(np, conn, isert_conn); - pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); + pr_debug("Processing isert_conn: %p\n", isert_conn); + return 0; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index dd4e0bf9563a..97cb270d402a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -126,6 +126,7 @@ struct isert_conn { char *login_req_buf; char *login_rsp_buf; u64 login_req_dma; + int login_req_len; u64 login_rsp_dma; unsigned int conn_rx_desc_head; struct iser_rx_desc *conn_rx_descs; @@ -133,6 +134,7 @@ struct isert_conn { struct iscsi_conn *conn; struct list_head conn_accept_node; struct completion conn_login_comp; + struct completion login_req_comp; struct iser_tx_desc conn_login_tx_desc; struct rdma_cm_id *conn_cm_id; struct ib_pd *conn_pd; -- cgit v1.2.3 From 19e2090fb246ca21b3e569ead51a6a7a1748eadd Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:26 +0200 Subject: iser-target: Fix connected_handler + teardown flow race Take isert_conn pointer from cm_id->qp->qp_context. 
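[Condensed from the hunks below, the heart of the change is the
dereference swap:

-	isert_conn = (struct isert_conn *)cma_id->context;
+	isert_conn = cma_id->qp->qp_context;

together with telling a listener cm_id apart from a connection cm_id by
pointer comparison instead of by a missing qp:

-	if (!cma_id->qp) {
+	if (isert_np->np_cm_id == cma_id)

The connection now lives in the qp context, leaving cma_id->context free
for the network portal.]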
This will allow us to know that the cm_id context is always the network portal. This will make the cm_id event check (connection or network portal) more reliable. In order to avoid a NULL dereference in cma_id->qp->qp_context we destroy the qp after we destroy the cm_id (and make the dereference safe). session stablishment/teardown sequences can happen in parallel, we should take into account that connected_handler might race with connection teardown flow. Also, protect isert_conn->conn_device->active_qps decrement within the error patch during QP creation failure and the normal teardown path in isert_connect_release(). Squashed: iser-target: Decrement completion context active_qps in error flow Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a0fd77bf8f88..054fa425d09e 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -141,12 +141,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); if (ret) { pr_err("rdma_create_qp failed for cma_id %d\n", ret); - return ret; + goto err; } isert_conn->conn_qp = cma_id->qp; pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); return 0; +err: + mutex_lock(&device_list_mutex); + device->cq_active_qps[min_index]--; + mutex_unlock(&device_list_mutex); + + return ret; } static void @@ -602,7 +608,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) spin_lock_init(&isert_conn->conn_lock); INIT_LIST_HEAD(&isert_conn->conn_fr_pool); - cma_id->context = isert_conn; isert_conn->conn_cm_id = cma_id; isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + @@ -734,18 +739,20 @@ isert_connect_release(struct isert_conn *isert_conn) if (device && device->use_fastreg) isert_conn_free_fastreg_pool(isert_conn); + isert_free_rx_descriptors(isert_conn); + rdma_destroy_id(isert_conn->conn_cm_id); + if (isert_conn->conn_qp) { cq_index = ((struct isert_cq_desc *) isert_conn->conn_qp->recv_cq->cq_context)->cq_index; pr_debug("isert_connect_release: cq_index: %d\n", cq_index); + mutex_lock(&device_list_mutex); isert_conn->conn_device->cq_active_qps[cq_index]--; + mutex_unlock(&device_list_mutex); - rdma_destroy_qp(isert_conn->conn_cm_id); + ib_destroy_qp(isert_conn->conn_qp); } - isert_free_rx_descriptors(isert_conn); - rdma_destroy_id(isert_conn->conn_cm_id); - ib_dereg_mr(isert_conn->conn_mr); ib_dealloc_pd(isert_conn->conn_pd); @@ -768,7 +775,7 @@ isert_connect_release(struct isert_conn *isert_conn) static void isert_connected_handler(struct rdma_cm_id *cma_id) { - struct isert_conn *isert_conn = cma_id->context; + struct isert_conn *isert_conn = cma_id->qp->qp_context; pr_info("conn %p\n", isert_conn); @@ -846,16 +853,16 @@ isert_conn_terminate(struct isert_conn *isert_conn) static int isert_disconnected_handler(struct rdma_cm_id *cma_id) { + struct iscsi_np *np = cma_id->context; + struct isert_np *isert_np = np->np_context; struct isert_conn *isert_conn; - if (!cma_id->qp) { - struct isert_np *isert_np = cma_id->context; - + if (isert_np->np_cm_id == cma_id) { isert_np->np_cm_id = NULL; return -1; } - isert_conn = (struct isert_conn *)cma_id->context; + isert_conn = cma_id->qp->qp_context; mutex_lock(&isert_conn->conn_mutex); 
isert_conn_terminate(isert_conn); @@ -870,7 +877,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id) static void isert_connect_error(struct rdma_cm_id *cma_id) { - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_put_conn(isert_conn); } -- cgit v1.2.3 From ca6c1d82d12d8013fb75ce015900d62b9754623c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:27 +0200 Subject: iser-target: Handle ADDR_CHANGE event for listener cm_id The np listener cm_id will also get ADDR_CHANGE event upcall (in case it is bound to a specific IP). Handle it correctly by creating a new cm_id and implicitly destroy the old one. Since this is the second event a listener np cm_id may encounter, we move the np cm_id event handling to a routine. Squashed: iser-target: Move cma_id setup to a function Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 107 +++++++++++++++++++++++--------- drivers/infiniband/ulp/isert/ib_isert.h | 1 + 2 files changed, 77 insertions(+), 31 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 054fa425d09e..f4b14ede18e4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -58,6 +58,7 @@ static int isert_rdma_post_recvl(struct isert_conn *isert_conn); static int isert_rdma_accept(struct isert_conn *isert_conn); +struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); static void isert_qp_event_callback(struct ib_event *e, void *context) @@ -573,8 +574,8 @@ err: static int isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { - struct iscsi_np *np = cma_id->context; - struct isert_np *isert_np = np->np_context; + struct isert_np *isert_np = cma_id->context; + struct iscsi_np *np = isert_np->np; struct isert_conn *isert_conn; struct isert_device *device; struct ib_device *ib_dev = cma_id->device; @@ -851,17 +852,41 @@ isert_conn_terminate(struct isert_conn *isert_conn) } static int -isert_disconnected_handler(struct rdma_cm_id *cma_id) +isert_np_cma_handler(struct isert_np *isert_np, + enum rdma_cm_event_type event) { - struct iscsi_np *np = cma_id->context; - struct isert_np *isert_np = np->np_context; - struct isert_conn *isert_conn; + pr_debug("isert np %p, handling event %d\n", isert_np, event); - if (isert_np->np_cm_id == cma_id) { + switch (event) { + case RDMA_CM_EVENT_DEVICE_REMOVAL: isert_np->np_cm_id = NULL; - return -1; + break; + case RDMA_CM_EVENT_ADDR_CHANGE: + isert_np->np_cm_id = isert_setup_id(isert_np); + if (IS_ERR(isert_np->np_cm_id)) { + pr_err("isert np %p setup id failed: %ld\n", + isert_np, PTR_ERR(isert_np->np_cm_id)); + isert_np->np_cm_id = NULL; + } + break; + default: + pr_err("isert np %p Unexpected event %d\n", + isert_np, event); } + return -1; +} + +static int +isert_disconnected_handler(struct rdma_cm_id *cma_id, + enum rdma_cm_event_type event) +{ + struct isert_np *isert_np = cma_id->context; + struct isert_conn *isert_conn; + + if (isert_np->np_cm_id == cma_id) + return isert_np_cma_handler(cma_id->context, event); + isert_conn = cma_id->qp->qp_context; mutex_lock(&isert_conn->conn_mutex); @@ -904,7 +929,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* 
FALLTHRU */ - ret = isert_disconnected_handler(cma_id); + ret = isert_disconnected_handler(cma_id, event->event); break; case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ @@ -3065,13 +3090,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) return ret; } +struct rdma_cm_id * +isert_setup_id(struct isert_np *isert_np) +{ + struct iscsi_np *np = isert_np->np; + struct rdma_cm_id *id; + struct sockaddr *sa; + int ret; + + sa = (struct sockaddr *)&np->np_sockaddr; + pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); + + id = rdma_create_id(isert_cma_handler, isert_np, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(id)) { + pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); + ret = PTR_ERR(id); + goto out; + } + pr_debug("id %p context %p\n", id, id->context); + + ret = rdma_bind_addr(id, sa); + if (ret) { + pr_err("rdma_bind_addr() failed: %d\n", ret); + goto out_id; + } + + ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); + if (ret) { + pr_err("rdma_listen() failed: %d\n", ret); + goto out_id; + } + + return id; +out_id: + rdma_destroy_id(id); +out: + return ERR_PTR(ret); +} + static int isert_setup_np(struct iscsi_np *np, struct __kernel_sockaddr_storage *ksockaddr) { struct isert_np *isert_np; struct rdma_cm_id *isert_lid; - struct sockaddr *sa; int ret; isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); @@ -3083,9 +3146,8 @@ isert_setup_np(struct iscsi_np *np, mutex_init(&isert_np->np_accept_mutex); INIT_LIST_HEAD(&isert_np->np_accept_list); init_completion(&isert_np->np_login_comp); + isert_np->np = np; - sa = (struct sockaddr *)ksockaddr; - pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); /* * Setup the np->np_sockaddr from the passed sockaddr setup * in iscsi_target_configfs.c code.. @@ -3093,37 +3155,20 @@ isert_setup_np(struct iscsi_np *np, memcpy(&np->np_sockaddr, ksockaddr, sizeof(struct __kernel_sockaddr_storage)); - isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, - IB_QPT_RC); + isert_lid = isert_setup_id(isert_np); if (IS_ERR(isert_lid)) { - pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", - PTR_ERR(isert_lid)); ret = PTR_ERR(isert_lid); goto out; } - ret = rdma_bind_addr(isert_lid, sa); - if (ret) { - pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); - goto out_lid; - } - - ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); - if (ret) { - pr_err("rdma_listen() for isert_lid failed: %d\n", ret); - goto out_lid; - } - isert_np->np_cm_id = isert_lid; np->np_context = isert_np; - pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); return 0; -out_lid: - rdma_destroy_id(isert_lid); out: kfree(isert_np); + return ret; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 97cb270d402a..5cad43d3de44 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -183,6 +183,7 @@ struct isert_device { }; struct isert_np { + struct iscsi_np *np; struct semaphore np_sem; struct rdma_cm_id *np_cm_id; struct mutex np_accept_mutex; -- cgit v1.2.3 From b02efbfc9a051b41e71fe8f94ddf967260e024a6 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:29 +0200 Subject: iser-target: Fix implicit termination of connections In situations such as bond failover, The new session establishment implicitly invokes the termination of the old connection. 
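[The blocking sequence can be sketched as follows; this is a hypothetical
call chain for illustration only, the completion waits being the ones in
isert_wait_conn shown earlier in this series:

/*
 * login(new conn)
 *   -> implicit teardown of old conn
 *     -> isert_wait_conn(old_conn)
 *       -> wait_for_completion(&old_conn->conn_wait_comp_err)
 *       -> wait_for_completion(&old_conn->conn_wait)
 *
 * The new login response cannot be posted until both waits finish.
 */
]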
So, we don't want to wait for the old connection wait_conn to completely terminate before we accept the new connection and post a login response. The solution is to deffer the comp_wait completion and the conn_put to a work so wait_conn will effectively be non-blocking (flush errors are assumed to come very fast). We allocate isert_release_wq with WQ_UNBOUND and WQ_UNBOUND_MAX_ACTIVE to spread the concurrency of release works. Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Cc: # v3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 42 ++++++++++++++++++++++++++------- drivers/infiniband/ulp/isert/ib_isert.h | 1 + 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f4b14ede18e4..33b549e752c1 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex); static LIST_HEAD(device_list); static struct workqueue_struct *isert_rx_wq; static struct workqueue_struct *isert_comp_wq; +static struct workqueue_struct *isert_release_wq; static void isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); @@ -3326,6 +3327,24 @@ isert_free_np(struct iscsi_np *np) kfree(isert_np); } +static void isert_release_work(struct work_struct *work) +{ + struct isert_conn *isert_conn = container_of(work, + struct isert_conn, + release_work); + + pr_info("Starting release conn %p\n", isert_conn); + + wait_for_completion(&isert_conn->conn_wait); + + mutex_lock(&isert_conn->conn_mutex); + isert_conn->state = ISER_CONN_DOWN; + mutex_unlock(&isert_conn->conn_mutex); + + pr_info("Destroying conn %p\n", isert_conn); + isert_put_conn(isert_conn); +} + static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; @@ -3345,14 +3364,9 @@ static void isert_wait_conn(struct iscsi_conn *conn) mutex_unlock(&isert_conn->conn_mutex); wait_for_completion(&isert_conn->conn_wait_comp_err); - wait_for_completion(&isert_conn->conn_wait); - - mutex_lock(&isert_conn->conn_mutex); - isert_conn->state = ISER_CONN_DOWN; - mutex_unlock(&isert_conn->conn_mutex); - pr_info("Destroying conn %p\n", isert_conn); - isert_put_conn(isert_conn); + INIT_WORK(&isert_conn->release_work, isert_release_work); + queue_work(isert_release_wq, &isert_conn->release_work); } static void isert_free_conn(struct iscsi_conn *conn) @@ -3400,10 +3414,21 @@ static int __init isert_init(void) goto destroy_rx_wq; } + isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); + if (!isert_release_wq) { + pr_err("Unable to allocate isert_release_wq\n"); + ret = -ENOMEM; + goto destroy_comp_wq; + } + iscsit_register_transport(&iser_target_transport); - pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); + pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); + return 0; +destroy_comp_wq: + destroy_workqueue(isert_comp_wq); destroy_rx_wq: destroy_workqueue(isert_rx_wq); return ret; @@ -3412,6 +3437,7 @@ destroy_rx_wq: static void __exit isert_exit(void) { flush_scheduled_work(); + destroy_workqueue(isert_release_wq); destroy_workqueue(isert_comp_wq); destroy_workqueue(isert_rx_wq); iscsit_unregister_transport(&iser_target_transport); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 5cad43d3de44..9372d4d4d14a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ 
b/drivers/infiniband/ulp/isert/ib_isert.h @@ -149,6 +149,7 @@ struct isert_conn { int conn_fr_pool_size; /* lock to protect fastreg pool */ spinlock_t conn_lock; + struct work_struct release_work; #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; -- cgit v1.2.3 From 570db170f37715b7df23c95868169f3d9affa48c Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:31 +0200 Subject: iser-target: Allocate PI contexts dynamically This patch converts to allocate PI contexts dynamically in order avoid a potentially bogus np->tpg_np and associated NULL pointer dereference in isert_connect_request() during iser-target endpoint shutdown with multiple network portals. Also, there is really no need to allocate these at connection establishment since it is not guaranteed that all the IOs on that connection will be to a PI formatted device. We can do it in a lazy fashion so the initial burst will have a transient slow down, but very fast all IOs will allocate a PI context. Squashed: iser-target: Centralize PI context handling code Signed-off-by: Sagi Grimberg Cc: # v3.14+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 267 ++++++++++++++++++-------------- drivers/infiniband/ulp/isert/ib_isert.h | 7 + 2 files changed, 158 insertions(+), 116 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 33b549e752c1..b0e58f196d90 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -96,8 +96,7 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) } static int -isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, - u8 protection) +isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) { struct isert_device *device = isert_conn->conn_device; struct ib_qp_init_attr attr; @@ -132,7 +131,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, attr.cap.max_recv_sge = 1; attr.sq_sig_type = IB_SIGNAL_REQ_WR; attr.qp_type = IB_QPT_RC; - if (protection) + if (device->pi_capable) attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; pr_debug("isert_conn_setup_qp cma_id->device: %p\n", @@ -441,9 +440,69 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) isert_conn->conn_fr_pool_size - i); } +static int +isert_create_pi_ctx(struct fast_reg_descriptor *desc, + struct ib_device *device, + struct ib_pd *pd) +{ + struct ib_mr_init_attr mr_init_attr; + struct pi_context *pi_ctx; + int ret; + + pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); + if (!pi_ctx) { + pr_err("Failed to allocate pi context\n"); + return -ENOMEM; + } + + pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device, + ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(pi_ctx->prot_frpl)) { + pr_err("Failed to allocate prot frpl err=%ld\n", + PTR_ERR(pi_ctx->prot_frpl)); + ret = PTR_ERR(pi_ctx->prot_frpl); + goto err_pi_ctx; + } + + pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(pi_ctx->prot_mr)) { + pr_err("Failed to allocate prot frmr err=%ld\n", + PTR_ERR(pi_ctx->prot_mr)); + ret = PTR_ERR(pi_ctx->prot_mr); + goto err_prot_frpl; + } + desc->ind |= ISERT_PROT_KEY_VALID; + + memset(&mr_init_attr, 0, sizeof(mr_init_attr)); + mr_init_attr.max_reg_descriptors = 2; + mr_init_attr.flags |= IB_MR_SIGNATURE_EN; + pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); + if (IS_ERR(pi_ctx->sig_mr)) { + pr_err("Failed to allocate signature enabled mr 
err=%ld\n", + PTR_ERR(pi_ctx->sig_mr)); + ret = PTR_ERR(pi_ctx->sig_mr); + goto err_prot_mr; + } + + desc->pi_ctx = pi_ctx; + desc->ind |= ISERT_SIG_KEY_VALID; + desc->ind &= ~ISERT_PROTECTED; + + return 0; + +err_prot_mr: + ib_dereg_mr(desc->pi_ctx->prot_mr); +err_prot_frpl: + ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); +err_pi_ctx: + kfree(desc->pi_ctx); + + return ret; +} + static int isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, - struct fast_reg_descriptor *fr_desc, u8 protection) + struct fast_reg_descriptor *fr_desc) { int ret; @@ -462,62 +521,12 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, ret = PTR_ERR(fr_desc->data_mr); goto err_data_frpl; } - pr_debug("Create fr_desc %p page_list %p\n", - fr_desc, fr_desc->data_frpl->page_list); fr_desc->ind |= ISERT_DATA_KEY_VALID; - if (protection) { - struct ib_mr_init_attr mr_init_attr = {0}; - struct pi_context *pi_ctx; - - fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL); - if (!fr_desc->pi_ctx) { - pr_err("Failed to allocate pi context\n"); - ret = -ENOMEM; - goto err_data_mr; - } - pi_ctx = fr_desc->pi_ctx; - - pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_frpl)) { - pr_err("Failed to allocate prot frpl err=%ld\n", - PTR_ERR(pi_ctx->prot_frpl)); - ret = PTR_ERR(pi_ctx->prot_frpl); - goto err_pi_ctx; - } - - pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(pi_ctx->prot_mr)) { - pr_err("Failed to allocate prot frmr err=%ld\n", - PTR_ERR(pi_ctx->prot_mr)); - ret = PTR_ERR(pi_ctx->prot_mr); - goto err_prot_frpl; - } - fr_desc->ind |= ISERT_PROT_KEY_VALID; - - mr_init_attr.max_reg_descriptors = 2; - mr_init_attr.flags |= IB_MR_SIGNATURE_EN; - pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); - if (IS_ERR(pi_ctx->sig_mr)) { - pr_err("Failed to allocate signature enabled mr err=%ld\n", - PTR_ERR(pi_ctx->sig_mr)); - ret = PTR_ERR(pi_ctx->sig_mr); - goto err_prot_mr; - } - fr_desc->ind |= ISERT_SIG_KEY_VALID; - } - fr_desc->ind &= ~ISERT_PROTECTED; + pr_debug("Created fr_desc %p\n", fr_desc); return 0; -err_prot_mr: - ib_dereg_mr(fr_desc->pi_ctx->prot_mr); -err_prot_frpl: - ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); -err_pi_ctx: - kfree(fr_desc->pi_ctx); -err_data_mr: - ib_dereg_mr(fr_desc->data_mr); + err_data_frpl: ib_free_fast_reg_page_list(fr_desc->data_frpl); @@ -525,7 +534,7 @@ err_data_frpl: } static int -isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support) +isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc; struct isert_device *device = isert_conn->conn_device; @@ -549,8 +558,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support) } ret = isert_create_fr_desc(device->ib_device, - isert_conn->conn_pd, fr_desc, - pi_support); + isert_conn->conn_pd, fr_desc); if (ret) { pr_err("Failed to create fastreg descriptor err=%d\n", ret); @@ -581,7 +589,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) struct isert_device *device; struct ib_device *ib_dev = cma_id->device; int ret = 0; - u8 pi_support; spin_lock_bh(&np->np_thread_lock); if (!np->enabled) { @@ -681,15 +688,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } - pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; - if (pi_support && !device->pi_capable) { - pr_err("Protection information requested but not supported, " - "rejecting 
connect request\n"); - ret = rdma_reject(cma_id, NULL, 0); - goto out_mr; - } - - ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support); + ret = isert_conn_setup_qp(isert_conn, cma_id); if (ret) goto out_conn_dev; @@ -1151,11 +1150,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, if (login->login_complete) { if (!conn->sess->sess_ops->SessionType && isert_conn->conn_device->use_fastreg) { - /* Normal Session and fastreg is used */ - u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; - - ret = isert_conn_create_fastreg_pool(isert_conn, - pi_support); + ret = isert_conn_create_fastreg_pool(isert_conn); if (ret) { pr_err("Conn: %p failed to create" " fastreg pool\n", isert_conn); @@ -2771,10 +2766,10 @@ isert_set_prot_checks(u8 prot_checks) } static int -isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, - struct fast_reg_descriptor *fr_desc, - struct ib_sge *data_sge, struct ib_sge *prot_sge, - struct ib_sge *sig_sge) +isert_reg_sig_mr(struct isert_conn *isert_conn, + struct se_cmd *se_cmd, + struct isert_rdma_wr *rdma_wr, + struct fast_reg_descriptor *fr_desc) { struct ib_send_wr sig_wr, inv_wr; struct ib_send_wr *bad_wr, *wr = NULL; @@ -2804,13 +2799,13 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, memset(&sig_wr, 0, sizeof(sig_wr)); sig_wr.opcode = IB_WR_REG_SIG_MR; sig_wr.wr_id = ISER_FASTREG_LI_WRID; - sig_wr.sg_list = data_sge; + sig_wr.sg_list = &rdma_wr->ib_sg[DATA]; sig_wr.num_sge = 1; sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; if (se_cmd->t_prot_sg) - sig_wr.wr.sig_handover.prot = prot_sge; + sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT]; if (!wr) wr = &sig_wr; @@ -2824,24 +2819,83 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, } fr_desc->ind &= ~ISERT_SIG_KEY_VALID; - sig_sge->lkey = pi_ctx->sig_mr->lkey; - sig_sge->addr = 0; - sig_sge->length = se_cmd->data_length; + rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey; + rdma_wr->ib_sg[SIG].addr = 0; + rdma_wr->ib_sg[SIG].length = se_cmd->data_length; if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) /* * We have protection guards on the wire * so we need to set a larget transfer */ - sig_sge->length += se_cmd->prot_length; + rdma_wr->ib_sg[SIG].length += se_cmd->prot_length; pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", - sig_sge->addr, sig_sge->length, - sig_sge->lkey); + rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length, + rdma_wr->ib_sg[SIG].lkey); err: return ret; } +static int +isert_handle_prot_cmd(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, + struct isert_rdma_wr *wr) +{ + struct isert_device *device = isert_conn->conn_device; + struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd; + int ret; + + if (!wr->fr_desc->pi_ctx) { + ret = isert_create_pi_ctx(wr->fr_desc, + device->ib_device, + isert_conn->conn_pd); + if (ret) { + pr_err("conn %p failed to allocate pi_ctx\n", + isert_conn); + return ret; + } + } + + if (se_cmd->t_prot_sg) { + ret = isert_map_data_buf(isert_conn, isert_cmd, + se_cmd->t_prot_sg, + se_cmd->t_prot_nents, + se_cmd->prot_length, + 0, wr->iser_ib_op, &wr->prot); + if (ret) { + pr_err("conn %p failed to map protection buffer\n", + isert_conn); + return ret; + } + + memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT])); + ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot, + ISERT_PROT_KEY_VALID, 
&wr->ib_sg[PROT]); + if (ret) { + pr_err("conn %p failed to fast reg mr\n", + isert_conn); + goto unmap_prot_cmd; + } + } + + ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc); + if (ret) { + pr_err("conn %p failed to fast reg mr\n", + isert_conn); + goto unmap_prot_cmd; + } + wr->fr_desc->ind |= ISERT_PROTECTED; + + return 0; + +unmap_prot_cmd: + if (se_cmd->t_prot_sg) + isert_unmap_data_buf(isert_conn, &wr->prot); + + return ret; +} + static int isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr) @@ -2849,9 +2903,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); struct isert_conn *isert_conn = conn->context; - struct ib_sge data_sge; - struct ib_send_wr *send_wr; struct fast_reg_descriptor *fr_desc = NULL; + struct ib_send_wr *send_wr; + struct ib_sge *ib_sg; u32 offset; int ret = 0; unsigned long flags; @@ -2876,38 +2930,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, - ISERT_DATA_KEY_VALID, &data_sge); + ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]); if (ret) goto unmap_cmd; if (se_cmd->prot_op != TARGET_PROT_NORMAL) { - struct ib_sge prot_sge, sig_sge; - - if (se_cmd->t_prot_sg) { - ret = isert_map_data_buf(isert_conn, isert_cmd, - se_cmd->t_prot_sg, - se_cmd->t_prot_nents, - se_cmd->prot_length, - 0, wr->iser_ib_op, &wr->prot); - if (ret) - goto unmap_cmd; - - ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot, - ISERT_PROT_KEY_VALID, &prot_sge); - if (ret) - goto unmap_prot_cmd; - } - - ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc, - &data_sge, &prot_sge, &sig_sge); + ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr); if (ret) - goto unmap_prot_cmd; + goto unmap_cmd; - fr_desc->ind |= ISERT_PROTECTED; - memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); - } else - memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); + ib_sg = &wr->ib_sg[SIG]; + } else { + ib_sg = &wr->ib_sg[DATA]; + } + memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg)); wr->ib_sge = &wr->s_ib_sge; wr->send_wr_num = 1; memset(&wr->s_send_wr, 0, sizeof(*send_wr)); @@ -2932,9 +2969,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } return 0; -unmap_prot_cmd: - if (se_cmd->t_prot_sg) - isert_unmap_data_buf(isert_conn, &wr->prot); + unmap_cmd: if (fr_desc) { spin_lock_irqsave(&isert_conn->conn_lock, flags); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 9372d4d4d14a..2e7868c5ad14 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -82,6 +82,12 @@ struct isert_data_buf { enum dma_data_direction dma_dir; }; +enum { + DATA = 0, + PROT = 1, + SIG = 2, +}; + struct isert_rdma_wr { struct list_head wr_list; struct isert_cmd *isert_cmd; @@ -91,6 +97,7 @@ struct isert_rdma_wr { int send_wr_num; struct ib_send_wr *send_wr; struct ib_send_wr s_send_wr; + struct ib_sge ib_sg[3]; struct isert_data_buf data; struct isert_data_buf prot; struct fast_reg_descriptor *fr_desc; -- cgit v1.2.3 From 302cc7c3ca14d21ccdffdebdb61c4fe028f2d5ad Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:34 +0200 Subject: iser-target: Fix NULL dereference in SW mode DIF Fallback to software mode DIF if HCA does not support PI (without crashing obviously). It is still possible to run with backend protection and an unprotected frontend, so looking at the command prot_op is not enough. 
Check device PI capability on a per-IO basis (isert_prot_cmd inline static) to determine if we need to handle protection information. Trace: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [] isert_reg_sig_mr+0x351/0x3b0 [ib_isert] Call Trace: [] ? swiotlb_map_sg_attrs+0x7a/0x130 [] isert_reg_rdma+0x2fd/0x370 [ib_isert] [] ? idle_balance+0x6c/0x2c0 [] isert_put_datain+0x68/0x210 [ib_isert] [] lio_queue_data_in+0x2b/0x30 [iscsi_target_mod] [] target_complete_ok_work+0x21b/0x310 [target_core_mod] [] process_one_work+0x182/0x3b0 [] worker_thread+0x120/0x3c0 [] ? maybe_create_worker+0x190/0x190 [] kthread+0xce/0xf0 [] ? kthread_freezable_should_stop+0x70/0x70 [] ret_from_fork+0x7c/0xb0 [] ? kthread_freezable_should_stop+0x70/0x70 Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Cc: # v3.14+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b0e58f196d90..618c81576b05 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -61,6 +61,14 @@ static int isert_rdma_accept(struct isert_conn *isert_conn); struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); +static inline bool +isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) +{ + return (conn->conn_device->pi_capable && + cmd->prot_op != TARGET_PROT_NORMAL); +} + + static void isert_qp_event_callback(struct ib_event *e, void *context) { @@ -2919,8 +2927,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (ret) return ret; - if (wr->data.dma_nents != 1 || - se_cmd->prot_op != TARGET_PROT_NORMAL) { + if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) { spin_lock_irqsave(&isert_conn->conn_lock, flags); fr_desc = list_first_entry(&isert_conn->conn_fr_pool, struct fast_reg_descriptor, list); @@ -2934,7 +2941,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (ret) goto unmap_cmd; - if (se_cmd->prot_op != TARGET_PROT_NORMAL) { + if (isert_prot_cmd(isert_conn, se_cmd)) { ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr); if (ret) goto unmap_cmd; @@ -2959,7 +2966,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, send_wr->opcode = IB_WR_RDMA_WRITE; send_wr->wr.rdma.remote_addr = isert_cmd->read_va; send_wr->wr.rdma.rkey = isert_cmd->read_stag; - send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? + send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ? 
0 : IB_SEND_SIGNALED; } else { send_wr->opcode = IB_WR_RDMA_READ; @@ -3001,7 +3008,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) return rc; } - if (se_cmd->prot_op == TARGET_PROT_NORMAL) { + if (!isert_prot_cmd(isert_conn, se_cmd)) { /* * Build isert_conn->tx_desc for iSCSI response PDU and attach */ @@ -3024,7 +3031,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); } - if (se_cmd->prot_op == TARGET_PROT_NORMAL) + if (!isert_prot_cmd(isert_conn, se_cmd)) pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " "READ\n", isert_cmd); else -- cgit v1.2.3 From 23a548ee656c8ba6da8cb2412070edcd62e2ac5d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:35 +0200 Subject: iscsi,iser-target: Expose supported protection ops according to t10_pi iSER will report supported protection operations based on the tpg attribute t10_pi settings and HCA PI offload capabilities. If the HCA does not support PI offload or tpg attribute t10_pi is not set, we fall to SW PI mode. In order to do that, we move iscsit_get_sup_prot_ops after connection tpg assignment. Signed-off-by: Sagi Grimberg Cc: # v3.14+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 14 +++++++++++--- drivers/infiniband/ulp/isert/ib_isert.h | 1 + drivers/target/iscsi/iscsi_target_login.c | 7 ++++--- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 618c81576b05..a6daabc70425 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -64,7 +64,7 @@ struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); static inline bool isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) { - return (conn->conn_device->pi_capable && + return (conn->pi_support && cmd->prot_op != TARGET_PROT_NORMAL); } @@ -2324,8 +2324,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn) struct isert_conn *isert_conn = (struct isert_conn *)conn->context; struct isert_device *device = isert_conn->conn_device; - if (device->pi_capable) - return TARGET_PROT_ALL; + if (conn->tpg->tpg_attrib.t10_pi) { + if (device->pi_capable) { + pr_info("conn %p PI offload enabled\n", isert_conn); + isert_conn->pi_support = true; + return TARGET_PROT_ALL; + } + } + + pr_info("conn %p PI offload disabled\n", isert_conn); + isert_conn->pi_support = false; return TARGET_PROT_NORMAL; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 2e7868c5ad14..141905f446dd 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -128,6 +128,7 @@ struct isert_conn { atomic_t post_send_buf_count; u32 responder_resources; u32 initiator_depth; + bool pi_support; u32 max_sge; char *login_buf; char *login_req_buf; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 05ad5c7128f2..18e2601527df 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1( { struct iscsi_session *sess = NULL; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; - enum target_prot_op sup_pro_ops; int ret; sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); @@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1( kfree(sess); return -ENOMEM; } - sup_pro_ops = 
conn->conn_transport->iscsit_get_sup_prot_ops(conn); - sess->se_sess = transport_init_session(sup_pro_ops); + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -1367,6 +1365,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } login->zero_tsih = zero_tsih; + conn->sess->se_sess->sup_prot_ops = + conn->conn_transport->iscsit_get_sup_prot_ops(conn); + tpg = conn->tpg; if (!tpg) { pr_err("Unable to locate struct iscsi_conn->tpg\n"); -- cgit v1.2.3 From 268e6811f5729fb9ce7415586bd992b504fe95a2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:36 +0200 Subject: iser-target: Work-around live target stack shutdown resource cleanup Currently we have no way to tell that the target stack is in shutdown sequence. In case we have open connections, the initiator immediately attempts to reconnect in a DDoS attack style, so we may end up terminating the iser enabled network portal while its np_accept_list still has pending connections. The workaround is simply to release all the connections in the list. A proper fix would be to start the shutdown sequence by shutting down the network portal to avoid initiator immediate reconnect attempts. But the temporary workaround seems to work at this point, so I think we can do this for now... Reported-by: Slava Shwartsman Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a6daabc70425..b88686e96a54 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -3369,10 +3369,31 @@ static void isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = (struct isert_np *)np->np_context; + struct isert_conn *isert_conn, *n; if (isert_np->np_cm_id) rdma_destroy_id(isert_np->np_cm_id); + /* + * FIXME: At this point we don't have a good way to insure + * that at this point we don't have hanging connections that + * completed RDMA establishment but didn't start iscsi login + * process. So work-around this by cleaning up what ever piled + * up in np_accept_list. + */ + mutex_lock(&isert_np->np_accept_mutex); + if (!list_empty(&isert_np->np_accept_list)) { + pr_info("Still have isert connections, cleaning up...\n"); + list_for_each_entry_safe(isert_conn, n, + &isert_np->np_accept_list, + conn_accept_node) { + pr_info("cleaning isert_conn %p state (%d)\n", + isert_conn, isert_conn->state); + isert_connect_release(isert_conn); + } + } + mutex_unlock(&isert_np->np_accept_mutex); + np->np_context = NULL; kfree(isert_np); } -- cgit v1.2.3 From 68a86dee8a32358ffd9dfa6d2acde4f71a572285 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:37 +0200 Subject: iser-target: Remove interrupt coalescing It is disabled at the moment; we will bring it back once the target is more stable.
This reverts commit 95b60f0 "Add support for completion interrupt coalescing" Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 120 +++----------------------------- drivers/infiniband/ulp/isert/ib_isert.h | 6 -- 2 files changed, 10 insertions(+), 116 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b88686e96a54..992e452c9570 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -1063,7 +1062,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, static void isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, - struct ib_send_wr *send_wr, bool coalesce) + struct ib_send_wr *send_wr) { struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; @@ -1072,22 +1071,6 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, send_wr->opcode = IB_WR_SEND; send_wr->sg_list = &tx_desc->tx_sg[0]; send_wr->num_sge = isert_cmd->tx_desc.num_sge; - /* - * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED - * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. - */ - mutex_lock(&isert_conn->conn_mutex); - if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE && - ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { - tx_desc->llnode_active = true; - llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_mutex); - return; - } - isert_conn->conn_comp_batch = 0; - tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_mutex); - send_wr->send_flags = IB_SEND_SIGNALED; } @@ -1988,8 +1971,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc, } static void -__isert_send_completion(struct iser_tx_desc *tx_desc, - struct isert_conn *isert_conn) +isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd = tx_desc->isert_cmd; @@ -2030,88 +2013,11 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, } } -static void -isert_send_completion(struct iser_tx_desc *tx_desc, - struct isert_conn *isert_conn) -{ - struct llist_node *llnode = tx_desc->comp_llnode_batch; - struct iser_tx_desc *t; - /* - * Drain coalesced completion llist starting from comp_llnode_batch - * setup in isert_init_send_wr(), and then complete trailing tx_desc. - */ - while (llnode) { - t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); - llnode = llist_next(llnode); - __isert_send_completion(t, isert_conn); - } - __isert_send_completion(tx_desc, isert_conn); -} - -static void -isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) -{ - struct llist_node *llnode; - struct isert_rdma_wr *wr; - struct iser_tx_desc *t; - - mutex_lock(&isert_conn->conn_mutex); - llnode = llist_del_all(&isert_conn->conn_comp_llist); - isert_conn->conn_comp_batch = 0; - mutex_unlock(&isert_conn->conn_mutex); - - while (llnode) { - t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); - llnode = llist_next(llnode); - wr = &t->isert_cmd->rdma_wr; - - /** - * If send_wr_num is 0 this means that we got - * RDMA completion and we cleared it and we should - * simply decrement the response post. else the - * response is incorporated in send_wr_num, just - * sub it. 
- **/ - if (wr->send_wr_num) - atomic_sub(wr->send_wr_num, - &isert_conn->post_send_buf_count); - else - atomic_dec(&isert_conn->post_send_buf_count); - - isert_completion_put(t, t->isert_cmd, ib_dev, true); - } -} - static void isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd = tx_desc->isert_cmd; - struct llist_node *llnode = tx_desc->comp_llnode_batch; - struct isert_rdma_wr *wr; - struct iser_tx_desc *t; - - while (llnode) { - t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); - llnode = llist_next(llnode); - wr = &t->isert_cmd->rdma_wr; - - /** - * If send_wr_num is 0 this means that we got - * RDMA completion and we cleared it and we should - * simply decrement the response post. else the - * response is incorporated in send_wr_num, just - * sub it. - **/ - if (wr->send_wr_num) - atomic_sub(wr->send_wr_num, - &isert_conn->post_send_buf_count); - else - atomic_dec(&isert_conn->post_send_buf_count); - - isert_completion_put(t, t->isert_cmd, ib_dev, true); - } - tx_desc->comp_llnode_batch = NULL; if (!isert_cmd) isert_unmap_tx_desc(tx_desc, ib_dev); @@ -2122,14 +2028,11 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn static void isert_cq_rx_comp_err(struct isert_conn *isert_conn) { - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct iscsi_conn *conn = isert_conn->conn; if (isert_conn->post_recv_buf_count) return; - isert_cq_drain_comp_llist(isert_conn, ib_dev); - if (conn->sess) { target_sess_cmd_list_set_waiting(conn->sess->se_sess); target_wait_for_sess_cmds(conn->sess->se_sess); @@ -2171,9 +2074,6 @@ isert_cq_tx_work(struct work_struct *work) pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); if (wc.wr_id != ISER_FASTREG_LI_WRID) { - if (tx_desc->llnode_active) - continue; - atomic_dec(&isert_conn->post_send_buf_count); isert_cq_tx_comp_err(tx_desc, isert_conn); } @@ -2293,7 +2193,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2351,7 +2251,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, &isert_cmd->tx_desc.iscsi_header, nopout_response); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2369,7 +2269,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2387,7 +2287,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2419,7 
+2319,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -2459,7 +2359,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); + isert_init_send_wr(isert_conn, isert_cmd, send_wr); pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -3026,7 +2926,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, - &isert_cmd->tx_desc.send_wr, false); + &isert_cmd->tx_desc.send_wr); isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; wr->send_wr_num += 1; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 141905f446dd..b3735a07ca47 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -45,9 +45,6 @@ struct iser_tx_desc { struct ib_sge tx_sg[2]; int num_sge; struct isert_cmd *isert_cmd; - struct llist_node *comp_llnode_batch; - struct llist_node comp_llnode; - bool llnode_active; struct ib_send_wr send_wr; } __packed; @@ -158,9 +155,6 @@ struct isert_conn { /* lock to protect fastreg pool */ spinlock_t conn_lock; struct work_struct release_work; -#define ISERT_COMP_BATCH_COUNT 8 - int conn_comp_batch; - struct llist_head conn_comp_llist; }; #define ISERT_MAX_CQ 64 -- cgit v1.2.3 From df43debdfd926fd1f13d5b0902d330d3e04ca05f Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:38 +0200 Subject: iser-target: Unite error completion handler for RX and TX As a pre-step to a single CQ, we unite the error completion handlers to a single handler. This patch does not change any functionality. 
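For orientation before the diff below: both flush paths now funnel into one helper, distinguished only by a direction flag, and connection teardown runs only once both post counters have drained. A condensed sketch of that shape (not a drop-in excerpt from the patch):

    /* TX poll loop: flush errors for send work requests */
    if (wc.wr_id != ISER_FASTREG_LI_WRID)
            isert_cq_comp_err(tx_desc, isert_conn, true);

    /* RX poll loop: flush errors for receive work requests */
    isert_cq_comp_err(rx_desc, isert_conn, false);

    /* inside isert_cq_comp_err(): tear down only when both sides drained */
    if (isert_conn->post_recv_buf_count == 0 &&
        atomic_read(&isert_conn->post_send_buf_count) == 0)
            complete(&isert_conn->conn_wait_comp_err);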
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 61 +++++++++++++++------------------ 1 file changed, 28 insertions(+), 33 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 992e452c9570..bbfdd62f9724 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2014,40 +2014,38 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } static void -isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) +isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx) { - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; - struct isert_cmd *isert_cmd = tx_desc->isert_cmd; - - if (!isert_cmd) - isert_unmap_tx_desc(tx_desc, ib_dev); - else - isert_completion_put(tx_desc, isert_cmd, ib_dev, true); -} - -static void -isert_cq_rx_comp_err(struct isert_conn *isert_conn) -{ - struct iscsi_conn *conn = isert_conn->conn; - - if (isert_conn->post_recv_buf_count) - return; + if (tx) { + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_cmd *isert_cmd; - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); + isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd; + if (!isert_cmd) + isert_unmap_tx_desc(desc, ib_dev); + else + isert_completion_put(desc, isert_cmd, ib_dev, true); + atomic_dec(&isert_conn->post_send_buf_count); + } else { + isert_conn->post_recv_buf_count--; } - while (atomic_read(&isert_conn->post_send_buf_count)) - msleep(3000); + if (isert_conn->post_recv_buf_count == 0 && + atomic_read(&isert_conn->post_send_buf_count) == 0) { + struct iscsi_conn *conn = isert_conn->conn; - mutex_lock(&isert_conn->conn_mutex); - isert_conn_terminate(isert_conn); - mutex_unlock(&isert_conn->conn_mutex); + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); + } - iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + mutex_lock(&isert_conn->conn_mutex); + isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); - complete(&isert_conn->conn_wait_comp_err); + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + complete(&isert_conn->conn_wait_comp_err); + } } static void @@ -2073,10 +2071,8 @@ isert_cq_tx_work(struct work_struct *work) pr_debug("TX wc.status: 0x%08x\n", wc.status); pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); - if (wc.wr_id != ISER_FASTREG_LI_WRID) { - atomic_dec(&isert_conn->post_send_buf_count); - isert_cq_tx_comp_err(tx_desc, isert_conn); - } + if (wc.wr_id != ISER_FASTREG_LI_WRID) + isert_cq_comp_err(tx_desc, isert_conn, true); } } @@ -2118,8 +2114,7 @@ isert_cq_rx_work(struct work_struct *work) pr_debug("RX wc.vendor_err: 0x%08x\n", wc.vendor_err); } - isert_conn->post_recv_buf_count--; - isert_cq_rx_comp_err(isert_conn); + isert_cq_comp_err(rx_desc, isert_conn, false); } } -- cgit v1.2.3 From b0a191e703dfc6a1496a668a2650f8fc9360936d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:39 +0200 Subject: iser-target: Cast wr_id with uintptr_t instead of unsigned long Nit, uintptr_t is designed for pointer casting, use it.
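The rename matters because wr_id is a u64: uintptr_t is the integer type the C standard guarantees can round-trip a pointer value, while unsigned long merely happens to be pointer-sized on common Linux ABIs. An illustrative fragment of the round trip (field names follow the driver; this is not a complete function):

    /* tx_desc is a previously set up struct iser_tx_desc pointer */
    send_wr.wr_id = (uintptr_t)tx_desc;     /* pointer -> u64, stashed in the WR */

    /* on completion, the same value comes back in the work completion */
    tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id;   /* u64 -> pointer */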
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index bbfdd62f9724..c4b9c6d3662b 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -961,7 +961,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { rx_desc = &isert_conn->conn_rx_descs[rx_head]; - rx_wr->wr_id = (unsigned long)rx_desc; + rx_wr->wr_id = (uintptr_t)rx_desc; rx_wr->sg_list = &rx_desc->rx_sg; rx_wr->num_sge = 1; rx_wr->next = rx_wr + 1; @@ -995,7 +995,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) ISER_HEADERS_LEN, DMA_TO_DEVICE); send_wr.next = NULL; - send_wr.wr_id = (unsigned long)tx_desc; + send_wr.wr_id = (uintptr_t)tx_desc; send_wr.sg_list = tx_desc->tx_sg; send_wr.num_sge = tx_desc->num_sge; send_wr.opcode = IB_WR_SEND; @@ -1067,7 +1067,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; - send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; + send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; send_wr->opcode = IB_WR_SEND; send_wr->sg_list = &tx_desc->tx_sg[0]; send_wr->num_sge = isert_cmd->tx_desc.num_sge; @@ -1090,7 +1090,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) sge.addr, sge.length, sge.lkey); memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); - rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; + rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf; rx_wr.sg_list = &sge; rx_wr.num_sge = 1; @@ -2061,7 +2061,7 @@ isert_cq_tx_work(struct work_struct *work) struct ib_wc wc; while (ib_poll_cq(tx_cq, 1, &wc) == 1) { - tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id; + tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id; isert_conn = wc.qp->qp_context; if (wc.status == IB_WC_SUCCESS) { @@ -2101,7 +2101,7 @@ isert_cq_rx_work(struct work_struct *work) unsigned long xfer_len; while (ib_poll_cq(rx_cq, 1, &wc) == 1) { - rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; + rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id; isert_conn = wc.qp->qp_context; if (wc.status == IB_WC_SUCCESS) { @@ -2379,7 +2379,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, send_wr->sg_list = ib_sge; send_wr->num_sge = sg_nents; - send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; + send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; /* * Perform mapping of TCM scatterlist memory ib_sge dma_addr. */ @@ -2864,7 +2864,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, send_wr = &isert_cmd->rdma_wr.s_send_wr; send_wr->sg_list = &wr->s_ib_sge; send_wr->num_sge = 1; - send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; + send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { send_wr->opcode = IB_WR_RDMA_WRITE; send_wr->wr.rdma.remote_addr = isert_cmd->read_va; send_wr->wr.rdma.rkey = isert_cmd->read_stag; -- cgit v1.2.3 From 4a295bae7e72d870354d337fe42360b0e4441e57 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:40 +0200 Subject: iser-target: Centralize completion elements to a context A pre-step before going to a single CQ. Also, this makes the code a little simpler to read.
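The payoff shows up in QP setup: instead of indexing three parallel arrays (dev_rx_cq[], dev_tx_cq[], cq_active_qps[]), the driver now selects the least-loaded completion context as a single object. A condensed sketch of the isert_conn_setup_qp() hunk in the diff below:

    struct isert_comp *comp;
    int i, min = 0;

    mutex_lock(&device_list_mutex);
    for (i = 0; i < device->comps_used; i++)
            if (device->comps[i].active_qps <
                device->comps[min].active_qps)
                    min = i;
    comp = &device->comps[min];     /* CQs, work structs and QP count in one place */
    comp->active_qps++;
    mutex_unlock(&device_list_mutex);

    attr.send_cq = comp->tx_cq;
    attr.recv_cq = comp->rx_cq;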
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 192 ++++++++++++++++---------------- drivers/infiniband/ulp/isert/ib_isert.h | 30 +++-- 2 files changed, 114 insertions(+), 108 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c4b9c6d3662b..d4a208381af8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -107,22 +107,25 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) { struct isert_device *device = isert_conn->conn_device; struct ib_qp_init_attr attr; - int ret, index, min_index = 0; + struct isert_comp *comp; + int ret, i, min = 0; mutex_lock(&device_list_mutex); - for (index = 0; index < device->cqs_used; index++) - if (device->cq_active_qps[index] < - device->cq_active_qps[min_index]) - min_index = index; - device->cq_active_qps[min_index]++; - pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index); + for (i = 0; i < device->comps_used; i++) + if (device->comps[i].active_qps < + device->comps[min].active_qps) + min = i; + comp = &device->comps[min]; + comp->active_qps++; + pr_info("conn %p, using comp %p min_index: %d\n", + isert_conn, comp, min); mutex_unlock(&device_list_mutex); memset(&attr, 0, sizeof(struct ib_qp_init_attr)); attr.event_handler = isert_qp_event_callback; attr.qp_context = isert_conn; - attr.send_cq = device->dev_tx_cq[min_index]; - attr.recv_cq = device->dev_rx_cq[min_index]; + attr.send_cq = comp->tx_cq; + attr.recv_cq = comp->rx_cq; attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* @@ -157,7 +160,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) return 0; err: mutex_lock(&device_list_mutex); - device->cq_active_qps[min_index]--; + comp->active_qps--; mutex_unlock(&device_list_mutex); return ret; @@ -243,9 +246,8 @@ static int isert_create_device_ib_res(struct isert_device *device) { struct ib_device *ib_dev = device->ib_device; - struct isert_cq_desc *cq_desc; struct ib_device_attr *dev_attr; - int ret = 0, i, j; + int ret = 0, i; int max_rx_cqe, max_tx_cqe; dev_attr = &device->dev_attr; @@ -272,55 +274,54 @@ isert_create_device_ib_res(struct isert_device *device) device->pi_capable = dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER ? 
true : false; - device->cqs_used = min_t(int, num_online_cpus(), - device->ib_device->num_comp_vectors); - device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); - pr_debug("Using %d CQs, device %s supports %d vectors support " - "Fast registration %d pi_capable %d\n", - device->cqs_used, device->ib_device->name, - device->ib_device->num_comp_vectors, device->use_fastreg, - device->pi_capable); - device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * - device->cqs_used, GFP_KERNEL); - if (!device->cq_desc) { - pr_err("Unable to allocate device->cq_desc\n"); + device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), + device->ib_device->num_comp_vectors)); + pr_info("Using %d CQs, %s supports %d vectors support " + "Fast registration %d pi_capable %d\n", + device->comps_used, device->ib_device->name, + device->ib_device->num_comp_vectors, device->use_fastreg, + device->pi_capable); + + device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), + GFP_KERNEL); + if (!device->comps) { + pr_err("Unable to allocate completion contexts\n"); return -ENOMEM; } - cq_desc = device->cq_desc; - - for (i = 0; i < device->cqs_used; i++) { - cq_desc[i].device = device; - cq_desc[i].cq_index = i; - - INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work); - device->dev_rx_cq[i] = ib_create_cq(device->ib_device, - isert_cq_rx_callback, - isert_cq_event_callback, - (void *)&cq_desc[i], - max_rx_cqe, i); - if (IS_ERR(device->dev_rx_cq[i])) { - ret = PTR_ERR(device->dev_rx_cq[i]); - device->dev_rx_cq[i] = NULL; + + for (i = 0; i < device->comps_used; i++) { + struct isert_comp *comp = &device->comps[i]; + + comp->device = device; + INIT_WORK(&comp->rx_work, isert_cq_rx_work); + comp->rx_cq = ib_create_cq(device->ib_device, + isert_cq_rx_callback, + isert_cq_event_callback, + (void *)comp, + max_rx_cqe, i); + if (IS_ERR(comp->rx_cq)) { + ret = PTR_ERR(comp->rx_cq); + comp->rx_cq = NULL; goto out_cq; } - INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); - device->dev_tx_cq[i] = ib_create_cq(device->ib_device, - isert_cq_tx_callback, - isert_cq_event_callback, - (void *)&cq_desc[i], - max_tx_cqe, i); - if (IS_ERR(device->dev_tx_cq[i])) { - ret = PTR_ERR(device->dev_tx_cq[i]); - device->dev_tx_cq[i] = NULL; + INIT_WORK(&comp->tx_work, isert_cq_tx_work); + comp->tx_cq = ib_create_cq(device->ib_device, + isert_cq_tx_callback, + isert_cq_event_callback, + (void *)comp, + max_tx_cqe, i); + if (IS_ERR(comp->tx_cq)) { + ret = PTR_ERR(comp->tx_cq); + comp->tx_cq = NULL; goto out_cq; } - ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); + ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP); if (ret) goto out_cq; - ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); + ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP); if (ret) goto out_cq; } @@ -328,19 +329,19 @@ isert_create_device_ib_res(struct isert_device *device) return 0; out_cq: - for (j = 0; j < i; j++) { - cq_desc = &device->cq_desc[j]; + for (i = 0; i < device->comps_used; i++) { + struct isert_comp *comp = &device->comps[i]; - if (device->dev_rx_cq[j]) { - cancel_work_sync(&cq_desc->cq_rx_work); - ib_destroy_cq(device->dev_rx_cq[j]); + if (comp->rx_cq) { + cancel_work_sync(&comp->rx_work); + ib_destroy_cq(comp->rx_cq); } - if (device->dev_tx_cq[j]) { - cancel_work_sync(&cq_desc->cq_tx_work); - ib_destroy_cq(device->dev_tx_cq[j]); + if (comp->tx_cq) { + cancel_work_sync(&comp->tx_work); + ib_destroy_cq(comp->tx_cq); } } - kfree(device->cq_desc); + kfree(device->comps); return ret; } @@ -348,21 +349,21 @@ 
out_cq: static void isert_free_device_ib_res(struct isert_device *device) { - struct isert_cq_desc *cq_desc; int i; - for (i = 0; i < device->cqs_used; i++) { - cq_desc = &device->cq_desc[i]; + pr_info("device %p\n", device); - cancel_work_sync(&cq_desc->cq_rx_work); - cancel_work_sync(&cq_desc->cq_tx_work); - ib_destroy_cq(device->dev_rx_cq[i]); - ib_destroy_cq(device->dev_tx_cq[i]); - device->dev_rx_cq[i] = NULL; - device->dev_tx_cq[i] = NULL; - } + for (i = 0; i < device->comps_used; i++) { + struct isert_comp *comp = &device->comps[i]; - kfree(device->cq_desc); + cancel_work_sync(&comp->rx_work); + cancel_work_sync(&comp->tx_work); + ib_destroy_cq(comp->rx_cq); + ib_destroy_cq(comp->tx_cq); + comp->rx_cq = NULL; + comp->tx_cq = NULL; + } + kfree(device->comps); } static void @@ -740,7 +741,6 @@ isert_connect_release(struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_device *device = isert_conn->conn_device; - int cq_index; pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); @@ -751,11 +751,11 @@ isert_connect_release(struct isert_conn *isert_conn) rdma_destroy_id(isert_conn->conn_cm_id); if (isert_conn->conn_qp) { - cq_index = ((struct isert_cq_desc *) - isert_conn->conn_qp->recv_cq->cq_context)->cq_index; - pr_debug("isert_connect_release: cq_index: %d\n", cq_index); + struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; + + pr_debug("dec completion context %p active_qps\n", comp); mutex_lock(&device_list_mutex); - isert_conn->conn_device->cq_active_qps[cq_index]--; + comp->active_qps--; mutex_unlock(&device_list_mutex); ib_destroy_qp(isert_conn->conn_qp); @@ -1524,7 +1524,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) static void isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, - unsigned long xfer_len) + u32 xfer_len) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct iscsi_hdr *hdr; @@ -2051,18 +2051,16 @@ isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx) static void isert_cq_tx_work(struct work_struct *work) { - struct isert_cq_desc *cq_desc = container_of(work, - struct isert_cq_desc, cq_tx_work); - struct isert_device *device = cq_desc->device; - int cq_index = cq_desc->cq_index; - struct ib_cq *tx_cq = device->dev_tx_cq[cq_index]; + struct isert_comp *comp = container_of(work, struct isert_comp, + tx_work); + struct ib_cq *cq = comp->tx_cq; struct isert_conn *isert_conn; struct iser_tx_desc *tx_desc; struct ib_wc wc; - while (ib_poll_cq(tx_cq, 1, &wc) == 1) { - tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id; + while (ib_poll_cq(cq, 1, &wc) == 1) { isert_conn = wc.qp->qp_context; + tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id; if (wc.status == IB_WC_SUCCESS) { isert_send_completion(tx_desc, isert_conn); @@ -2076,36 +2074,34 @@ isert_cq_tx_work(struct work_struct *work) } } - ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); } static void isert_cq_tx_callback(struct ib_cq *cq, void *context) { - struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; + struct isert_comp *comp = context; - queue_work(isert_comp_wq, &cq_desc->cq_tx_work); + queue_work(isert_comp_wq, &comp->tx_work); } static void isert_cq_rx_work(struct work_struct *work) { - struct isert_cq_desc *cq_desc = container_of(work, - struct isert_cq_desc, cq_rx_work); - struct isert_device *device = cq_desc->device; - int cq_index = cq_desc->cq_index; - struct ib_cq 
*rx_cq = device->dev_rx_cq[cq_index]; + struct isert_comp *comp = container_of(work, struct isert_comp, + rx_work); + struct ib_cq *cq = comp->rx_cq; struct isert_conn *isert_conn; struct iser_rx_desc *rx_desc; struct ib_wc wc; - unsigned long xfer_len; + u32 xfer_len; - while (ib_poll_cq(rx_cq, 1, &wc) == 1) { - rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id; + while (ib_poll_cq(cq, 1, &wc) == 1) { isert_conn = wc.qp->qp_context; + rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id; if (wc.status == IB_WC_SUCCESS) { - xfer_len = (unsigned long)wc.byte_len; + xfer_len = wc.byte_len; isert_rx_completion(rx_desc, isert_conn, xfer_len); } else { pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); @@ -2118,15 +2114,15 @@ isert_cq_rx_work(struct work_struct *work) } } - ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); + ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); } static void isert_cq_rx_callback(struct ib_cq *cq, void *context) { - struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; + struct isert_comp *comp = context; - queue_work(isert_rx_wq, &cq_desc->cq_rx_work); + queue_work(isert_rx_wq, &comp->rx_work); } static int diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index b3735a07ca47..3f93cb0a0f03 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -159,23 +159,33 @@ struct isert_conn { #define ISERT_MAX_CQ 64 -struct isert_cq_desc { - struct isert_device *device; - int cq_index; - struct work_struct cq_rx_work; - struct work_struct cq_tx_work; +/** + * struct isert_comp - iSER completion context + * + * @device: pointer to device handle + * @rx_cq: RX completion queue + * @tx_cq: TX completion queue + * @active_qps: Number of active QPs attached + * to completion context + * @rx_work: RX work handle + * @tx_work: TX work handle + */ +struct isert_comp { + struct isert_device *device; + struct ib_cq *rx_cq; + struct ib_cq *tx_cq; + int active_qps; + struct work_struct rx_work; + struct work_struct tx_work; }; struct isert_device { int use_fastreg; bool pi_capable; - int cqs_used; int refcount; - int cq_active_qps[ISERT_MAX_CQ]; struct ib_device *ib_device; - struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; - struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; - struct isert_cq_desc *cq_desc; + struct isert_comp *comps; + int comps_used; struct list_head dev_node; struct ib_device_attr dev_attr; int (*reg_rdma_mem)(struct iscsi_conn *conn, -- cgit v1.2.3 From 6f0fae3d7797172f5c30ada80e815122fdf55609 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:41 +0200 Subject: iser-target: Use single CQ for TX and RX Using TX and RX CQs attached to the same vector might create a throttling effect coming from the serial processing of a work-queue. Use one CQ instead; it will do better in interrupt processing and provides simpler code. Also, we get rid of the redundant isert_rx_wq. Next we can remove the atomic post_send_buf_count from the IO path.
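With a single CQ per vector, the two work handlers collapse into one poll loop that dispatches on the work-completion opcode, so RX no longer needs its own workqueue. The resulting shape (condensed from the diff below):

    static void
    isert_cq_work(struct work_struct *work)
    {
            struct isert_comp *comp = container_of(work, struct isert_comp,
                                                   work);
            struct ib_wc wc;

            while (ib_poll_cq(comp->cq, 1, &wc) == 1)
                    isert_handle_wc(&wc);   /* IB_WC_RECV -> RX path, else TX */

            ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
    }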
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 193 +++++++++++++------------------- drivers/infiniband/ulp/isert/ib_isert.h | 14 +-- 2 files changed, 83 insertions(+), 124 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d4a208381af8..0dc6287a50f0 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -35,10 +35,10 @@ #define ISERT_MAX_CONN 8 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN) static DEFINE_MUTEX(device_list_mutex); static LIST_HEAD(device_list); -static struct workqueue_struct *isert_rx_wq; static struct workqueue_struct *isert_comp_wq; static struct workqueue_struct *isert_release_wq; @@ -124,8 +124,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) memset(&attr, 0, sizeof(struct ib_qp_init_attr)); attr.event_handler = isert_qp_event_callback; attr.qp_context = isert_conn; - attr.send_cq = comp->tx_cq; - attr.recv_cq = comp->rx_cq; + attr.send_cq = comp->cq; + attr.recv_cq = comp->cq; attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* @@ -237,10 +237,8 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn) isert_conn->conn_rx_descs = NULL; } -static void isert_cq_tx_work(struct work_struct *); -static void isert_cq_tx_callback(struct ib_cq *, void *); -static void isert_cq_rx_work(struct work_struct *); -static void isert_cq_rx_callback(struct ib_cq *, void *); +static void isert_cq_work(struct work_struct *); +static void isert_cq_callback(struct ib_cq *, void *); static int isert_create_device_ib_res(struct isert_device *device) @@ -248,15 +246,14 @@ isert_create_device_ib_res(struct isert_device *device) struct ib_device *ib_dev = device->ib_device; struct ib_device_attr *dev_attr; int ret = 0, i; - int max_rx_cqe, max_tx_cqe; + int max_cqe; dev_attr = &device->dev_attr; ret = isert_query_device(ib_dev, dev_attr); if (ret) return ret; - max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); - max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); + max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe); /* asign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && @@ -293,35 +290,19 @@ isert_create_device_ib_res(struct isert_device *device) struct isert_comp *comp = &device->comps[i]; comp->device = device; - INIT_WORK(&comp->rx_work, isert_cq_rx_work); - comp->rx_cq = ib_create_cq(device->ib_device, - isert_cq_rx_callback, - isert_cq_event_callback, - (void *)comp, - max_rx_cqe, i); - if (IS_ERR(comp->rx_cq)) { - ret = PTR_ERR(comp->rx_cq); - comp->rx_cq = NULL; + INIT_WORK(&comp->work, isert_cq_work); + comp->cq = ib_create_cq(device->ib_device, + isert_cq_callback, + isert_cq_event_callback, + (void *)comp, + max_cqe, i); + if (IS_ERR(comp->cq)) { + ret = PTR_ERR(comp->cq); + comp->cq = NULL; goto out_cq; } - INIT_WORK(&comp->tx_work, isert_cq_tx_work); - comp->tx_cq = ib_create_cq(device->ib_device, - isert_cq_tx_callback, - isert_cq_event_callback, - (void *)comp, - max_tx_cqe, i); - if (IS_ERR(comp->tx_cq)) { - ret = PTR_ERR(comp->tx_cq); - comp->tx_cq = NULL; - goto out_cq; - } - - ret = ib_req_notify_cq(comp->rx_cq, IB_CQ_NEXT_COMP); - if (ret) - goto out_cq; - - ret = ib_req_notify_cq(comp->tx_cq, IB_CQ_NEXT_COMP); + ret = 
ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); if (ret) goto out_cq; } @@ -332,13 +313,9 @@ out_cq: for (i = 0; i < device->comps_used; i++) { struct isert_comp *comp = &device->comps[i]; - if (comp->rx_cq) { - cancel_work_sync(&comp->rx_work); - ib_destroy_cq(comp->rx_cq); - } - if (comp->tx_cq) { - cancel_work_sync(&comp->tx_work); - ib_destroy_cq(comp->tx_cq); + if (comp->cq) { + cancel_work_sync(&comp->work); + ib_destroy_cq(comp->cq); } } kfree(device->comps); @@ -356,12 +333,9 @@ isert_free_device_ib_res(struct isert_device *device) for (i = 0; i < device->comps_used; i++) { struct isert_comp *comp = &device->comps[i]; - cancel_work_sync(&comp->rx_work); - cancel_work_sync(&comp->tx_work); - ib_destroy_cq(comp->rx_cq); - ib_destroy_cq(comp->tx_cq); - comp->rx_cq = NULL; - comp->tx_cq = NULL; + cancel_work_sync(&comp->work); + ib_destroy_cq(comp->cq); + comp->cq = NULL; } kfree(device->comps); } @@ -2013,14 +1987,39 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } } +/** + * is_isert_tx_desc() - Indicate if the completion wr_id + * is a TX descriptor or not. + * @isert_conn: iser connection + * @wr_id: completion WR identifier + * + * Since we cannot rely on wc opcode in FLUSH errors + * we must work around it by checking if the wr_id address + * falls in the iser connection rx_descs buffer. If so + * it is an RX descriptor, otherwize it is a TX. + */ +static inline bool +is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) +{ + void *start = isert_conn->conn_rx_descs; + int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs); + + if (wr_id >= start && wr_id < start + len) + return false; + + return true; +} + static void -isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx) +isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) { - if (tx) { + if (is_isert_tx_desc(isert_conn, (void *)wc->wr_id)) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd; + struct iser_tx_desc *desc; - isert_cmd = ((struct iser_tx_desc *)desc)->isert_cmd; + desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; + isert_cmd = desc->isert_cmd; if (!isert_cmd) isert_unmap_tx_desc(desc, ib_dev); else @@ -2049,80 +2048,52 @@ isert_cq_comp_err(void *desc, struct isert_conn *isert_conn, bool tx) } static void -isert_cq_tx_work(struct work_struct *work) +isert_handle_wc(struct ib_wc *wc) { - struct isert_comp *comp = container_of(work, struct isert_comp, - tx_work); - struct ib_cq *cq = comp->tx_cq; struct isert_conn *isert_conn; struct iser_tx_desc *tx_desc; - struct ib_wc wc; - - while (ib_poll_cq(cq, 1, &wc) == 1) { - isert_conn = wc.qp->qp_context; - tx_desc = (struct iser_tx_desc *)(uintptr_t)wc.wr_id; + struct iser_rx_desc *rx_desc; - if (wc.status == IB_WC_SUCCESS) { - isert_send_completion(tx_desc, isert_conn); + isert_conn = wc->qp->qp_context; + if (likely(wc->status == IB_WC_SUCCESS)) { + if (wc->opcode == IB_WC_RECV) { + rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id; + isert_rx_completion(rx_desc, isert_conn, wc->byte_len); } else { - pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); - pr_debug("TX wc.status: 0x%08x\n", wc.status); - pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); - - if (wc.wr_id != ISER_FASTREG_LI_WRID) - isert_cq_comp_err(tx_desc, isert_conn, true); + tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id; + isert_send_completion(tx_desc, isert_conn); } - } - - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); -} - -static void -isert_cq_tx_callback(struct ib_cq *cq, void *context) 
-{ - struct isert_comp *comp = context; + } else { + if (wc->status != IB_WC_WR_FLUSH_ERR) + pr_err("wr id %llx status %d vend_err %x\n", + wc->wr_id, wc->status, wc->vendor_err); + else + pr_debug("flush error: wr id %llx\n", wc->wr_id); - queue_work(isert_comp_wq, &comp->tx_work); + if (wc->wr_id != ISER_FASTREG_LI_WRID) + isert_cq_comp_err(isert_conn, wc); + } } static void -isert_cq_rx_work(struct work_struct *work) +isert_cq_work(struct work_struct *work) { struct isert_comp *comp = container_of(work, struct isert_comp, - rx_work); - struct ib_cq *cq = comp->rx_cq; - struct isert_conn *isert_conn; - struct iser_rx_desc *rx_desc; + work); struct ib_wc wc; - u32 xfer_len; - - while (ib_poll_cq(cq, 1, &wc) == 1) { - isert_conn = wc.qp->qp_context; - rx_desc = (struct iser_rx_desc *)(uintptr_t)wc.wr_id; - if (wc.status == IB_WC_SUCCESS) { - xfer_len = wc.byte_len; - isert_rx_completion(rx_desc, isert_conn, xfer_len); - } else { - pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); - if (wc.status != IB_WC_WR_FLUSH_ERR) { - pr_debug("RX wc.status: 0x%08x\n", wc.status); - pr_debug("RX wc.vendor_err: 0x%08x\n", - wc.vendor_err); - } - isert_cq_comp_err(rx_desc, isert_conn, false); - } - } + while (ib_poll_cq(comp->cq, 1, &wc) == 1) + isert_handle_wc(&wc); - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); } static void -isert_cq_rx_callback(struct ib_cq *cq, void *context) +isert_cq_callback(struct ib_cq *cq, void *context) { struct isert_comp *comp = context; - queue_work(isert_rx_wq, &comp->rx_work); + queue_work(isert_comp_wq, &comp->work); } static int @@ -3363,17 +3334,11 @@ static int __init isert_init(void) { int ret; - isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0); - if (!isert_rx_wq) { - pr_err("Unable to allocate isert_rx_wq\n"); - return -ENOMEM; - } - isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); if (!isert_comp_wq) { pr_err("Unable to allocate isert_comp_wq\n"); ret = -ENOMEM; - goto destroy_rx_wq; + return -ENOMEM; } isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, @@ -3391,8 +3356,7 @@ static int __init isert_init(void) destroy_comp_wq: destroy_workqueue(isert_comp_wq); -destroy_rx_wq: - destroy_workqueue(isert_rx_wq); + return ret; } @@ -3401,7 +3365,6 @@ static void __exit isert_exit(void) flush_scheduled_work(); destroy_workqueue(isert_release_wq); destroy_workqueue(isert_comp_wq); - destroy_workqueue(isert_rx_wq); iscsit_unregister_transport(&iser_target_transport); pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 3f93cb0a0f03..5c1a31e8df70 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -163,20 +163,16 @@ struct isert_conn { * struct isert_comp - iSER completion context * * @device: pointer to device handle - * @rx_cq: RX completion queue - * @tx_cq: TX completion queue + * @cq: completion queue * @active_qps: Number of active QPs attached * to completion context - * @rx_work: RX work handle - * @tx_work: TX work handle + * @work: completion work handle */ struct isert_comp { - struct isert_device *device; - struct ib_cq *rx_cq; - struct ib_cq *tx_cq; + struct isert_device *device; + struct ib_cq *cq; int active_qps; - struct work_struct rx_work; - struct work_struct tx_work; + struct work_struct work; }; struct isert_device { -- cgit v1.2.3 From 377d8f5c3ef3e1b90db4e1f9faa01d0b0036ea52 Mon Sep 17 00:00:00 2001 From: Sagi 
Grimberg Date: Tue, 2 Dec 2014 16:57:42 +0200 Subject: iser-target: Remove redundant call to isert_conn_terminate We already call session reinstatement, and wait_conn will start connection termination. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 0dc6287a50f0..5373a5f3a8a5 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2038,10 +2038,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) target_wait_for_sess_cmds(conn->sess->se_sess); } - mutex_lock(&isert_conn->conn_mutex); - isert_conn_terminate(isert_conn); - mutex_unlock(&isert_conn->conn_mutex); - iscsit_cause_connection_reinstatement(isert_conn->conn, 0); complete(&isert_conn->conn_wait_comp_err); } -- cgit v1.2.3 From bdf20e72548cdcca1c16f29ad30c5725fa1d8d11 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:43 +0200 Subject: iser-target: Remove an atomic operation from the IO path In order to know that we have consumed all the connection completions, we maintain an atomic post_send_buf_count for each IO post send. But we can learn the same thing if we post a "beacon" (a zero-length RECV work request) after we move the QP into the error state, once the target no longer serves any new IO. When we consume it, we know we have finished all the connection completions and can go ahead and tear everything down. In the error completion handler we now just need to check for ISERT_BEACON_WRID to arrive, then wait for session commands to clean up and complete conn_wait_comp_err. We reserve extra CQ and QP entries to fit the zero-length post recv. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 100 ++++++++++++-------------------- drivers/infiniband/ulp/isert/ib_isert.h | 3 +- 2 files changed, 40 insertions(+), 63 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 5373a5f3a8a5..eb3d628ec4dd 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -35,7 +35,8 @@ #define ISERT_MAX_CONN 8 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) -#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN) +#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ + ISERT_MAX_CONN) static DEFINE_MUTEX(device_list_mutex); static LIST_HEAD(device_list); @@ -127,7 +128,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) attr.send_cq = comp->cq; attr.recv_cq = comp->cq; attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; - attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; + attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; /* * FIXME: Use devattr.max_sge - 2 for max_send_sge as * work-around for RDMA_READs with ConnectX-2.
@@ -593,7 +594,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) init_completion(&isert_conn->conn_login_comp); init_completion(&isert_conn->login_req_comp); init_completion(&isert_conn->conn_wait); - init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); spin_lock_init(&isert_conn->conn_lock); @@ -811,12 +811,6 @@ isert_conn_terminate(struct isert_conn *isert_conn) case ISER_CONN_TERMINATING: break; case ISER_CONN_UP: - /* - * No flush completions will occur as we didn't - * get to ISER_CONN_FULL_FEATURE yet, complete - * to allow teardown progress. - */ - complete(&isert_conn->conn_wait_comp_err); case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ pr_info("Terminating conn %p state %d\n", isert_conn, isert_conn->state); @@ -975,13 +969,9 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) send_wr.opcode = IB_WR_SEND; send_wr.send_flags = IB_SEND_SIGNALED; - atomic_inc(&isert_conn->post_send_buf_count); - ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); - if (ret) { + if (ret) pr_err("ib_post_send() failed, ret: %d\n", ret); - atomic_dec(&isert_conn->post_send_buf_count); - } return ret; } @@ -1877,7 +1867,6 @@ isert_do_control_comp(struct work_struct *work) case ISTATE_SEND_TASKMGTRSP: pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); - atomic_dec(&isert_conn->post_send_buf_count); iscsit_tmr_post_handler(cmd, cmd->conn); cmd->i_state = ISTATE_SENT_STATUS; @@ -1885,7 +1874,6 @@ isert_do_control_comp(struct work_struct *work) break; case ISTATE_SEND_REJECT: pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); - atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); @@ -1893,11 +1881,9 @@ isert_do_control_comp(struct work_struct *work) case ISTATE_SEND_LOGOUTRSP: pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); - atomic_dec(&isert_conn->post_send_buf_count); iscsit_logout_post_handler(cmd, cmd->conn); break; case ISTATE_SEND_TEXTRSP: - atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; @@ -1915,7 +1901,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; - struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || cmd->i_state == ISTATE_SEND_LOGOUTRSP || @@ -1928,18 +1913,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc, return; } - /** - * If send_wr_num is 0 this means that we got - * RDMA completion and we cleared it and we should - * simply decrement the response post. else the - * response is incorporated in send_wr_num, just - * sub it. 
- **/ - if (wr->send_wr_num) - atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); - else - atomic_dec(&isert_conn->post_send_buf_count); - cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(tx_desc, isert_cmd, ib_dev, false); } @@ -1953,7 +1926,6 @@ isert_send_completion(struct iser_tx_desc *tx_desc, struct isert_rdma_wr *wr; if (!isert_cmd) { - atomic_dec(&isert_conn->post_send_buf_count); isert_unmap_tx_desc(tx_desc, ib_dev); return; } @@ -1971,13 +1943,11 @@ isert_send_completion(struct iser_tx_desc *tx_desc, break; case ISER_IB_RDMA_WRITE: pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); - atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_write(tx_desc, isert_cmd); break; case ISER_IB_RDMA_READ: pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); - atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_read(tx_desc, isert_cmd); break; default: @@ -2013,6 +1983,18 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id) static void isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) { + if (wc->wr_id == ISER_BEACON_WRID) { + struct iscsi_conn *conn = isert_conn->conn; + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); + } + + pr_info("conn %p completing conn_wait_comp_err\n", + isert_conn); + complete(&isert_conn->conn_wait_comp_err); + } else if (is_isert_tx_desc(isert_conn, (void *)wc->wr_id)) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd; @@ -2024,22 +2006,10 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) isert_unmap_tx_desc(desc, ib_dev); else isert_completion_put(desc, isert_cmd, ib_dev, true); - atomic_dec(&isert_conn->post_send_buf_count); } else { isert_conn->post_recv_buf_count--; - } - - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - struct iscsi_conn *conn = isert_conn->conn; - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } - - iscsit_cause_connection_reinstatement(isert_conn->conn, 0); - complete(&isert_conn->conn_wait_comp_err); + if (!isert_conn->post_recv_buf_count) + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); } } @@ -2098,13 +2068,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) struct ib_send_wr *wr_failed; int ret; - atomic_inc(&isert_conn->post_send_buf_count); - ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, &wr_failed); if (ret) { pr_err("ib_post_send failed with %d\n", ret); - atomic_dec(&isert_conn->post_send_buf_count); return ret; } return ret; @@ -2889,13 +2856,9 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) wr->send_wr_num += 1; } - atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); - rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); - if (rc) { + if (rc) pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); - atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); - } if (!isert_prot_cmd(isert_conn, se_cmd)) pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " @@ -2927,13 +2890,10 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) return rc; } - atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); - rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); - if (rc) { 
+ if (rc) pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); - atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); - } + pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", isert_cmd); @@ -3274,6 +3234,22 @@ static void isert_release_work(struct work_struct *work) isert_put_conn(isert_conn); } +static void +isert_wait4flush(struct isert_conn *isert_conn) +{ + struct ib_recv_wr *bad_wr; + + init_completion(&isert_conn->conn_wait_comp_err); + isert_conn->beacon.wr_id = ISER_BEACON_WRID; + /* post an indication that all flush errors were consumed */ + if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { + pr_err("conn %p failed to post beacon", isert_conn); + return; + } + + wait_for_completion(&isert_conn->conn_wait_comp_err); +} + static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; @@ -3292,7 +3268,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); - wait_for_completion(&isert_conn->conn_wait_comp_err); + isert_wait4flush(isert_conn); INIT_WORK(&isert_conn->release_work, isert_release_work); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 5c1a31e8df70..fc1d3232f896 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -7,6 +7,7 @@ #define ISERT_RDMA_LISTEN_BACKLOG 10 #define ISCSI_ISER_SG_TABLESIZE 256 #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL +#define ISER_BEACON_WRID 0xfffffffffffffffeULL enum isert_desc_type { ISCSI_TX_CONTROL, @@ -122,7 +123,6 @@ struct isert_device; struct isert_conn { enum iser_conn_state state; int post_recv_buf_count; - atomic_t post_send_buf_count; u32 responder_resources; u32 initiator_depth; bool pi_support; @@ -155,6 +155,7 @@ struct isert_conn { /* lock to protect fastreg pool */ spinlock_t conn_lock; struct work_struct release_work; + struct ib_recv_wr beacon; }; #define ISERT_MAX_CQ 64 -- cgit v1.2.3 From 37d9fe80a3afc87a3d9f3d83aa0e6137f9fd7cde Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:44 +0200 Subject: iser-target: Introduce isert_poll_budget In case the CQ is packed with completions, we can't just hog the CPU forever. Poll up to a budget (currently hard-coded to 64k completions); if the budget is exhausted, bail out and give other threads a chance to run.
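The resulting poll loop, shown in full in the diff below, is a standard budgeted-drain pattern. A minimal sketch using the names this patch introduces (the comments are editorial); note that IB_CQ_NEXT_COMP only re-arms the callback for completions that arrive after the call, so anything still queued when the budget trips is reaped the next time a new completion fires the handler:

static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	int completed = 0;
	struct ib_wc wc;

	/* Drain the CQ one entry at a time, but yield the CPU once the
	 * budget is consumed so other work items get a chance to run. */
	while (ib_poll_cq(comp->cq, 1, &wc) == 1) {
		isert_handle_wc(&wc);

		if (++completed >= isert_poll_budget)
			break;
	}

	/* Re-arm the completion handler before returning. */
	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}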
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index eb3d628ec4dd..22841487f600 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2044,13 +2044,19 @@ isert_handle_wc(struct ib_wc *wc) static void isert_cq_work(struct work_struct *work) { + enum { isert_poll_budget = 65536 }; struct isert_comp *comp = container_of(work, struct isert_comp, work); + int completed = 0; struct ib_wc wc; - while (ib_poll_cq(comp->cq, 1, &wc) == 1) + while (ib_poll_cq(comp->cq, 1, &wc) == 1) { isert_handle_wc(&wc); + if (++completed >= isert_poll_budget) + break; + } + ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); } -- cgit v1.2.3 From 36ea63b523f3f3b57f708f14af848cac100677d5 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:45 +0200 Subject: iser-target: Reduce CQ lock contention by batch polling In order to reduce the contention on CQ locking (present in some LLDDs) we poll in batches of 16 work completion items. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 12 +++++++----- drivers/infiniband/ulp/isert/ib_isert.h | 2 ++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 22841487f600..276054b65b98 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2047,13 +2047,15 @@ isert_cq_work(struct work_struct *work) enum { isert_poll_budget = 65536 }; struct isert_comp *comp = container_of(work, struct isert_comp, work); - int completed = 0; - struct ib_wc wc; + struct ib_wc *const wcs = comp->wcs; + int i, n, completed = 0; - while (ib_poll_cq(comp->cq, 1, &wc) == 1) { - isert_handle_wc(&wc); + while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) { + for (i = 0; i < n; i++) + isert_handle_wc(&wcs[i]); - if (++completed >= isert_poll_budget) + completed += n; + if (completed >= isert_poll_budget) break; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index fc1d3232f896..2a0721f1f5df 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -165,6 +165,7 @@ struct isert_conn { * * @device: pointer to device handle * @cq: completion queue + * @wcs: work completion array * @active_qps: Number of active QPs attached * to completion context * @work: completion work handle @@ -172,6 +173,7 @@ struct isert_comp { struct isert_device *device; struct ib_cq *cq; + struct ib_wc wcs[16]; int active_qps; struct work_struct work; }; -- cgit v1.2.3 From c7e160ee092059d3d1ddc24397c9d7a4dbe8186a Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 2 Dec 2014 16:57:46 +0200 Subject: iser-target: Don't wait for session commands from completion context This might result in a deadlock: the completion context waits for session commands to be released, while that release might itself need a final completion delivered by the waiting context.
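The fix, diffed below, is to move the wait out of the completion path into isert_wait_conn(), which runs in process context. An annotated sketch of the helper the patch adds (the comment restates the hazard from the commit message above):

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	/*
	 * Must run from isert_wait_conn() in process context, never
	 * from the CQ work item: a session command's final completion
	 * may itself arrive as a work completion, so blocking here
	 * from isert_cq_work() would wait on an event that only the
	 * blocked handler can deliver.
	 */
	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}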
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 276054b65b98..fc4641e5fd1e 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1984,13 +1984,6 @@ static void isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) { if (wc->wr_id == ISER_BEACON_WRID) { - struct iscsi_conn *conn = isert_conn->conn; - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } - pr_info("conn %p completing conn_wait_comp_err\n", isert_conn); complete(&isert_conn->conn_wait_comp_err); @@ -3242,6 +3235,15 @@ static void isert_release_work(struct work_struct *work) isert_put_conn(isert_conn); } +static void +isert_wait4cmds(struct iscsi_conn *conn) +{ + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); + } +} + static void isert_wait4flush(struct isert_conn *isert_conn) { @@ -3276,6 +3278,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); + isert_wait4cmds(conn); isert_wait4flush(isert_conn); INIT_WORK(&isert_conn->release_work, isert_release_work); -- cgit v1.2.3 From 991bb7640d7e0971c360b6166cbca84a7f502312 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 7 Dec 2014 13:12:01 +0200 Subject: iser-target: Fix logout sequence We don't want to wait for conn_logout_comp from isert_comp_wq context as this blocks further completions from being processed. Instead we wait for it conditionally (if logout response was actually posted) in wait_conn. This wait should normally happen immediately as it occurs after we consumed all the completions (including flush errors) and conn_logout_comp should have been completed. 
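Concretely, the conditional wait becomes a small helper called from isert_wait_conn() after the flush wait; a simplified sketch of the code in the diff below (the pr_info of the full version is omitted here):

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	/* Only block if isert_put_logout_rsp() actually posted a logout
	 * response. By this point all completions, including flush
	 * errors, have been consumed, so conn_logout_comp should
	 * normally already be done and the wait returns immediately. */
	if (isert_conn->logout_posted)
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
}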
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 24 ++++++++++++++++-------- drivers/infiniband/ulp/isert/ib_isert.h | 1 + 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index fc4641e5fd1e..108548437c9f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1426,10 +1426,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, break; ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); - if (ret > 0) - wait_for_completion_timeout(&conn->conn_logout_comp, - SECONDS_FOR_LOGOUT_COMP * - HZ); break; case ISCSI_OP_TEXT: cmd = isert_allocate_cmd(conn); @@ -2922,15 +2918,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) static int isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) { + struct isert_conn *isert_conn = conn->context; int ret; switch (state) { case ISTATE_SEND_LOGOUTRSP: ret = isert_put_logout_rsp(cmd, conn); - if (!ret) { - pr_debug("Returning iSER Logout -EAGAIN\n"); - ret = -EAGAIN; - } + if (!ret) + isert_conn->logout_posted = true; break; case ISTATE_SEND_NOPIN: ret = isert_put_nopin(cmd, conn, true); @@ -3235,6 +3230,18 @@ static void isert_release_work(struct work_struct *work) isert_put_conn(isert_conn); } +static void +isert_wait4logout(struct isert_conn *isert_conn) +{ + struct iscsi_conn *conn = isert_conn->conn; + + if (isert_conn->logout_posted) { + pr_info("conn %p wait for conn_logout_comp\n", isert_conn); + wait_for_completion_timeout(&conn->conn_logout_comp, + SECONDS_FOR_LOGOUT_COMP * HZ); + } +} + static void isert_wait4cmds(struct iscsi_conn *conn) { @@ -3280,6 +3287,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_wait4cmds(conn); isert_wait4flush(isert_conn); + isert_wait4logout(isert_conn); INIT_WORK(&isert_conn->release_work, isert_release_work); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 2a0721f1f5df..e89f384efc22 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -156,6 +156,7 @@ struct isert_conn { spinlock_t conn_lock; struct work_struct release_work; struct ib_recv_wr beacon; + bool logout_posted; }; #define ISERT_MAX_CQ 64 -- cgit v1.2.3 From 24f412dd3838a1fecad7c702ee76039b818b310a Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 7 Dec 2014 13:12:02 +0200 Subject: iser-target: Use debug_level parameter to control logging level Personal preference: it is easier to control the log level with a single modparam, which can be changed dynamically. Allows better separation of the control and IO planes. Replaced throughout ib_isert.c: s/pr_debug/isert_dbg/g s/pr_info/isert_info/g s/pr_warn/isert_warn/g s/pr_err/isert_err/g Plus a nit checkpatch warning fix.
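The ib_isert.h side of this change (the 27 added lines) is not visible in this excerpt; the following is a plausible minimal sketch of level-gated wrappers around printk(), not the verbatim header. The exact level thresholds and message prefix here are assumptions:

extern int isert_debug_level;

/* Common helper; lvl is a KERN_* string literal, concatenated at
 * compile time with the prefix and caller-supplied format. */
#define isert_print(lvl, fmt, arg...) \
	printk(lvl "isert: %s: " fmt, __func__, ##arg)

#define isert_dbg(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 2))		\
			isert_print(KERN_DEBUG, fmt, ##arg);	\
	} while (0)

#define isert_info(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 1))		\
			isert_print(KERN_INFO, fmt, ##arg);	\
	} while (0)

#define isert_warn(fmt, arg...)					\
	do {							\
		if (unlikely(isert_debug_level > 0))		\
			isert_print(KERN_WARNING, fmt, ##arg);	\
	} while (0)

#define isert_err(fmt, arg...) \
	isert_print(KERN_ERR, fmt, ##arg)

Since the modparam is registered with mode 0644 (see the diff below), the level can also be raised at runtime, e.g. by writing to /sys/module/ib_isert/parameters/debug_level.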
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 360 ++++++++++++++++---------------- drivers/infiniband/ulp/isert/ib_isert.h | 27 +++ 2 files changed, 209 insertions(+), 178 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 108548437c9f..87b5e8983c19 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -38,6 +38,10 @@ #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ ISERT_MAX_CONN) +int isert_debug_level = 0; +module_param_named(debug_level, isert_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); + static DEFINE_MUTEX(device_list_mutex); static LIST_HEAD(device_list); static struct workqueue_struct *isert_comp_wq; @@ -74,13 +78,13 @@ isert_qp_event_callback(struct ib_event *e, void *context) { struct isert_conn *isert_conn = (struct isert_conn *)context; - pr_err("isert_qp_event_callback event: %d\n", e->event); + isert_err("isert_qp_event_callback event: %d\n", e->event); switch (e->event) { case IB_EVENT_COMM_EST: rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); break; case IB_EVENT_QP_LAST_WQE_REACHED: - pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); + isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); break; default: break; @@ -94,11 +98,11 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) ret = ib_query_device(ib_dev, devattr); if (ret) { - pr_err("ib_query_device() failed: %d\n", ret); + isert_err("ib_query_device() failed: %d\n", ret); return ret; } - pr_debug("devattr->max_sge: %d\n", devattr->max_sge); - pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); + isert_dbg("devattr->max_sge: %d\n", devattr->max_sge); + isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); return 0; } @@ -118,7 +122,7 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) min = i; comp = &device->comps[min]; comp->active_qps++; - pr_info("conn %p, using comp %p min_index: %d\n", + isert_info("conn %p, using comp %p min_index: %d\n", isert_conn, comp, min); mutex_unlock(&device_list_mutex); @@ -145,18 +149,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) if (device->pi_capable) attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; - pr_debug("isert_conn_setup_qp cma_id->device: %p\n", + isert_dbg("isert_conn_setup_qp cma_id->device: %p\n", cma_id->device); - pr_debug("isert_conn_setup_qp conn_pd->device: %p\n", + isert_dbg("isert_conn_setup_qp conn_pd->device: %p\n", isert_conn->conn_pd->device); ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); if (ret) { - pr_err("rdma_create_qp failed for cma_id %d\n", ret); + isert_err("rdma_create_qp failed for cma_id %d\n", ret); goto err; } isert_conn->conn_qp = cma_id->qp; - pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); + isert_dbg("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); return 0; err: @@ -170,7 +174,7 @@ err: static void isert_cq_event_callback(struct ib_event *e, void *context) { - pr_debug("isert_cq_event_callback event: %d\n", e->event); + isert_dbg("isert_cq_event_callback event: %d\n", e->event); } static int @@ -274,7 +278,7 @@ isert_create_device_ib_res(struct isert_device *device) device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors)); - pr_info("Using %d CQs, %s supports %d 
vectors support " + isert_info("Using %d CQs, %s supports %d vectors support " "Fast registration %d pi_capable %d\n", device->comps_used, device->ib_device->name, device->ib_device->num_comp_vectors, device->use_fastreg, @@ -283,7 +287,7 @@ isert_create_device_ib_res(struct isert_device *device) device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp), GFP_KERNEL); if (!device->comps) { - pr_err("Unable to allocate completion contexts\n"); + isert_err("Unable to allocate completion contexts\n"); return -ENOMEM; } @@ -329,7 +333,7 @@ isert_free_device_ib_res(struct isert_device *device) { int i; - pr_info("device %p\n", device); + isert_info("device %p\n", device); for (i = 0; i < device->comps_used; i++) { struct isert_comp *comp = &device->comps[i]; @@ -401,7 +405,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) if (list_empty(&isert_conn->conn_fr_pool)) return; - pr_debug("Freeing conn %p fastreg pool", isert_conn); + isert_dbg("Freeing conn %p fastreg pool", isert_conn); list_for_each_entry_safe(fr_desc, tmp, &isert_conn->conn_fr_pool, list) { @@ -419,7 +423,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) } if (i < isert_conn->conn_fr_pool_size) - pr_warn("Pool still has %d regions registered\n", + isert_warn("Pool still has %d regions registered\n", isert_conn->conn_fr_pool_size - i); } @@ -434,14 +438,14 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); if (!pi_ctx) { - pr_err("Failed to allocate pi context\n"); + isert_err("Failed to allocate pi context\n"); return -ENOMEM; } pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(pi_ctx->prot_frpl)) { - pr_err("Failed to allocate prot frpl err=%ld\n", + isert_err("Failed to allocate prot frpl err=%ld\n", PTR_ERR(pi_ctx->prot_frpl)); ret = PTR_ERR(pi_ctx->prot_frpl); goto err_pi_ctx; @@ -449,7 +453,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(pi_ctx->prot_mr)) { - pr_err("Failed to allocate prot frmr err=%ld\n", + isert_err("Failed to allocate prot frmr err=%ld\n", PTR_ERR(pi_ctx->prot_mr)); ret = PTR_ERR(pi_ctx->prot_mr); goto err_prot_frpl; @@ -461,7 +465,7 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, mr_init_attr.flags |= IB_MR_SIGNATURE_EN; pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); if (IS_ERR(pi_ctx->sig_mr)) { - pr_err("Failed to allocate signature enabled mr err=%ld\n", + isert_err("Failed to allocate signature enabled mr err=%ld\n", PTR_ERR(pi_ctx->sig_mr)); ret = PTR_ERR(pi_ctx->sig_mr); goto err_prot_mr; @@ -492,21 +496,21 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_frpl)) { - pr_err("Failed to allocate data frpl err=%ld\n", + isert_err("Failed to allocate data frpl err=%ld\n", PTR_ERR(fr_desc->data_frpl)); return PTR_ERR(fr_desc->data_frpl); } fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_mr)) { - pr_err("Failed to allocate data frmr err=%ld\n", + isert_err("Failed to allocate data frmr err=%ld\n", PTR_ERR(fr_desc->data_mr)); ret = PTR_ERR(fr_desc->data_mr); goto err_data_frpl; } fr_desc->ind |= ISERT_DATA_KEY_VALID; - pr_debug("Created fr_desc %p\n", fr_desc); + isert_dbg("Created fr_desc %p\n", fr_desc); return 0; @@ -535,7 +539,7 @@ isert_conn_create_fastreg_pool(struct isert_conn 
*isert_conn) for (i = 0; i < tag_num; i++) { fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); if (!fr_desc) { - pr_err("Failed to allocate fast_reg descriptor\n"); + isert_err("Failed to allocate fast_reg descriptor\n"); ret = -ENOMEM; goto err; } @@ -543,7 +547,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) ret = isert_create_fr_desc(device->ib_device, isert_conn->conn_pd, fr_desc); if (ret) { - pr_err("Failed to create fastreg descriptor err=%d\n", + isert_err("Failed to create fastreg descriptor err=%d\n", ret); kfree(fr_desc); goto err; @@ -553,7 +557,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) isert_conn->conn_fr_pool_size++; } - pr_debug("Creating conn %p fastreg pool size=%d", + isert_dbg("Creating conn %p fastreg pool size=%d", isert_conn, isert_conn->conn_fr_pool_size); return 0; @@ -576,17 +580,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) spin_lock_bh(&np->np_thread_lock); if (!np->enabled) { spin_unlock_bh(&np->np_thread_lock); - pr_debug("iscsi_np is not enabled, reject connect request\n"); + isert_dbg("iscsi_np is not enabled, reject connect request\n"); return rdma_reject(cma_id, NULL, 0); } spin_unlock_bh(&np->np_thread_lock); - pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", + isert_dbg("Entering isert_connect_request cma_id: %p, context: %p\n", cma_id, cma_id->context); isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); if (!isert_conn) { - pr_err("Unable to allocate isert_conn\n"); + isert_err("Unable to allocate isert_conn\n"); return -ENOMEM; } isert_conn->state = ISER_CONN_INIT; @@ -604,7 +608,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + ISER_RX_LOGIN_SIZE, GFP_KERNEL); if (!isert_conn->login_buf) { - pr_err("Unable to allocate isert_conn->login_buf\n"); + isert_err("Unable to allocate isert_conn->login_buf\n"); ret = -ENOMEM; goto out; } @@ -612,7 +616,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->login_req_buf = isert_conn->login_buf; isert_conn->login_rsp_buf = isert_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN; - pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", + isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", isert_conn->login_buf, isert_conn->login_req_buf, isert_conn->login_rsp_buf); @@ -622,7 +626,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); if (ret) { - pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n", + isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n", ret); isert_conn->login_req_dma = 0; goto out_login_buf; @@ -634,7 +638,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); if (ret) { - pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", + isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", ret); isert_conn->login_rsp_dma = 0; goto out_req_dma_map; @@ -650,13 +654,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->initiator_depth = min_t(u8, event->param.conn.initiator_depth, device->dev_attr.max_qp_init_rd_atom); - pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth); + isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth); isert_conn->conn_device 
= device; isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); if (IS_ERR(isert_conn->conn_pd)) { ret = PTR_ERR(isert_conn->conn_pd); - pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", + isert_err("ib_alloc_pd failed for conn %p: ret=%d\n", isert_conn, ret); goto out_pd; } @@ -665,7 +669,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) IB_ACCESS_LOCAL_WRITE); if (IS_ERR(isert_conn->conn_mr)) { ret = PTR_ERR(isert_conn->conn_mr); - pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", + isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n", isert_conn, ret); goto out_mr; } @@ -686,7 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); mutex_unlock(&isert_np->np_accept_mutex); - pr_debug("isert_connect_request() up np_sem np: %p\n", np); + isert_info("np %p: Allow accept_np to continue\n", np); up(&isert_np->np_sem); return 0; @@ -716,7 +720,7 @@ isert_connect_release(struct isert_conn *isert_conn) struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_device *device = isert_conn->conn_device; - pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); if (device && device->use_fastreg) isert_conn_free_fastreg_pool(isert_conn); @@ -727,7 +731,7 @@ isert_connect_release(struct isert_conn *isert_conn) if (isert_conn->conn_qp) { struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context; - pr_debug("dec completion context %p active_qps\n", comp); + isert_dbg("dec completion context %p active_qps\n", comp); mutex_lock(&device_list_mutex); comp->active_qps--; mutex_unlock(&device_list_mutex); @@ -751,7 +755,7 @@ isert_connect_release(struct isert_conn *isert_conn) if (device) isert_device_try_release(device); - pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n"); + isert_dbg("Leaving isert_connect_release >>>>>>>>>>>>\n"); } static void @@ -759,10 +763,10 @@ isert_connected_handler(struct rdma_cm_id *cma_id) { struct isert_conn *isert_conn = cma_id->qp->qp_context; - pr_info("conn %p\n", isert_conn); + isert_info("conn %p\n", isert_conn); if (!kref_get_unless_zero(&isert_conn->conn_kref)) { - pr_warn("conn %p connect_release is running\n", isert_conn); + isert_warn("conn %p connect_release is running\n", isert_conn); return; } @@ -778,7 +782,7 @@ isert_release_conn_kref(struct kref *kref) struct isert_conn *isert_conn = container_of(kref, struct isert_conn, conn_kref); - pr_debug("Calling isert_connect_release for final kref %s/%d\n", + isert_dbg("Calling isert_connect_release for final kref %s/%d\n", current->comm, current->pid); isert_connect_release(isert_conn); @@ -812,16 +816,16 @@ isert_conn_terminate(struct isert_conn *isert_conn) break; case ISER_CONN_UP: case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ - pr_info("Terminating conn %p state %d\n", + isert_info("Terminating conn %p state %d\n", isert_conn, isert_conn->state); isert_conn->state = ISER_CONN_TERMINATING; err = rdma_disconnect(isert_conn->conn_cm_id); if (err) - pr_warn("Failed rdma_disconnect isert_conn %p\n", + isert_warn("Failed rdma_disconnect isert_conn %p\n", isert_conn); break; default: - pr_warn("conn %p teminating in state %d\n", + isert_warn("conn %p teminating in state %d\n", isert_conn, isert_conn->state); } } @@ -830,7 +834,7 @@ static int isert_np_cma_handler(struct isert_np *isert_np, enum rdma_cm_event_type event) { - 
pr_debug("isert np %p, handling event %d\n", isert_np, event); + isert_dbg("isert np %p, handling event %d\n", isert_np, event); switch (event) { case RDMA_CM_EVENT_DEVICE_REMOVAL: @@ -839,13 +843,13 @@ isert_np_cma_handler(struct isert_np *isert_np, case RDMA_CM_EVENT_ADDR_CHANGE: isert_np->np_cm_id = isert_setup_id(isert_np); if (IS_ERR(isert_np->np_cm_id)) { - pr_err("isert np %p setup id failed: %ld\n", - isert_np, PTR_ERR(isert_np->np_cm_id)); + isert_err("isert np %p setup id failed: %ld\n", + isert_np, PTR_ERR(isert_np->np_cm_id)); isert_np->np_cm_id = NULL; } break; default: - pr_err("isert np %p Unexpected event %d\n", + isert_err("isert np %p Unexpected event %d\n", isert_np, event); } @@ -868,7 +872,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); - pr_info("conn %p completing conn_wait\n", isert_conn); + isert_info("conn %p completing conn_wait\n", isert_conn); complete(&isert_conn->conn_wait); return 0; @@ -887,14 +891,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", + isert_dbg("isert_cma_handler: event %d status %d conn %p id %p\n", event->event, event->status, cma_id->context, cma_id); switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = isert_connect_request(cma_id, event); if (ret) - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + isert_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", event->event, ret); break; case RDMA_CM_EVENT_ESTABLISHED: @@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_connect_error(cma_id); break; default: - pr_err("Unhandled RDMA CMA event: %d\n", event->event); + isert_err("Unhandled RDMA CMA event: %d\n", event->event); break; } @@ -943,10 +947,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, &rx_wr_failed); if (ret) { - pr_err("ib_post_recv() failed with ret: %d\n", ret); + isert_err("ib_post_recv() failed with ret: %d\n", ret); isert_conn->post_recv_buf_count -= count; } else { - pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); + isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count); isert_conn->conn_rx_desc_head = rx_head; } return ret; @@ -971,7 +975,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); if (ret) - pr_err("ib_post_send() failed, ret: %d\n", ret); + isert_err("ib_post_send() failed, ret: %d\n", ret); return ret; } @@ -994,7 +998,7 @@ isert_create_send_desc(struct isert_conn *isert_conn, if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; - pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); + isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc); } } @@ -1008,7 +1012,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, ISER_HEADERS_LEN, DMA_TO_DEVICE); if (ib_dma_mapping_error(ib_dev, dma_addr)) { - pr_err("ib_dma_mapping_error() failed\n"); + isert_err("ib_dma_mapping_error() failed\n"); return -ENOMEM; } @@ -1017,7 +1021,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; - pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx 
length: %u" + isert_dbg("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); @@ -1050,7 +1054,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) sge.length = ISER_RX_LOGIN_SIZE; sge.lkey = isert_conn->conn_mr->lkey; - pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", + isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n", sge.addr, sge.length, sge.lkey); memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); @@ -1061,11 +1065,11 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) isert_conn->post_recv_buf_count++; ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); if (ret) { - pr_err("ib_post_recv() failed: %d\n", ret); + isert_err("ib_post_recv() failed: %d\n", ret); isert_conn->post_recv_buf_count--; } - pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); return ret; } @@ -1107,7 +1111,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, isert_conn->conn_device->use_fastreg) { ret = isert_conn_create_fastreg_pool(isert_conn); if (ret) { - pr_err("Conn: %p failed to create" + isert_err("Conn: %p failed to create" " fastreg pool\n", isert_conn); return ret; } @@ -1149,7 +1153,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) struct iscsi_login *login = conn->conn_login; int size; - pr_info("conn %p\n", isert_conn); + isert_info("conn %p\n", isert_conn); WARN_ON_ONCE(!login); @@ -1177,7 +1181,7 @@ isert_rx_login_req(struct isert_conn *isert_conn) memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); - pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", + isert_dbg("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, MAX_KEY_VALUE_PAIRS); memcpy(login->req_buf, &rx_desc->data[0], size); @@ -1197,7 +1201,7 @@ static struct iscsi_cmd cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) { - pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); + isert_err("Unable to allocate iscsi_cmd + isert_cmd\n"); return NULL; } isert_cmd = iscsit_priv_cmd(cmd); @@ -1240,7 +1244,7 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, sg = &cmd->se_cmd.t_data_sg[0]; sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); - pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", + isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", sg, sg_nents, &rx_desc->data[0], imm_data_len); sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); @@ -1285,12 +1289,12 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, * FIXME: Unexpected unsolicited_data out */ if (!cmd->unsolicited_data) { - pr_err("Received unexpected solicited data payload\n"); + isert_err("Received unexpected solicited data payload\n"); dump_stack(); return -1; } - pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", + isert_dbg("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); sg_off = cmd->write_data_done / PAGE_SIZE; @@ -1301,11 +1305,11 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, * FIXME: Non page-aligned unsolicited_data out */ if (page_off) { - pr_err("Received unexpected non-page aligned data payload\n"); + isert_err("Received unexpected non-page 
aligned data payload\n"); dump_stack(); return -1; } - pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", + isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], @@ -1353,7 +1357,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd text_in = kzalloc(payload_length, GFP_KERNEL); if (!text_in) { - pr_err("Unable to allocate text_in of payload_length: %u\n", + isert_err("Unable to allocate text_in of payload_length: %u\n", payload_length); return -ENOMEM; } @@ -1379,7 +1383,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, if (sess->sess_ops->SessionType && (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { - pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery," + isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," " ignoring\n", opcode); return 0; } @@ -1437,7 +1441,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, rx_desc, (struct iscsi_text *)hdr); break; default: - pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); + isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); dump_stack(); break; } @@ -1458,23 +1462,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) if (iser_hdr->flags & ISER_RSV) { read_stag = be32_to_cpu(iser_hdr->read_stag); read_va = be64_to_cpu(iser_hdr->read_va); - pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", + isert_dbg("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", read_stag, (unsigned long long)read_va); } if (iser_hdr->flags & ISER_WSV) { write_stag = be32_to_cpu(iser_hdr->write_stag); write_va = be64_to_cpu(iser_hdr->write_va); - pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", + isert_dbg("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", write_stag, (unsigned long long)write_va); } - pr_debug("ISER ISCSI_CTRL PDU\n"); + isert_dbg("ISER ISCSI_CTRL PDU\n"); break; case ISER_HELLO: - pr_err("iSER Hello message\n"); + isert_err("iSER Hello message\n"); break; default: - pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); + isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); break; } @@ -1494,19 +1498,19 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, if ((char *)desc == isert_conn->login_req_buf) { rx_dma = isert_conn->login_req_dma; rx_buflen = ISER_RX_LOGIN_SIZE; - pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", + isert_dbg("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", rx_dma, rx_buflen); } else { rx_dma = desc->dma_addr; rx_buflen = ISER_RX_PAYLOAD_SIZE; - pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", + isert_dbg("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", rx_dma, rx_buflen); } ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); hdr = &desc->iscsi_header; - pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", + isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", hdr->opcode, hdr->itt, hdr->flags, (int)(xfer_len - ISER_HEADERS_LEN)); @@ -1529,7 +1533,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, DMA_FROM_DEVICE); isert_conn->post_recv_buf_count--; - pr_debug("iSERT: Decremented post_recv_buf_count: %d\n", + isert_dbg("iSERT: Decremented post_recv_buf_count: %d\n", 
isert_conn->post_recv_buf_count); if ((char *)desc == isert_conn->login_req_buf) @@ -1541,7 +1545,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, ISERT_MIN_POSTED_RX); err = isert_post_recv(isert_conn, count); if (err) { - pr_err("isert_post_recv() count: %d failed, %d\n", + isert_err("isert_post_recv() count: %d failed, %d\n", count, err); } } @@ -1570,11 +1574,11 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, data->dma_dir); if (unlikely(!data->dma_nents)) { - pr_err("Cmd: unable to dma map SGs %p\n", sg); + isert_err("Cmd: unable to dma map SGs %p\n", sg); return -EINVAL; } - pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", + isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", isert_cmd, data->dma_nents, data->sg, data->nents, data->len); return 0; @@ -1596,21 +1600,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; - pr_debug("isert_unmap_cmd: %p\n", isert_cmd); + isert_dbg("isert_unmap_cmd: %p\n", isert_cmd); if (wr->data.sg) { - pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); + isert_dbg("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); isert_unmap_data_buf(isert_conn, &wr->data); } if (wr->send_wr) { - pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); + isert_dbg("isert_unmap_cmd: %p free send_wr\n", isert_cmd); kfree(wr->send_wr); wr->send_wr = NULL; } if (wr->ib_sge) { - pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); + isert_dbg("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); kfree(wr->ib_sge); wr->ib_sge = NULL; } @@ -1622,10 +1626,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; LIST_HEAD(unmap_list); - pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); + isert_dbg("unreg_fastreg_cmd: %p\n", isert_cmd); if (wr->fr_desc) { - pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", + isert_dbg("unreg_fastreg_cmd: %p free fr_desc %p\n", isert_cmd, wr->fr_desc); if (wr->fr_desc->ind & ISERT_PROTECTED) { isert_unmap_data_buf(isert_conn, &wr->prot); @@ -1638,7 +1642,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) } if (wr->data.sg) { - pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); + isert_dbg("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); isert_unmap_data_buf(isert_conn, &wr->data); } @@ -1654,7 +1658,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) struct iscsi_conn *conn = isert_conn->conn; struct isert_device *device = isert_conn->conn_device; - pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); + isert_dbg("Entering isert_put_cmd: %p\n", isert_cmd); switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: @@ -1704,7 +1708,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) * associated cmd->se_cmd needs to be released. 
*/ if (cmd->se_cmd.se_tfo != NULL) { - pr_debug("Calling transport_generic_free_cmd from" + isert_dbg("Calling transport_generic_free_cmd from" " isert_put_cmd for 0x%02x\n", cmd->iscsi_opcode); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1723,7 +1727,7 @@ static void isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) { if (tx_desc->dma_addr != 0) { - pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); + isert_dbg("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr = 0; @@ -1735,7 +1739,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, struct ib_device *ib_dev, bool comp_err) { if (isert_cmd->pdu_buf_dma != 0) { - pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); + isert_dbg("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, isert_cmd->pdu_buf_len, DMA_TO_DEVICE); isert_cmd->pdu_buf_dma = 0; @@ -1753,7 +1757,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); if (ret) { - pr_err("ib_check_mr_status failed, ret %d\n", ret); + isert_err("ib_check_mr_status failed, ret %d\n", ret); goto fail_mr_status; } @@ -1776,7 +1780,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) do_div(sec_offset_err, block_size); se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; - pr_err("isert: PI error found type %d at sector 0x%llx " + isert_err("isert: PI error found type %d at sector 0x%llx " "expected 0x%x vs actual 0x%x\n", mr_status.sig_err.err_type, (unsigned long long)se_cmd->bad_sector, @@ -1837,7 +1841,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, cmd->write_data_done = wr->data.len; wr->send_wr_num = 0; - pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); + isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); spin_lock_bh(&cmd->istate_lock); cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; @@ -1861,7 +1865,7 @@ isert_do_control_comp(struct work_struct *work) switch (cmd->i_state) { case ISTATE_SEND_TASKMGTRSP: - pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); + isert_dbg("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); iscsit_tmr_post_handler(cmd, cmd->conn); @@ -1869,13 +1873,13 @@ isert_do_control_comp(struct work_struct *work) isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_REJECT: - pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); + isert_dbg("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_LOGOUTRSP: - pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); + isert_dbg("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); iscsit_logout_post_handler(cmd, cmd->conn); break; @@ -1884,7 +1888,7 @@ isert_do_control_comp(struct work_struct *work) isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; default: - pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); + isert_err("Unknown do_control_comp i_state %d\n", cmd->i_state); dump_stack(); break; } @@ -1929,25 +1933,25 @@ isert_send_completion(struct iser_tx_desc *tx_desc, switch (wr->iser_ib_op) { case ISER_IB_RECV: - 
pr_err("isert_send_completion: Got ISER_IB_RECV\n"); + isert_err("isert_send_completion: Got ISER_IB_RECV\n"); dump_stack(); break; case ISER_IB_SEND: - pr_debug("isert_send_completion: Got ISER_IB_SEND\n"); + isert_dbg("isert_send_completion: Got ISER_IB_SEND\n"); isert_response_completion(tx_desc, isert_cmd, isert_conn, ib_dev); break; case ISER_IB_RDMA_WRITE: - pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); + isert_dbg("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); isert_completion_rdma_write(tx_desc, isert_cmd); break; case ISER_IB_RDMA_READ: - pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); + isert_dbg("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); isert_completion_rdma_read(tx_desc, isert_cmd); break; default: - pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); + isert_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); dump_stack(); break; } @@ -1980,7 +1984,7 @@ static void isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) { if (wc->wr_id == ISER_BEACON_WRID) { - pr_info("conn %p completing conn_wait_comp_err\n", + isert_info("conn %p completing conn_wait_comp_err\n", isert_conn); complete(&isert_conn->conn_wait_comp_err); } else @@ -2020,10 +2024,10 @@ isert_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - pr_err("wr id %llx status %d vend_err %x\n", + isert_err("wr id %llx status %d vend_err %x\n", wc->wr_id, wc->status, wc->vendor_err); else - pr_debug("flush error: wr id %llx\n", wc->wr_id); + isert_dbg("flush error: wr id %llx\n", wc->wr_id); if (wc->wr_id != ISER_FASTREG_LI_WRID) isert_cq_comp_err(isert_conn, wc); @@ -2068,7 +2072,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd) ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, &wr_failed); if (ret) { - pr_err("ib_post_send failed with %d\n", ret); + isert_err("ib_post_send failed with %d\n", ret); return ret; } return ret; @@ -2117,7 +2121,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2148,13 +2152,13 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn) if (conn->tpg->tpg_attrib.t10_pi) { if (device->pi_capable) { - pr_info("conn %p PI offload enabled\n", isert_conn); + isert_info("conn %p PI offload enabled\n", isert_conn); isert_conn->pi_support = true; return TARGET_PROT_ALL; } } - pr_info("conn %p PI offload disabled\n", isert_conn); + isert_info("conn %p PI offload disabled\n", isert_conn); isert_conn->pi_support = false; return TARGET_PROT_NORMAL; @@ -2175,7 +2179,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2193,7 +2197,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return 
isert_post_response(isert_conn, isert_cmd); } @@ -2211,7 +2215,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2243,7 +2247,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2283,7 +2287,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) } isert_init_send_wr(isert_conn, isert_cmd, send_wr); - pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2311,7 +2315,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, * Perform mapping of TCM scatterlist memory ib_sge dma_addr. */ for_each_sg(sg_start, tmp_sg, sg_nents, i) { - pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", + isert_dbg("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", (unsigned long long)tmp_sg->dma_address, tmp_sg->length, page_off); @@ -2320,15 +2324,15 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, ib_sg_dma_len(ib_dev, tmp_sg) - page_off); ib_sge->lkey = isert_conn->conn_mr->lkey; - pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", + isert_dbg("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", ib_sge->addr, ib_sge->length, ib_sge->lkey); page_off = 0; data_left -= ib_sge->length; ib_sge++; - pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); + isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); } - pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", + isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", send_wr->sg_list, send_wr->num_sge); return sg_nents; @@ -2361,7 +2365,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); if (!ib_sge) { - pr_warn("Unable to allocate ib_sge\n"); + isert_warn("Unable to allocate ib_sge\n"); ret = -ENOMEM; goto unmap_cmd; } @@ -2371,7 +2375,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, GFP_KERNEL); if (!wr->send_wr) { - pr_debug("Unable to allocate wr->send_wr\n"); + isert_dbg("Unable to allocate wr->send_wr\n"); ret = -ENOMEM; goto unmap_cmd; } @@ -2435,7 +2439,7 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, chunk_start = start_addr; end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); - pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", + isert_dbg("SGL[%d] dma_addr: 0x%16llx len: %u\n", i, (unsigned long long)tmp_sg->dma_address, tmp_sg->length); @@ -2448,7 +2452,7 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, page = chunk_start & PAGE_MASK; do { fr_pl[n_pages++] = page; - pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", + isert_dbg("Mapped page_list[%d] page_addr: 0x%16llx\n", n_pages - 1, page); page += PAGE_SIZE; } while (page < end_addr); @@ -2477,7 +2481,7 @@ 
isert_fast_reg_mr(struct isert_conn *isert_conn, sge->lkey = isert_conn->conn_mr->lkey; sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); - pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", + isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", __func__, __LINE__, sge->addr, sge->length, sge->lkey); return 0; @@ -2495,7 +2499,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, page_off = mem->offset % PAGE_SIZE; - pr_debug("Use fr_desc %p sg_nents %d offset %u\n", + isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", fr_desc, mem->nents, mem->offset); pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, @@ -2531,7 +2535,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); if (ret) { - pr_err("fast registration failed, ret:%d\n", ret); + isert_err("fast registration failed, ret:%d\n", ret); return ret; } fr_desc->ind &= ~ind; @@ -2540,7 +2544,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, sge->addr = frpl->page_list[0] + page_off; sge->length = mem->len; - pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", + isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", __func__, __LINE__, sge->addr, sge->length, sge->lkey); @@ -2588,7 +2592,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); break; default: - pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); + isert_err("Unsupported PI operation %d\n", se_cmd->prot_op); return -EINVAL; } @@ -2652,7 +2656,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); if (ret) { - pr_err("fast registration failed, ret:%d\n", ret); + isert_err("fast registration failed, ret:%d\n", ret); goto err; } fr_desc->ind &= ~ISERT_SIG_KEY_VALID; @@ -2668,7 +2672,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, */ rdma_wr->ib_sg[SIG].length += se_cmd->prot_length; - pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", + isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n", rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length, rdma_wr->ib_sg[SIG].lkey); err: @@ -2689,7 +2693,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn, device->ib_device, isert_conn->conn_pd); if (ret) { - pr_err("conn %p failed to allocate pi_ctx\n", + isert_err("conn %p failed to allocate pi_ctx\n", isert_conn); return ret; } @@ -2702,7 +2706,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn, se_cmd->prot_length, 0, wr->iser_ib_op, &wr->prot); if (ret) { - pr_err("conn %p failed to map protection buffer\n", + isert_err("conn %p failed to map protection buffer\n", isert_conn); return ret; } @@ -2711,7 +2715,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn, ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot, ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]); if (ret) { - pr_err("conn %p failed to fast reg mr\n", + isert_err("conn %p failed to fast reg mr\n", isert_conn); goto unmap_prot_cmd; } @@ -2719,7 +2723,7 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn, ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc); if (ret) { - pr_err("conn %p failed to fast reg mr\n", + isert_err("conn %p failed to fast reg mr\n", isert_conn); goto unmap_prot_cmd; } @@ -2829,12 +2833,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) struct ib_send_wr *wr_failed; int rc; - pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", + isert_dbg("Cmd: %p RDMA_WRITE 
data_length: %u\n", isert_cmd, se_cmd->data_length); wr->iser_ib_op = ISER_IB_RDMA_WRITE; rc = device->reg_rdma_mem(conn, cmd, wr); if (rc) { - pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); + isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); return rc; } @@ -2855,13 +2859,13 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) - pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); + isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); if (!isert_prot_cmd(isert_conn, se_cmd)) - pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " + isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data " "READ\n", isert_cmd); else - pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", + isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); return 1; @@ -2878,20 +2882,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) struct ib_send_wr *wr_failed; int rc; - pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", + isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", isert_cmd, se_cmd->data_length, cmd->write_data_done); wr->iser_ib_op = ISER_IB_RDMA_READ; rc = device->reg_rdma_mem(conn, cmd, wr); if (rc) { - pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); + isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); return rc; } rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) - pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); + isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); - pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", + isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", isert_cmd); return 0; @@ -2907,7 +2911,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) ret = isert_put_nopin(cmd, conn, false); break; default: - pr_err("Unknown immediate state: 0x%02x\n", state); + isert_err("Unknown immediate state: 0x%02x\n", state); ret = -EINVAL; break; } @@ -2947,7 +2951,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) ret = isert_put_response(conn, cmd); break; default: - pr_err("Unknown response state: 0x%02x\n", state); + isert_err("Unknown response state: 0x%02x\n", state); ret = -EINVAL; break; } @@ -2964,26 +2968,26 @@ isert_setup_id(struct isert_np *isert_np) int ret; sa = (struct sockaddr *)&np->np_sockaddr; - pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); + isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); id = rdma_create_id(isert_cma_handler, isert_np, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(id)) { - pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); + isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); ret = PTR_ERR(id); goto out; } - pr_debug("id %p context %p\n", id, id->context); + isert_dbg("id %p context %p\n", id, id->context); ret = rdma_bind_addr(id, sa); if (ret) { - pr_err("rdma_bind_addr() failed: %d\n", ret); + isert_err("rdma_bind_addr() failed: %d\n", ret); goto out_id; } ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); if (ret) { - pr_err("rdma_listen() failed: %d\n", ret); + isert_err("rdma_listen() failed: %d\n", ret); goto out_id; } @@ -3004,7 +3008,7 @@ isert_setup_np(struct iscsi_np *np, isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); if (!isert_np) { - pr_err("Unable to allocate struct isert_np\n"); + isert_err("Unable to allocate struct isert_np\n"); return 
-ENOMEM; } sema_init(&isert_np->np_sem, 0); @@ -3049,15 +3053,15 @@ isert_rdma_accept(struct isert_conn *isert_conn) cp.retry_count = 7; cp.rnr_retry_count = 7; - pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); + isert_dbg("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); ret = rdma_accept(cm_id, &cp); if (ret) { - pr_err("rdma_accept() failed with: %d\n", ret); + isert_err("rdma_accept() failed with: %d\n", ret); return ret; } - pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); + isert_dbg("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); return 0; } @@ -3068,10 +3072,10 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) struct isert_conn *isert_conn = (struct isert_conn *)conn->context; int ret; - pr_info("before login_req comp conn: %p\n", isert_conn); + isert_info("before login_req comp conn: %p\n", isert_conn); ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); if (ret) { - pr_err("isert_conn %p interrupted before got login req\n", + isert_err("isert_conn %p interrupted before got login req\n", isert_conn); return ret; } @@ -3088,12 +3092,12 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login) isert_rx_login_req(isert_conn); - pr_info("before conn_login_comp conn: %p\n", conn); + isert_info("before conn_login_comp conn: %p\n", conn); ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); if (ret) return ret; - pr_info("processing login->req: %p\n", login->req); + isert_info("processing login->req: %p\n", login->req); return 0; } @@ -3147,7 +3151,7 @@ accept_wait: spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - pr_debug("np_thread_state %d for isert_accept_np\n", + isert_dbg("np_thread_state %d for isert_accept_np\n", np->np_thread_state); /** * No point in stalling here when np_thread @@ -3174,7 +3178,7 @@ accept_wait: isert_set_conn_info(np, conn, isert_conn); - pr_debug("Processing isert_conn: %p\n", isert_conn); + isert_dbg("Processing isert_conn: %p\n", isert_conn); return 0; } @@ -3197,11 +3201,11 @@ isert_free_np(struct iscsi_np *np) */ mutex_lock(&isert_np->np_accept_mutex); if (!list_empty(&isert_np->np_accept_list)) { - pr_info("Still have isert connections, cleaning up...\n"); + isert_info("Still have isert connections, cleaning up...\n"); list_for_each_entry_safe(isert_conn, n, &isert_np->np_accept_list, conn_accept_node) { - pr_info("cleaning isert_conn %p state (%d)\n", + isert_info("cleaning isert_conn %p state (%d)\n", isert_conn, isert_conn->state); isert_connect_release(isert_conn); } @@ -3218,7 +3222,7 @@ static void isert_release_work(struct work_struct *work) struct isert_conn, release_work); - pr_info("Starting release conn %p\n", isert_conn); + isert_info("Starting release conn %p\n", isert_conn); wait_for_completion(&isert_conn->conn_wait); @@ -3226,7 +3230,7 @@ static void isert_release_work(struct work_struct *work) isert_conn->state = ISER_CONN_DOWN; mutex_unlock(&isert_conn->conn_mutex); - pr_info("Destroying conn %p\n", isert_conn); + isert_info("Destroying conn %p\n", isert_conn); isert_put_conn(isert_conn); } @@ -3236,7 +3240,7 @@ isert_wait4logout(struct isert_conn *isert_conn) struct iscsi_conn *conn = isert_conn->conn; if (isert_conn->logout_posted) { - pr_info("conn %p wait for conn_logout_comp\n", isert_conn); + isert_info("conn %p wait for conn_logout_comp\n", isert_conn); wait_for_completion_timeout(&conn->conn_logout_comp, SECONDS_FOR_LOGOUT_COMP * HZ); } @@ -3260,7 +3264,7 @@ 
isert_wait4flush(struct isert_conn *isert_conn) isert_conn->beacon.wr_id = ISER_BEACON_WRID; /* post an indication that all flush errors were consumed */ if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) { - pr_err("conn %p failed to post beacon", isert_conn); + isert_err("conn %p failed to post beacon", isert_conn); return; } @@ -3271,7 +3275,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; - pr_debug("isert_wait_conn: Starting \n"); + isert_dbg("isert_wait_conn: Starting\n"); mutex_lock(&isert_conn->conn_mutex); /* @@ -3327,7 +3331,7 @@ static int __init isert_init(void) isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); if (!isert_comp_wq) { - pr_err("Unable to allocate isert_comp_wq\n"); + isert_err("Unable to allocate isert_comp_wq\n"); ret = -ENOMEM; return -ENOMEM; } @@ -3335,13 +3339,13 @@ static int __init isert_init(void) isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); if (!isert_release_wq) { - pr_err("Unable to allocate isert_release_wq\n"); + isert_err("Unable to allocate isert_release_wq\n"); ret = -ENOMEM; goto destroy_comp_wq; } iscsit_register_transport(&iser_target_transport); - pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); + isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); return 0; @@ -3357,7 +3361,7 @@ static void __exit isert_exit(void) destroy_workqueue(isert_release_wq); destroy_workqueue(isert_comp_wq); iscsit_unregister_transport(&iser_target_transport); - pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); + isert_dbg("iSER_TARGET[0] - Released iser_target_transport\n"); } MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index e89f384efc22..8dc8415d152d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -4,6 +4,33 @@ #include #include +#define DRV_NAME "isert" +#define PFX DRV_NAME ": " + +#define isert_dbg(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 2)) \ + printk(KERN_DEBUG PFX "%s: " fmt,\ + __func__ , ## arg); \ + } while (0) + +#define isert_warn(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 0)) \ + pr_warn(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define isert_info(fmt, arg...) \ + do { \ + if (unlikely(isert_debug_level > 1)) \ + pr_info(PFX "%s: " fmt, \ + __func__ , ## arg); \ + } while (0) + +#define isert_err(fmt, arg...) \ + pr_err(PFX "%s: " fmt, __func__ , ## arg) + #define ISERT_RDMA_LISTEN_BACKLOG 10 #define ISCSI_ISER_SG_TABLESIZE 256 #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL -- cgit v1.2.3 From 4c22e07ffdc947be61085c2a55bf564b9a8d50ee Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 7 Dec 2014 13:12:03 +0200 Subject: iser-target: Adjust log levels and prettify some prints debug_level 1 (warn): Include warning messages. debug_level 2 (info): Include relevant info for control plane. debug_level 3 (debug): Include relevant info in the IO path. Also, added/removed some logging messages. 
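As a rough illustration of how these thresholds behave, here is a minimal standalone userspace model (not the in-kernel macros; the isert_log name and the fprintf backend are invented for this sketch):

    #include <stdio.h>

    static int isert_debug_level = 2;  /* 0: err only, 1: +warn, 2: +info, 3: +dbg */

    #define isert_log(min_lvl, fmt, ...)                            \
            do {                                                    \
                    if (isert_debug_level >= (min_lvl))             \
                            fprintf(stderr, "isert: %s: " fmt,      \
                                    __func__, ##__VA_ARGS__);       \
            } while (0)

    #define isert_warn(fmt, ...) isert_log(1, fmt, ##__VA_ARGS__)
    #define isert_info(fmt, ...) isert_log(2, fmt, ##__VA_ARGS__)
    #define isert_dbg(fmt, ...)  isert_log(3, fmt, ##__VA_ARGS__)

    int main(void)
    {
            isert_warn("visible at debug_level >= 1\n");
            isert_info("visible at debug_level >= 2\n");
            isert_dbg("suppressed at debug_level 2; needs >= 3\n");
            return 0;
    }

The real macros in ib_isert.h gate on isert_debug_level the same way but print through printk/pr_* with the "isert" prefix, as the header hunk above shows.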
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 217 ++++++++++++++++---------------- 1 file changed, 108 insertions(+), 109 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 87b5e8983c19..0de507c40c5c 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -78,13 +78,13 @@ isert_qp_event_callback(struct ib_event *e, void *context) { struct isert_conn *isert_conn = (struct isert_conn *)context; - isert_err("isert_qp_event_callback event: %d\n", e->event); + isert_err("conn %p event: %d\n", isert_conn, e->event); switch (e->event) { case IB_EVENT_COMM_EST: rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); break; case IB_EVENT_QP_LAST_WQE_REACHED: - isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); + isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n"); break; default: break; @@ -149,18 +149,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) if (device->pi_capable) attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; - isert_dbg("isert_conn_setup_qp cma_id->device: %p\n", - cma_id->device); - isert_dbg("isert_conn_setup_qp conn_pd->device: %p\n", - isert_conn->conn_pd->device); - ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); if (ret) { isert_err("rdma_create_qp failed for cma_id %d\n", ret); goto err; } isert_conn->conn_qp = cma_id->qp; - isert_dbg("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); return 0; err: @@ -174,7 +168,7 @@ err: static void isert_cq_event_callback(struct ib_event *e, void *context) { - isert_dbg("isert_cq_event_callback event: %d\n", e->event); + isert_dbg("event: %d\n", e->event); } static int @@ -208,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn) } isert_conn->conn_rx_desc_head = 0; + return 0; dma_map_fail: @@ -219,6 +214,8 @@ dma_map_fail: kfree(isert_conn->conn_rx_descs); isert_conn->conn_rx_descs = NULL; fail: + isert_err("conn %p failed to allocate rx descriptors\n", isert_conn); + return -ENOMEM; } @@ -350,6 +347,7 @@ isert_device_try_release(struct isert_device *device) { mutex_lock(&device_list_mutex); device->refcount--; + isert_info("device %p refcount %d\n", device, device->refcount); if (!device->refcount) { isert_free_device_ib_res(device); list_del(&device->dev_node); @@ -368,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) list_for_each_entry(device, &device_list, dev_node) { if (device->ib_device->node_guid == cma_id->device->node_guid) { device->refcount++; + isert_info("Found iser device %p refcount %d\n", + device, device->refcount); mutex_unlock(&device_list_mutex); return device; } @@ -391,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id) device->refcount++; list_add_tail(&device->dev_node, &device_list); + isert_info("Created a new iser device %p refcount %d\n", + device, device->refcount); mutex_unlock(&device_list_mutex); return device; @@ -405,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) if (list_empty(&isert_conn->conn_fr_pool)) return; - isert_dbg("Freeing conn %p fastreg pool", isert_conn); + isert_info("Freeing conn %p fastreg pool", isert_conn); list_for_each_entry_safe(fr_desc, tmp, &isert_conn->conn_fr_pool, list) { @@ -497,14 +499,14 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_frpl)) { isert_err("Failed to allocate data frpl 
err=%ld\n", - PTR_ERR(fr_desc->data_frpl)); + PTR_ERR(fr_desc->data_frpl)); return PTR_ERR(fr_desc->data_frpl); } fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_mr)) { isert_err("Failed to allocate data frmr err=%ld\n", - PTR_ERR(fr_desc->data_mr)); + PTR_ERR(fr_desc->data_mr)); ret = PTR_ERR(fr_desc->data_mr); goto err_data_frpl; } @@ -585,7 +587,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) } spin_unlock_bh(&np->np_thread_lock); - isert_dbg("Entering isert_connect_request cma_id: %p, context: %p\n", + isert_dbg("cma_id: %p, portal: %p\n", cma_id, cma_id->context); isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); @@ -720,7 +722,7 @@ isert_connect_release(struct isert_conn *isert_conn) struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_device *device = isert_conn->conn_device; - isert_dbg("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p\n", isert_conn); if (device && device->use_fastreg) isert_conn_free_fastreg_pool(isert_conn); @@ -754,8 +756,6 @@ isert_connect_release(struct isert_conn *isert_conn) if (device) isert_device_try_release(device); - - isert_dbg("Leaving isert_connect_release >>>>>>>>>>>>\n"); } static void @@ -782,8 +782,8 @@ isert_release_conn_kref(struct kref *kref) struct isert_conn *isert_conn = container_of(kref, struct isert_conn, conn_kref); - isert_dbg("Calling isert_connect_release for final kref %s/%d\n", - current->comm, current->pid); + isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, + current->pid); isert_connect_release(isert_conn); } @@ -891,15 +891,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - isert_dbg("isert_cma_handler: event %d status %d conn %p id %p\n", - event->event, event->status, cma_id->context, cma_id); + isert_info("event %d status %d id %p np %p\n", event->event, + event->status, cma_id, cma_id->context); switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = isert_connect_request(cma_id, event); if (ret) - isert_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", - event->event, ret); + isert_err("failed handle connect request %d\n", ret); break; case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); @@ -1021,9 +1020,9 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; - isert_dbg("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" - " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, - tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); + isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n", + tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length, + tx_desc->tx_sg[0].lkey); return 0; } @@ -1069,7 +1068,6 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn) isert_conn->post_recv_buf_count--; } - isert_dbg("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n"); return ret; } @@ -1181,8 +1179,9 @@ isert_rx_login_req(struct isert_conn *isert_conn) memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); - isert_dbg("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", - size, rx_buflen, MAX_KEY_VALUE_PAIRS); + isert_dbg("Using login payload size: %d, rx_buflen: %d " + "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen, + MAX_KEY_VALUE_PAIRS); memcpy(login->req_buf, &rx_desc->data[0], size); if 
(login->first_request) { @@ -1245,7 +1244,7 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn, sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", - sg, sg_nents, &rx_desc->data[0], imm_data_len); + sg, sg_nents, &rx_desc->data[0], imm_data_len); sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); @@ -1294,8 +1293,10 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, return -1; } - isert_dbg("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", - unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); + isert_dbg("Unsolicited DataOut unsol_data_len: %u, " + "write_data_done: %u, data_length: %u\n", + unsol_data_len, cmd->write_data_done, + cmd->se_cmd.data_length); sg_off = cmd->write_data_done / PAGE_SIZE; sg_start = &cmd->se_cmd.t_data_sg[sg_off]; @@ -1305,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn, * FIXME: Non page-aligned unsolicited_data out */ if (page_off) { - isert_err("Received unexpected non-page aligned data payload\n"); + isert_err("unexpected non-page aligned data payload\n"); dump_stack(); return -1; } - isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", - sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); + isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u " + "sg_nents: %u from %p %u\n", sg_start, sg_off, + sg_nents, &rx_desc->data[0], unsol_data_len); sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], unsol_data_len); @@ -1358,7 +1360,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd text_in = kzalloc(payload_length, GFP_KERNEL); if (!text_in) { isert_err("Unable to allocate text_in of payload_length: %u\n", - payload_length); + payload_length); return -ENOMEM; } cmd->text_in_ptr = text_in; @@ -1384,7 +1386,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, if (sess->sess_ops->SessionType && (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery," - " ignoring\n", opcode); + " ignoring\n", opcode); return 0; } @@ -1462,14 +1464,14 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn) if (iser_hdr->flags & ISER_RSV) { read_stag = be32_to_cpu(iser_hdr->read_stag); read_va = be64_to_cpu(iser_hdr->read_va); - isert_dbg("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", - read_stag, (unsigned long long)read_va); + isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n", + read_stag, (unsigned long long)read_va); } if (iser_hdr->flags & ISER_WSV) { write_stag = be32_to_cpu(iser_hdr->write_stag); write_va = be64_to_cpu(iser_hdr->write_va); - isert_dbg("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", - write_stag, (unsigned long long)write_va); + isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n", + write_stag, (unsigned long long)write_va); } isert_dbg("ISER ISCSI_CTRL PDU\n"); @@ -1498,12 +1500,12 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, if ((char *)desc == isert_conn->login_req_buf) { rx_dma = isert_conn->login_req_dma; rx_buflen = ISER_RX_LOGIN_SIZE; - isert_dbg("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", + isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", rx_dma, rx_buflen); } else { rx_dma = desc->dma_addr; rx_buflen = ISER_RX_PAYLOAD_SIZE; - isert_dbg("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", 
+ isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", rx_dma, rx_buflen); } @@ -1533,8 +1535,8 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, DMA_FROM_DEVICE); isert_conn->post_recv_buf_count--; - isert_dbg("iSERT: Decremented post_recv_buf_count: %d\n", - isert_conn->post_recv_buf_count); + isert_dbg("Decremented post_recv_buf_count: %d\n", + isert_conn->post_recv_buf_count); if ((char *)desc == isert_conn->login_req_buf) return; @@ -1579,7 +1581,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, } isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", - isert_cmd, data->dma_nents, data->sg, data->nents, data->len); + isert_cmd, data->dma_nents, data->sg, data->nents, data->len); return 0; } @@ -1600,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; - isert_dbg("isert_unmap_cmd: %p\n", isert_cmd); + isert_dbg("Cmd %p\n", isert_cmd); if (wr->data.sg) { - isert_dbg("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); + isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); isert_unmap_data_buf(isert_conn, &wr->data); } if (wr->send_wr) { - isert_dbg("isert_unmap_cmd: %p free send_wr\n", isert_cmd); + isert_dbg("Cmd %p free send_wr\n", isert_cmd); kfree(wr->send_wr); wr->send_wr = NULL; } if (wr->ib_sge) { - isert_dbg("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); + isert_dbg("Cmd %p free ib_sge\n", isert_cmd); kfree(wr->ib_sge); wr->ib_sge = NULL; } @@ -1626,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; LIST_HEAD(unmap_list); - isert_dbg("unreg_fastreg_cmd: %p\n", isert_cmd); + isert_dbg("Cmd %p\n", isert_cmd); if (wr->fr_desc) { - isert_dbg("unreg_fastreg_cmd: %p free fr_desc %p\n", - isert_cmd, wr->fr_desc); + isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc); if (wr->fr_desc->ind & ISERT_PROTECTED) { isert_unmap_data_buf(isert_conn, &wr->prot); wr->fr_desc->ind &= ~ISERT_PROTECTED; @@ -1642,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) } if (wr->data.sg) { - isert_dbg("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); + isert_dbg("Cmd %p unmap_sg op\n", isert_cmd); isert_unmap_data_buf(isert_conn, &wr->data); } @@ -1658,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) struct iscsi_conn *conn = isert_conn->conn; struct isert_device *device = isert_conn->conn_device; - isert_dbg("Entering isert_put_cmd: %p\n", isert_cmd); + isert_dbg("Cmd %p\n", isert_cmd); switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: @@ -1727,7 +1728,7 @@ static void isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) { if (tx_desc->dma_addr != 0) { - isert_dbg("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); + isert_dbg("unmap single for tx_desc->dma_addr\n"); ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr = 0; @@ -1739,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, struct ib_device *ib_dev, bool comp_err) { if (isert_cmd->pdu_buf_dma != 0) { - isert_dbg("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); + isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n"); ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, isert_cmd->pdu_buf_len, DMA_TO_DEVICE); isert_cmd->pdu_buf_dma = 0; @@ -1780,12 +1781,12 @@ isert_check_pi_status(struct se_cmd 
*se_cmd, struct ib_mr *sig_mr) do_div(sec_offset_err, block_size); se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; - isert_err("isert: PI error found type %d at sector 0x%llx " - "expected 0x%x vs actual 0x%x\n", - mr_status.sig_err.err_type, - (unsigned long long)se_cmd->bad_sector, - mr_status.sig_err.expected, - mr_status.sig_err.actual); + isert_err("PI error found type %d at sector 0x%llx " + "expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, + (unsigned long long)se_cmd->bad_sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); ret = 1; } @@ -1863,32 +1864,30 @@ isert_do_control_comp(struct work_struct *work) struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state); + switch (cmd->i_state) { case ISTATE_SEND_TASKMGTRSP: - isert_dbg("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n"); - iscsit_tmr_post_handler(cmd, cmd->conn); - cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, + ib_dev, false); break; case ISTATE_SEND_REJECT: - isert_dbg("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); - cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, + ib_dev, false); break; case ISTATE_SEND_LOGOUTRSP: - isert_dbg("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); - iscsit_logout_post_handler(cmd, cmd->conn); break; case ISTATE_SEND_TEXTRSP: cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, + ib_dev, false); break; default: - isert_err("Unknown do_control_comp i_state %d\n", cmd->i_state); + isert_err("Unknown i_state %d\n", cmd->i_state); dump_stack(); break; } @@ -1931,27 +1930,25 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } wr = &isert_cmd->rdma_wr; + isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op); + switch (wr->iser_ib_op) { case ISER_IB_RECV: - isert_err("isert_send_completion: Got ISER_IB_RECV\n"); + isert_err("Got ISER_IB_RECV\n"); dump_stack(); break; case ISER_IB_SEND: - isert_dbg("isert_send_completion: Got ISER_IB_SEND\n"); isert_response_completion(tx_desc, isert_cmd, isert_conn, ib_dev); break; case ISER_IB_RDMA_WRITE: - isert_dbg("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); isert_completion_rdma_write(tx_desc, isert_cmd); break; case ISER_IB_RDMA_READ: - isert_dbg("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); - isert_completion_rdma_read(tx_desc, isert_cmd); break; default: - isert_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); + isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op); dump_stack(); break; } @@ -2121,7 +2118,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("Posting SCSI Response\n"); return isert_post_response(isert_conn, isert_cmd); } @@ -2179,7 +2176,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p Posting NOPIN Response\n", isert_conn); return 
isert_post_response(isert_conn, isert_cmd); } @@ -2197,7 +2194,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p Posting Logout Response\n", isert_conn); return isert_post_response(isert_conn, isert_cmd); } @@ -2215,7 +2212,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p Posting Task Management Response\n", isert_conn); return isert_post_response(isert_conn, isert_cmd); } @@ -2247,7 +2244,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p Posting Reject\n", isert_conn); return isert_post_response(isert_conn, isert_cmd); } @@ -2287,7 +2284,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) } isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); + isert_dbg("conn %p Text Reject\n", isert_conn); return isert_post_response(isert_conn, isert_cmd); } @@ -2315,17 +2312,18 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, * Perform mapping of TCM scatterlist memory ib_sge dma_addr. */ for_each_sg(sg_start, tmp_sg, sg_nents, i) { - isert_dbg("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", - (unsigned long long)tmp_sg->dma_address, - tmp_sg->length, page_off); + isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, " + "page_off: %u\n", + (unsigned long long)tmp_sg->dma_address, + tmp_sg->length, page_off); ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; ib_sge->length = min_t(u32, data_left, ib_sg_dma_len(ib_dev, tmp_sg) - page_off); ib_sge->lkey = isert_conn->conn_mr->lkey; - isert_dbg("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", - ib_sge->addr, ib_sge->length, ib_sge->lkey); + isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n", + ib_sge->addr, ib_sge->length, ib_sge->lkey); page_off = 0; data_left -= ib_sge->length; ib_sge++; @@ -2333,7 +2331,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, } isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", - send_wr->sg_list, send_wr->num_sge); + send_wr->sg_list, send_wr->num_sge); return sg_nents; } @@ -2439,9 +2437,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, chunk_start = start_addr; end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); - isert_dbg("SGL[%d] dma_addr: 0x%16llx len: %u\n", - i, (unsigned long long)tmp_sg->dma_address, - tmp_sg->length); + isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n", + i, (unsigned long long)tmp_sg->dma_address, + tmp_sg->length); if ((end_addr & ~PAGE_MASK) && i < last_ent) { new_chunk = 0; @@ -2452,8 +2450,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, page = chunk_start & PAGE_MASK; do { fr_pl[n_pages++] = page; - isert_dbg("Mapped page_list[%d] page_addr: 0x%16llx\n", - n_pages - 1, page); + isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n", + n_pages - 1, page); page += PAGE_SIZE; } while (page < end_addr); } @@ -2481,9 +2479,8 @@ isert_fast_reg_mr(struct isert_conn 
*isert_conn, sge->lkey = isert_conn->conn_mr->lkey; sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); - isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", - __func__, __LINE__, sge->addr, sge->length, - sge->lkey); + isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", + sge->addr, sge->length, sge->lkey); return 0; } @@ -2500,7 +2497,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, page_off = mem->offset % PAGE_SIZE; isert_dbg("Use fr_desc %p sg_nents %d offset %u\n", - fr_desc, mem->nents, mem->offset); + fr_desc, mem->nents, mem->offset); pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, &frpl->page_list[0]); @@ -2544,9 +2541,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, sge->addr = frpl->page_list[0] + page_off; sge->length = mem->len; - isert_dbg("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", - __func__, __LINE__, sge->addr, sge->length, - sge->lkey); + isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n", + sge->addr, sge->length, sge->lkey); return ret; } @@ -2835,6 +2831,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n", isert_cmd, se_cmd->data_length); + wr->iser_ib_op = ISER_IB_RDMA_WRITE; rc = device->reg_rdma_mem(conn, cmd, wr); if (rc) { @@ -3053,16 +3050,12 @@ isert_rdma_accept(struct isert_conn *isert_conn) cp.retry_count = 7; cp.rnr_retry_count = 7; - isert_dbg("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n"); - ret = rdma_accept(cm_id, &cp); if (ret) { isert_err("rdma_accept() failed with: %d\n", ret); return ret; } - isert_dbg("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n"); - return 0; } @@ -3239,6 +3232,8 @@ isert_wait4logout(struct isert_conn *isert_conn) { struct iscsi_conn *conn = isert_conn->conn; + isert_info("conn %p\n", isert_conn); + if (isert_conn->logout_posted) { isert_info("conn %p wait for conn_logout_comp\n", isert_conn); wait_for_completion_timeout(&conn->conn_logout_comp, @@ -3249,6 +3244,8 @@ isert_wait4logout(struct isert_conn *isert_conn) static void isert_wait4cmds(struct iscsi_conn *conn) { + isert_info("iscsi_conn %p\n", conn); + if (conn->sess) { target_sess_cmd_list_set_waiting(conn->sess->se_sess); target_wait_for_sess_cmds(conn->sess->se_sess); @@ -3260,6 +3257,8 @@ isert_wait4flush(struct isert_conn *isert_conn) { struct ib_recv_wr *bad_wr; + isert_info("conn %p\n", isert_conn); + init_completion(&isert_conn->conn_wait_comp_err); isert_conn->beacon.wr_id = ISER_BEACON_WRID; /* post an indication that all flush errors were consumed */ @@ -3275,7 +3274,7 @@ static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; - isert_dbg("isert_wait_conn: Starting\n"); + isert_info("Starting conn %p\n", isert_conn); mutex_lock(&isert_conn->conn_mutex); /* @@ -3361,7 +3360,7 @@ static void __exit isert_exit(void) destroy_workqueue(isert_release_wq); destroy_workqueue(isert_comp_wq); iscsit_unregister_transport(&iser_target_transport); - isert_dbg("iSER_TARGET[0] - Released iser_target_transport\n"); + isert_info("iSER_TARGET[0] - Released iser_target_transport\n"); } MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); -- cgit v1.2.3 From 10633c37bf461b246491f6587f9efe09260893f9 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 7 Dec 2014 13:12:04 +0200 Subject: iser-target: Remove code duplication - Fall-through in switch case instead in do_control_comp. - Move rkey invalidation to a function. 
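For reference, the key "bump" the new helper performs is simple arithmetic on the low 8 bits of the rkey (the consumer-owned portion), so any stale remote key stops matching. A standalone sketch of the computation the kernel's ib_inc_rkey() is understood to perform (toy program, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Advance only the low 8 "variant" bits; the MR index bits stay put. */
    static uint32_t inc_rkey(uint32_t rkey)
    {
            const uint32_t mask = 0x000000ff;

            return ((rkey + 1) & mask) | (rkey & ~mask);
    }

    int main(void)
    {
            uint32_t rkey = 0x1234ccff;

            printf("old 0x%08x -> new 0x%08x\n", rkey, inc_rkey(rkey));
            /* old 0x1234ccff -> new 0x1234cc00: key wraps, index preserved */
            return 0;
    }

This matches what the removed open-coded sequence did by masking off the low byte and calling ib_update_fast_reg_key() with the incremented key.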
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 47 ++++++++++++++------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 0de507c40c5c..2964f210d6e6 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1869,11 +1869,8 @@ isert_do_control_comp(struct work_struct *work) switch (cmd->i_state) { case ISTATE_SEND_TASKMGTRSP: iscsit_tmr_post_handler(cmd, cmd->conn); - cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, - ib_dev, false); - break; - case ISTATE_SEND_REJECT: + case ISTATE_SEND_REJECT: /* FALLTHRU */ + case ISTATE_SEND_TEXTRSP: /* FALLTHRU */ cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); @@ -1881,11 +1878,6 @@ isert_do_control_comp(struct work_struct *work) case ISTATE_SEND_LOGOUTRSP: iscsit_logout_post_handler(cmd, cmd->conn); break; - case ISTATE_SEND_TEXTRSP: - cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, - ib_dev, false); - break; default: isert_err("Unknown i_state %d\n", cmd->i_state); dump_stack(); @@ -2459,6 +2451,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, return n_pages; } +static inline void +isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr) +{ + u32 rkey; + + memset(inv_wr, 0, sizeof(*inv_wr)); + inv_wr->wr_id = ISER_FASTREG_LI_WRID; + inv_wr->opcode = IB_WR_LOCAL_INV; + inv_wr->ex.invalidate_rkey = mr->rkey; + + /* Bump the key */ + rkey = ib_inc_rkey(mr->rkey); + ib_update_fast_reg_key(mr, rkey); +} + static int isert_fast_reg_mr(struct isert_conn *isert_conn, struct fast_reg_descriptor *fr_desc, @@ -2473,7 +2480,6 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, struct ib_send_wr *bad_wr, *wr = NULL; int ret, pagelist_len; u32 page_off; - u8 key; if (mem->dma_nents == 1) { sge->lkey = isert_conn->conn_mr->lkey; @@ -2502,15 +2508,9 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, &frpl->page_list[0]); - if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { - memset(&inv_wr, 0, sizeof(inv_wr)); - inv_wr.wr_id = ISER_FASTREG_LI_WRID; - inv_wr.opcode = IB_WR_LOCAL_INV; - inv_wr.ex.invalidate_rkey = mr->rkey; + if (!(fr_desc->ind & ind)) { + isert_inv_rkey(&inv_wr, mr); wr = &inv_wr; - /* Bump the key */ - key = (u8)(mr->rkey & 0x000000FF); - ib_update_fast_reg_key(mr, ++key); } /* Prepare FASTREG WR */ @@ -2614,7 +2614,6 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct pi_context *pi_ctx = fr_desc->pi_ctx; struct ib_sig_attrs sig_attrs; int ret; - u32 key; memset(&sig_attrs, 0, sizeof(sig_attrs)); ret = isert_set_sig_attrs(se_cmd, &sig_attrs); @@ -2624,14 +2623,8 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { - memset(&inv_wr, 0, sizeof(inv_wr)); - inv_wr.opcode = IB_WR_LOCAL_INV; - inv_wr.wr_id = ISER_FASTREG_LI_WRID; - inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; + isert_inv_rkey(&inv_wr, pi_ctx->sig_mr); wr = &inv_wr; - /* Bump the key */ - key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); - ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); } memset(&sig_wr, 0, sizeof(sig_wr)); -- cgit v1.2.3 From ed4520ae9b896690bb6cbed2e0c87a53144de712 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 7 Dec 
2014 13:12:05 +0200 Subject: iser-target: Fix wc->wr_id cast warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CC [M] drivers/infiniband/ulp/isert/ib_isert.o drivers/infiniband/ulp/isert/ib_isert.c: In function ‘isert_cq_comp_err’: drivers/infiniband/ulp/isert/ib_isert.c:1979:42: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2964f210d6e6..dafb3c531f96 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1976,8 +1976,7 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc) isert_info("conn %p completing conn_wait_comp_err\n", isert_conn); complete(&isert_conn->conn_wait_comp_err); - } else - if (is_isert_tx_desc(isert_conn, (void *)wc->wr_id)) { + } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd; struct iser_tx_desc *desc; -- cgit v1.2.3 From ee8e3ad96e04749aa043c7fcb7fe6a562f8b9c48 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 12 Dec 2014 23:27:06 -0800 Subject: iscsi-target: Drop left-over bogus iscsi_np->tpg_np This patch drops the left-over iscsi_np->tpg_np pointer, now that iser-target PI is able to dynamically allocate PI contexts per I/O, instead of needing to determine support using a TPG attribute with this bogus reference. Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_core.h | 1 - drivers/target/iscsi/iscsi_target_tpg.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 302eb3b78715..09a522bae222 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -790,7 +790,6 @@ struct iscsi_np { void *np_context; struct iscsit_transport *np_transport; struct list_head np_list; - struct iscsi_tpg_np *tpg_np; } ____cacheline_aligned; struct iscsi_tpg_np { diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index c3cb5c15efda..9053a3c0c6e5 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal( init_completion(&tpg_np->tpg_np_comp); kref_init(&tpg_np->tpg_np_kref); tpg_np->tpg_np = np; - np->tpg_np = tpg_np; tpg_np->tpg = tpg; spin_lock(&tpg->tpg_np_lock); -- cgit v1.2.3 From d16ca7c5198fd668db10d2c7b048ed3359c12c54 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 14 Dec 2014 01:47:19 -0800 Subject: target: Fix R_HOLDER bit usage for AllRegistrants This patch fixes the usage of the R_HOLDER bit for an All Registrants reservation in READ_FULL_STATUS, where only the registration that issued RESERVE was being reported as having an active reservation. It changes core_scsi3_pri_read_full_status() to check ahead of the list walk of active registrations to see if All Registrants is active, and, if so, set the R_HOLDER bit and SCOPE/TYPE fields for all active registrations.
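To picture the bytes being filled in: per SPC-4, each full-status descriptor carries the R_HOLDER flag in bit 0 of one byte and packs the SCOPE/TYPE nibbles into the next. A standalone model of the intended behavior (hypothetical helper, not the kernel function):

    #include <stdint.h>
    #include <stdio.h>

    /* With an All Registrants reservation active, every registration
     * reports itself as a holder, not just the one that issued RESERVE. */
    static void fill_holder_bytes(uint8_t b[2], int is_holder, int all_reg,
                                  uint8_t scope, uint8_t type)
    {
            if (is_holder || all_reg) {
                    b[0] |= 0x01;                   /* R_HOLDER */
                    b[1] = (scope & 0xf0) | (type & 0x0f);
            }                                       /* else both stay zero */
    }

    int main(void)
    {
            uint8_t b[2] = { 0, 0 };

            /* non-issuing nexus, WRITE EXCLUSIVE - ALL REGISTRANTS (type 7h) */
            fill_holder_bytes(b, 0, 1, 0x00 /* LU_SCOPE */, 0x07);
            printf("0x%02x 0x%02x\n", b[0], b[1]);  /* 0x01 0x07 */
            return 0;
    }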
Reported-by: Ilias Tsitsimpis Cc: James Bottomley Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pr.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index f91b6a1b846e..c4a8da5415c5 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3834,7 +3834,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) unsigned char *buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ - int format_code = 0; + int format_code = 0, pr_res_type = 0, pr_res_scope = 0; + bool all_reg = false; if (cmd->data_length < 8) { pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" @@ -3851,6 +3852,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); buf[3] = (dev->t10_pr.pr_generation & 0xff); + spin_lock(&dev->dev_reservation_lock); + if (dev->dev_pr_res_holder) { + struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder; + + if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG || + pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) { + all_reg = true; + pr_res_type = pr_holder->pr_res_type; + pr_res_scope = pr_holder->pr_res_scope; + } + } + spin_unlock(&dev->dev_reservation_lock); + spin_lock(&pr_tmpl->registration_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->registration_list, pr_reg_list) { @@ -3898,14 +3912,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) * reservation holder for PR_HOLDER bit. * * Also, if this registration is the reservation - * holder, fill in SCOPE and TYPE in the next byte. + * holder or there is an All Registrants reservation + * active, fill in SCOPE and TYPE in the next byte. */ if (pr_reg->pr_res_holder) { buf[off++] |= 0x01; buf[off++] = (pr_reg->pr_res_scope & 0xf0) | (pr_reg->pr_res_type & 0x0f); - } else + } else if (all_reg) { + buf[off++] |= 0x01; + buf[off++] = (pr_res_scope & 0xf0) | + (pr_res_type & 0x0f); + } else { off += 2; + } off += 4; /* Skip over reserved area */ /* -- cgit v1.2.3 From 6c3c9baa0debeb4bcc52a78c4463a0a97518de10 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 15 Dec 2014 11:50:26 -0800 Subject: target: Avoid dropping AllRegistrants reservation during unregister This patch fixes an issue with AllRegistrants reservations where an unregister operation by the I_T nexus reservation holder would incorrectly drop the reservation, instead of waiting until the last active I_T nexus is unregistered as per SPC-4. This includes updating __core_scsi3_complete_pro_release() to reset dev->dev_pr_res_holder with another pr_reg for this special case, as well as a new 'unreg' parameter to determine when the release is occurring from an implicit unregister, vs. explicit RELEASE. It also adds special handling in core_scsi3_free_pr_reg_from_nacl() to release the left-over pr_res_holder, now that pr_reg is deleted from pr_reg_list within __core_scsi3_complete_pro_release().
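The hand-off logic can be pictured with a toy registration list; the struct and helper below are invented for illustration and only model the intended All Registrants behavior, not the kernel data structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct reg {
            int nexus_id;
            struct reg *next;
    };

    /* Unregister one nexus. If it held an All Registrants reservation,
     * promote any surviving registration; only when the list empties
     * does the reservation itself drop (NULL returned). */
    static struct reg *unregister_nexus(struct reg **list, struct reg *holder,
                                        int nexus_id)
    {
            struct reg **pp = list;

            while (*pp && (*pp)->nexus_id != nexus_id)
                    pp = &(*pp)->next;
            if (!*pp)
                    return holder;          /* nexus was not registered */

            struct reg *victim = *pp;
            int was_holder = (victim == holder);

            *pp = victim->next;
            free(victim);

            return was_holder ? *list : holder;
    }

    int main(void)
    {
            struct reg *b = malloc(sizeof(*b));
            struct reg *a = malloc(sizeof(*a));
            struct reg *list;

            b->nexus_id = 2; b->next = NULL;
            a->nexus_id = 1; a->next = b;
            list = a;

            /* the holder's own unregister must not drop the reservation */
            struct reg *holder = unregister_nexus(&list, a, 1);
            printf("reservation %s, holder is nexus %d\n",
                   holder ? "kept" : "dropped",
                   holder ? holder->nexus_id : -1);
            free(b);
            return 0;
    }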
Reported-by: Ilias Tsitsimpis Cc: James Bottomley Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pr.c | 87 ++++++++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c4a8da5415c5..703890c12071 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -76,7 +76,7 @@ enum preempt_type { }; static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, - struct t10_pr_registration *, int); + struct t10_pr_registration *, int, int); static sense_reason_t target_scsi2_reservation_check(struct se_cmd *cmd) @@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release( * service action with the SERVICE ACTION RESERVATION KEY * field set to zero (see 5.7.11.3). */ - __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); + __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1); ret = 1; /* * For 'All Registrants' reservation types, all existing @@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration( pr_reg->pr_reg_deve->def_pr_registered = 0; pr_reg->pr_reg_deve->pr_res_key = 0; - list_del(&pr_reg->pr_reg_list); + if (!list_empty(&pr_reg->pr_reg_list)) + list_del(&pr_reg->pr_reg_list); /* * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), * so call core_scsi3_put_pr_reg() to decrement our reference. @@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl( { struct t10_reservation *pr_tmpl = &dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; + bool free_reg = false; /* * If the passed se_node_acl matches the reservation holder, * release the reservation. @@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl( spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((pr_res_holder != NULL) && - (pr_res_holder->pr_reg_nacl == nacl)) - __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); + (pr_res_holder->pr_reg_nacl == nacl)) { + __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1); + free_reg = true; + } spin_unlock(&dev->dev_reservation_lock); /* * Release any registration associated with the struct se_node_acl. */ spin_lock(&pr_tmpl->registration_lock); + if (pr_res_holder && free_reg) + __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0); + list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->registration_list, pr_reg_list) { @@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations( if (pr_res_holder != NULL) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; __core_scsi3_complete_pro_release(dev, pr_res_nacl, - pr_res_holder, 0); + pr_res_holder, 0, 0); } spin_unlock(&dev->dev_reservation_lock); @@ -2103,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, /* * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. 
*/ - pr_holder = core_scsi3_check_implicit_release( - cmd->se_dev, pr_reg); + type = pr_reg->pr_res_type; + pr_holder = core_scsi3_check_implicit_release(cmd->se_dev, + pr_reg); if (pr_holder < 0) { ret = TCM_RESERVATION_CONFLICT; goto out; } - type = pr_reg->pr_res_type; spin_lock(&pr_tmpl->registration_lock); /* @@ -2383,23 +2390,59 @@ static void __core_scsi3_complete_pro_release( struct se_device *dev, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, - int explicit) + int explicit, + int unreg) { struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; char i_buf[PR_REG_ISID_ID_LEN]; + int pr_res_type = 0, pr_res_scope = 0; memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* * Go ahead and release the current PR reservation holder. + * If an All Registrants reservation is currently active and + * a unregister operation is requested, replace the current + * dev_pr_res_holder with another active registration. */ - dev->dev_pr_res_holder = NULL; + if (dev->dev_pr_res_holder) { + pr_res_type = dev->dev_pr_res_holder->pr_res_type; + pr_res_scope = dev->dev_pr_res_holder->pr_res_scope; + dev->dev_pr_res_holder->pr_res_type = 0; + dev->dev_pr_res_holder->pr_res_scope = 0; + dev->dev_pr_res_holder->pr_res_holder = 0; + dev->dev_pr_res_holder = NULL; + } + if (!unreg) + goto out; - pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" - " reservation holder TYPE: %s ALL_TG_PT: %d\n", - tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", - core_scsi3_pr_dump_type(pr_reg->pr_res_type), - (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + spin_lock(&dev->t10_pr.registration_lock); + list_del_init(&pr_reg->pr_reg_list); + /* + * If the I_T nexus is a reservation holder, the persistent reservation + * is of an all registrants type, and the I_T nexus is the last remaining + * registered I_T nexus, then the device server shall also release the + * persistent reservation. + */ + if (!list_empty(&dev->t10_pr.registration_list) && + ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || + (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) { + dev->dev_pr_res_holder = + list_entry(dev->t10_pr.registration_list.next, + struct t10_pr_registration, pr_reg_list); + dev->dev_pr_res_holder->pr_res_type = pr_res_type; + dev->dev_pr_res_holder->pr_res_scope = pr_res_scope; + dev->dev_pr_res_holder->pr_res_holder = 1; + } + spin_unlock(&dev->t10_pr.registration_lock); +out: + if (!dev->dev_pr_res_holder) { + pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" + " reservation holder TYPE: %s ALL_TG_PT: %d\n", + tfo->get_fabric_name(), (explicit) ? "explicit" : + "implicit", core_scsi3_pr_dump_type(pr_res_type), + (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); + } pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", tfo->get_fabric_name(), se_nacl->initiatorname, i_buf); @@ -2530,7 +2573,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope, * server shall not establish a unit attention condition. 
*/ __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, - pr_reg, 1); + pr_reg, 1, 0); spin_unlock(&dev->dev_reservation_lock); @@ -2618,7 +2661,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key) if (pr_res_holder) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; __core_scsi3_complete_pro_release(dev, pr_res_nacl, - pr_res_holder, 0); + pr_res_holder, 0, 0); } spin_unlock(&dev->dev_reservation_lock); /* @@ -2677,7 +2720,7 @@ static void __core_scsi3_complete_pro_preempt( */ if (dev->dev_pr_res_holder) __core_scsi3_complete_pro_release(dev, nacl, - dev->dev_pr_res_holder, 0); + dev->dev_pr_res_holder, 0, 0); dev->dev_pr_res_holder = pr_reg; pr_reg->pr_res_holder = 1; @@ -2922,8 +2965,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, */ if (pr_reg_n != pr_res_holder) __core_scsi3_complete_pro_release(dev, - pr_res_holder->pr_reg_nacl, - dev->dev_pr_res_holder, 0); + pr_res_holder->pr_reg_nacl, + dev->dev_pr_res_holder, 0, 0); /* * b) Remove the registrations for all I_T nexuses identified * by the SERVICE ACTION RESERVATION KEY field, except the @@ -3386,7 +3429,7 @@ after_iport_check: * holder (i.e., the I_T nexus on which the */ __core_scsi3_complete_pro_release(dev, pr_res_nacl, - dev->dev_pr_res_holder, 0); + dev->dev_pr_res_holder, 0, 0); /* * g) Move the persistent reservation to the specified I_T nexus using * the same scope and type as the persistent reservation released in -- cgit v1.2.3 From a0b3b9b2409b409c677f7eb1e0485b816a5848f7 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 15 Dec 2014 15:08:54 +0200 Subject: iscsi-target: nullify session in failed login sequence In case login sequence failed, make sure conn->sess is NULL before calling wait_conn as some transports (iser) may rely on that (waiting for session commands). Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_login.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 18e2601527df..713c0c1877ab 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1159,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, } kfree(conn->sess->sess_ops); kfree(conn->sess); + conn->sess = NULL; old_sess_out: iscsi_stop_login_thread_timer(np); -- cgit v1.2.3 From 6bf6ca7515c1df06f5c03737537f5e0eb191e29e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 20 Nov 2014 20:50:07 -0800 Subject: iscsi-target: Fail connection on short sendmsg writes This patch changes iscsit_do_tx_data() to fail on short writes when kernel_sendmsg() returns a value different than requested transfer length, returning -EPIPE and thus causing a connection reset to occur. This avoids a potential bug in the original code where a short write would result in kernel_sendmsg() being called again with the original iovec base + length. In practice this has not been an issue because iscsit_do_tx_data() is only used for transferring 48 byte headers + 4 byte digests, along with seldom used control payloads from NOPIN + TEXT_RSP + REJECT with less than 32k of data. So following Al's audit of iovec consumers, go ahead and fail the connection on short writes for now, and remove the bogus logic ahead of his proper upstream fix. Reported-by: Al Viro Cc: David S. 
From 3875f15207f9ecb3f24a8e91e7ad196899139595 Mon Sep 17 00:00:00 2001
From: Kyle McMartin
Date: Thu, 18 Dec 2014 12:57:14 -0500
Subject: uapi/linux/target_core_user.h: fix headers_install.sh badness

scripts/headers_install.sh will transform __packed to
__attribute__((packed)), so the #ifndef is not necessary. (and, in
fact, it's problematic, because we'll end up with the header
containing:

#ifndef __attribute__((packed))
#define __attribu...

and so forth.)

Cc: stable@vger.kernel.org # 3.18
Signed-off-by: Kyle McMartin
Signed-off-by: Nicholas Bellinger
---
 include/uapi/linux/target_core_user.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 7dcfbe6771b1..b483d1909d3e 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,10 +6,6 @@
 #include <linux/types.h>
 #include <linux/uio.h>

-#ifndef __packed
-#define __packed __attribute__((packed))
-#endif
-
 #define TCMU_VERSION "1.0"

 /*
-- cgit v1.2.3
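To make the failure mode concrete: #ifndef takes a plain identifier, so once headers_install.sh has rewritten __packed, the removed guard turns into a directive no compiler accepts. An exported header needs no fallback at all, because the installed copy already carries the expanded attribute form. A small stand-alone example, with a hypothetical struct that is not the TCMU ABI:

#include <stdint.h>

/* Roughly what the installed header would have contained -- invalid,
 * because the "macro name" is an expression, not an identifier:
 *
 *   #ifndef __attribute__((packed))
 *   #define __attribute__((packed)) __attribute__((packed))
 *   #endif
 *
 * The working pattern is to let the attribute appear directly: */
struct example_entry {			/* hypothetical layout */
	uint32_t len;
	uint8_t  opcode;
	uint8_t  pad[3];
} __attribute__((packed));

_Static_assert(sizeof(struct example_entry) == 8,
	       "packed layout is exactly 8 bytes");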
From ae450e246e8540300699480a3780a420a028b73f Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger
Date: Fri, 19 Dec 2014 00:49:23 +0000
Subject: target: Allow AllRegistrants to re-RESERVE existing reservation

This patch changes core_scsi3_pro_reserve() logic to allow an existing
AllRegistrants type reservation to be re-reserved by any registered
I_T nexus.

This addresses an issue where AllRegistrants type RESERVE was receiving
RESERVATION_CONFLICT status if dev_pr_res_holder did not match the same
I_T nexus, instead of just returning GOOD status following spc4r34
Section 5.9.9:

"If the device server receives a PERSISTENT RESERVE OUT command with
RESERVE service action where the TYPE field and the SCOPE field contain
the same values as the existing type and scope from a persistent
reservation holder, it shall not make any change to the existing
persistent reservation and shall complete the command with GOOD
status."

Reported-by: Ilias Tsitsimpis
Cc: Ilias Tsitsimpis
Cc: Lee Duncan
Cc: James Bottomley
Signed-off-by: Nicholas Bellinger
---
 drivers/target/target_core_pr.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 703890c12071..f75a4baf6e56 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2274,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
 spin_lock(&dev->dev_reservation_lock);
 pr_res_holder = dev->dev_pr_res_holder;
 if (pr_res_holder) {
+ int pr_res_type = pr_res_holder->pr_res_type;
 /*
 * From spc4r17 Section 5.7.9: Reserving:
 *
@@ -2284,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
 * the logical unit, then the command shall be completed with
 * RESERVATION CONFLICT status.
 */
- if (pr_res_holder != pr_reg) {
+ if ((pr_res_holder != pr_reg) &&
+ (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+ (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
 pr_err("SPC-3 PR: Attempted RESERVE from"
 " [%s]: %s while reservation already held by"
-- cgit v1.2.3
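A reduced model of the updated conflict test, with hypothetical names (the real code also holds dev_reservation_lock and logs the conflicting initiator): under an All Registrants reservation, any registered nexus may repeat RESERVE with matching TYPE and SCOPE and receive GOOD status; only non-ALLREG types still require the request to come from the holder itself.

enum pr_type {
	PR_WRITE_EXCLUSIVE,
	PR_WRITE_EXCLUSIVE_ALLREG,
	PR_EXCLUSIVE_ACCESS_ALLREG,
};

/* Hypothetical reduction of the RESERVE check above: a conflict now
 * needs both a different holder and a non-ALLREG reservation type. */
static int reserve_conflicts(const void *holder, const void *requester,
			     enum pr_type type)
{
	return (holder != requester) &&
	       (type != PR_WRITE_EXCLUSIVE_ALLREG) &&
	       (type != PR_EXCLUSIVE_ACCESS_ALLREG);
}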