From 7e642ca0375b95072ab6240c8eb9f0b4f013fb24 Mon Sep 17 00:00:00 2001 From: Li Feng Date: Mon, 13 Sep 2021 16:30:45 +0800 Subject: scsi: target: Remove unused function arguments The se_cmd argument is unused in these functions; just remove it. Link: https://lore.kernel.org/r/20210913083045.3670648-1-fengli@smartx.com Reviewed-by: Himanshu Madhani Signed-off-by: Li Feng Signed-off-by: Martin K. Petersen --- drivers/target/target_core_xcopy.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index d4fe7cb2bd00..6bb20aa9c5bc 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -295,8 +295,7 @@ out: return -EINVAL; } -static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop, - unsigned char *p) +static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p) { unsigned char *desc = p; int dc = (desc[1] & 0x02); @@ -332,9 +331,9 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op return 0; } -static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, - struct xcopy_op *xop, unsigned char *p, - unsigned int sdll, sense_reason_t *sense_ret) +static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop, + unsigned char *p, unsigned int sdll, + sense_reason_t *sense_ret) { unsigned char *desc = p; unsigned int start = 0; @@ -362,7 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, */ switch (desc[0]) { case 0x02: - rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc); + rc = target_xcopy_parse_segdesc_02(xop, desc); if (rc < 0) goto out; @@ -840,8 +839,7 @@ static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop) */ seg_desc = &p[16] + tdll; - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, - sdll, &ret); + rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret); if (rc <= 0) goto out; -- cgit v1.2.3 From 05787e3456ffcc8598aab632fcd5d160894619b3 Mon Sep 17 00:00:00 2001 From: Konstantin Shelekhin Date: Wed, 29 Sep 2021 14:50:00 +0300 Subject: scsi: target: core: Make logs less verbose Change the log level of the following message to debug: Unsupported SCSI Opcode 0xXX, sending CHECK_CONDITION. This message is mostly helpful during debugging sessions to understand errors on the initiator side. But most of the time it's just useless and makes reading logs much harder. It gets particularly annoying if there are many initiators that come and go or if an initiator runs a program that does not care whether the command is supported and just keeps sending it. Link: https://lore.kernel.org/r/20210929114959.705852-1-k.shelekhin@yadro.com Reviewed-by: Roman Bolshakov Reviewed-by: Lee Duncan Signed-off-by: Konstantin Shelekhin Signed-off-by: Martin K. 
Petersen --- drivers/target/target_core_transport.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 14c6f2bb1b01..4a0055ab9151 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1511,10 +1511,10 @@ target_cmd_parse_cdb(struct se_cmd *cmd) ret = dev->transport->parse_cdb(cmd); if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) - pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", - cmd->se_tfo->fabric_name, - cmd->se_sess->se_node_acl->initiatorname, - cmd->t_task_cdb[0]); + pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", + cmd->se_tfo->fabric_name, + cmd->se_sess->se_node_acl->initiatorname, + cmd->t_task_cdb[0]); if (ret) return ret; -- cgit v1.2.3 From 80ed33c8ba932480be99f73dc3f29e1f7da1582c Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 10 Sep 2021 11:41:27 +0300 Subject: scsi: target: core: Add common tpg/enable attribute Many fabric modules provide their own implementation of the enable attribute in the tpg. Provide a way to remove code duplication in the fabric modules and automatically add an "enable" attribute if a fabric module has an implementation of fabric_enable_tpg(). Link: https://lore.kernel.org/r/20210910084133.17956-2-d.bogdanov@yadro.com Reviewed-by: Roman Bolshakov Reviewed-by: Mike Christie Signed-off-by: Dmitry Bogdanov Signed-off-by: Martin K. Petersen --- drivers/target/target_core_configfs.c | 1 + drivers/target/target_core_fabric_configfs.c | 78 +++++++++++++++++++++++++++- include/target/target_core_base.h | 1 + include/target/target_core_fabric.h | 1 + 4 files changed, 79 insertions(+), 2 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 102ec644bc8a..3b9e50c1ccef 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -490,6 +490,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo) * fabric driver unload of TFO->module to proceed. 
*/ rcu_barrier(); + kfree(t->tf_tpg_base_cit.ct_attrs); kfree(t); return; } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index fc7edc04ee09..0b65de9f2df1 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -815,8 +815,76 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = { .release = target_fabric_tpg_release, }; -TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL); +static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item, + char *page) +{ + return sysfs_emit(page, "%d\n", to_tpg(item)->enabled); +} + +static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item, + const char *page, + size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + int ret; + bool op; + + ret = strtobool(page, &op); + if (ret) + return ret; + + if (se_tpg->enabled == op) + return count; + + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op); + if (ret) + return ret; + + se_tpg->enabled = op; + + return count; +} + +CONFIGFS_ATTR(target_fabric_tpg_base_, enable); +static int +target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) +{ + struct config_item_type *cit = &tf->tf_tpg_base_cit; + struct configfs_attribute **attrs = NULL; + size_t nr_attrs = 0; + int i = 0; + + if (tf->tf_ops->tfc_tpg_base_attrs) + while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL) + nr_attrs++; + + if (tf->tf_ops->fabric_enable_tpg) + nr_attrs++; + + if (nr_attrs == 0) + goto done; + + /* + 1 for final NULL in the array */ + attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + if (tf->tf_ops->tfc_tpg_base_attrs) + for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++) + attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i]; + + if (tf->tf_ops->fabric_enable_tpg) + attrs[i] = &target_fabric_tpg_base_attr_enable; + +done: + cit->ct_item_ops = &target_fabric_tpg_base_item_ops; + cit->ct_attrs = attrs; + cit->ct_owner = tf->tf_ops->module; + pr_debug("Setup generic tpg_base\n"); + + return 0; +} /* End of tfc_tpg_base_cit */ /* Start of tfc_tpg_cit */ @@ -1028,12 +1096,18 @@ TF_CIT_SETUP_DRV(discovery, NULL, NULL); int target_fabric_setup_cits(struct target_fabric_configfs *tf) { + int ret; + target_fabric_setup_discovery_cit(tf); target_fabric_setup_wwn_cit(tf); target_fabric_setup_wwn_fabric_stats_cit(tf); target_fabric_setup_wwn_param_cit(tf); target_fabric_setup_tpg_cit(tf); - target_fabric_setup_tpg_base_cit(tf); + + ret = target_fabric_setup_tpg_base_cit(tf); + if (ret) + return ret; + target_fabric_setup_tpg_port_cit(tf); target_fabric_setup_tpg_port_stat_cit(tf); target_fabric_setup_tpg_lun_cit(tf); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index fb11c7693b25..2130f102798d 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -900,6 +900,7 @@ struct se_portal_group { * Negative values can be used by fabric drivers for internal use TPGs. 
*/ int proto_id; + bool enabled; /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ atomic_t tpg_pr_ref_count; /* Spinlock for adding/removing ACLed Nodes */ diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 3c5ade7a04a6..38f0662476d1 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -89,6 +89,7 @@ struct target_core_fabric_ops { void (*add_wwn_groups)(struct se_wwn *); struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, const char *); + int (*fabric_enable_tpg)(struct se_portal_group *se_tpg, bool enable); void (*fabric_drop_tpg)(struct se_portal_group *); int (*fabric_post_link)(struct se_portal_group *, struct se_lun *); -- cgit v1.2.3 From 382731ec01b376ab32b29b9463e6718bbc18467b Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 10 Sep 2021 11:41:28 +0300 Subject: scsi: target: iscsi: Replace tpg enable attr with ops.enable Remove tpg/enable attribute. Add fabric ops enable_tpg implementation instead. Link: https://lore.kernel.org/r/20210910084133.17956-3-d.bogdanov@yadro.com Reviewed-by: Roman Bolshakov Reviewed-by: Mike Christie Signed-off-by: Dmitry Bogdanov Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/iscsi_target_configfs.c | 91 ++++++++++------------------ 1 file changed, 32 insertions(+), 59 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f4a24fa5058e..2a9de24a8bbe 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1005,74 +1005,15 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = { /* Start items for lio_target_tpg_cit */ -static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct iscsi_portal_group *tpg = container_of(se_tpg, - struct iscsi_portal_group, tpg_se_tpg); - ssize_t len; - - spin_lock(&tpg->tpg_state_lock); - len = sprintf(page, "%d\n", - (tpg->tpg_state == TPG_STATE_ACTIVE) ? 
1 : 0); - spin_unlock(&tpg->tpg_state_lock); - - return len; -} - -static ssize_t lio_target_tpg_enable_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct iscsi_portal_group *tpg = container_of(se_tpg, - struct iscsi_portal_group, tpg_se_tpg); - u32 op; - int ret; - - ret = kstrtou32(page, 0, &op); - if (ret) - return ret; - if ((op != 1) && (op != 0)) { - pr_err("Illegal value for tpg_enable: %u\n", op); - return -EINVAL; - } - - ret = iscsit_get_tpg(tpg); - if (ret < 0) - return -EINVAL; - - if (op) { - ret = iscsit_tpg_enable_portal_group(tpg); - if (ret < 0) - goto out; - } else { - /* - * iscsit_tpg_disable_portal_group() assumes force=1 - */ - ret = iscsit_tpg_disable_portal_group(tpg, 1); - if (ret < 0) - goto out; - } - - iscsit_put_tpg(tpg); - return count; -out: - iscsit_put_tpg(tpg); - return -EINVAL; -} - - static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item, char *page) { return target_show_dynamic_sessions(to_tpg(item), page); } -CONFIGFS_ATTR(lio_target_tpg_, enable); CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions); static struct configfs_attribute *lio_target_tpg_attrs[] = { - &lio_target_tpg_attr_enable, &lio_target_tpg_attr_dynamic_sessions, NULL, }; @@ -1129,6 +1070,37 @@ free_out: return NULL; } +static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg, + bool enable) +{ + struct iscsi_portal_group *tpg = container_of(se_tpg, + struct iscsi_portal_group, tpg_se_tpg); + int ret; + + ret = iscsit_get_tpg(tpg); + if (ret < 0) + return -EINVAL; + + if (enable) { + ret = iscsit_tpg_enable_portal_group(tpg); + if (ret < 0) + goto out; + } else { + /* + * iscsit_tpg_disable_portal_group() assumes force=1 + */ + ret = iscsit_tpg_disable_portal_group(tpg, 1); + if (ret < 0) + goto out; + } + + iscsit_put_tpg(tpg); + return 0; +out: + iscsit_put_tpg(tpg); + return -EINVAL; +} + static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg; @@ -1556,6 +1528,7 @@ const struct target_core_fabric_ops iscsi_ops = { .fabric_drop_wwn = lio_target_call_coredeltiqn, .add_wwn_groups = lio_target_add_wwn_groups, .fabric_make_tpg = lio_target_tiqn_addtpg, + .fabric_enable_tpg = lio_target_tiqn_enabletpg, .fabric_drop_tpg = lio_target_tiqn_deltpg, .fabric_make_np = lio_target_call_addnptotpg, .fabric_drop_np = lio_target_call_delnpfromtpg, -- cgit v1.2.3 From fb00af92e5db24f69bba8c05a2c1926bf4a5cdc6 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 10 Sep 2021 11:41:30 +0300 Subject: scsi: target: sbp: Replace enable attr with ops.enable Remove tpg/enable attribute. Add fabric ops enable_tpg implementation instead. Link: https://lore.kernel.org/r/20210910084133.17956-5-d.bogdanov@yadro.com Reviewed-by: Roman Bolshakov Reviewed-by: Bart Van Assche Reviewed-by: Mike Christie Signed-off-by: Dmitry Bogdanov Signed-off-by: Martin K. 
Petersen --- drivers/target/sbp/sbp_target.c | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index b9f9fb5d7e63..504670994fb4 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -2125,32 +2125,13 @@ static ssize_t sbp_tpg_directory_id_store(struct config_item *item, return count; } -static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page) +static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable) { - struct se_portal_group *se_tpg = to_tpg(item); struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); struct sbp_tport *tport = tpg->tport; - return sprintf(page, "%d\n", tport->enable); -} - -static ssize_t sbp_tpg_enable_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); - struct sbp_tport *tport = tpg->tport; - unsigned long val; int ret; - if (kstrtoul(page, 0, &val) < 0) - return -EINVAL; - if ((val != 0) && (val != 1)) - return -EINVAL; - - if (tport->enable == val) - return count; - - if (val) { + if (enable) { if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { pr_err("Cannot enable a target with no LUNs!\n"); return -EINVAL; @@ -2165,7 +2146,7 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item, spin_unlock_bh(&se_tpg->session_lock); } - tport->enable = val; + tport->enable = enable; ret = sbp_update_unit_directory(tport); if (ret < 0) { @@ -2173,15 +2154,13 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item, return ret; } - return count; + return 0; } CONFIGFS_ATTR(sbp_tpg_, directory_id); -CONFIGFS_ATTR(sbp_tpg_, enable); static struct configfs_attribute *sbp_tpg_base_attrs[] = { &sbp_tpg_attr_directory_id, - &sbp_tpg_attr_enable, NULL, }; @@ -2319,6 +2298,7 @@ static const struct target_core_fabric_ops sbp_ops = { .fabric_make_wwn = sbp_make_tport, .fabric_drop_wwn = sbp_drop_tport, .fabric_make_tpg = sbp_make_tpg, + .fabric_enable_tpg = sbp_enable_tpg, .fabric_drop_tpg = sbp_drop_tpg, .fabric_post_link = sbp_post_link_lun, .fabric_pre_unlink = sbp_pre_unlink_lun, -- cgit v1.2.3 From c20bda341946c8ee77baec3bec7c5cd615ddf869 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 27 Sep 2021 17:43:44 -0500 Subject: scsi: target: tcmu: Use struct_size() helper in kmalloc() Make use of the struct_size() helper instead of an open-coded version, in order to avoid any potential type mistakes or integer overflows that, in the worst scenario, could lead to heap overflows. Link: https://github.com/KSPP/linux/issues/160 Link: https://lore.kernel.org/r/20210927224344.GA190701@embeddedor Reviewed-by: Bodo Stroesser Signed-off-by: Gustavo A. R. Silva Signed-off-by: Martin K. 
Petersen --- drivers/target/target_core_user.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9f552f48084c..dc220fad06fa 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1255,7 +1255,6 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, { int i = 0, cmd_cnt = 0; bool unqueued = false; - uint16_t *cmd_ids = NULL; struct tcmu_cmd *cmd; struct se_cmd *se_cmd; struct tcmu_tmr *tmr; @@ -1292,7 +1291,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); - tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO); + tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO); if (!tmr) goto unlock; -- cgit v1.2.3 From b9d82b7dea2c109d46c2b2a065a01a62286ed275 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 7 Oct 2021 13:46:09 -0700 Subject: scsi: target: tcm_loop: Call scsi_done() directly Conditional statements are faster than indirect calls. Hence call scsi_done() directly. Link: https://lore.kernel.org/r/20211007204618.2196847-9-bvanassche@acm.org Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/target/loopback/tcm_loop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 52db28d868d5..4407b56aa6d1 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -71,7 +71,7 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd) if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); else - sc->scsi_done(sc); + scsi_done(sc); } static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) @@ -165,7 +165,7 @@ static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd) return; out_done: - sc->scsi_done(sc); + scsi_done(sc); } /* -- cgit v1.2.3 From 7f96c7a67e4018317570bb8678e1c44265aba897 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Wed, 13 Oct 2021 19:54:47 +0530 Subject: scsi: target: cxgbit: Increase max DataSegmentLength The current value of max DataSegmentLength is 8K. T5/T6 adapters support DataSegmentLength up to 16K. Increase the max DataSegmentLength. Link: https://lore.kernel.org/r/1634135087-4996-1-git-send-email-varun@chelsio.com Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen --- drivers/target/iscsi/cxgbit/cxgbit_main.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index bd37f2afadea..c6678dc8dd41 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c @@ -33,11 +33,18 @@ static void cxgbit_set_mdsl(struct cxgbit_device *cdev) struct cxgb4_lld_info *lldi = &cdev->lldi; u32 mdsl; -#define ULP2_MAX_PKT_LEN 16224 -#define ISCSI_PDU_NONPAYLOAD_LEN 312 - mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN, - ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN); - mdsl = min_t(u32, mdsl, 8192); +#define CXGBIT_T5_MAX_PDU_LEN 16224 +#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */ + if (is_t5(lldi->adapter_type)) { + mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN, + CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN); + } else { + mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN; + mdsl = min(mdsl, 16384U); + } + + mdsl = round_down(mdsl, 4); + mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE); mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE); cdev->mdsl = mdsl; -- cgit v1.2.3 From d1e51ea6bf5f72f67937f4446a5b6d56330f9d79 Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Wed, 13 Oct 2021 19:55:09 +0530 Subject: scsi: target: cxgbit: Enable Delayed ACK Enable Delayed ACK to reduce the number of TCP ACKs. Link: https://lore.kernel.org/r/1634135109-5044-1-git-send-email-varun@chelsio.com Signed-off-by: Varun Prakash Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/cxgbit/cxgbit_cm.c | 8 +++----- drivers/target/iscsi/cxgbit/cxgbit_target.c | 28 +++++++++++++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 518ded214e74..da31a308a064 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -836,11 +836,13 @@ static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi) csk->rcv_win = CXGBIT_10G_RCV_WIN; if (scale) csk->rcv_win *= scale; + csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10); #define CXGBIT_10G_SND_WIN (256 * 1024) csk->snd_win = CXGBIT_10G_SND_WIN; if (scale) csk->snd_win *= scale; + csk->snd_win = min(csk->snd_win, 512U * 1024); pr_debug("%s snd_win %d rcv_win %d\n", __func__, csk->snd_win, csk->rcv_win); @@ -1065,7 +1067,7 @@ int cxgbit_rx_data_ack(struct cxgbit_sock *csk) if (!skb) return -1; - credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) | + credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) | RX_CREDITS_V(csk->rx_credits); cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx, @@ -1197,7 +1199,6 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req) if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN_V(1); - opt2 |= RX_COALESCE_V(3); opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); opt2 |= T5_ISS_F; @@ -1646,9 +1647,6 @@ cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb) csk->rcv_nxt = rcv_isn; - if (csk->rcv_win > (RCV_BUFSIZ_M << 10)) - csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10)); - csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); cxgbit_set_emss(csk, tcp_opt); dst_confirm(csk->dst); diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index 282297ffc404..d314ee120a48 100644 
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c @@ -189,8 +189,8 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) | FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode); - req->tunnel_to_proxy = htonl((wr_ulp_mode) | force | - FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1)); + req->tunnel_to_proxy = htonl(wr_ulp_mode | force | + FW_OFLD_TX_DATA_WR_SHOVE_F); } static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb) @@ -1531,7 +1531,7 @@ out: return ret; } -static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) { struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); @@ -1557,6 +1557,24 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) return ret; } +static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) +{ + struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb); + int ret; + + ret = cxgbit_process_lro_skb(csk, skb); + if (ret) + return ret; + + csk->rx_credits += lro_cb->pdu_totallen; + if (csk->rx_credits >= csk->rcv_win) { + csk->rx_credits = 0; + cxgbit_rx_data_ack(csk); + } + + return 0; +} + static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) { struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; @@ -1564,9 +1582,9 @@ static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) { if (is_t5(lldi->adapter_type)) - ret = cxgbit_rx_lro_skb(csk, skb); + ret = cxgbit_t5_rx_lro_skb(csk, skb); else - ret = cxgbit_process_lro_skb(csk, skb); + ret = cxgbit_rx_lro_skb(csk, skb); } __kfree_skb(skb); -- cgit v1.2.3 From 1d2ac7b69d6a984d0f58b82d0f658ebd9aa05428 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Wed, 13 Oct 2021 19:16:06 +0200 Subject: scsi: target: tcmu: Allocate zeroed pages for data area Tcmu populates the data area (used for communication with userspace) with pages that are allocated by calling alloc_page(GFP_NOIO). Therefore the previous content of the allocated pages is exposed to user space. Avoid this by adding the __GFP_ZERO flag. Zeroing the pages has (nearly) no effect on tcmu throughput, because allocated pages are re-used for the data transfers of later SCSI cmds. Link: https://lore.kernel.org/r/20211013171606.25197-1-bostroesser@gmail.com Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen --- drivers/target/target_core_user.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dc220fad06fa..7b2a89a67cdb 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -523,8 +523,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, rcu_read_unlock(); for (i = cnt; i < page_cnt; i++) { - /* try to get new page from the mm */ - page = alloc_page(GFP_NOIO); + /* try to get new zeroed page from the mm */ + page = alloc_page(GFP_NOIO | __GFP_ZERO); if (!page) break; -- cgit v1.2.3 From 945a160794a90442329fdd6ff30f02a5c5b39481 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Sep 2021 21:04:18 -0500 Subject: scsi: target: Fix ordered CMD_T_SENT handling We can race where target_handle_task_attr() has put the cmd on the delayed_cmd_list. 
Then target_restart_delayed_cmds() has removed it and set CMD_T_SENT, but target_execute_cmd() then clears that bit again. This patch moves the clearing to before we've put the cmd on the list. Link: https://lore.kernel.org/r/20210930020422.92578-2-michael.christie@oracle.com Reviewed-by: Lee Duncan Signed-off-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/target_core_transport.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4a0055ab9151..64e0477c7644 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2200,6 +2200,10 @@ static bool target_handle_task_attr(struct se_cmd *cmd) if (atomic_read(&dev->dev_ordered_sync) == 0) return false; + spin_lock_irq(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + spin_lock(&dev->delayed_cmd_lock); list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); spin_unlock(&dev->delayed_cmd_lock); @@ -2228,12 +2232,8 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_write_prot_action(cmd)) return; - if (target_handle_task_attr(cmd)) { - spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_SENT; - spin_unlock_irq(&cmd->t_state_lock); + if (target_handle_task_attr(cmd)) return; - } __target_execute_cmd(cmd, true); } -- cgit v1.2.3 From ed1227e080990ffec5bf39006ec8a57358e6689a Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Sep 2021 21:04:19 -0500 Subject: scsi: target: Fix ordered tag handling This patch fixes the following bugs: 1. If there are multiple ordered cmds queued and multiple simple cmds completing, target_restart_delayed_cmds() could be called on different CPUs and each instance could start an ordered cmd. They could then run in different orders than they were queued. 2. target_restart_delayed_cmds() and target_handle_task_attr() can race where: 1. target_handle_task_attr() has passed the simple_cmds == 0 check. 2. transport_complete_task_attr() then decrements simple_cmds to 0. 3. transport_complete_task_attr() runs target_restart_delayed_cmds() and it does not see any cmds on the delayed_cmd_list. 4. target_handle_task_attr() adds the cmd to the delayed_cmd_list. The cmd will then end up timing out. 3. If we are sent > 1 ordered cmds and simple_cmds == 0, we can execute them out of order, because target_handle_task_attr() will hit that simple_cmds check first and return false for all ordered cmds sent. 4. We run target_restart_delayed_cmds() after every cmd completion, so if there is more than 1 simple cmd running, we start executing ordered cmds after that first cmd instead of waiting for all of them to complete. 5. Ordered cmds are not supposed to start until HEAD OF QUEUE and all older cmds have completed, not just the simple cmds. 6. It's not a bug but it doesn't make sense to take the delayed_cmd_lock for every cmd completion when ordered cmds are almost never used. Just replacing that lock with an atomic increases IOPs by up to 10% when completions are spread over multiple CPUs and there are multiple sessions/MQs/threads accessing the same device. This patch moves the queued delayed handling to a per-device work to serialize the cmd executions for each device and adds a new counter to track HEAD_OF_QUEUE and SIMPLE cmds. We can then check the new counter to determine when to run the work on the completion path. 
Link: https://lore.kernel.org/r/20210930020422.92578-3-michael.christie@oracle.com Signed-off-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/target_core_device.c | 2 + drivers/target/target_core_internal.h | 1 + drivers/target/target_core_transport.c | 76 ++++++++++++++++++++++++---------- include/target/target_core_base.h | 6 ++- 4 files changed, 61 insertions(+), 24 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 8cb1fa0c0585..44bb380e7390 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -772,6 +772,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); spin_lock_init(&dev->t10_alua.lba_map_lock); + INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); + dev->t10_wwn.t10_dev = dev; /* * Use OpenFabrics IEEE Company ID: 00 14 05 diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index a343bcfa2180..a889a6237d9c 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -151,6 +151,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); void transport_clear_lun_ref(struct se_lun *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); void target_qf_do_work(struct work_struct *work); +void target_do_delayed_work(struct work_struct *work); bool target_check_wce(struct se_device *dev); bool target_check_fua(struct se_device *dev); void __target_execute_cmd(struct se_cmd *, bool); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 64e0477c7644..4a2e749eb182 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2173,32 +2173,35 @@ static bool target_handle_task_attr(struct se_cmd *cmd) */ switch (cmd->sam_task_attr) { case TCM_HEAD_TAG: + atomic_inc_mb(&dev->non_ordered); pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", cmd->t_task_cdb[0]); return false; case TCM_ORDERED_TAG: - atomic_inc_mb(&dev->dev_ordered_sync); + atomic_inc_mb(&dev->delayed_cmd_count); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", cmd->t_task_cdb[0]); - - /* - * Execute an ORDERED command if no other older commands - * exist that need to be completed first. - */ - if (!atomic_read(&dev->simple_cmds)) - return false; break; default: /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc_mb(&dev->simple_cmds); + atomic_inc_mb(&dev->non_ordered); + + if (atomic_read(&dev->delayed_cmd_count) == 0) + return false; break; } if (cmd->sam_task_attr != TCM_ORDERED_TAG) { + atomic_inc_mb(&dev->delayed_cmd_count); + /* + * We will account for this when we dequeue from the delayed + * list. + */ + atomic_dec_mb(&dev->non_ordered); + } spin_lock_irq(&cmd->t_state_lock); cmd->transport_state &= ~CMD_T_SENT; @@ -2210,6 +2213,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd) pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n", cmd->t_task_cdb[0], cmd->sam_task_attr); + /* + * We may have no non ordered cmds when this function started or we + * could have raced with the last simple/head cmd completing, so kick + * the delayed handler here. 
+ */ + schedule_work(&dev->delayed_cmd_work); return true; } @@ -2243,29 +2252,48 @@ EXPORT_SYMBOL(target_execute_cmd); * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary */ -static void target_restart_delayed_cmds(struct se_device *dev) +void target_do_delayed_work(struct work_struct *work) { - for (;;) { + struct se_device *dev = container_of(work, struct se_device, + delayed_cmd_work); + + spin_lock(&dev->delayed_cmd_lock); + while (!dev->ordered_sync_in_progress) { struct se_cmd *cmd; - spin_lock(&dev->delayed_cmd_lock); - if (list_empty(&dev->delayed_cmd_list)) { - spin_unlock(&dev->delayed_cmd_lock); + if (list_empty(&dev->delayed_cmd_list)) break; - } cmd = list_entry(dev->delayed_cmd_list.next, struct se_cmd, se_delayed_node); + + if (cmd->sam_task_attr == TCM_ORDERED_TAG) { + /* + * Check if we started with: + * [ordered] [simple] [ordered] + * and we are now at the last ordered so we have to wait + * for the simple cmd. + */ + if (atomic_read(&dev->non_ordered) > 0) + break; + + dev->ordered_sync_in_progress = true; + } + list_del(&cmd->se_delayed_node); + atomic_dec_mb(&dev->delayed_cmd_count); spin_unlock(&dev->delayed_cmd_lock); + if (cmd->sam_task_attr != TCM_ORDERED_TAG) + atomic_inc_mb(&dev->non_ordered); + cmd->transport_state |= CMD_T_SENT; __target_execute_cmd(cmd, true); - if (cmd->sam_task_attr == TCM_ORDERED_TAG) - break; + spin_lock(&dev->delayed_cmd_lock); } + spin_unlock(&dev->delayed_cmd_lock); } /* @@ -2283,14 +2311,17 @@ static void transport_complete_task_attr(struct se_cmd *cmd) goto restart; if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { - atomic_dec_mb(&dev->simple_cmds); + atomic_dec_mb(&dev->non_ordered); dev->dev_cur_ordered_id++; } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { + atomic_dec_mb(&dev->non_ordered); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { - atomic_dec_mb(&dev->dev_ordered_sync); + spin_lock(&dev->delayed_cmd_lock); + dev->ordered_sync_in_progress = false; + spin_unlock(&dev->delayed_cmd_lock); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", @@ -2299,7 +2330,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; restart: - target_restart_delayed_cmds(dev); + if (atomic_read(&dev->delayed_cmd_count) > 0) + schedule_work(&dev->delayed_cmd_work); } static void transport_complete_qf(struct se_cmd *cmd) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 2130f102798d..a7600b311b84 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -812,8 +812,9 @@ struct se_device { atomic_long_t read_bytes; atomic_long_t write_bytes; /* Active commands on this virtual SE device */ - atomic_t simple_cmds; - atomic_t dev_ordered_sync; + atomic_t non_ordered; + bool ordered_sync_in_progress; + atomic_t delayed_cmd_count; atomic_t dev_qf_count; u32 export_count; spinlock_t delayed_cmd_lock; @@ -834,6 +835,7 @@ struct se_device { struct list_head dev_sep_list; struct list_head dev_tmr_list; struct work_struct qf_work_queue; + struct work_struct delayed_cmd_work; struct list_head delayed_cmd_list; struct list_head qf_cmd_list; /* Pointer to associated SE HBA */ -- cgit v1.2.3 From 1283c0d1a32bb924324481586b5d6e8e76f676ba Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Sep 2021 21:04:20 -0500 Subject: 
scsi: target: Fix alua_tg_pt_gps_count tracking We can't free the tg_pt_gp in core_alua_set_tg_pt_gp_id() because it's still accessed via configfs. Its release must go through the normal configfs/refcount process. The max alua_tg_pt_gps_count check should probably have been done in core_alua_allocate_tg_pt_gp(), but with the current code userspace could have created 0x0000ffff + 1 groups and only set the ID for 0x0000ffff of them. Then it could have deleted a group with an ID set, and then set the ID for that extra group and it would work OK. It's unlikely, but just in case, this patch continues to allow that type of behavior and just fixes the kfree()-while-in-use bug. Link: https://lore.kernel.org/r/20210930020422.92578-4-michael.christie@oracle.com Signed-off-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/target_core_alua.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index cb1de1ecaaa6..bd0f2ce011dd 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1674,7 +1674,6 @@ int core_alua_set_tg_pt_gp_id( pr_err("Maximum ALUA alua_tg_pt_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); return -ENOSPC; } again: -- cgit v1.2.3 From 7324f47d4293ff50f489010735a4057defb1a5d6 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Sep 2021 21:04:21 -0500 Subject: scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path We are only holding the lun_tg_pt_gp_lock in target_alua_state_check() to make sure tg_pt_gp is not freed from under us while we copy the state, delay, and ID values. We can instead use RCU here to access the tg_pt_gp. With this patch IOPs can increase up to 10% for jobs like: fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=4k \ --ioengine=libaio --iodepth=64 --numjobs=N when there are multiple sessions (running that fio command to each /dev/sdX or using multipath and there are over 8 paths), or more than 8 queues for the loop or vhost with multiple threads case and numjobs > 8. Link: https://lore.kernel.org/r/20210930020422.92578-5-michael.christie@oracle.com Signed-off-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/target_core_alua.c | 61 ++++++++++++++++++++++----------------- include/target/target_core_base.h | 2 +- 2 files changed, 35 insertions(+), 28 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index bd0f2ce011dd..74944b914b4e 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) * this CDB was received upon to determine this value individually * for ALUA target port group. */ - spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); - tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp); if (tg_pt_gp) buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; - spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); } transport_kunmap_data_sg(cmd); @@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. 
*/ - spin_lock(&l_lun->lun_tg_pt_gp_lock); - l_tg_pt_gp = l_lun->lun_tg_pt_gp; + rcu_read_lock(); + l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp); if (!l_tg_pt_gp) { - spin_unlock(&l_lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); pr_err("Unable to access l_lun->tg_pt_gp\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { - spin_unlock(&l_lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); pr_debug("Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICIT_ALUA is disabled\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; - spin_unlock(&l_lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); ptr = &buf[4]; /* Skip over RESERVED area in header */ @@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd) " target port\n"); return TCM_ALUA_OFFLINE; } - - if (!lun->lun_tg_pt_gp) + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); + if (!tg_pt_gp) { + rcu_read_unlock(); return 0; + } - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; - - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); /* * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional * statement so the compiler knows explicitly to check this case first. @@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp *tg_pt_gp; int trans_delay_msecs; - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (!tg_pt_gp) { - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); pr_err("Unable to complete secondary state" " transition\n"); return -EINVAL; @@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state( "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); /* * Do the optional transition delay after we set the secondary * ALUA access state. 
@@ -1754,13 +1754,14 @@ void core_alua_free_tg_pt_gp( __target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); } else - lun->lun_tg_pt_gp = NULL; + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); spin_unlock(&lun->lun_tg_pt_gp_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock); } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); + synchronize_rcu(); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); } @@ -1805,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun, assert_spin_locked(&lun->lun_tg_pt_gp_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock); - lun->lun_tg_pt_gp = tg_pt_gp; + rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp); list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); tg_pt_gp->tg_pt_gp_members++; spin_lock(&lun->lun_deve_lock); @@ -1822,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun, spin_lock(&lun->lun_tg_pt_gp_lock); __target_attach_tg_pt_gp(lun, tg_pt_gp); spin_unlock(&lun->lun_tg_pt_gp_lock); + synchronize_rcu(); } static void __target_detach_tg_pt_gp(struct se_lun *lun, @@ -1834,7 +1836,7 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun, tg_pt_gp->tg_pt_gp_members--; spin_unlock(&tg_pt_gp->tg_pt_gp_lock); - lun->lun_tg_pt_gp = NULL; + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); } void target_detach_tg_pt_gp(struct se_lun *lun) @@ -1842,10 +1844,12 @@ void target_detach_tg_pt_gp(struct se_lun *lun) struct t10_alua_tg_pt_gp *tg_pt_gp; spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); if (tg_pt_gp) __target_detach_tg_pt_gp(lun, tg_pt_gp); spin_unlock(&lun->lun_tg_pt_gp_lock); + synchronize_rcu(); } ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) @@ -1854,8 +1858,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) struct t10_alua_tg_pt_gp *tg_pt_gp; ssize_t len = 0; - spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + rcu_read_lock(); + tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp); if (tg_pt_gp) { tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" @@ -1871,7 +1875,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) "Offline" : "None", core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); } - spin_unlock(&lun->lun_tg_pt_gp_lock); + rcu_read_unlock(); return len; } @@ -1918,7 +1922,8 @@ ssize_t core_alua_store_tg_pt_gp_info( } spin_lock(&lun->lun_tg_pt_gp_lock); - tg_pt_gp = lun->lun_tg_pt_gp; + tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, + lockdep_is_held(&lun->lun_tg_pt_gp_lock)); if (tg_pt_gp) { /* * Clearing an existing tg_pt_gp association, and replacing @@ -1941,7 +1946,7 @@ ssize_t core_alua_store_tg_pt_gp_info( dev->t10_alua.default_tg_pt_gp); spin_unlock(&lun->lun_tg_pt_gp_lock); - return count; + goto sync_rcu; } __target_detach_tg_pt_gp(lun, tg_pt_gp); move = 1; @@ -1958,6 +1963,8 @@ ssize_t core_alua_store_tg_pt_gp_info( tg_pt_gp_new->tg_pt_gp_id); core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); +sync_rcu: + synchronize_rcu(); return count; } diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index a7600b311b84..c2b36f7d917d 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -749,7 +749,7 @@ struct se_lun { /* ALUA target port group linkage */ struct list_head lun_tg_pt_gp_link; - struct t10_alua_tg_pt_gp *lun_tg_pt_gp; + struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp; spinlock_t lun_tg_pt_gp_lock; 
struct se_portal_group *lun_tpg; -- cgit v1.2.3 From f9793d649c29bad86651620ec49888ee9743cbb6 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 29 Sep 2021 21:04:22 -0500 Subject: scsi: target: Perform ALUA group changes in one step When userspace changes the LUN's ALUA group, it will set the LUN's group to NULL and then to the new group. Before the new group is set, target_alua_state_check() will return 0 and allow the I/O to execute. This patch skips the NULL stage and just swaps in the new group. Link: https://lore.kernel.org/r/20210930020422.92578-6-michael.christie@oracle.com Signed-off-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/target_core_alua.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 74944b914b4e..b56ef8af66e7 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1835,8 +1835,6 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun, list_del_init(&lun->lun_tg_pt_gp_link); tg_pt_gp->tg_pt_gp_members--; spin_unlock(&tg_pt_gp->tg_pt_gp_lock); - - rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); } void target_detach_tg_pt_gp(struct se_lun *lun) @@ -1846,12 +1844,25 @@ void target_detach_tg_pt_gp(struct se_lun *lun) spin_lock(&lun->lun_tg_pt_gp_lock); tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp, lockdep_is_held(&lun->lun_tg_pt_gp_lock)); - if (tg_pt_gp) + if (tg_pt_gp) { __target_detach_tg_pt_gp(lun, tg_pt_gp); + rcu_assign_pointer(lun->lun_tg_pt_gp, NULL); + } spin_unlock(&lun->lun_tg_pt_gp_lock); synchronize_rcu(); } +static void target_swap_tg_pt_gp(struct se_lun *lun, + struct t10_alua_tg_pt_gp *old_tg_pt_gp, + struct t10_alua_tg_pt_gp *new_tg_pt_gp) +{ + assert_spin_locked(&lun->lun_tg_pt_gp_lock); + + if (old_tg_pt_gp) + __target_detach_tg_pt_gp(lun, old_tg_pt_gp); + __target_attach_tg_pt_gp(lun, new_tg_pt_gp); +} + ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) { struct config_item *tg_pt_ci; @@ -1941,18 +1952,16 @@ ssize_t core_alua_store_tg_pt_gp_info( &tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id); - __target_detach_tg_pt_gp(lun, tg_pt_gp); - __target_attach_tg_pt_gp(lun, + target_swap_tg_pt_gp(lun, tg_pt_gp, dev->t10_alua.default_tg_pt_gp); spin_unlock(&lun->lun_tg_pt_gp_lock); goto sync_rcu; } - __target_detach_tg_pt_gp(lun, tg_pt_gp); move = 1; } - __target_attach_tg_pt_gp(lun, tg_pt_gp_new); + target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new); spin_unlock(&lun->lun_tg_pt_gp_lock); pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" " Target Port Group: alua/%s, ID: %hu\n", (move) ? -- cgit v1.2.3 From 1b74ab77d62fe542af83e4cac95e8995032a30db Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 18 Oct 2021 08:50:52 +0200 Subject: scsi: target: core: Stop using bdevname() Just use the %pg format specifier instead. Link: https://lore.kernel.org/r/20211018065052.1822500-1-hch@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: Martin K. 
Petersen --- drivers/target/target_core_iblock.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/target') diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 4069a1edcfa3..3113c4440735 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -634,12 +634,10 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); struct block_device *bd = ib_dev->ibd_bd; - char buf[BDEVNAME_SIZE]; ssize_t bl = 0; if (bd) - bl += sprintf(b + bl, "iBlock device: %s", - bdevname(bd, buf)); + bl += sprintf(b + bl, "iBlock device: %pg", bd); if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH) bl += sprintf(b + bl, " UDEV PATH: %s", ib_dev->ibd_udev_path); -- cgit v1.2.3
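
For reference, a minimal sketch of the pattern the last patch switches to; the helper name below is hypothetical and not part of the patch. The %pg vsprintf extension prints a block device name (including a partition suffix such as sda1) directly from a struct block_device pointer, which is what allows dropping bdevname() and its BDEVNAME_SIZE stack buffer:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Hypothetical helper: format a block device name without bdevname(). */
static ssize_t show_block_dev_name(struct block_device *bd, char *b)
{
	/* Old pattern: char buf[BDEVNAME_SIZE]; sprintf(b, "%s", bdevname(bd, buf)); */
	return sprintf(b, "iBlock device: %pg", bd);
}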