From e3d7e4c30c494431d492864448fbb16cdd7a6178 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 19 Feb 2014 17:50:22 +0200 Subject: IB/isert: Introduce isert_map/unmap_data_buf export map/unmap data buffer to a routine that may be used in various places in the code and keep the mapping data in a designated descriptor. Also, let isert_fast_reg_mr to decide weather to use global MR or do fast registration. This commit does not change any functionality. (Fix context change for v3.14-rc6 code - nab) Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 238 ++++++++++++++++---------------- drivers/infiniband/ulp/isert/ib_isert.h | 14 +- 2 files changed, 127 insertions(+), 125 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 8ee228e9ab5a..d0ca3328aa4d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1392,19 +1392,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, } } +static int +isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct scatterlist *sg, u32 nents, u32 length, u32 offset, + enum iser_ib_op_code op, struct isert_data_buf *data) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + + data->dma_dir = op == ISER_IB_RDMA_WRITE ? + DMA_TO_DEVICE : DMA_FROM_DEVICE; + + data->len = length - offset; + data->offset = offset; + data->sg_off = data->offset / PAGE_SIZE; + + data->sg = &sg[data->sg_off]; + data->nents = min_t(unsigned int, nents - data->sg_off, + ISCSI_ISER_SG_TABLESIZE); + data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE * + PAGE_SIZE); + + data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, + data->dma_dir); + if (unlikely(!data->dma_nents)) { + pr_err("Cmd: unable to dma map SGs %p\n", sg); + return -EINVAL; + } + + pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", + isert_cmd, data->dma_nents, data->sg, data->nents, data->len); + + return 0; +} + +static void +isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + + ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir); + memset(data, 0, sizeof(*data)); +} + + + static void isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; pr_debug("isert_unmap_cmd: %p\n", isert_cmd); - if (wr->sge) { + + if (wr->data.sg) { pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); - ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
- DMA_TO_DEVICE : DMA_FROM_DEVICE); - wr->sge = NULL; + isert_unmap_data_buf(isert_conn, &wr->data); } if (wr->send_wr) { @@ -1424,7 +1465,6 @@ static void isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; LIST_HEAD(unmap_list); pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); @@ -1438,12 +1478,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) wr->fr_desc = NULL; } - if (wr->sge) { + if (wr->data.sg) { pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); - ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - wr->sge = NULL; + isert_unmap_data_buf(isert_conn, &wr->data); } wr->ib_sge = NULL; @@ -1548,7 +1585,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); - cmd->write_data_done = wr->cur_rdma_length; + cmd->write_data_done = wr->data.len; wr->send_wr_num = 0; pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); @@ -2099,54 +2136,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); struct isert_conn *isert_conn = (struct isert_conn *)conn->context; - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_data_buf *data = &wr->data; struct ib_send_wr *send_wr; struct ib_sge *ib_sge; - struct scatterlist *sg_start; - u32 sg_off = 0, sg_nents; - u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0; - int ret = 0, count, i, ib_sge_cnt; + u32 offset, data_len, data_left, rdma_write_max, va_offset = 0; + int ret = 0, i, ib_sge_cnt; - if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { - data_left = se_cmd->data_length; - } else { - sg_off = cmd->write_data_done / PAGE_SIZE; - data_left = se_cmd->data_length - cmd->write_data_done; - offset = cmd->write_data_done; - isert_cmd->tx_desc.isert_cmd = isert_cmd; - } + isert_cmd->tx_desc.isert_cmd = isert_cmd; - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; - sg_nents = se_cmd->t_data_nents - sg_off; + offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0; + ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, + se_cmd->t_data_nents, se_cmd->data_length, + offset, wr->iser_ib_op, &wr->data); + if (ret) + return ret; - count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? 
- DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (unlikely(!count)) { - pr_err("Cmd: %p unrable to map SGs\n", isert_cmd); - return -EINVAL; - } - wr->sge = sg_start; - wr->num_sge = sg_nents; - wr->cur_rdma_length = data_left; - pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", - isert_cmd, count, sg_start, sg_nents, data_left); + data_left = data->len; + offset = data->offset; - ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL); + ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); if (!ib_sge) { pr_warn("Unable to allocate ib_sge\n"); ret = -ENOMEM; - goto unmap_sg; + goto unmap_cmd; } wr->ib_sge = ib_sge; - wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge); + wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge); wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, GFP_KERNEL); if (!wr->send_wr) { pr_debug("Unable to allocate wr->send_wr\n"); ret = -ENOMEM; - goto unmap_sg; + goto unmap_cmd; } wr->isert_cmd = isert_cmd; @@ -2185,10 +2207,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } return 0; -unmap_sg: - ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); +unmap_cmd: + isert_unmap_data_buf(isert_conn, data); + return ret; } @@ -2232,10 +2253,10 @@ isert_map_fr_pagelist(struct ib_device *ib_dev, } static int -isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, - struct isert_conn *isert_conn, struct scatterlist *sg_start, - struct ib_sge *ib_sge, u32 sg_nents, u32 offset, - unsigned int data_len) +isert_fast_reg_mr(struct isert_conn *isert_conn, + struct fast_reg_descriptor *fr_desc, + struct isert_data_buf *mem, + struct ib_sge *sge) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct ib_send_wr fr_wr, inv_wr; @@ -2244,13 +2265,19 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, u32 page_off; u8 key; - sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE); - page_off = offset % PAGE_SIZE; + if (mem->dma_nents == 1) { + sge->lkey = isert_conn->conn_mr->lkey; + sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); + sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); + return 0; + } + + page_off = mem->offset % PAGE_SIZE; pr_debug("Use fr_desc %p sg_nents %d offset %u\n", - fr_desc, sg_nents, offset); + fr_desc, mem->nents, mem->offset); - pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, + pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, &fr_desc->data_frpl->page_list[0]); if (!fr_desc->valid) { @@ -2273,7 +2300,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl; fr_wr.wr.fast_reg.page_list_len = pagelist_len; fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; - fr_wr.wr.fast_reg.length = data_len; + fr_wr.wr.fast_reg.length = mem->len; fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey; fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; @@ -2289,12 +2316,12 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, } fr_desc->valid = false; - ib_sge->lkey = fr_desc->data_mr->lkey; - ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off; - ib_sge->length = data_len; + sge->lkey = fr_desc->data_mr->lkey; + sge->addr = fr_desc->data_frpl->page_list[0] + page_off; + sge->length = mem->len; pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", - ib_sge->addr, ib_sge->length, ib_sge->lkey); + sge->addr, sge->length, sge->lkey); return ret; } @@ -2305,54 +2332,43 @@ isert_reg_rdma(struct 
iscsi_conn *conn, struct iscsi_cmd *cmd, { struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); - struct isert_conn *isert_conn = (struct isert_conn *)conn->context; - struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_conn *isert_conn = conn->context; struct ib_send_wr *send_wr; - struct ib_sge *ib_sge; - struct scatterlist *sg_start; - struct fast_reg_descriptor *fr_desc; - u32 sg_off = 0, sg_nents; - u32 offset = 0, data_len, data_left, rdma_write_max; - int ret = 0, count; + struct fast_reg_descriptor *fr_desc = NULL; + u32 offset; + int ret = 0; unsigned long flags; - if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { - data_left = se_cmd->data_length; - } else { - offset = cmd->write_data_done; - sg_off = offset / PAGE_SIZE; - data_left = se_cmd->data_length - cmd->write_data_done; - isert_cmd->tx_desc.isert_cmd = isert_cmd; - } + isert_cmd->tx_desc.isert_cmd = isert_cmd; - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; - sg_nents = se_cmd->t_data_nents - sg_off; + offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0; + ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg, + se_cmd->t_data_nents, se_cmd->data_length, + offset, wr->iser_ib_op, &wr->data); + if (ret) + return ret; - count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (unlikely(!count)) { - pr_err("Cmd: %p unrable to map SGs\n", isert_cmd); - return -EINVAL; + if (wr->data.dma_nents != 1) { + spin_lock_irqsave(&isert_conn->conn_lock, flags); + fr_desc = list_first_entry(&isert_conn->conn_fr_pool, + struct fast_reg_descriptor, list); + list_del(&fr_desc->list); + spin_unlock_irqrestore(&isert_conn->conn_lock, flags); + wr->fr_desc = fr_desc; } - wr->sge = sg_start; - wr->num_sge = sg_nents; - pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", - isert_cmd, count, sg_start, sg_nents, data_left); - memset(&wr->s_ib_sge, 0, sizeof(*ib_sge)); - ib_sge = &wr->s_ib_sge; - wr->ib_sge = ib_sge; + ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge); + if (ret) + goto unmap_cmd; + wr->ib_sge = &wr->s_ib_sge; wr->send_wr_num = 1; memset(&wr->s_send_wr, 0, sizeof(*send_wr)); wr->send_wr = &wr->s_send_wr; - wr->isert_cmd = isert_cmd; - rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE; send_wr = &isert_cmd->rdma_wr.s_send_wr; - send_wr->sg_list = ib_sge; + send_wr->sg_list = &wr->s_ib_sge; send_wr->num_sge = 1; send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { @@ -2368,37 +2384,15 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, send_wr->send_flags = IB_SEND_SIGNALED; } - data_len = min(data_left, rdma_write_max); - wr->cur_rdma_length = data_len; - - /* if there is a single dma entry, dma mr is sufficient */ - if (count == 1) { - ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]); - ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]); - ib_sge->lkey = isert_conn->conn_mr->lkey; - wr->fr_desc = NULL; - } else { + return 0; +unmap_cmd: + if (fr_desc) { spin_lock_irqsave(&isert_conn->conn_lock, flags); - fr_desc = list_first_entry(&isert_conn->conn_fr_pool, - struct fast_reg_descriptor, list); - list_del(&fr_desc->list); + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); spin_unlock_irqrestore(&isert_conn->conn_lock, flags); - wr->fr_desc = fr_desc; - - ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start, - ib_sge, sg_nents, offset, data_len); - if (ret) { 
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); - goto unmap_sg; - } } + isert_unmap_data_buf(isert_conn, &wr->data); - return 0; - -unmap_sg: - ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, - (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? - DMA_TO_DEVICE : DMA_FROM_DEVICE); return ret; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index f6ae7f5dd408..8a02c4ebe373 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -57,18 +57,26 @@ struct fast_reg_descriptor { bool valid; }; +struct isert_data_buf { + struct scatterlist *sg; + int nents; + u32 sg_off; + u32 len; /* cur_rdma_length */ + u32 offset; + unsigned int dma_nents; + enum dma_data_direction dma_dir; +}; + struct isert_rdma_wr { struct list_head wr_list; struct isert_cmd *isert_cmd; enum iser_ib_op_code iser_ib_op; struct ib_sge *ib_sge; struct ib_sge s_ib_sge; - int num_sge; - struct scatterlist *sge; int send_wr_num; struct ib_send_wr *send_wr; struct ib_send_wr s_send_wr; - u32 cur_rdma_length; + struct isert_data_buf data; struct fast_reg_descriptor *fr_desc; }; -- cgit v1.2.3 From d3e125dac1f4fd983bb6d8d654f152f243f7c953 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 19 Feb 2014 17:50:23 +0200 Subject: IB/isert: Initialize T10-PI resources Introduce pi_context to hold relevant RDMA protection resources. We eliminate data_key_valid boolean and replace it with indicators container to indicate: - Is the descriptor protected (registered via signature MR) - Is the data_mr key valid (can spare LOCAL_INV WR) - Is the prot_mr key valid (can spare LOCAL_INV WR) - Is the sig_mr key valid (can spare LOCAL_INV WR) Upon connection establishment check if network portal is T10-PI enabled and allocate T10-PI resources if necessary, allocate signature enabled memory regions and mark connection queue-pair as signature enabled. (Fix context change for v3.14-rc6 code - nab) Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 104 ++++++++++++++++++++++++++++---- drivers/infiniband/ulp/isert/ib_isert.h | 23 +++++-- 2 files changed, 110 insertions(+), 17 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d0ca3328aa4d..72063147933c 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -87,7 +87,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr) } static int -isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) +isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, + u8 protection) { struct isert_device *device = isert_conn->conn_device; struct ib_qp_init_attr attr; @@ -119,6 +120,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id) attr.cap.max_recv_sge = 1; attr.sq_sig_type = IB_SIGNAL_REQ_WR; attr.qp_type = IB_QPT_RC; + if (protection) + attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; pr_debug("isert_conn_setup_qp cma_id->device: %p\n", cma_id->device); @@ -236,13 +239,18 @@ isert_create_device_ib_res(struct isert_device *device) device->unreg_rdma_mem = isert_unmap_cmd; } + /* Check signature cap */ + device->pi_capable = dev_attr->device_cap_flags & + IB_DEVICE_SIGNATURE_HANDOVER ? 
true : false; + device->cqs_used = min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors); device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); pr_debug("Using %d CQs, device %s supports %d vectors support " - "Fast registration %d\n", + "Fast registration %d pi_capable %d\n", device->cqs_used, device->ib_device->name, - device->ib_device->num_comp_vectors, device->use_fastreg); + device->ib_device->num_comp_vectors, device->use_fastreg, + device->pi_capable); device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * device->cqs_used, GFP_KERNEL); if (!device->cq_desc) { @@ -395,6 +403,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) list_del(&fr_desc->list); ib_free_fast_reg_page_list(fr_desc->data_frpl); ib_dereg_mr(fr_desc->data_mr); + if (fr_desc->pi_ctx) { + ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); + ib_dereg_mr(fr_desc->pi_ctx->prot_mr); + ib_destroy_mr(fr_desc->pi_ctx->sig_mr); + kfree(fr_desc->pi_ctx); + } kfree(fr_desc); ++i; } @@ -406,8 +420,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn) static int isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, - struct fast_reg_descriptor *fr_desc) + struct fast_reg_descriptor *fr_desc, u8 protection) { + int ret; + fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, ISCSI_ISER_SG_TABLESIZE); if (IS_ERR(fr_desc->data_frpl)) { @@ -420,19 +436,73 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, if (IS_ERR(fr_desc->data_mr)) { pr_err("Failed to allocate data frmr err=%ld\n", PTR_ERR(fr_desc->data_mr)); - ib_free_fast_reg_page_list(fr_desc->data_frpl); - return PTR_ERR(fr_desc->data_mr); + ret = PTR_ERR(fr_desc->data_mr); + goto err_data_frpl; } pr_debug("Create fr_desc %p page_list %p\n", fr_desc, fr_desc->data_frpl->page_list); + fr_desc->ind |= ISERT_DATA_KEY_VALID; - fr_desc->valid = true; + if (protection) { + struct ib_mr_init_attr mr_init_attr = {0}; + struct pi_context *pi_ctx; + + fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL); + if (!fr_desc->pi_ctx) { + pr_err("Failed to allocate pi context\n"); + ret = -ENOMEM; + goto err_data_mr; + } + pi_ctx = fr_desc->pi_ctx; + + pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, + ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(pi_ctx->prot_frpl)) { + pr_err("Failed to allocate prot frpl err=%ld\n", + PTR_ERR(pi_ctx->prot_frpl)); + ret = PTR_ERR(pi_ctx->prot_frpl); + goto err_pi_ctx; + } + + pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(pi_ctx->prot_mr)) { + pr_err("Failed to allocate prot frmr err=%ld\n", + PTR_ERR(pi_ctx->prot_mr)); + ret = PTR_ERR(pi_ctx->prot_mr); + goto err_prot_frpl; + } + fr_desc->ind |= ISERT_PROT_KEY_VALID; + + mr_init_attr.max_reg_descriptors = 2; + mr_init_attr.flags |= IB_MR_SIGNATURE_EN; + pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); + if (IS_ERR(pi_ctx->sig_mr)) { + pr_err("Failed to allocate signature enabled mr err=%ld\n", + PTR_ERR(pi_ctx->sig_mr)); + ret = PTR_ERR(pi_ctx->sig_mr); + goto err_prot_mr; + } + fr_desc->ind |= ISERT_SIG_KEY_VALID; + } + fr_desc->ind &= ~ISERT_PROTECTED; return 0; +err_prot_mr: + ib_dereg_mr(fr_desc->pi_ctx->prot_mr); +err_prot_frpl: + ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl); +err_pi_ctx: + kfree(fr_desc->pi_ctx); +err_data_mr: + ib_dereg_mr(fr_desc->data_mr); +err_data_frpl: + ib_free_fast_reg_page_list(fr_desc->data_frpl); + + return ret; } static int -isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) +isert_conn_create_fastreg_pool(struct 
isert_conn *isert_conn, u8 pi_support) { struct fast_reg_descriptor *fr_desc; struct isert_device *device = isert_conn->conn_device; @@ -449,7 +519,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) } ret = isert_create_fr_desc(device->ib_device, - isert_conn->conn_pd, fr_desc); + isert_conn->conn_pd, fr_desc, + pi_support); if (ret) { pr_err("Failed to create fastreg descriptor err=%d\n", ret); @@ -480,6 +551,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) struct isert_device *device; struct ib_device *ib_dev = cma_id->device; int ret = 0; + u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", cma_id, cma_id->context); @@ -569,8 +641,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } + if (pi_support && !device->pi_capable) { + pr_err("Protection information requested but not supported\n"); + ret = -EINVAL; + goto out_mr; + } + if (device->use_fastreg) { - ret = isert_conn_create_fastreg_pool(isert_conn); + ret = isert_conn_create_fastreg_pool(isert_conn, pi_support); if (ret) { pr_err("Conn: %p failed to create fastreg pool\n", isert_conn); @@ -578,7 +656,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) } } - ret = isert_conn_setup_qp(isert_conn, cma_id); + ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support); if (ret) goto out_conn_dev; @@ -2280,7 +2358,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, &fr_desc->data_frpl->page_list[0]); - if (!fr_desc->valid) { + if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { memset(&inv_wr, 0, sizeof(inv_wr)); inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.opcode = IB_WR_LOCAL_INV; @@ -2314,7 +2392,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, pr_err("fast registration failed, ret:%d\n", ret); return ret; } - fr_desc->valid = false; + fr_desc->ind &= ~ISERT_DATA_KEY_VALID; sge->lkey = fr_desc->data_mr->lkey; sge->addr = fr_desc->data_frpl->page_list[0] + page_off; diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 8a02c4ebe373..a75b75fbc9d1 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -50,11 +50,25 @@ struct iser_tx_desc { struct ib_send_wr send_wr; } __packed; +enum isert_indicator { + ISERT_PROTECTED = 1 << 0, + ISERT_DATA_KEY_VALID = 1 << 1, + ISERT_PROT_KEY_VALID = 1 << 2, + ISERT_SIG_KEY_VALID = 1 << 3, +}; + +struct pi_context { + struct ib_mr *prot_mr; + struct ib_fast_reg_page_list *prot_frpl; + struct ib_mr *sig_mr; +}; + struct fast_reg_descriptor { - struct list_head list; - struct ib_mr *data_mr; - struct ib_fast_reg_page_list *data_frpl; - bool valid; + struct list_head list; + struct ib_mr *data_mr; + struct ib_fast_reg_page_list *data_frpl; + u8 ind; + struct pi_context *pi_ctx; }; struct isert_data_buf { @@ -149,6 +163,7 @@ struct isert_cq_desc { struct isert_device { int use_fastreg; + bool pi_capable; int cqs_used; int refcount; int cq_active_qps[ISERT_MAX_CQ]; -- cgit v1.2.3 From f93f3a70da9175b4641f93d466d779675eb83fa2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 19 Feb 2014 17:50:24 +0200 Subject: IB/isert: Accept RDMA_WRITE completions In case of protected transactions, we will need to check the protection status of the transaction before sending SCSI response. So be ready for RDMA_WRITE completions. 
currently we don't ask for these completions, but for T10-PI we will. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 72063147933c..2eb07b943ed9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); static int isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr); +static int +isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); static void isert_qp_event_callback(struct ib_event *e, void *context) @@ -1651,6 +1653,18 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, isert_put_cmd(isert_cmd); } +static void +isert_completion_rdma_write(struct iser_tx_desc *tx_desc, + struct isert_cmd *isert_cmd) +{ + struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct isert_conn *isert_conn = isert_cmd->conn; + struct isert_device *device = isert_conn->conn_device; + + device->unreg_rdma_mem(isert_cmd, isert_conn); + isert_put_response(isert_conn->conn, cmd); +} + static void isert_completion_rdma_read(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd) @@ -1773,8 +1787,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, isert_conn, ib_dev); break; case ISER_IB_RDMA_WRITE: - pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); - dump_stack(); + pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); + atomic_dec(&isert_conn->post_send_buf_count); + isert_completion_rdma_write(tx_desc, isert_cmd); break; case ISER_IB_RDMA_READ: pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); -- cgit v1.2.3 From 9e961ae73c2ce81387e9b375231d4aefe5ffa13e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 19 Feb 2014 17:50:25 +0200 Subject: IB/isert: Support T10-PI protected transactions In case the Target core passed transport T10 protection operation: 1. Register data buffer (data memory region) 2. Register protection buffer if exsists (prot memory region) 3. Register signature region (signature memory region) - use work request IB_WR_REG_SIG_MR 4. Execute RDMA 5. 
Upon RDMA completion check the signature status - if succeeded send good SCSI response - if failed send SCSI bad response with appropriate sense buffer (Fix up compile error in isert_reg_sig_mr, and fix up incorrect se_cmd->prot_type -> TARGET_PROT_NORMAL comparision - nab) (Fix failed sector assignment in isert_completion_rdma_* - Sagi + nab) (Fix enum assignements for protection type - Sagi) (Fix devision on 32-bit in isert_completion_rdma_* - Sagi + Fengguang) (Fix context change for v3.14-rc6 code - nab) (Fix iscsit_build_rsp_pdu inc_statsn flag usage - nab) Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 339 +++++++++++++++++++++++++++++--- drivers/infiniband/ulp/isert/ib_isert.h | 1 + 2 files changed, 311 insertions(+), 29 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2eb07b943ed9..f82fe3dccabe 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1552,6 +1552,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) if (wr->fr_desc) { pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", isert_cmd, wr->fr_desc); + if (wr->fr_desc->ind & ISERT_PROTECTED) { + isert_unmap_data_buf(isert_conn, &wr->prot); + wr->fr_desc->ind &= ~ISERT_PROTECTED; + } spin_lock_bh(&isert_conn->conn_lock); list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); spin_unlock_bh(&isert_conn->conn_lock); @@ -1657,12 +1661,55 @@ static void isert_completion_rdma_write(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd) { + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_conn *isert_conn = isert_cmd->conn; struct isert_device *device = isert_conn->conn_device; + struct ib_mr_status mr_status; + int ret = 0; + + if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { + ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr, + IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + goto fail_mr_status; + } + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case IB_SIG_BAD_REFTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case IB_SIG_BAD_APPTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + } + se_cmd->bad_sector = mr_status.sig_err.sig_err_offset; + do_div(se_cmd->bad_sector, block_size); + + pr_err("isert: PI error found type %d at sector 0x%llx " + "expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, + (unsigned long long)se_cmd->bad_sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); + ret = 1; + } + } +fail_mr_status: device->unreg_rdma_mem(isert_cmd, isert_conn); - isert_put_response(isert_conn->conn, cmd); + if (ret) + transport_send_check_condition_and_sense(se_cmd, + se_cmd->pi_err, 0); + else + isert_put_response(isert_conn->conn, cmd); } static void @@ -1674,7 +1721,43 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_conn *isert_conn = isert_cmd->conn; struct isert_device *device = isert_conn->conn_device; + struct ib_mr_status mr_status; + int ret; + if (wr->fr_desc && 
wr->fr_desc->ind & ISERT_PROTECTED) { + ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr, + IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + goto fail_mr_status; + } + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case IB_SIG_BAD_REFTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case IB_SIG_BAD_APPTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + } + se_cmd->bad_sector = mr_status.sig_err.sig_err_offset; + do_div(se_cmd->bad_sector, block_size); + + pr_err("isert: PI error found type %d at sector 0x%llx " + "expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, + (unsigned long long)se_cmd->bad_sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); + } + } + +fail_mr_status: iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->data.len; @@ -2349,9 +2432,12 @@ static int isert_fast_reg_mr(struct isert_conn *isert_conn, struct fast_reg_descriptor *fr_desc, struct isert_data_buf *mem, + enum isert_indicator ind, struct ib_sge *sge) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct ib_mr *mr; + struct ib_fast_reg_page_list *frpl; struct ib_send_wr fr_wr, inv_wr; struct ib_send_wr *bad_wr, *wr = NULL; int ret, pagelist_len; @@ -2362,39 +2448,51 @@ isert_fast_reg_mr(struct isert_conn *isert_conn, sge->lkey = isert_conn->conn_mr->lkey; sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); + pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", + __func__, __LINE__, sge->addr, sge->length, + sge->lkey); return 0; } + if (ind == ISERT_DATA_KEY_VALID) { + /* Registering data buffer */ + mr = fr_desc->data_mr; + frpl = fr_desc->data_frpl; + } else { + /* Registering protection buffer */ + mr = fr_desc->pi_ctx->prot_mr; + frpl = fr_desc->pi_ctx->prot_frpl; + } + page_off = mem->offset % PAGE_SIZE; pr_debug("Use fr_desc %p sg_nents %d offset %u\n", fr_desc, mem->nents, mem->offset); pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, - &fr_desc->data_frpl->page_list[0]); + &frpl->page_list[0]); if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { memset(&inv_wr, 0, sizeof(inv_wr)); inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.opcode = IB_WR_LOCAL_INV; - inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; + inv_wr.ex.invalidate_rkey = mr->rkey; wr = &inv_wr; /* Bump the key */ - key = (u8)(fr_desc->data_mr->rkey & 0x000000FF); - ib_update_fast_reg_key(fr_desc->data_mr, ++key); + key = (u8)(mr->rkey & 0x000000FF); + ib_update_fast_reg_key(mr, ++key); } /* Prepare FASTREG WR */ memset(&fr_wr, 0, sizeof(fr_wr)); fr_wr.wr_id = ISER_FASTREG_LI_WRID; fr_wr.opcode = IB_WR_FAST_REG_MR; - fr_wr.wr.fast_reg.iova_start = - fr_desc->data_frpl->page_list[0] + page_off; - fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl; + fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off; + fr_wr.wr.fast_reg.page_list = frpl; fr_wr.wr.fast_reg.page_list_len = pagelist_len; fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT; fr_wr.wr.fast_reg.length = mem->len; - fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey; + fr_wr.wr.fast_reg.rkey = mr->rkey; fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE; if (!wr) @@ -2407,18 +2505,158 @@ isert_fast_reg_mr(struct 
isert_conn *isert_conn, pr_err("fast registration failed, ret:%d\n", ret); return ret; } - fr_desc->ind &= ~ISERT_DATA_KEY_VALID; + fr_desc->ind &= ~ind; - sge->lkey = fr_desc->data_mr->lkey; - sge->addr = fr_desc->data_frpl->page_list[0] + page_off; + sge->lkey = mr->lkey; + sge->addr = frpl->page_list[0] + page_off; sge->length = mem->len; - pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", - sge->addr, sge->length, sge->lkey); + pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", + __func__, __LINE__, sge->addr, sge->length, + sge->lkey); return ret; } +static inline enum ib_t10_dif_type +se2ib_prot_type(enum target_prot_type prot_type) +{ + switch (prot_type) { + case TARGET_DIF_TYPE0_PROT: + return IB_T10DIF_NONE; + case TARGET_DIF_TYPE1_PROT: + return IB_T10DIF_TYPE1; + case TARGET_DIF_TYPE2_PROT: + return IB_T10DIF_TYPE2; + case TARGET_DIF_TYPE3_PROT: + return IB_T10DIF_TYPE3; + default: + return IB_T10DIF_NONE; + } +} + +static int +isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) +{ + enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type); + + sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF; + sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF; + sig_attrs->mem.sig.dif.pi_interval = + se_cmd->se_dev->dev_attrib.block_size; + sig_attrs->wire.sig.dif.pi_interval = + se_cmd->se_dev->dev_attrib.block_size; + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE; + sig_attrs->wire.sig.dif.type = ib_prot_type; + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed; + break; + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + sig_attrs->mem.sig.dif.type = ib_prot_type; + sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; + sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed; + sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE; + break; + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + sig_attrs->mem.sig.dif.type = ib_prot_type; + sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; + sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed; + sig_attrs->wire.sig.dif.type = ib_prot_type; + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; + sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed; + break; + default: + pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); + return -EINVAL; + } + + return 0; +} + +static inline u8 +isert_set_prot_checks(u8 prot_checks) +{ + return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | + (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | + (prot_checks & TARGET_DIF_CHECK_REFTAG ? 
0x0f : 0); +} + +static int +isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, + struct fast_reg_descriptor *fr_desc, + struct ib_sge *data_sge, struct ib_sge *prot_sge, + struct ib_sge *sig_sge) +{ + struct ib_send_wr sig_wr, inv_wr; + struct ib_send_wr *bad_wr, *wr = NULL; + struct pi_context *pi_ctx = fr_desc->pi_ctx; + struct ib_sig_attrs sig_attrs; + int ret; + u32 key; + + memset(&sig_attrs, 0, sizeof(sig_attrs)); + ret = isert_set_sig_attrs(se_cmd, &sig_attrs); + if (ret) + goto err; + + sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); + + if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { + memset(&inv_wr, 0, sizeof(inv_wr)); + inv_wr.opcode = IB_WR_LOCAL_INV; + inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; + wr = &inv_wr; + /* Bump the key */ + key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); + ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); + } + + memset(&sig_wr, 0, sizeof(sig_wr)); + sig_wr.opcode = IB_WR_REG_SIG_MR; + sig_wr.sg_list = data_sge; + sig_wr.num_sge = 1; + sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; + sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; + sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; + if (se_cmd->t_prot_sg) + sig_wr.wr.sig_handover.prot = prot_sge; + + if (!wr) + wr = &sig_wr; + else + wr->next = &sig_wr; + + ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); + if (ret) { + pr_err("fast registration failed, ret:%d\n", ret); + goto err; + } + fr_desc->ind &= ~ISERT_SIG_KEY_VALID; + + sig_sge->lkey = pi_ctx->sig_mr->lkey; + sig_sge->addr = 0; + sig_sge->length = se_cmd->data_length; + if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && + se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) + /* + * We have protection guards on the wire + * so we need to set a larget transfer + */ + sig_sge->length += se_cmd->prot_length; + + pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", + sig_sge->addr, sig_sge->length, + sig_sge->lkey); +err: + return ret; +} + static int isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr) @@ -2426,6 +2664,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); struct isert_conn *isert_conn = conn->context; + struct ib_sge data_sge; struct ib_send_wr *send_wr; struct fast_reg_descriptor *fr_desc = NULL; u32 offset; @@ -2441,7 +2680,8 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (ret) return ret; - if (wr->data.dma_nents != 1) { + if (wr->data.dma_nents != 1 || + se_cmd->prot_op != TARGET_PROT_NORMAL) { spin_lock_irqsave(&isert_conn->conn_lock, flags); fr_desc = list_first_entry(&isert_conn->conn_fr_pool, struct fast_reg_descriptor, list); @@ -2450,10 +2690,39 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, wr->fr_desc = fr_desc; } - ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge); + ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, + ISERT_DATA_KEY_VALID, &data_sge); if (ret) goto unmap_cmd; + if (se_cmd->prot_op != TARGET_PROT_NORMAL) { + struct ib_sge prot_sge, sig_sge; + + if (se_cmd->t_prot_sg) { + ret = isert_map_data_buf(isert_conn, isert_cmd, + se_cmd->t_prot_sg, + se_cmd->t_prot_nents, + se_cmd->prot_length, + 0, wr->iser_ib_op, &wr->prot); + if (ret) + goto unmap_cmd; + + ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot, + ISERT_PROT_KEY_VALID, &prot_sge); + if (ret) + goto unmap_prot_cmd; + } + + ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc, + &data_sge, 
&prot_sge, &sig_sge); + if (ret) + goto unmap_prot_cmd; + + fr_desc->ind |= ISERT_PROTECTED; + memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); + } else + memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); + wr->ib_sge = &wr->s_ib_sge; wr->send_wr_num = 1; memset(&wr->s_send_wr, 0, sizeof(*send_wr)); @@ -2468,8 +2737,8 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, send_wr->opcode = IB_WR_RDMA_WRITE; send_wr->wr.rdma.remote_addr = isert_cmd->read_va; send_wr->wr.rdma.rkey = isert_cmd->read_stag; - send_wr->send_flags = 0; - send_wr->next = &isert_cmd->tx_desc.send_wr; + send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? + 0 : IB_SEND_SIGNALED; } else { send_wr->opcode = IB_WR_RDMA_READ; send_wr->wr.rdma.remote_addr = isert_cmd->write_va; @@ -2478,6 +2747,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, } return 0; +unmap_prot_cmd: + if (se_cmd->t_prot_sg) + isert_unmap_data_buf(isert_conn, &wr->prot); unmap_cmd: if (fr_desc) { spin_lock_irqsave(&isert_conn->conn_lock, flags); @@ -2509,15 +2781,19 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) return rc; } - /* - * Build isert_conn->tx_desc for iSCSI response PDU and attach - */ - isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); - iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) - &isert_cmd->tx_desc.iscsi_header); - isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_conn, isert_cmd, - &isert_cmd->tx_desc.send_wr, true); + if (se_cmd->prot_op == TARGET_PROT_NORMAL) { + /* + * Build isert_conn->tx_desc for iSCSI response PDU and attach + */ + isert_create_send_desc(isert_conn, isert_cmd, + &isert_cmd->tx_desc); + iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) + &isert_cmd->tx_desc.iscsi_header); + isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); + isert_init_send_wr(isert_conn, isert_cmd, + &isert_cmd->tx_desc.send_wr, true); + isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; + } atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); @@ -2526,8 +2802,13 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); } - pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", - isert_cmd); + + if (se_cmd->prot_op == TARGET_PROT_NORMAL) + pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " + "READ\n", isert_cmd); + else + pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", + isert_cmd); return 1; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index a75b75fbc9d1..4c072ae34c01 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -91,6 +91,7 @@ struct isert_rdma_wr { struct ib_send_wr *send_wr; struct ib_send_wr s_send_wr; struct isert_data_buf data; + struct isert_data_buf prot; struct fast_reg_descriptor *fr_desc; }; -- cgit v1.2.3 From c2caa207774683bddaa628d4c848ca48cc172e0a Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 17 Mar 2014 12:52:16 +0200 Subject: Target/iser: Fix signature work requests accounting As REG_SIG_MR work request and it's LOCAL_INVALIDATE are not accounted in post_send_buf_count we must color these with ISER_FASTREG_LI_WRID in order to process their error completions when the QP flushes. 
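To make the accounting rule concrete, here is a small, self-contained sketch of the "coloring" idea this patch relies on: work requests that were never counted in post_send_buf_count carry a sentinel wr_id, so a flush-error drain path can recognize and skip them instead of underflowing the counter. All names and numeric values below are illustrative stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_LI_WRID 0xffffffffffffffffULL	/* stands in for ISER_FASTREG_LI_WRID */

struct example_wc {
	uint64_t wr_id;
	int status;		/* non-zero models a flush/error completion */
};

static int post_send_buf_count = 3;

static void handle_tx_error(const struct example_wc *wc)
{
	if (wc->wr_id == EXAMPLE_LI_WRID) {
		/* fast-reg or local-invalidate WR: never counted, just drop it */
		printf("skip uncounted WR (status %d)\n", wc->status);
		return;
	}
	/* a counted descriptor completion: release its accounting slot */
	post_send_buf_count--;
	printf("drain counted WR, post_send_buf_count now %d\n",
	       post_send_buf_count);
}

int main(void)
{
	struct example_wc flushed[] = {
		{ .wr_id = EXAMPLE_LI_WRID, .status = 5 },
		{ .wr_id = 0x1000,          .status = 5 },
		{ .wr_id = 0x2000,          .status = 5 },
	};
	size_t i;

	for (i = 0; i < sizeof(flushed) / sizeof(flushed[0]); i++)
		handle_tx_error(&flushed[i]);
	return 0;
}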
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f82fe3dccabe..e4ebb4c08358 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2610,6 +2610,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { memset(&inv_wr, 0, sizeof(inv_wr)); inv_wr.opcode = IB_WR_LOCAL_INV; + inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; wr = &inv_wr; /* Bump the key */ @@ -2619,6 +2620,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, memset(&sig_wr, 0, sizeof(sig_wr)); sig_wr.opcode = IB_WR_REG_SIG_MR; + sig_wr.wr_id = ISER_FASTREG_LI_WRID; sig_wr.sg_list = data_sge; sig_wr.num_sge = 1; sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; -- cgit v1.2.3 From 897bb2c9162fa8f03653693a751b473cd131ee60 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 17 Mar 2014 12:52:17 +0200 Subject: Target/iser: Consider DIF and RDMA_READ completions when calculating post_send counter If protection is involved, iSER target must wait for completion of RDMA_READ before sending SCSI response. So we must consider that when calculating post_send_buf_count additions, also when processing good/error completions. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 47 ++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 6 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index e4ebb4c08358..faec22404da5 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1705,6 +1705,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc, fail_mr_status: device->unreg_rdma_mem(isert_cmd, isert_conn); + wr->send_wr_num = 0; if (ret) transport_send_check_condition_and_sense(se_cmd, se_cmd->pi_err, 0); @@ -1838,7 +1839,18 @@ isert_response_completion(struct iser_tx_desc *tx_desc, queue_work(isert_comp_wq, &isert_cmd->comp_work); return; } - atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + + /** + * If send_wr_num is 0 this means that we got + * RDMA completion and we cleared it and we should + * simply decrement the response post. else the + * response is incorporated in send_wr_num, just + * sub it. 
+ **/ + if (wr->send_wr_num) + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); + else + atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(tx_desc, isert_cmd, ib_dev); @@ -1871,7 +1883,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, break; case ISER_IB_RDMA_WRITE: pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_write(tx_desc, isert_cmd); break; case ISER_IB_RDMA_READ: @@ -1922,7 +1934,18 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de llnode = llist_next(llnode); wr = &t->isert_cmd->rdma_wr; - atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + /** + * If send_wr_num is 0 this means that we got + * RDMA completion and we cleared it and we should + * simply decrement the response post. else the + * response is incorporated in send_wr_num, just + * sub it. + **/ + if (wr->send_wr_num) + atomic_sub(wr->send_wr_num, + &isert_conn->post_send_buf_count); + else + atomic_dec(&isert_conn->post_send_buf_count); isert_completion_put(t, t->isert_cmd, ib_dev); } } @@ -1941,7 +1964,18 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn llnode = llist_next(llnode); wr = &t->isert_cmd->rdma_wr; - atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + /** + * If send_wr_num is 0 this means that we got + * RDMA completion and we cleared it and we should + * simply decrement the response post. else the + * response is incorporated in send_wr_num, just + * sub it. + **/ + if (wr->send_wr_num) + atomic_sub(wr->send_wr_num, + &isert_conn->post_send_buf_count); + else + atomic_dec(&isert_conn->post_send_buf_count); isert_completion_put(t, t->isert_cmd, ib_dev); } tx_desc->comp_llnode_batch = NULL; @@ -2795,14 +2829,15 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_send_wr(isert_conn, isert_cmd, &isert_cmd->tx_desc.send_wr, true); isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; + wr->send_wr_num += 1; } - atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); - atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); } if (se_cmd->prot_op == TARGET_PROT_NORMAL) -- cgit v1.2.3 From 96b7973e1c59e9ddde27bca882411c3980125330 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 17 Mar 2014 12:52:18 +0200 Subject: Target/iser: Move check signature status to a function Remove code duplication from RDMA_READ and RDMA_WRITE completions that do basically the same check. 
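As a worked example of the bad-sector arithmetic performed by the consolidated check below: the HCA reports the signature error as a byte offset over on-wire intervals, so it is divided by block_size + 8 (each block carries an 8-byte DIF tuple on the wire) and then rebased onto the command's starting LBA. The 512-byte block size used here is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block_size  = 512 + 8;	/* data bytes + protection tuple */
	uint64_t sig_err_off = 5200;	/* byte offset reported by the HCA */
	uint64_t t_task_lba  = 1000;	/* first LBA of the command */

	uint64_t bad_sector = sig_err_off / block_size + t_task_lba;

	printf("integrity error at sector %llu\n",
	       (unsigned long long)bad_sector);	/* prints 1010 */
	return 0;
}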
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 115 ++++++++++++++------------------ 1 file changed, 50 insertions(+), 65 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index faec22404da5..e2d48a9d828a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1657,6 +1657,50 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, isert_put_cmd(isert_cmd); } +static int +isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr) +{ + struct ib_mr_status mr_status; + int ret; + + ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); + if (ret) { + pr_err("ib_check_mr_status failed, ret %d\n", ret); + goto fail_mr_status; + } + + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { + u64 sec_offset_err; + u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; + + switch (mr_status.sig_err.err_type) { + case IB_SIG_BAD_GUARD: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + break; + case IB_SIG_BAD_REFTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + break; + case IB_SIG_BAD_APPTAG: + se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + break; + } + sec_offset_err = mr_status.sig_err.sig_err_offset; + do_div(sec_offset_err, block_size); + se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; + + pr_err("isert: PI error found type %d at sector 0x%llx " + "expected 0x%x vs actual 0x%x\n", + mr_status.sig_err.err_type, + (unsigned long long)se_cmd->bad_sector, + mr_status.sig_err.expected, + mr_status.sig_err.actual); + ret = 1; + } + +fail_mr_status: + return ret; +} + static void isert_completion_rdma_write(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd) @@ -1666,44 +1710,14 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_conn *isert_conn = isert_cmd->conn; struct isert_device *device = isert_conn->conn_device; - struct ib_mr_status mr_status; int ret = 0; if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { - ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr, - IB_MR_CHECK_SIG_STATUS, &mr_status); - if (ret) { - pr_err("ib_check_mr_status failed, ret %d\n", ret); - goto fail_mr_status; - } - if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { - u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; - - switch (mr_status.sig_err.err_type) { - case IB_SIG_BAD_GUARD: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - break; - case IB_SIG_BAD_REFTAG: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - break; - case IB_SIG_BAD_APPTAG: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - break; - } - se_cmd->bad_sector = mr_status.sig_err.sig_err_offset; - do_div(se_cmd->bad_sector, block_size); - - pr_err("isert: PI error found type %d at sector 0x%llx " - "expected 0x%x vs actual 0x%x\n", - mr_status.sig_err.err_type, - (unsigned long long)se_cmd->bad_sector, - mr_status.sig_err.expected, - mr_status.sig_err.actual); - ret = 1; - } + ret = isert_check_pi_status(se_cmd, + wr->fr_desc->pi_ctx->sig_mr); + wr->fr_desc->ind &= ~ISERT_PROTECTED; } -fail_mr_status: device->unreg_rdma_mem(isert_cmd, isert_conn); wr->send_wr_num = 0; if (ret) @@ -1722,43 +1736,14 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_conn *isert_conn = 
isert_cmd->conn; struct isert_device *device = isert_conn->conn_device; - struct ib_mr_status mr_status; int ret; if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { - ret = ib_check_mr_status(wr->fr_desc->pi_ctx->sig_mr, - IB_MR_CHECK_SIG_STATUS, &mr_status); - if (ret) { - pr_err("ib_check_mr_status failed, ret %d\n", ret); - goto fail_mr_status; - } - if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { - u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8; - - switch (mr_status.sig_err.err_type) { - case IB_SIG_BAD_GUARD: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - break; - case IB_SIG_BAD_REFTAG: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - break; - case IB_SIG_BAD_APPTAG: - se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - break; - } - se_cmd->bad_sector = mr_status.sig_err.sig_err_offset; - do_div(se_cmd->bad_sector, block_size); - - pr_err("isert: PI error found type %d at sector 0x%llx " - "expected 0x%x vs actual 0x%x\n", - mr_status.sig_err.err_type, - (unsigned long long)se_cmd->bad_sector, - mr_status.sig_err.expected, - mr_status.sig_err.actual); - } + ret = isert_check_pi_status(se_cmd, + wr->fr_desc->pi_ctx->sig_mr); + wr->fr_desc->ind &= ~ISERT_PROTECTED; } -fail_mr_status: iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->data.len; -- cgit v1.2.3 From 5bac4b1a1fd84769df941eefb8bdbe010df6b876 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 18 Mar 2014 14:58:27 +0200 Subject: Target/iser: Fail SCSI WRITE command if device detected integrity error If during data-transfer a data-integrity error was detected we must fail the command with CHECK_CONDITION and not execute the command. Signed-off-by: Sagi Grimberg Reported-by: Or Gerlitz Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index e2d48a9d828a..09bde9fe6b8d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1736,7 +1736,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_conn *isert_conn = isert_cmd->conn; struct isert_device *device = isert_conn->conn_device; - int ret; + int ret = 0; if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) { ret = isert_check_pi_status(se_cmd, @@ -1755,7 +1755,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; spin_unlock_bh(&cmd->istate_lock); - target_execute_cmd(se_cmd); + if (ret) + transport_send_check_condition_and_sense(se_cmd, + se_cmd->pi_err, 0); + else + target_execute_cmd(se_cmd); } static void -- cgit v1.2.3 From f46d6a8a01d6bbd83a97140f30a72a89b038807b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 21 Mar 2014 18:10:25 -0700 Subject: iser-target: Match FRMR descriptors to available session tags This patch changes isert_conn_create_fastreg_pool() to follow logic in iscsi_target_locate_portal() for determining how many FRMR descriptors to allocate based upon the number of possible per-session command slots that are available. This addresses an OOPs in isert_reg_rdma() where due to the use of ISCSI_DEF_XMIT_CMDS_MAX could end up returning a bogus fast_reg_descriptor when the number of active tags exceeded the original hardcoded max. 
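To make the sizing rule concrete, the following self-contained sketch reproduces the arithmetic the patch adopts (doubled tag count plus slack, with a minimum floor), as seen in the hunk further below. The constant values are placeholders chosen for the example, not the kernel's ISCSIT_* definitions.

#include <stdio.h>

#define EXAMPLE_MIN_TAGS	16	/* placeholder for ISCSIT_MIN_TAGS */
#define EXAMPLE_EXTRA_TAGS	5	/* placeholder for ISCSIT_EXTRA_TAGS */

static unsigned int frmr_pool_size(unsigned int session_queue_depth)
{
	unsigned int tag_num = session_queue_depth;

	if (tag_num < EXAMPLE_MIN_TAGS)
		tag_num = EXAMPLE_MIN_TAGS;

	/* one descriptor per possible tag, doubled, plus a little slack */
	return (tag_num * 2) + EXAMPLE_EXTRA_TAGS;
}

int main(void)
{
	unsigned int qd;

	for (qd = 1; qd <= 128; qd *= 2)
		printf("queue_depth %3u -> %u FRMR descriptors\n",
		       qd, frmr_pool_size(qd));
	return 0;
}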
Note this also includes moving isert_conn_create_fastreg_pool() from isert_connect_request() to isert_put_login_tx() before posting the final Login Response PDU in order to determine the se_nacl->queue_depth (eg: number of tags) per session the target will be enforcing. v2 changes: - Move isert_conn->conn_fr_pool list_head init into isert_conn_request() v3 changes: - Drop unnecessary list_empty() check in isert_reg_rdma() (Sagi) Cc: Sagi Grimberg Cc: Or Gerlitz Cc: #3.12+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 38 ++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 09bde9fe6b8d..529d2cbfe45a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -508,11 +508,18 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support) { struct fast_reg_descriptor *fr_desc; struct isert_device *device = isert_conn->conn_device; - int i, ret; + struct se_session *se_sess = isert_conn->conn->sess->se_sess; + struct se_node_acl *se_nacl = se_sess->se_node_acl; + int i, ret, tag_num; + /* + * Setup the number of FRMRs based upon the number of tags + * available to session in iscsi_target_locate_portal(). + */ + tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth); + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; - INIT_LIST_HEAD(&isert_conn->conn_fr_pool); isert_conn->conn_fr_pool_size = 0; - for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { + for (i = 0; i < tag_num; i++) { fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); if (!fr_desc) { pr_err("Failed to allocate fast_reg descriptor\n"); @@ -572,6 +579,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); spin_lock_init(&isert_conn->conn_lock); + INIT_LIST_HEAD(&isert_conn->conn_fr_pool); cma_id->context = isert_conn; isert_conn->conn_cm_id = cma_id; @@ -649,15 +657,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } - if (device->use_fastreg) { - ret = isert_conn_create_fastreg_pool(isert_conn, pi_support); - if (ret) { - pr_err("Conn: %p failed to create fastreg pool\n", - isert_conn); - goto out_fastreg; - } - } - ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support); if (ret) goto out_conn_dev; @@ -671,9 +670,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) return 0; out_conn_dev: - if (device->use_fastreg) - isert_conn_free_fastreg_pool(isert_conn); -out_fastreg: ib_dereg_mr(isert_conn->conn_mr); out_mr: ib_dealloc_pd(isert_conn->conn_pd); @@ -1047,6 +1043,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, } if (!login->login_failed) { if (login->login_complete) { + if (isert_conn->conn_device->use_fastreg) { + u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi; + + ret = isert_conn_create_fastreg_pool(isert_conn, + pi_support); + if (ret) { + pr_err("Conn: %p failed to create" + " fastreg pool\n", isert_conn); + return ret; + } + } + ret = isert_alloc_rx_descriptors(isert_conn); if (ret) return ret; -- cgit v1.2.3 From 131e6abc674edb9f9a59090bb35bf6650569b7e7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Mar 2014 14:55:56 -0700 Subject: target: Add TFO->abort_task for aborted task resources release Now that TASK_ABORTED status is not generated 
for all cases by TMR ABORT_TASK + LUN_RESET, a new TFO->abort_task() caller is necessary in order to give fabric drivers a chance to unmap hardware / software resources before the se_cmd descriptor is released via the normal TFO->release_cmd() codepath. This patch adds TFO->aborted_task() in core_tmr_abort_task() in place of the original transport_send_task_abort(), and also updates all fabric drivers to implement this caller. The fabric drivers that include changes to perform cleanup via ->aborted_task() are: - iscsi-target - iser-target - srpt - tcm_qla2xxx The fabric drivers that currently set ->aborted_task() to NOPs are: - loopback - tcm_fc - usb-gadget - sbp-target - vhost-scsi For the latter five, there appears to be no additional cleanup required before invoking TFO->release_cmd() to release the se_cmd descriptor. v2 changes: - Move ->aborted_task() call into transport_cmd_finish_abort (Alex) Cc: Alex Leung Cc: Mark Rustad Cc: Roland Dreier Cc: Vu Pham Cc: Chris Boot Cc: Sebastian Andrzej Siewior Cc: Michael S. Tsirkin Cc: Giridhar Malavali Cc: Saurav Kashyap Cc: Quinn Tran Cc: Sagi Grimberg Cc: Or Gerlitz Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 19 +++++++++++++++++++ drivers/infiniband/ulp/srpt/ib_srpt.c | 9 +++++++++ drivers/scsi/qla2xxx/tcm_qla2xxx.c | 16 ++++++++++++++++ drivers/target/iscsi/iscsi_target.c | 13 +++++++++++++ drivers/target/iscsi/iscsi_target_configfs.c | 8 ++++++++ drivers/target/iscsi/iscsi_target_util.c | 4 ++-- drivers/target/iscsi/iscsi_target_util.h | 1 + drivers/target/loopback/tcm_loop.c | 6 ++++++ drivers/target/sbp/sbp_target.c | 6 ++++++ drivers/target/target_core_configfs.c | 4 ++++ drivers/target/target_core_transport.c | 6 ++++++ drivers/target/tcm_fc/tcm_fc.h | 1 + drivers/target/tcm_fc/tfc_cmd.c | 5 +++++ drivers/target/tcm_fc/tfc_conf.c | 1 + drivers/usb/gadget/tcm_usb_gadget.c | 6 ++++++ drivers/vhost/scsi.c | 6 ++++++ include/target/iscsi/iscsi_transport.h | 1 + include/target/target_core_fabric.h | 1 + 18 files changed, 111 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 529d2cbfe45a..18ada7fb0fc9 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2162,6 +2162,24 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) return isert_post_response(isert_conn, isert_cmd); } +static void +isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct isert_device *device = isert_conn->conn_device; + + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + if (cmd->data_direction == DMA_TO_DEVICE) + iscsit_stop_dataout_timer(cmd); + + device->unreg_rdma_mem(isert_cmd, isert_conn); +} + static int isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, bool nopout_response) @@ -3217,6 +3235,7 @@ static struct iscsit_transport iser_target_transport = { .iscsit_get_dataout = isert_get_dataout, .iscsit_queue_data_in = isert_put_datain, .iscsit_queue_status = isert_put_response, + .iscsit_aborted_task = isert_aborted_task, }; static int __init isert_init(void) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 0e537d8d0e47..f03aafdc3572 100644 --- 
a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3081,6 +3081,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd) srpt_queue_response(cmd); } +static void srpt_aborted_task(struct se_cmd *cmd) +{ + struct srpt_send_ioctx *ioctx = container_of(cmd, + struct srpt_send_ioctx, cmd); + + srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); +} + static int srpt_queue_status(struct se_cmd *cmd) { struct srpt_send_ioctx *ioctx; @@ -3928,6 +3936,7 @@ static struct target_core_fabric_ops srpt_template = { .queue_data_in = srpt_queue_data_in, .queue_status = srpt_queue_status, .queue_tm_rsp = srpt_queue_tm_rsp, + .aborted_task = srpt_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 788c4fe2b0c9..b23a0ffe140e 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } +static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + struct scsi_qla_host *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + + if (!cmd->sg_mapped) + return; + + pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); + cmd->sg_mapped = 0; +} + /* Local pointer to allocated TCM configfs fabric module */ struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs; struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; @@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c @@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index df0456abc4fd..27f37c92dff3 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -499,6 +499,18 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) return 0; } +static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) +{ + bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); + + spin_lock_bh(&conn->cmd_lock); + if (!list_empty(&cmd->i_conn_node)) + list_del_init(&cmd->i_conn_node); + spin_unlock_bh(&conn->cmd_lock); + + __iscsit_free_cmd(cmd, scsi_cmd, true); +} + static struct iscsit_transport iscsi_target_transport = { .name = "iSCSI/TCP", .transport_type = ISCSI_TCP, @@ -513,6 +525,7 @@ static struct iscsit_transport iscsi_target_transport = { .iscsit_response_queue = iscsit_response_queue, .iscsit_queue_data_in = iscsit_queue_rsp, .iscsit_queue_status = iscsit_queue_rsp, + .iscsit_aborted_task = iscsit_aborted_task, }; static int __init iscsi_target_init_module(void) diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index fe35dcbacb14..ae03f3e5de1e 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c 
+++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1821,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd) iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); } +static void lio_aborted_task(struct se_cmd *se_cmd) +{ + struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + + cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd); +} + static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; @@ -2005,6 +2012,7 @@ int iscsi_target_register_configfs(void) fabric->tf_ops.queue_data_in = &lio_queue_data_in; fabric->tf_ops.queue_status = &lio_queue_status; fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp; + fabric->tf_ops.aborted_task = &lio_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index e655b042ed18..53e157cb8c54 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) } EXPORT_SYMBOL(iscsit_release_cmd); -static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, - bool check_queues) +void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, + bool check_queues) { struct iscsi_conn *conn = cmd->conn; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 561a424d1980..a68508c4fec8 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index a49ef0a49fa9..bdc1ad82d293 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -919,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) wake_up(&tl_tmr->tl_tmr_wait); } +static void tcm_loop_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba) { switch (tl_hba->tl_proto_id) { @@ -1487,6 +1492,7 @@ static int tcm_loop_register_configfs(void) fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; fabric->tf_ops.queue_status = &tcm_loop_queue_status; fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; + fabric->tf_ops.aborted_task = &tcm_loop_aborted_task; /* * Setup function pointers for generic logic in target_core_fabric_configfs.c diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 24884cac19ce..ad04ea928e4f 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) { } +static void sbp_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static int sbp_check_stop_free(struct se_cmd *se_cmd) { struct sbp_target_request *req = container_of(se_cmd, @@ -2526,6 +2531,7 @@ static struct 
target_core_fabric_ops sbp_ops = { .queue_data_in = sbp_queue_data_in, .queue_status = sbp_queue_status, .queue_tm_rsp = sbp_queue_tm_rsp, + .aborted_task = sbp_aborted_task, .check_stop_free = sbp_check_stop_free, .fabric_make_wwn = sbp_make_tport, diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index f0e85b119692..60a9ae6df763 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check( pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } + if (!tfo->aborted_task) { + pr_err("Missing tfo->aborted_task()\n"); + return -EINVAL; + } /* * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 51a375453d9b..9393544fb471 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -605,6 +605,12 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); + /* + * Allow the fabric driver to unmap any resources before + * releasing the descriptor via TFO->release_cmd() + */ + if (remove) + cmd->se_tfo->aborted_task(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 752863acecb8..4f4b97161228 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -163,6 +163,7 @@ int ft_write_pending_status(struct se_cmd *); u32 ft_get_task_tag(struct se_cmd *); int ft_get_cmd_state(struct se_cmd *); void ft_queue_tm_resp(struct se_cmd *); +void ft_aborted_task(struct se_cmd *); /* * other internal functions. 
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 8b2c1aaf81de..01cf37f212c3 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd) ft_send_resp_code(cmd, code); } +void ft_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static void ft_send_work(struct work_struct *work); /* diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index e879da81ad93..b8b5a719a784 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -536,6 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { .queue_data_in = ft_queue_data_in, .queue_status = ft_queue_status, .queue_tm_rsp = ft_queue_tm_resp, + .aborted_task = ft_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index 0f8aad78b54f..f9afa4a4ec3c 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c @@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd) { } +static void usbg_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static const char *usbg_check_wwn(const char *name) { const char *n; @@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = { .queue_data_in = usbg_send_read_response, .queue_status = usbg_send_status_response, .queue_tm_rsp = usbg_queue_tm_rsp, + .aborted_task = usbg_aborted_task, .check_stop_free = usbg_check_stop_free, .fabric_make_wwn = usbg_make_tport, diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e48d4a672580..4a473355020f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) return; } +static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) +{ + return; +} + static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) { vs->vs_events_nr--; @@ -2131,6 +2136,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = { .queue_data_in = tcm_vhost_queue_data_in, .queue_status = tcm_vhost_queue_status, .queue_tm_rsp = tcm_vhost_queue_tm_rsp, + .aborted_task = tcm_vhost_aborted_task, /* * Setup callers for generic logic in target_core_fabric_configfs.c */ diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 4483fadfa68d..8d19339292b8 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -21,6 +21,7 @@ struct iscsit_transport { int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool); int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); + void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); }; static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 0218d689b3d7..1d1043644b9b 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -62,6 +62,7 @@ struct target_core_fabric_ops { int (*queue_data_in)(struct se_cmd *); int (*queue_status)(struct se_cmd *); void (*queue_tm_rsp)(struct se_cmd *); + void (*aborted_task)(struct se_cmd *); /* * fabric module calls for target_core_fabric_configfs.c */ -- cgit v1.2.3 From 03e7848a64ed535a30f5d7fc6dede2d5a6a2534b Mon Sep 17 00:00:00 
2001 From: Nicholas Bellinger Date: Sun, 30 Mar 2014 15:50:03 -0700 Subject: iser-target: Add missing se_cmd put for WRITE_PENDING in tx_comp_err This patch fixes a bug where outstanding RDMA_READs with WRITE_PENDING status require an extra target_put_sess_cmd() in isert_put_cmd() code when called from isert_cq_tx_comp_err() + isert_cq_drain_comp_llist() context during session shutdown. The extra kref PUT is required so that transport_generic_free_cmd() invokes the last target_put_sess_cmd() -> target_release_cmd_kref(), which will complete(&se_cmd->cmd_wait_comp) the outstanding se_cmd descriptor with WRITE_PENDING status, and awake the completion in target_wait_for_sess_cmds() to invoke TFO->release_cmd(). The bug was manifesting itself in target_wait_for_sess_cmds() where a se_cmd descriptor with WRITE_PENDING status would end up sleeping indefinitely. Acked-by: Sagi Grimberg Cc: Or Gerlitz Cc: #3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 37 +++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 11 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 18ada7fb0fc9..f7801aeb7628 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1580,7 +1580,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) } static void -isert_put_cmd(struct isert_cmd *isert_cmd) +isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; struct isert_conn *isert_conn = isert_cmd->conn; @@ -1596,8 +1596,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd) list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - if (cmd->data_direction == DMA_TO_DEVICE) + if (cmd->data_direction == DMA_TO_DEVICE) { iscsit_stop_dataout_timer(cmd); + /* + * Check for special case during comp_err where + * WRITE_PENDING has been handed off from core, + * but requires an extra target_put_sess_cmd() + * before transport_generic_free_cmd() below.
+ */ + if (comp_err && + cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { + struct se_cmd *se_cmd = &cmd->se_cmd; + + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } + } device->unreg_rdma_mem(isert_cmd, isert_conn); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1652,7 +1665,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) static void isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, - struct ib_device *ib_dev) + struct ib_device *ib_dev, bool comp_err) { if (isert_cmd->pdu_buf_dma != 0) { pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); @@ -1662,7 +1675,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, } isert_unmap_tx_desc(tx_desc, ib_dev); - isert_put_cmd(isert_cmd); + isert_put_cmd(isert_cmd, comp_err); } static int @@ -1787,14 +1800,14 @@ isert_do_control_comp(struct work_struct *work) iscsit_tmr_post_handler(cmd, cmd->conn); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_REJECT: pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_LOGOUTRSP: pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); @@ -1808,7 +1821,7 @@ isert_do_control_comp(struct work_struct *work) case ISTATE_SEND_TEXTRSP: atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; default: pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); @@ -1850,7 +1863,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(tx_desc, isert_cmd, ib_dev); + isert_completion_put(tx_desc, isert_cmd, ib_dev, false); } static void @@ -1943,7 +1956,8 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de &isert_conn->post_send_buf_count); else atomic_dec(&isert_conn->post_send_buf_count); - isert_completion_put(t, t->isert_cmd, ib_dev); + + isert_completion_put(t, t->isert_cmd, ib_dev, true); } } @@ -1973,14 +1987,15 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn &isert_conn->post_send_buf_count); else atomic_dec(&isert_conn->post_send_buf_count); - isert_completion_put(t, t->isert_cmd, ib_dev); + + isert_completion_put(t, t->isert_cmd, ib_dev, true); } tx_desc->comp_llnode_batch = NULL; if (!isert_cmd) isert_unmap_tx_desc(tx_desc, ib_dev); else - isert_completion_put(tx_desc, isert_cmd, ib_dev); + isert_completion_put(tx_desc, isert_cmd, ib_dev, true); } static void -- cgit v1.2.3 From f225225848a70006d039b4caa2a089b660756cd5 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 27 Mar 2014 19:22:25 +0200 Subject: Target/iser: Use Fastreg only if device supports signature Fastreg is mandatory for signature, so if the device doesn't support it we don't need to use it. 
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f7801aeb7628..f5cc4affaeb4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -231,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device) return ret; /* asign function handlers */ - if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { + if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && + dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { device->use_fastreg = 1; device->reg_rdma_mem = isert_reg_rdma; device->unreg_rdma_mem = isert_unreg_rdma; -- cgit v1.2.3 From e70beee783d6977d80eede88a3394f02eabddad1 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 2 Apr 2014 12:52:38 -0700 Subject: target: Pass in transport supported PI at session initialization In order to support local WRITE_INSERT + READ_STRIP operations for non PI enabled fabrics, the fabric driver needs to be able signal what protection offload operations are supported. This is done at session initialization time so the modes can be signaled by individual se_wwn + se_portal_group endpoints, as well as optionally across different transports on the same endpoint. For iser-target, set TARGET_PROT_ALL if the underlying ib_device has already signaled PI offload support, and allow this to be exposed via a new iscsit_transport->iscsit_get_sup_prot_ops() callback. For loopback, set TARGET_PROT_ALL to signal SCSI initiator mode operation. For all other drivers, set TARGET_PROT_NORMAL to disable fabric level PI. Cc: Martin K. 
Petersen Cc: Sagi Grimberg Cc: Or Gerlitz Cc: Quinn Tran Cc: Giridhar Malavali Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 13 +++++++++++++ drivers/infiniband/ulp/srpt/ib_srpt.c | 2 +- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2 +- drivers/target/iscsi/iscsi_target.c | 6 ++++++ drivers/target/iscsi/iscsi_target_login.c | 4 +++- drivers/target/loopback/tcm_loop.c | 2 +- drivers/target/sbp/sbp_target.c | 2 +- drivers/target/target_core_transport.c | 8 +++++--- drivers/target/tcm_fc/tfc_sess.c | 3 ++- drivers/usb/gadget/tcm_usb_gadget.c | 2 +- drivers/vhost/scsi.c | 3 ++- include/target/iscsi/iscsi_transport.h | 1 + include/target/target_core_base.h | 19 ++++++++++++------- include/target/target_core_fabric.h | 5 +++-- 14 files changed, 52 insertions(+), 20 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index f5cc4affaeb4..c98fdb185931 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2196,6 +2196,18 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) device->unreg_rdma_mem(isert_cmd, isert_conn); } +static enum target_prot_op +isert_get_sup_prot_ops(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = (struct isert_conn *)conn->context; + struct isert_device *device = isert_conn->conn_device; + + if (device->pi_capable) + return TARGET_PROT_ALL; + + return TARGET_PROT_NORMAL; +} + static int isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, bool nopout_response) @@ -3252,6 +3264,7 @@ static struct iscsit_transport iser_target_transport = { .iscsit_queue_data_in = isert_put_datain, .iscsit_queue_status = isert_put_response, .iscsit_aborted_task = isert_aborted_task, + .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops, }; static int __init isert_init(void) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index f03aafdc3572..bcfb398a6639 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2580,7 +2580,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, goto destroy_ib; } - ch->sess = transport_init_session(); + ch->sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(ch->sess)) { rej->reason = __constant_cpu_to_be32( SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b23a0ffe140e..68fb66fdb757 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1482,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( } se_tpg = &tpg->se_tpg; - se_sess = transport_init_session(); + se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(se_sess)) { pr_err("Unable to initialize struct se_session\n"); return PTR_ERR(se_sess); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 96aee439c9fd..78cab13bbb1b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -511,6 +511,11 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) __iscsit_free_cmd(cmd, scsi_cmd, true); } +static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn) +{ + return TARGET_PROT_NORMAL; +} + static struct iscsit_transport iscsi_target_transport = { .name = "iSCSI/TCP", .transport_type = ISCSI_TCP, @@ -526,6 +531,7 @@ static struct iscsit_transport 
iscsi_target_transport = { .iscsit_queue_data_in = iscsit_queue_rsp, .iscsit_queue_status = iscsit_queue_rsp, .iscsit_aborted_task = iscsit_aborted_task, + .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops, }; static int __init iscsi_target_init_module(void) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index e29279e6b577..8739b98f6f93 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1( { struct iscsi_session *sess = NULL; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + enum target_prot_op sup_pro_ops; int ret; sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); @@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1( kfree(sess); return -ENOMEM; } + sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn); - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(sup_pro_ops); if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index bdc1ad82d293..c886ad1c39fb 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -1018,7 +1018,7 @@ static int tcm_loop_make_nexus( /* * Initialize the struct se_session pointer */ - tl_nexus->se_sess = transport_init_session(); + tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL); if (IS_ERR(tl_nexus->se_sess)) { ret = PTR_ERR(tl_nexus->se_sess); goto out; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index ad04ea928e4f..e7e93727553c 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create( return ERR_PTR(-ENOMEM); } - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { pr_err("failed to init se_session\n"); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9393544fb471..9c820ba5ae82 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -235,7 +235,7 @@ void transport_subsystem_check_init(void) sub_api_initialized = 1; } -struct se_session *transport_init_session(void) +struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; @@ -251,6 +251,7 @@ struct se_session *transport_init_session(void) INIT_LIST_HEAD(&se_sess->sess_wait_list); spin_lock_init(&se_sess->sess_cmd_lock); kref_init(&se_sess->sess_kref); + se_sess->sup_prot_ops = sup_prot_ops; return se_sess; } @@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess, EXPORT_SYMBOL(transport_alloc_session_tags); struct se_session *transport_init_session_tags(unsigned int tag_num, - unsigned int tag_size) + unsigned int tag_size, + enum target_prot_op sup_prot_ops) { struct se_session *se_sess; int rc; - se_sess = transport_init_session(); + se_sess = transport_init_session(sup_prot_ops); if (IS_ERR(se_sess)) return se_sess; diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index ae52c08dad09..04751422178c 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -211,7 +211,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, return NULL; 
sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, - sizeof(struct ft_cmd)); + sizeof(struct ft_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(sess->se_sess)) { kfree(sess); return NULL; diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index f9afa4a4ec3c..f34b6df3572b 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ b/drivers/usb/gadget/tcm_usb_gadget.c @@ -1731,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) pr_err("Unable to allocate struct tcm_vhost_nexus\n"); goto err_unlock; } - tv_nexus->tvn_se_sess = transport_init_session(); + tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL); if (IS_ERR(tv_nexus->tvn_se_sess)) goto err_free; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 4a473355020f..cf50ce93975b 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1745,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, */ tv_nexus->tvn_se_sess = transport_init_session_tags( TCM_VHOST_DEFAULT_TAGS, - sizeof(struct tcm_vhost_cmd)); + sizeof(struct tcm_vhost_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(tv_nexus->tvn_se_sess)) { mutex_unlock(&tpg->tv_tpg_mutex); kfree(tv_nexus); diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index 8d19339292b8..33b487b5da92 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -22,6 +22,7 @@ struct iscsit_transport { int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *); int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *); void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *); + enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *); }; static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ec3e3a3ff4f6..9ec9864ecf38 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -442,15 +442,19 @@ struct se_tmr_req { }; enum target_prot_op { - TARGET_PROT_NORMAL = 0, - TARGET_PROT_DIN_INSERT, - TARGET_PROT_DOUT_INSERT, - TARGET_PROT_DIN_STRIP, - TARGET_PROT_DOUT_STRIP, - TARGET_PROT_DIN_PASS, - TARGET_PROT_DOUT_PASS, + TARGET_PROT_NORMAL = 0, + TARGET_PROT_DIN_INSERT = (1 << 0), + TARGET_PROT_DOUT_INSERT = (1 << 1), + TARGET_PROT_DIN_STRIP = (1 << 2), + TARGET_PROT_DOUT_STRIP = (1 << 3), + TARGET_PROT_DIN_PASS = (1 << 4), + TARGET_PROT_DOUT_PASS = (1 << 5), }; +#define TARGET_PROT_ALL TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \ + TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \ + TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS + enum target_prot_type { TARGET_DIF_TYPE0_PROT, TARGET_DIF_TYPE1_PROT, @@ -605,6 +609,7 @@ struct se_node_acl { struct se_session { unsigned sess_tearing_down:1; u64 sess_bin_isid; + enum target_prot_op sup_prot_ops; struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; void *fabric_sess_ptr; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 1d1043644b9b..22a4e98eec80 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -84,10 +84,11 @@ struct target_core_fabric_ops { void (*fabric_drop_nodeacl)(struct se_node_acl *); }; -struct se_session *transport_init_session(void); +struct se_session *transport_init_session(enum target_prot_op); int transport_alloc_session_tags(struct se_session *, unsigned int, unsigned int); -struct se_session 
*transport_init_session_tags(unsigned int, unsigned int); +struct se_session *transport_init_session_tags(unsigned int, unsigned int, + enum target_prot_op); void __transport_register_session(struct se_portal_group *, struct se_node_acl *, struct se_session *, void *); void transport_register_session(struct se_portal_group *, -- cgit v1.2.3 From b076808051f2c80d38e03fb2f1294f525c7a446d Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Apr 2014 13:58:35 -0400 Subject: ib_srpt: Use correct ib_sg_dma primitives The code was incorrectly using sg_dma_address() and sg_dma_len() instead of ib_sg_dma_address() and ib_sg_dma_len(). This prevents srpt from functioning with the Intel HCA and indeed will corrupt memory badly. Cc: Bart Van Assche Reviewed-by: Dennis Dalessandro Tested-by: Vinod Kumar Signed-off-by: Mike Marciniszyn Cc: stable@vger.kernel.org # 3.3+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/srpt/ib_srpt.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/infiniband/ulp') diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index bcfb398a6639..fe09f2788b15 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch, static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx) { + struct ib_device *dev = ch->sport->sdev->device; struct se_cmd *cmd; struct scatterlist *sg, *sg_orig; int sg_cnt; @@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, db = ioctx->rbufs; tsize = cmd->data_length; - dma_len = sg_dma_len(&sg[0]); + dma_len = ib_sg_dma_len(dev, &sg[0]); riu = ioctx->rdma_ius; /* @@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, ++j; if (j < count) { sg = sg_next(sg); - dma_len = sg_dma_len(sg); + dma_len = ib_sg_dma_len( + dev, sg); } } } else { @@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, tsize = cmd->data_length; riu = ioctx->rdma_ius; sg = sg_orig; - dma_len = sg_dma_len(&sg[0]); - dma_addr = sg_dma_address(&sg[0]); + dma_len = ib_sg_dma_len(dev, &sg[0]); + dma_addr = ib_sg_dma_address(dev, &sg[0]); /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */ for (i = 0, j = 0; @@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, ++j; if (j < count) { sg = sg_next(sg); - dma_len = sg_dma_len(sg); - dma_addr = sg_dma_address(sg); + dma_len = ib_sg_dma_len( + dev, sg); + dma_addr = ib_sg_dma_address( + dev, sg); } } } else { -- cgit v1.2.3
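A minimal sketch of the scatterlist access pattern the last fix enforces, assuming the list is handed to ib_dma_map_sg() first; the helper name build_sge_list() and its ib_sge output array are illustrative only and do not appear in the srpt code:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/*
 * Fill ib_sge entries from a scatterlist mapped with ib_dma_map_sg(),
 * using the device-aware ib_sg_dma_address()/ib_sg_dma_len() accessors
 * rather than the generic sg_dma_address()/sg_dma_len() helpers.
 */
static int build_sge_list(struct ib_device *dev, struct scatterlist *sgl,
			  int sg_cnt, u32 lkey, struct ib_sge *sge)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = ib_dma_map_sg(dev, sgl, sg_cnt, DMA_TO_DEVICE);
	if (unlikely(!mapped))
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		sge[i].addr   = ib_sg_dma_address(dev, sg); /* not sg_dma_address(sg) */
		sge[i].length = ib_sg_dma_len(dev, sg);     /* not sg_dma_len(sg) */
		sge[i].lkey   = lkey;
	}
	return mapped;
}

The ib_sg_dma_* wrappers let HCAs that install their own DMA mapping operations (such as the Intel hardware mentioned in the commit message) return the addresses their hardware actually expects; on devices without custom mapping ops they fall back to the plain sg_dma_* values.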