Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/rdma.c	13
-rw-r--r--	drivers/nvme/target/rdma.c	13
2 files changed, 12 insertions(+), 14 deletions(-)
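These hunks track two RDMA core API changes: the bad_wr out-parameter of ib_post_send()/ib_post_recv()/ib_post_srq_recv() became optional, so callers posting a single work request can pass NULL, and the combined max_sge device attribute was split into max_send_sge and max_recv_sge. The out-parameter remains useful when posting a chain of work requests, since it reports which WR the provider rejected; a minimal sketch of that case, with a hypothetical helper name:

/*
 * Hypothetical helper (not part of this diff): when posting a chain
 * of work requests, bad_wr tells the caller which WR in the chain
 * failed to post. For the single-WR posts below it carries no
 * information, hence the switch to NULL.
 */
#include <rdma/ib_verbs.h>

static int post_send_chain(struct ib_qp *qp, struct ib_send_wr *first)
{
	struct ib_send_wr *bad_wr;
	int ret;

	ret = ib_post_send(qp, first, &bad_wr);
	if (unlikely(ret))
		pr_err("post_send failed at wr %p with error %d\n",
		       bad_wr, ret);
	return ret;
}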
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0805fa6215ee..dc042017c293 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -378,7 +378,7 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
}
ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
- ndev->dev->attrs.max_sge - 1);
+ ndev->dev->attrs.max_send_sge - 1);
list_add(&ndev->entry, &device_list);
out_unlock:
mutex_unlock(&device_list_mutex);
@@ -1093,7 +1093,6 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
struct nvme_rdma_request *req)
{
- struct ib_send_wr *bad_wr;
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
@@ -1105,7 +1104,7 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
req->reg_cqe.done = nvme_rdma_inv_rkey_done;
wr.wr_cqe = &req->reg_cqe;
- return ib_post_send(queue->qp, &wr, &bad_wr);
+ return ib_post_send(queue->qp, &wr, NULL);
}
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
@@ -1308,7 +1307,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first)
{
- struct ib_send_wr wr, *bad_wr;
+ struct ib_send_wr wr;
int ret;
sge->addr = qe->dma;
@@ -1327,7 +1326,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
else
first = &wr;
- ret = ib_post_send(queue->qp, first, &bad_wr);
+ ret = ib_post_send(queue->qp, first, NULL);
if (unlikely(ret)) {
dev_err(queue->ctrl->ctrl.device,
"%s failed with error code %d\n", __func__, ret);
@@ -1338,7 +1337,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe)
{
- struct ib_recv_wr wr, *bad_wr;
+ struct ib_recv_wr wr;
struct ib_sge list;
int ret;
@@ -1353,7 +1352,7 @@ static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
wr.sg_list = &list;
wr.num_sge = 1;
- ret = ib_post_recv(queue->qp, &wr, &bad_wr);
+ ret = ib_post_recv(queue->qp, &wr, NULL);
if (unlikely(ret)) {
dev_err(queue->ctrl->ctrl.device,
"%s failed with error code %d\n", __func__, ret);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e7f43d1e1779..3533e918ea37 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -435,7 +435,6 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *cmd)
{
- struct ib_recv_wr *bad_wr;
int ret;
ib_dma_sync_single_for_device(ndev->device,
@@ -443,9 +442,9 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
DMA_FROM_DEVICE);
if (ndev->srq)
- ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
+ ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
else
- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+ ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -532,7 +531,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct nvmet_rdma_rsp *rsp =
container_of(req, struct nvmet_rdma_rsp, req);
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
- struct ib_send_wr *first_wr, *bad_wr;
+ struct ib_send_wr *first_wr;
if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
@@ -553,7 +552,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
rsp->send_sge.addr, rsp->send_sge.length,
DMA_TO_DEVICE);
- if (unlikely(ib_post_send(cm_id->qp, first_wr, &bad_wr))) {
+ if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
}
@@ -892,7 +891,7 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
inline_page_count = num_pages(port->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
- cm_id->device->attrs.max_sge) - 1;
+ cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
port->inline_data_size, cm_id->device->name,
@@ -969,7 +968,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
- ndev->device->attrs.max_sge);
+ ndev->device->attrs.max_send_sge);
if (ndev->srq) {
qp_attr.srq = ndev->srq;
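The target sizes each QP direction against its own attribute: send SGEs against the larger of max_send_sge and max_sge_rd (RDMA READs are issued on the send queue), and receive SGEs against max_recv_sge. A sketch of that pattern; the recv-side line is illustrative and not part of the hunks above:

/*
 * Sketch of per-direction QP capability sizing under the split
 * attributes. The send cap mirrors the hunk above; the recv cap is
 * an assumed example of checking against max_recv_sge.
 */
struct ib_qp_init_attr qp_attr = { };

qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
			       ndev->device->attrs.max_send_sge);
qp_attr.cap.max_recv_sge = min(1 + inline_page_count,
			       ndev->device->attrs.max_recv_sge);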