Diffstat (limited to 'drivers/infiniband/hw/cxgb4/mem.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index b2523b213c86..7e0eb201cc26 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -100,11 +100,10 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- return ret;
if (wr_waitp)
- ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
return ret;
}
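
The old sequence in _c4iw_write_mem_dma_aligned() sent the work request and then, if a wait object was supplied, blocked in c4iw_wait_for_reply(). The new c4iw_ref_send_wait() helper folds both steps together. Its body is not part of this diff; the following is only a plausible sketch of what it does, assuming it takes a reference on the wait object (via a matching c4iw_get_wr_wait() helper, also not shown here) so the object survives a late firmware reply, then sends and waits:

int c4iw_ref_send_wait(struct c4iw_rdev *rdev, struct sk_buff *skb,
                       struct c4iw_wr_wait *wr_waitp, u32 hwtid, u32 qpid,
                       const char *func)
{
        int ret;

        /* Hold a reference so the wait object stays valid even if the
         * waiter gives up before the firmware reply arrives. */
        c4iw_get_wr_wait(wr_waitp);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                /* Nothing was sent, so no reply will ever drop this ref. */
                c4iw_put_wr_wait(wr_waitp);
                return ret;
        }
        return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
}

The callers in mem.c only ever pass 0 for hwtid and qpid, as the diff shows.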
@@ -173,14 +172,17 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (copy_len % T4_ULPTX_MIN_IO)
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
- ret = c4iw_ofld_send(rdev, skb);
- skb = NULL;
+ if (i == (num_wqe-1))
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
+ __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
if (ret)
- return ret;
+ break;
+ skb = NULL;
len -= C4IW_MAX_INLINE_SIZE;
}
- ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
return ret;
}
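
In _c4iw_write_mem_inline() the explicit c4iw_wait_for_reply() after the loop is gone: only the last of the num_wqe inline fragments goes through c4iw_ref_send_wait(), which sends and waits in one step, while the earlier fragments remain fire-and-forget via c4iw_ofld_send(). The reason the wait object needs reference counting at all is that the firmware reply can arrive after the waiter has timed out, so the reply path must hold its own reference. A rough sketch of that reply side, with a hypothetical handler name (the real wake-up helpers are not visible in this diff, and the completion/ret fields of struct c4iw_wr_wait are assumed):

/* Hypothetical reply-side sketch: record the status, wake the waiter,
 * and drop the reference taken by c4iw_ref_send_wait(). */
static void mem_write_reply(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
        c4iw_put_wr_wait(wr_waitp);
}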
@@ -447,7 +449,7 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
- mhp->wr_waitp = kzalloc(sizeof(*mhp->wr_waitp), GFP_KERNEL);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!mhp->wr_waitp) {
ret = -ENOMEM;
goto err_free_mhp;
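
Each of the registration paths below switches from open-coded kzalloc()/kfree() of the wait object to c4iw_alloc_wr_wait()/c4iw_put_wr_wait(). Those helpers are not part of this diff either; a minimal kref-based sketch, assuming struct c4iw_wr_wait gained a kref member and that _c4iw_free_wr_wait() is its release callback:

struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
        struct c4iw_wr_wait *wr_waitp;

        wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
        if (wr_waitp)
                kref_init(&wr_waitp->kref);     /* owner holds the first ref */
        return wr_waitp;
}

static void _c4iw_free_wr_wait(struct kref *kref)
{
        struct c4iw_wr_wait *wr_waitp =
                container_of(kref, struct c4iw_wr_wait, kref);

        kfree(wr_waitp);
}

static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        /* Free only when the last reference is dropped, so an in-flight
         * reply can no longer race with the owner's teardown. */
        kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
}

With that, every kfree(mhp->wr_waitp) in the error and teardown paths below becomes c4iw_put_wr_wait(), which releases the memory only once any reference still held by an outstanding work request has also been dropped.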
@@ -485,7 +487,7 @@ err_dereg_mem:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_wr_wait:
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
err_free_skb:
kfree_skb(mhp->dereg_skb);
err_free_mhp:
@@ -522,7 +524,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
- mhp->wr_waitp = kzalloc(sizeof(*mhp->wr_waitp), GFP_KERNEL);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!mhp->wr_waitp)
goto err_free_mhp;
@@ -600,7 +602,7 @@ err_umem_release:
err_free_skb:
kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
kfree(mhp);
return ERR_PTR(err);
@@ -625,7 +627,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
if (!mhp)
return ERR_PTR(-ENOMEM);
- mhp->wr_waitp = kzalloc(sizeof(*mhp->wr_waitp), GFP_KERNEL);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!mhp->wr_waitp) {
ret = -ENOMEM;
goto free_mhp;
@@ -659,7 +661,7 @@ dealloc_win:
free_skb:
kfree_skb(mhp->dereg_skb);
free_wr_wait:
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
kfree(mhp);
return ERR_PTR(ret);
@@ -678,7 +680,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
return 0;
@@ -710,7 +712,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
goto err;
}
- mhp->wr_waitp = kzalloc(sizeof(*mhp->wr_waitp), GFP_KERNEL);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
if (!mhp->wr_waitp) {
ret = -ENOMEM;
goto err_free_mhp;
@@ -758,7 +760,7 @@ err_free_dma:
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
kfree(mhp);
err:
@@ -812,7 +814,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
if (mhp->umem)
ib_umem_release(mhp->umem);
pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
- kfree(mhp->wr_waitp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
return 0;
}