author		Bob Pearson <rpearsonhpe@gmail.com>	2022-03-03 18:08:04 -0600
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-03-16 10:34:42 -0300
commit		3225717f6dfa29a6f03629b7a7f8492e1521d06d
tree		488095b35bc616b55823b18b4283ab38914cc702 /drivers/infiniband/sw/rxe/rxe_verbs.c
parent		df34dc9e03bfb9181f6f5405f4dd319340b5b46c
download	linux-3225717f6dfa29a6f03629b7a7f8492e1521d06d.tar.bz2
RDMA/rxe: Replace red-black trees by xarrays
Currently the rxe driver uses red-black trees to add indices to the rxe object pools. Linux xarrays provide a better way to implement the same functionality for indices. This patch replaces red-black trees by xarrays for pool objects. Since xarrays already have a spinlock, use that in place of the pool rwlock. Make sure that all changes to the xarray (index) and kref (ref count) occur atomically.

Link: https://lore.kernel.org/r/20220304000808.225811-9-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
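For context, below is a minimal sketch of xarray-based index allocation of the kind this series moves to. All names here (example_pool, example_elem, example_*) are hypothetical; the rxe driver's real pool code lives in rxe_pool.c/rxe_pool.h, which this diffstat does not cover. The xarray's internal spinlock stands in for the old pool rwlock, and the index is assigned when the object is added to the pool, which is why the per-verb rxe_add_index()/rxe_drop_index() calls in the diff below can simply be removed.

/*
 * Illustrative sketch only, not the patch itself.  All identifiers
 * below are hypothetical stand-ins for the rxe pool code.
 */
#include <linux/xarray.h>
#include <linux/kref.h>

struct example_pool {
	struct xarray xa;	/* xa_lock replaces the pool rwlock */
	u32 min_index;
	u32 max_index;
	u32 next;		/* cursor for cyclic index allocation */
};

struct example_elem {
	struct kref ref_cnt;
	u32 index;
};

static void example_pool_init(struct example_pool *pool, u32 min, u32 max)
{
	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);	/* enable xa_alloc*() */
	pool->min_index = min;
	pool->max_index = max;
	pool->next = min;
}

/* Take the initial reference and assign an index in one step. */
static int example_add_to_pool(struct example_pool *pool,
			       struct example_elem *elem)
{
	struct xa_limit limit = XA_LIMIT(pool->min_index, pool->max_index);
	int err;

	kref_init(&elem->ref_cnt);

	/* xa_alloc_cyclic() takes xa_lock internally, so index insertion
	 * is serialized without a separate pool lock. */
	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, limit,
			      &pool->next, GFP_KERNEL);
	return err < 0 ? err : 0;
}

static void example_remove_from_pool(struct example_pool *pool,
				     struct example_elem *elem)
{
	xa_erase(&pool->xa, elem->index);	/* also takes xa_lock */
}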
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_verbs.c	12
1 file changed, 0 insertions, 12 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 80df9a8f71a1..f0c5715ac500 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -181,7 +181,6 @@ static int rxe_create_ah(struct ib_ah *ibah,
 		return err;
 
 	/* create index > 0 */
-	rxe_add_index(ah);
 	ah->ah_num = ah->elem.index;
 
 	if (uresp) {
@@ -189,7 +188,6 @@ static int rxe_create_ah(struct ib_ah *ibah,
 		err = copy_to_user(&uresp->ah_num, &ah->ah_num,
 					 sizeof(uresp->ah_num));
 		if (err) {
-			rxe_drop_index(ah);
 			rxe_drop_ref(ah);
 			return -EFAULT;
 		}
@@ -230,7 +228,6 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
 	struct rxe_ah *ah = to_rah(ibah);
 
-	rxe_drop_index(ah);
 	rxe_drop_ref(ah);
 	return 0;
 }
@@ -438,7 +435,6 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 	if (err)
 		return err;
 
-	rxe_add_index(qp);
 	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
 	if (err)
 		goto qp_init;
@@ -446,7 +442,6 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 	return 0;
 
 qp_init:
-	rxe_drop_index(qp);
 	rxe_drop_ref(qp);
 	return err;
 }
@@ -501,7 +496,6 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 		return ret;
 
 	rxe_qp_destroy(qp);
-	rxe_drop_index(qp);
 	rxe_drop_ref(qp);
 	return 0;
 }
@@ -908,7 +902,6 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	rxe_add_index(mr);
 	rxe_add_ref(pd);
 	rxe_mr_init_dma(pd, access, mr);
 
@@ -932,7 +925,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 		goto err2;
 	}
 
-	rxe_add_index(mr);
 
 	rxe_add_ref(pd);
 
@@ -944,7 +936,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 
 err3:
 	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
 	rxe_drop_ref(mr);
 err2:
 	return ERR_PTR(err);
@@ -967,8 +958,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 		goto err1;
 	}
 
-	rxe_add_index(mr);
-
 	rxe_add_ref(pd);
 
 	err = rxe_mr_init_fast(pd, max_num_sg, mr);
@@ -979,7 +968,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 
 err2:
 	rxe_drop_ref(pd);
-	rxe_drop_index(mr);
 	rxe_drop_ref(mr);
 err1:
 	return ERR_PTR(err);