author     Bob Pearson <rpearsonhpe@gmail.com>    2022-02-08 15:16:35 -0600
committer  Jason Gunthorpe <jgg@nvidia.com>       2022-02-16 11:55:28 -0400
commit     9fd0eb7c3c73c80a7bbe28dc71ae8ec5698a7e84 (patch)
tree       4bab37d4ca217043c9e6893fc88fb3dca7863653 /drivers
parent     a099b08599e6ae6b8e9faccee83760dab622c11e (diff)
download   linux-9fd0eb7c3c73c80a7bbe28dc71ae8ec5698a7e84.tar.bz2
RDMA/rxe: Move mcg_lock to rxe
Replace mcg->mcg_lock and mc_grp_pool->pool_lock with a single rxe->mcg_lock. This is the first step of several intended to decouple the mc_grp and mc_elem objects from the rxe pool code.

Link: https://lore.kernel.org/r/20220208211644.123457-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
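The patch swaps a per-group spinlock (and the pool's rwlock) for one device-wide spinlock. A minimal userspace sketch of that locking pattern follows, using pthread spinlocks in place of the kernel's spinlock_t; the structs and the mcg_attach_qp() helper are simplified illustrative stand-ins, not the kernel definitions.

/*
 * Minimal userspace sketch of the locking change, not the kernel code:
 * a single device-level lock (rxe->mcg_lock) guards every multicast
 * group's membership state instead of one lock per group.
 */
#include <pthread.h>
#include <stdio.h>

struct rxe_dev {
	pthread_spinlock_t mcg_lock;	/* guards all mcg state on this device */
};

struct rxe_mcg {
	struct rxe_dev *rxe;
	int num_qp;			/* protected by rxe->mcg_lock */
};

static void mcg_attach_qp(struct rxe_mcg *grp)
{
	struct rxe_dev *rxe = grp->rxe;

	/* before the patch this would have taken a per-group grp->mcg_lock */
	pthread_spin_lock(&rxe->mcg_lock);
	grp->num_qp++;
	pthread_spin_unlock(&rxe->mcg_lock);
}

int main(void)
{
	struct rxe_dev rxe;
	struct rxe_mcg grp = { .rxe = &rxe, .num_qp = 0 };

	pthread_spin_init(&rxe.mcg_lock, PTHREAD_PROCESS_PRIVATE);
	mcg_attach_qp(&grp);
	printf("num_qp = %d\n", grp.num_qp);
	pthread_spin_destroy(&rxe.mcg_lock);
	return 0;
}

Taking the lock at the device rather than the group level serializes multicast group updates across the whole device; that is the trade-off accepted here so that later patches can detach mc_grp/mc_elem handling from the rxe pool code.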
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c        2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mcast.c  19
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c   4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h  3
4 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index fab291245366..e74c4216b314 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -211,6 +211,8 @@ static int rxe_init(struct rxe_dev *rxe)
spin_lock_init(&rxe->pending_lock);
INIT_LIST_HEAD(&rxe->pending_mmaps);
+ spin_lock_init(&rxe->mcg_lock);
+
mutex_init(&rxe->usdev_lock);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 2878a56d9994..fae04497cf2b 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -25,7 +25,7 @@ static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
return dev_mc_del(rxe->ndev, ll_addr);
}
-/* caller should hold mc_grp_pool->pool_lock */
+/* caller should hold rxe->mcg_lock */
static struct rxe_mcg *create_grp(struct rxe_dev *rxe,
struct rxe_pool *pool,
union ib_gid *mgid)
@@ -38,7 +38,6 @@ static struct rxe_mcg *create_grp(struct rxe_dev *rxe,
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&grp->qp_list);
- spin_lock_init(&grp->mcg_lock);
grp->rxe = rxe;
rxe_add_key_locked(grp, mgid);
@@ -63,7 +62,7 @@ static int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
if (rxe->attr.max_mcast_qp_attach == 0)
return -EINVAL;
- write_lock_irqsave(&pool->pool_lock, flags);
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
@@ -71,13 +70,13 @@ static int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
grp = create_grp(rxe, pool, mgid);
if (IS_ERR(grp)) {
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
err = PTR_ERR(grp);
return err;
}
done:
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
*grp_p = grp;
return 0;
}
@@ -90,7 +89,7 @@ static int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
unsigned long flags;
/* check to see if the qp is already a member of the group */
- spin_lock_irqsave(&grp->mcg_lock, flags);
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
list_for_each_entry(elem, &grp->qp_list, qp_list) {
if (elem->qp == qp) {
err = 0;
@@ -120,7 +119,7 @@ static int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
err = 0;
out:
- spin_unlock_irqrestore(&grp->mcg_lock, flags);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
return err;
}
@@ -135,7 +134,7 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
if (!grp)
goto err1;
- spin_lock_irqsave(&grp->mcg_lock, flags);
+ spin_lock_irqsave(&rxe->mcg_lock, flags);
list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
if (elem->qp == qp) {
@@ -143,7 +142,7 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
grp->num_qp--;
atomic_dec(&qp->mcg_num);
- spin_unlock_irqrestore(&grp->mcg_lock, flags);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
rxe_drop_ref(elem);
rxe_drop_ref(grp); /* ref held by QP */
rxe_drop_ref(grp); /* ref from get_key */
@@ -151,7 +150,7 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- spin_unlock_irqrestore(&grp->mcg_lock, flags);
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
rxe_drop_ref(grp); /* ref from get_key */
err1:
return -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 7ff6b53555f4..a084b5d69937 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -250,7 +250,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
if (!mcg)
goto drop; /* mcast group not registered */
- spin_lock_bh(&mcg->mcg_lock);
+ spin_lock_bh(&rxe->mcg_lock);
/* this is unreliable datagram service so we let
* failures to deliver a multicast packet to a
@@ -298,7 +298,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
}
}
- spin_unlock_bh(&mcg->mcg_lock);
+ spin_unlock_bh(&rxe->mcg_lock);
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 55f8ed2bc621..9940c69cbb63 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -353,7 +353,6 @@ struct rxe_mw {
struct rxe_mcg {
struct rxe_pool_elem elem;
- spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
union ib_gid mgid;
@@ -399,6 +398,8 @@ struct rxe_dev {
struct rxe_pool mc_grp_pool;
struct rxe_pool mc_elem_pool;
+ spinlock_t mcg_lock;
+
spinlock_t pending_lock; /* guard pending_mmaps */
struct list_head pending_mmaps;