| author | Bob Pearson <rpearsonhpe@gmail.com> | 2022-02-23 17:07:04 -0600 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2022-02-23 20:29:15 -0400 |
| commit | 4a4f1073475796bcb343998bb1eddf6844b77963 (patch) | |
| tree | c41415687900fcba93a17494f69124408fab3d59 /drivers/infiniband/sw | |
| parent | 6a8a2e473b986191f4113c485905ea8462724d58 (diff) | |
| download | linux-4a4f1073475796bcb343998bb1eddf6844b77963.tar.bz2 | |
RDMA/rxe: Collect mca init code in a subroutine
Collect the initialization code for struct rxe_mca into a subroutine,
__rxe_init_mca(), to clean up rxe_attach_mcg() in rxe_mcast.c. Also check
the device limit on the total number of attached QPs
(attr.max_total_mcast_qp_attach) via a new rxe->mcg_attach counter.
Link: https://lore.kernel.org/r/20220223230706.50332-3-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband/sw')
| -rw-r--r-- | drivers/infiniband/sw/rxe/rxe_mcast.c | 58 |
| -rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.h | 1 |

2 files changed, 44 insertions, 15 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 4935fe5c5868..a0a7f8720f95 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -259,6 +259,46 @@ static void rxe_destroy_mcg(struct rxe_mcg *mcg)
 	spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
 }
 
+/**
+ * __rxe_init_mca - initialize a new mca holding lock
+ * @qp: qp object
+ * @mcg: mcg object
+ * @mca: empty space for new mca
+ *
+ * Context: caller must hold references on qp and mcg, rxe->mcg_lock
+ * and pass memory for new mca
+ *
+ * Returns: 0 on success else an error
+ */
+static int __rxe_init_mca(struct rxe_qp *qp, struct rxe_mcg *mcg,
+			  struct rxe_mca *mca)
+{
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	int n;
+
+	n = atomic_inc_return(&rxe->mcg_attach);
+	if (n > rxe->attr.max_total_mcast_qp_attach) {
+		atomic_dec(&rxe->mcg_attach);
+		return -ENOMEM;
+	}
+
+	n = atomic_inc_return(&mcg->qp_num);
+	if (n > rxe->attr.max_mcast_qp_attach) {
+		atomic_dec(&mcg->qp_num);
+		atomic_dec(&rxe->mcg_attach);
+		return -ENOMEM;
+	}
+
+	atomic_inc(&qp->mcg_num);
+
+	rxe_add_ref(qp);
+	mca->qp = qp;
+
+	list_add_tail(&mca->qp_list, &mcg->qp_list);
+
+	return 0;
+}
+
 static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
 			  struct rxe_mcg *mcg)
 {
@@ -291,22 +331,9 @@ static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
 		}
 	}
 
-	/* check limits after checking if already attached */
-	if (atomic_inc_return(&mcg->qp_num) > rxe->attr.max_mcast_qp_attach) {
-		atomic_dec(&mcg->qp_num);
+	err = __rxe_init_mca(qp, mcg, mca);
+	if (err)
 		kfree(mca);
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* protect pointer to qp in mca */
-	rxe_add_ref(qp);
-	mca->qp = qp;
-
-	atomic_inc(&qp->mcg_num);
-	list_add(&mca->qp_list, &mcg->qp_list);
-
-	err = 0;
 out:
 	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 	return err;
@@ -329,6 +356,7 @@ static int rxe_detach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
 		if (mca->qp == qp) {
 			list_del(&mca->qp_list);
 			atomic_dec(&qp->mcg_num);
+			atomic_dec(&rxe->mcg_attach);
 			rxe_drop_ref(qp);
 
 			/* if the number of qp's attached to the
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 20fe3ee6589d..6b15251ff67a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -401,6 +401,7 @@ struct rxe_dev {
 	spinlock_t		mcg_lock;
 	struct rb_root		mcg_tree;
 	atomic_t		mcg_num;
+	atomic_t		mcg_attach;
 
 	spinlock_t		pending_lock; /* guard pending_mmaps */
 	struct list_head	pending_mmaps;
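For readers unfamiliar with the pattern, below is a minimal, self-contained userspace sketch (not kernel code) of the increment-then-check scheme that __rxe_init_mca() applies to the new rxe->mcg_attach counter: bump the counter first, then roll the bump back and fail if the configured limit would be exceeded. C11 atomics stand in for the kernel's atomic_inc_return()/atomic_dec(), and the names MAX_TOTAL_ATTACH, total_attach, reserve_attach_slot() and release_attach_slot() are invented for this illustration.

```c
/*
 * Userspace sketch of the increment-then-check limit enforcement used by
 * __rxe_init_mca(); all names here are hypothetical stand-ins.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TOTAL_ATTACH 4      /* stand-in for attr.max_total_mcast_qp_attach */

static atomic_int total_attach; /* stand-in for rxe->mcg_attach */

/* Reserve one attach slot; return 0 on success, -1 (think -ENOMEM) if full. */
static int reserve_attach_slot(void)
{
        /*
         * atomic_fetch_add() returns the old value, so adding 1 gives the
         * new count, mirroring the kernel's atomic_inc_return().
         */
        int n = atomic_fetch_add(&total_attach, 1) + 1;

        if (n > MAX_TOTAL_ATTACH) {
                atomic_fetch_sub(&total_attach, 1); /* undo the optimistic bump */
                return -1;
        }
        return 0;
}

/* Mirrors the detach path: give the slot back. */
static void release_attach_slot(void)
{
        atomic_fetch_sub(&total_attach, 1);
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("attach %d -> %s\n", i,
                       reserve_attach_slot() ? "rejected" : "ok");

        release_attach_slot(); /* a detach frees a slot for a later attach */
        printf("after one detach -> %s\n",
               reserve_attach_slot() ? "rejected" : "ok");
        return 0;
}
```

As in the kernel code, a racing attacher may briefly push the counter above the limit, but every over-limit caller decrements again before returning, so the settled count never exceeds the limit and no lock is needed around the check itself.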