author     Bob Pearson <rpearsonhpe@gmail.com>  2021-01-25 15:16:36 -0600
committer  Jason Gunthorpe <jgg@nvidia.com>     2021-01-28 15:29:55 -0400
commit     c4369575b2bc2993edf8223a8f5c9f510ee629d0 (patch)
tree       c8a6a103fc2f47d9fd9bde345e52920173ed881f /drivers/infiniband/sw/rxe
parent     def4cd43f522253645b72c97181399c241b54536 (diff)
RDMA/rxe: Fix bug in rxe_alloc()
A recent patch which added an 'unlocked' version of rxe_alloc() introduced a bug that causes kzalloc(..., GFP_KERNEL) to be called while holding a spin lock. This patch corrects that error.

rxe_alloc_nl() should always be called while holding the pool->pool_lock, so the second argument to kzalloc() there should be GFP_ATOMIC.

Prior to the change, rxe_alloc() only took the lock around the check that pool->state is RXE_POOL_STATE_VALID, which avoids races between working threads and a thread shutting down the rxe driver. This patch reverts rxe_alloc() to that behavior, so the lock is no longer held when kzalloc() is called.

Link: https://lore.kernel.org/r/20210125211641.2694-2-rpearson@hpe.com
Reported-by: syzbot+ec2fd72374785d0e558e@syzkaller.appspotmail.com
Fixes: 3853c35e243d ("RDMA/rxe: Add unlocked versions of pool APIs")
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
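For context, the rule behind the fix: GFP_KERNEL allocations may sleep to reclaim memory, and sleeping is forbidden while a spinlock is held; GFP_ATOMIC allocations never sleep and are therefore safe in atomic context. A minimal sketch of the rule follows; the my_pool/my_alloc* names are hypothetical illustrations, not part of the rxe code:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	size_t obj_size;
};

/* Caller holds p->lock: only a non-sleeping allocation is legal here. */
static void *my_alloc_locked(struct my_pool *p)
{
	return kzalloc(p->obj_size, GFP_ATOMIC);
}

/* No lock held: a GFP_KERNEL allocation may sleep, which is fine. */
static void *my_alloc(struct my_pool *p)
{
	/*
	 * Calling kzalloc(..., GFP_KERNEL) with p->lock held would be
	 * the "sleeping function called from invalid context" bug that
	 * syzbot reported against rxe_alloc().
	 */
	return kzalloc(p->obj_size, GFP_KERNEL);
}

The might_sleep_if() annotation, which the patch moves from rxe_alloc_nl() into rxe_alloc(), asserts exactly this rule at runtime when kernel debugging options such as CONFIG_DEBUG_ATOMIC_SLEEP are enabled.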
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c | 41 +++++++++++++++++++++++++++++++++++------
 1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index d26730eec720..cfcd55175572 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -343,8 +343,6 @@ void *rxe_alloc_nl(struct rxe_pool *pool)
 	struct rxe_pool_entry *elem;
 	u8 *obj;
 
-	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
-
 	if (pool->state != RXE_POOL_STATE_VALID)
 		return NULL;
 
@@ -356,8 +354,7 @@ void *rxe_alloc_nl(struct rxe_pool *pool)
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto out_cnt;
 
-	obj = kzalloc(info->size, (pool->flags & RXE_POOL_ATOMIC) ?
-			GFP_ATOMIC : GFP_KERNEL);
+	obj = kzalloc(info->size, GFP_ATOMIC);
 	if (!obj)
 		goto out_cnt;
 
@@ -378,14 +375,46 @@ out_put_pool:
 
 void *rxe_alloc(struct rxe_pool *pool)
 {
-	u8 *obj;
 	unsigned long flags;
+	struct rxe_type_info *info = &rxe_type_info[pool->type];
+	struct rxe_pool_entry *elem;
+	u8 *obj;
+
+	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
 
 	read_lock_irqsave(&pool->pool_lock, flags);
-	obj = rxe_alloc_nl(pool);
+	if (pool->state != RXE_POOL_STATE_VALID) {
+		read_unlock_irqrestore(&pool->pool_lock, flags);
+		return NULL;
+	}
+
+	kref_get(&pool->ref_cnt);
 	read_unlock_irqrestore(&pool->pool_lock, flags);
 
+	if (!ib_device_try_get(&pool->rxe->ib_dev))
+		goto out_put_pool;
+
+	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+		goto out_cnt;
+
+	obj = kzalloc(info->size, (pool->flags & RXE_POOL_ATOMIC) ?
+			GFP_ATOMIC : GFP_KERNEL);
+	if (!obj)
+		goto out_cnt;
+
+	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+
+	elem->pool = pool;
+	kref_init(&elem->ref_cnt);
+
 	return obj;
+
+out_cnt:
+	atomic_dec(&pool->num_elem);
+	ib_device_put(&pool->rxe->ib_dev);
+out_put_pool:
+	rxe_pool_put(pool);
+	return NULL;
 }
 
 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
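The reworked rxe_alloc() above follows a common kernel pattern: hold the lock only long enough to validate the pool state and take a reference, then drop the lock before doing anything that may sleep. A condensed sketch of that pattern, using hypothetical my_pool names rather than the actual rxe structures:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	rwlock_t lock;
	struct kref ref;
	bool valid;
	size_t obj_size;
};

static void my_pool_release(struct kref *kref)
{
	/* Teardown may proceed once the last reference is dropped. */
}

static void *my_alloc(struct my_pool *p, gfp_t gfp)
{
	unsigned long flags;
	void *obj;

	/* Critical section covers only the state check and reference. */
	read_lock_irqsave(&p->lock, flags);
	if (!p->valid) {
		read_unlock_irqrestore(&p->lock, flags);
		return NULL;
	}
	kref_get(&p->ref);	/* pin the pool against shutdown */
	read_unlock_irqrestore(&p->lock, flags);

	/* Lock dropped: a sleeping GFP_KERNEL allocation is now legal. */
	obj = kzalloc(p->obj_size, gfp);
	if (!obj)
		kref_put(&p->ref, my_pool_release);
	/* On success the reference is kept until the object is freed. */
	return obj;
}

The reference taken inside the critical section is what makes dropping the lock safe: a concurrent shutdown can mark the pool invalid, but cannot tear it down until kref_put() releases the last reference.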