author      Bob Pearson <rpearsonhpe@gmail.com>    2022-03-03 18:08:00 -0600
committer   Jason Gunthorpe <jgg@nvidia.com>       2022-03-15 20:49:57 -0300
commit      3c3e4d582bdc461081abea9de54eb4112a9a6283 (patch)
tree        b4550ab7fc860d14cdbca49e20f7e559dc8d680c /drivers/infiniband/sw
parent      c9f4c695835c9c2085065a3adc1b57d2005b508b (diff)
download    linux-3c3e4d582bdc461081abea9de54eb4112a9a6283.tar.bz2
RDMA/rxe: Delete _locked() APIs for pool objects
Since caller-managed locks for indexed objects are no longer used, these APIs are deleted.

Link: https://lore.kernel.org/r/20220304000808.225811-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
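The consolidation has the same shape in each case: the body of the former _locked() helper is folded into the wrapper that already takes pool->pool_lock. For reference, here is __rxe_add_index() as it reads after this patch, reassembled from the hunk below (only the comment is added here):

int __rxe_add_index(struct rxe_pool_elem *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	/* the pool_lock is now taken unconditionally inside the helper */
	write_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	err = rxe_insert_index(pool, elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}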
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.c  67
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_pool.h  24
2 files changed, 12 insertions, 79 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 239c24544ff2..2e3543dde000 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -189,17 +189,6 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
return 0;
}
-int __rxe_add_index_locked(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- elem->index = alloc_index(pool);
- err = rxe_insert_index(pool, elem);
-
- return err;
-}
-
int __rxe_add_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -207,55 +196,24 @@ int __rxe_add_index(struct rxe_pool_elem *elem)
int err;
write_lock_irqsave(&pool->pool_lock, flags);
- err = __rxe_add_index_locked(elem);
+ elem->index = alloc_index(pool);
+ err = rxe_insert_index(pool, elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
return err;
}
-void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- clear_bit(elem->index - pool->index.min_index, pool->index.table);
- rb_erase(&elem->index_node, &pool->index.tree);
-}
-
void __rxe_drop_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- __rxe_drop_index_locked(elem);
+ clear_bit(elem->index - pool->index.min_index, pool->index.table);
+ rb_erase(&elem->index_node, &pool->index.tree);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void *rxe_alloc_locked(struct rxe_pool *pool)
-{
- struct rxe_pool_elem *elem;
- void *obj;
-
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto out_cnt;
-
- obj = kzalloc(pool->elem_size, GFP_ATOMIC);
- if (!obj)
- goto out_cnt;
-
- elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
-
- elem->pool = pool;
- elem->obj = obj;
- kref_init(&elem->ref_cnt);
-
- return obj;
-
-out_cnt:
- atomic_dec(&pool->num_elem);
- return NULL;
-}
-
void *rxe_alloc(struct rxe_pool *pool)
{
struct rxe_pool_elem *elem;
@@ -321,12 +279,14 @@ void rxe_elem_release(struct kref *kref)
atomic_dec(&pool->num_elem);
}
-void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- struct rb_node *node;
struct rxe_pool_elem *elem;
+ struct rb_node *node;
+ unsigned long flags;
void *obj;
+ read_lock_irqsave(&pool->pool_lock, flags);
node = pool->index.tree.rb_node;
while (node) {
@@ -346,17 +306,6 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
} else {
obj = NULL;
}
-
- return obj;
-}
-
-void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
-{
- unsigned long flags;
- void *obj;
-
- read_lock_irqsave(&pool->pool_lock, flags);
- obj = rxe_pool_get_index_locked(pool, index);
read_unlock_irqrestore(&pool->pool_lock, flags);
return obj;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 44b944c8c360..7fec5d96d695 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -68,9 +68,7 @@ int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool holding and not holding the pool lock */
-void *rxe_alloc_locked(struct rxe_pool *pool);
-
+/* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
@@ -79,32 +77,18 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* assign an index to an indexed object and insert object into
- * pool's rb tree holding and not holding the pool_lock
+ * pool's rb tree
*/
-int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-
int __rxe_add_index(struct rxe_pool_elem *elem);
#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
-/* drop an index and remove object from rb tree
- * holding and not holding the pool_lock
- */
-void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-
+/* drop an index and remove object from rb tree */
void __rxe_drop_index(struct rxe_pool_elem *elem);
#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
-/* lookup an indexed object from index holding and not holding the pool_lock.
- * takes a reference on object
- */
-void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
-
+/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
/* cleanup an object when all references are dropped */
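Caller-side, only the lock-taking wrappers remain. A minimal usage sketch, assuming an object type that embeds a struct rxe_pool_elem as ->elem; the qp pool, the index variable, and the error handling here are illustrative, not taken from this patch:

	struct rxe_qp *qp, *found;

	/* rxe_alloc() and rxe_add_index() take pool->pool_lock internally;
	 * no caller-managed locking is needed any more.
	 */
	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp)
		return -ENOMEM;
	rxe_add_index(qp);

	/* lookup also locks internally and takes a reference on the object */
	found = rxe_pool_get_index(&rxe->qp_pool, index);

	/* teardown: drop the index, then the reference taken above
	 * (rxe_drop_ref() is assumed from the surrounding rxe code,
	 * it is not part of this diff)
	 */
	rxe_drop_index(qp);
	rxe_drop_ref(qp);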