path: root/drivers/infiniband/sw/rxe
author	Bob Pearson <rpearsonhpe@gmail.com>	2020-12-16 17:15:49 -0600
committer	Jason Gunthorpe <jgg@nvidia.com>	2021-01-12 16:35:39 -0400
commit	3853c35e243d56238159e8365b6aca410bdd4576 (patch)
tree	18bb52db45fd7af9a5e6e48de5f5eb9a77f0b129 /drivers/infiniband/sw/rxe
parent	91a42c5becb685e1ae73554726906dfe74a8afdd (diff)
download	linux-3853c35e243d56238159e8365b6aca410bdd4576.tar.bz2
RDMA/rxe: Add unlocked versions of pool APIs
The existing pool APIs use the rw_lock pool_lock to protect critical sections that change the pool state. This does not correctly implement a typical sequence like the following:

	elem = <lookup key in pool>
	if found use elem else
	elem = <alloc new elem in pool>
	<add key to elem>

This sequence is racy if multiple threads attempt to perform it at the same time. We want the second thread to use the elem created by the first thread, not create two equivalent elems.

This patch adds new APIs that are the same as the existing APIs but do not take the pool_lock. A caller can then take the lock, perform a sequence of pool operations, and then release the lock.

Link: https://lore.kernel.org/r/20201216231550.27224-7-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
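As a caller-side illustration of the sequence described above, here is a minimal sketch of a race-free lookup-or-create built on the new unlocked (_nl) APIs. It assumes a keyed pool created with RXE_POOL_ATOMIC so that allocating while holding the lock cannot sleep; the element type struct rxe_foo and the helper name are hypothetical, not part of this patch.

	/* Hold pool_lock across the whole lookup/alloc/insert sequence so a
	 * second thread cannot slip in and create a duplicate element.
	 * struct rxe_foo is a hypothetical pool object embedding a
	 * struct rxe_pool_entry named pelem.
	 */
	static struct rxe_foo *rxe_foo_get_or_create(struct rxe_pool *pool, void *key)
	{
		struct rxe_foo *foo;
		unsigned long flags;

		write_lock_irqsave(&pool->pool_lock, flags);

		foo = rxe_pool_get_key_nl(pool, key);	/* takes a reference if found */
		if (foo)
			goto done;

		foo = rxe_alloc_nl(pool);		/* may return NULL on failure */
		if (!foo)
			goto done;

		rxe_add_key_nl(foo, key);		/* copy the key in and insert into the rb tree */
	done:
		write_unlock_irqrestore(&pool->pool_lock, flags);
		return foo;
	}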
Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_pool.c	82
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_pool.h	41
2 files changed, 97 insertions, 26 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index c8a6d0d7f01a..d26730eec720 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -266,65 +266,89 @@ out:
return;
}
+void __rxe_add_key_nl(struct rxe_pool_entry *elem, void *key)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
+ insert_key(pool, elem);
+}
+
void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
- insert_key(pool, elem);
+ __rxe_add_key_nl(elem, key);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
+void __rxe_drop_key_nl(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ rb_erase(&elem->key_node, &pool->key.tree);
+}
+
void __rxe_drop_key(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- rb_erase(&elem->key_node, &pool->key.tree);
+ __rxe_drop_key_nl(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
+void __rxe_add_index_nl(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ elem->index = alloc_index(pool);
+ insert_index(pool, elem);
+}
+
void __rxe_add_index(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- elem->index = alloc_index(pool);
- insert_index(pool, elem);
+ __rxe_add_index_nl(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
+void __rxe_drop_index_nl(struct rxe_pool_entry *elem)
+{
+ struct rxe_pool *pool = elem->pool;
+
+ clear_bit(elem->index - pool->index.min_index, pool->index.table);
+ rb_erase(&elem->index_node, &pool->index.tree);
+}
+
void __rxe_drop_index(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- clear_bit(elem->index - pool->index.min_index, pool->index.table);
- rb_erase(&elem->index_node, &pool->index.tree);
+ __rxe_drop_index_nl(elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
-void *rxe_alloc(struct rxe_pool *pool)
+void *rxe_alloc_nl(struct rxe_pool *pool)
{
struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
u8 *obj;
- unsigned long flags;
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
- read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != RXE_POOL_STATE_VALID) {
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID)
return NULL;
- }
+
kref_get(&pool->ref_cnt);
- read_unlock_irqrestore(&pool->pool_lock, flags);
if (!ib_device_try_get(&pool->rxe->ib_dev))
goto out_put_pool;
@@ -352,11 +376,21 @@ out_put_pool:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+void *rxe_alloc(struct rxe_pool *pool)
{
+ u8 *obj;
unsigned long flags;
- might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+ read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_alloc_nl(pool);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return obj;
+}
+
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+{
+ unsigned long flags;
read_lock_irqsave(&pool->pool_lock, flags);
if (pool->state != RXE_POOL_STATE_VALID) {
@@ -444,16 +478,13 @@ out:
return obj;
}
-void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+void *rxe_pool_get_key_nl(struct rxe_pool *pool, void *key)
{
struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
struct rxe_pool_entry *elem;
u8 *obj = NULL;
int cmp;
- unsigned long flags;
-
- read_lock_irqsave(&pool->pool_lock, flags);
if (pool->state != RXE_POOL_STATE_VALID)
goto out;
@@ -482,6 +513,17 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
}
out:
+ return obj;
+}
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+ u8 *obj = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_pool_get_key_nl(pool, key);
read_unlock_irqrestore(&pool->pool_lock, flags);
+
return obj;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 5db0bdde185e..373e08554c1c 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -109,41 +109,70 @@ void rxe_pool_cleanup(struct rxe_pool *pool);
/* allocate an object from pool */
void *rxe_alloc(struct rxe_pool *pool);
+/* allocate an object from pool - no lock */
+void *rxe_alloc_nl(struct rxe_pool *pool);
+
/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
/* assign an index to an indexed object and insert object into
- * pool's rb tree
+ * pool's rb tree with and without holding the pool_lock
*/
void __rxe_add_index(struct rxe_pool_entry *elem);
#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
-/* drop an index and remove object from rb tree */
+void __rxe_add_index_nl(struct rxe_pool_entry *elem);
+
+#define rxe_add_index_nl(obj) __rxe_add_index_nl(&(obj)->pelem)
+
+/* drop an index and remove object from rb tree
+ * with and without holding the pool_lock
+ */
void __rxe_drop_index(struct rxe_pool_entry *elem);
#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+void __rxe_drop_index_nl(struct rxe_pool_entry *elem);
+
+#define rxe_drop_index_nl(obj) __rxe_drop_index_nl(&(obj)->pelem)
+
/* assign a key to a keyed object and insert object into
- * pool's rb tree
+ * pool's rb tree with and without holding pool_lock
*/
void __rxe_add_key(struct rxe_pool_entry *elem, void *key);
#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
-/* remove elem from rb tree */
+void __rxe_add_key_nl(struct rxe_pool_entry *elem, void *key);
+
+#define rxe_add_key_nl(obj, key) __rxe_add_key_nl(&(obj)->pelem, key)
+
+/* remove elem from rb tree with and without holding pool_lock */
void __rxe_drop_key(struct rxe_pool_entry *elem);
#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
-/* lookup an indexed object from index. takes a reference on object */
+void __rxe_drop_key_nl(struct rxe_pool_entry *elem);
+
+#define rxe_drop_key_nl(obj) __rxe_drop_key_nl(&(obj)->pelem)
+
+/* lookup an indexed object from index with and without holding pool_lock.
+ * takes a reference on object
+ */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
-/* lookup keyed object from key. takes a reference on the object */
+void *rxe_pool_get_index_nl(struct rxe_pool *pool, u32 index);
+
+/* lookup keyed object from key with and without holding pool_lock.
+ * takes a reference on the object
+ */
void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
+void *rxe_pool_get_key_nl(struct rxe_pool *pool, void *key);
+
/* cleanup an object when all references are dropped */
void rxe_elem_release(struct kref *kref);
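With both flavors exported side by side, a caller can compose several pool operations under a single lock hold. As one more hedged sketch (the helper and struct rxe_obj are illustrative only, not part of this patch), re-keying an element becomes race-free because the drop and the add happen under the same write lock:

	/* Atomically replace an element's key; the locked rxe_drop_key()/
	 * rxe_add_key() pair could not do this without a window where the
	 * element is missing from the rb tree.  struct rxe_obj stands for
	 * any pool object embedding a struct rxe_pool_entry named pelem.
	 */
	static void rxe_obj_replace_key(struct rxe_pool *pool, struct rxe_obj *obj,
					void *new_key)
	{
		unsigned long flags;

		write_lock_irqsave(&pool->pool_lock, flags);
		rxe_drop_key_nl(obj);		/* erase the old key node */
		rxe_add_key_nl(obj, new_key);	/* copy in and insert the new key */
		write_unlock_irqrestore(&pool->pool_lock, flags);
	}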