Diffstat (limited to 'drivers/infiniband/hw/mlx5/devx.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 102
1 file changed, 28 insertions(+), 74 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 655ea9c984e1..9e3d8b826498 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -14,6 +14,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
+#include "devx.h"
#include "qp.h"
#include <linux/xarray.h>
@@ -89,22 +90,6 @@ struct devx_async_event_file {
u8 is_destroyed:1;
};
-#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
-struct devx_obj {
- struct mlx5_ib_dev *ib_dev;
- u64 obj_id;
- u32 dinlen; /* destroy inbox length */
- u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
- u32 flags;
- union {
- struct mlx5_ib_devx_mr devx_mr;
- struct mlx5_core_dct core_dct;
- struct mlx5_core_cq core_cq;
- u32 flow_counter_bulk_size;
- };
- struct list_head event_sub; /* holds devx_event_subscription entries */
-};
-
struct devx_umem {
struct mlx5_core_dev *mdev;
struct ib_umem *umem;
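
Note: the struct devx_obj definition deleted above is presumably relocated into the new devx.h header that the first hunk starts including. A minimal sketch of that header, reconstructed from the removed lines (the include-guard name is an assumption; the struct body is copied verbatim from the hunk):

/* drivers/infiniband/hw/mlx5/devx.h - sketch, assuming the definition moves unchanged */
#ifndef _MLX5_IB_DEVX_H
#define _MLX5_IB_DEVX_H

#include "mlx5_ib.h"

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)

struct devx_obj {
	struct mlx5_ib_dev *ib_dev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
		struct mlx5_core_cq core_cq;
		u32 flow_counter_bulk_size;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

#endif /* _MLX5_IB_DEVX_H */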
@@ -171,48 +156,6 @@ void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
-bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
-{
- struct devx_obj *devx_obj = obj;
- u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
-
- switch (opcode) {
- case MLX5_CMD_OP_DESTROY_TIR:
- *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
- obj_id);
- return true;
-
- case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
- *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
- table_id);
- return true;
- default:
- return false;
- }
-}
-
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
-{
- struct devx_obj *devx_obj = obj;
- u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
-
- if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
-
- if (offset && offset >= devx_obj->flow_counter_bulk_size)
- return false;
-
- *counter_id = MLX5_GET(dealloc_flow_counter_in,
- devx_obj->dinbox,
- flow_counter_id);
- *counter_id += offset;
- return true;
- }
-
- return false;
-}
-
static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
switch (event_num) {
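
Note: mlx5_ib_devx_is_flow_dest() and mlx5_ib_devx_is_flow_counter() are deleted from devx.c here, but their flow-steering callers still need them; because this page is limited to devx.c, their new home is not visible in the diff. A plausible pair of prototypes for the new header, assuming the signatures stay as they were:

/* devx.h - hypothetical prototypes, signatures copied from the removed definitions */
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);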
@@ -2419,17 +2362,24 @@ static int devx_event_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
struct mlx5_devx_event_table *table = &dev->devx_event_table;
+ int uid;
- xa_init(&table->event_xa);
- mutex_init(&table->event_xa_lock);
- MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
- mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+ uid = mlx5_ib_devx_create(dev, false);
+ if (uid > 0) {
+ dev->devx_whitelist_uid = uid;
+ xa_init(&table->event_xa);
+ mutex_init(&table->event_xa_lock);
+ MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+ }
+
+ return 0;
}
-void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_devx_event_table *table = &dev->devx_event_table;
struct devx_event_subscription *sub, *tmp;
@@ -2437,17 +2387,21 @@ void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
void *entry;
unsigned long id;
- mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
- mutex_lock(&dev->devx_event_table.event_xa_lock);
- xa_for_each(&table->event_xa, id, entry) {
- event = entry;
- list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
- xa_list)
- devx_cleanup_subscription(dev, sub);
- kfree(entry);
+ if (dev->devx_whitelist_uid) {
+ mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ xa_for_each(&table->event_xa, id, entry) {
+ event = entry;
+ list_for_each_entry_safe(
+ sub, tmp, &event->unaffiliated_list, xa_list)
+ devx_cleanup_subscription(dev, sub);
+ kfree(entry);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+ xa_destroy(&table->event_xa);
+
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
}
- mutex_unlock(&dev->devx_event_table.event_xa_lock);
- xa_destroy(&table->event_xa);
}
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
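
Note: with the whitelist UID handling folded into mlx5_ib_devx_init()/mlx5_ib_devx_cleanup(), a caller no longer has to create or destroy dev->devx_whitelist_uid around the event-table setup itself. A rough sketch of the caller side after this change (the stage-style wrapper names are assumptions; the corresponding main.c changes are not part of this file-limited diff):

/* main.c - hypothetical caller, assuming a paired init/cleanup stage */
static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
{
	/* Creates the whitelist UID and, if one is granted, registers the
	 * DEVX event notifier; returns 0 even when no UID is available.
	 */
	return mlx5_ib_devx_init(dev);
}

static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
{
	/* Tears down the event table and destroys the whitelist UID,
	 * but only if init actually obtained one.
	 */
	mlx5_ib_devx_cleanup(dev);
}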