Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c | 343
1 file changed, 54 insertions(+), 289 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 7a7f6ccd02a5..6d1dd09a4388 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -34,6 +34,7 @@
#include "ib_rep.h"
#include "cmd.h"
#include "devx.h"
+#include "dm.h"
#include "fs.h"
#include "srq.h"
#include "qp.h"
@@ -42,6 +43,7 @@
#include "counters.h"
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/ib_umem_odp.h>
@@ -100,7 +102,7 @@ mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
}
static enum rdma_link_layer
-mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
+mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
struct mlx5_ib_dev *dev = to_mdev(device);
int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
@@ -109,7 +111,7 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
}
static int get_port_state(struct ib_device *ibdev,
- u8 port_num,
+ u32 port_num,
enum ib_port_state *state)
{
struct ib_port_attr attr;
@@ -124,7 +126,7 @@ static int get_port_state(struct ib_device *ibdev,
static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
struct net_device *ndev,
- u8 *port_num)
+ u32 *port_num)
{
struct net_device *rep_ndev;
struct mlx5_ib_port *port;
@@ -154,7 +156,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
{
struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- u8 port_num = roce->native_port_num;
+ u32 port_num = roce->native_port_num;
struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *ibdev;
@@ -233,7 +235,7 @@ done:
}
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
- u8 port_num)
+ u32 port_num)
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
@@ -261,8 +263,8 @@ out:
}
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
- u8 ib_port_num,
- u8 *native_port_num)
+ u32 ib_port_num,
+ u32 *native_port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
ib_port_num);
@@ -296,7 +298,7 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
return mdev;
}
-void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
port_num);
@@ -452,7 +454,7 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
active_width);
}
-static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -462,7 +464,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
enum ib_mtu ndev_ib_mtu;
bool put_mdev = true;
u32 eth_prot_oper;
- u8 mdev_port_num;
+ u32 mdev_port_num;
bool ext;
int err;
@@ -498,7 +500,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width, ext);
- if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
+ if (!dev->is_rep && dev->mdev->roce.roce_en) {
u16 qkey_viol_cntr;
props->port_cap_flags |= IB_PORT_CM_SUP;
@@ -549,19 +551,19 @@ out:
return err;
}
-static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
unsigned int index, const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
- enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ enum ib_gid_type gid_type;
u16 vlan_id = 0xffff;
u8 roce_version = 0;
u8 roce_l3_type = 0;
u8 mac[ETH_ALEN];
int ret;
+ gid_type = attr->gid_type;
if (gid) {
- gid_type = attr->gid_type;
ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
if (ret)
return ret;
@@ -573,7 +575,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
break;
case IB_GID_TYPE_ROCE_UDP_ENCAP:
roce_version = MLX5_ROCE_VERSION_2;
- if (ipv6_addr_v4mapped((void *)gid))
+ if (gid && ipv6_addr_v4mapped((void *)gid))
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
else
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
@@ -600,7 +602,7 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
__always_unused void **context)
{
return set_roce_addr(to_mdev(attr->device), attr->port_num,
- attr->index, NULL, NULL);
+ attr->index, NULL, attr);
}
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
@@ -1267,7 +1269,7 @@ static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
return 0;
}
-static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1335,7 +1337,7 @@ out:
return err;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
unsigned int count;
@@ -1380,13 +1382,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
return ret;
}
-static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
return mlx5_query_port_roce(ibdev, port, props);
}
-static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
/* Default special Pkey for representor device port as per the
@@ -1396,7 +1398,7 @@ static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0;
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1415,13 +1417,13 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev;
bool put_mdev = true;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
@@ -1442,7 +1444,7 @@ static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
return err;
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
switch (mlx5_get_vport_access_method(ibdev)) {
@@ -1486,12 +1488,12 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
return err;
}
-static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
struct mlx5_core_dev *mdev;
- u8 mdev_port_num;
+ u32 mdev_port_num;
int err;
mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
@@ -1520,7 +1522,7 @@ out:
return err;
}
-static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
struct ib_port_modify *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
@@ -1929,7 +1931,7 @@ uar_done:
print_lib_caps(dev, context->lib_caps);
if (mlx5_ib_lag_should_assign_affinity(dev)) {
- u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
atomic_add_return(
@@ -2087,14 +2089,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
- struct mlx5_ib_dm *mdm;
switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
- mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
- mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
- mdm->size);
- kfree(mdm);
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ mlx5_ib_dm_mmap_free(dev, mentry);
break;
case MLX5_IB_MMAP_TYPE_VAR:
mutex_lock(&var_table->bitmap_lock);
@@ -2219,19 +2218,6 @@ free_bfreg:
return err;
}
-static int add_dm_mmap_entry(struct ib_ucontext *context,
- struct mlx5_ib_dm *mdm,
- u64 address)
-{
- mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
- mdm->mentry.address = address;
- return rdma_user_mmap_entry_insert_range(
- context, &mdm->mentry.rdma_entry,
- mdm->size,
- MLX5_IB_MMAP_DEVICE_MEM << 16,
- (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
{
unsigned long idx;
@@ -2333,206 +2319,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
- u32 type)
-{
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
- return -EOPNOTSUPP;
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- if (!capable(CAP_SYS_RAWIO) ||
- !capable(CAP_NET_RAW))
- return -EPERM;
-
- if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
- MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
- MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
- return -EOPNOTSUPP;
- break;
- }
-
- return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
- u64 start_offset;
- u16 page_idx;
- int err;
- u64 address;
-
- dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
- err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
- dm->size, attr->alignment);
- if (err)
- return err;
-
- address = dm->dev_addr & PAGE_MASK;
- err = add_dm_mmap_entry(ctx, dm, address);
- if (err)
- goto err_dealloc;
-
- page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- &page_idx,
- sizeof(page_idx));
- if (err)
- goto err_copy;
-
- start_offset = dm->dev_addr & ~PAGE_MASK;
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &start_offset, sizeof(start_offset));
- if (err)
- goto err_copy;
-
- return 0;
-
-err_copy:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
- mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
- return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
- struct mlx5_ib_dm *dm,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs,
- int type)
-{
- struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
- u64 act_size;
- int err;
-
- /* Allocation size must a multiple of the basic block size
- * and a power of 2.
- */
- act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
- act_size = roundup_pow_of_two(act_size);
-
- dm->size = act_size;
- err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
- to_mucontext(ctx)->devx_uid, &dm->dev_addr,
- &dm->icm_dm.obj_id);
- if (err)
- return err;
-
- err = uverbs_copy_to(attrs,
- MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- &dm->dev_addr, sizeof(dm->dev_addr));
- if (err)
- mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
- to_mucontext(ctx)->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
-
- return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_dm *dm;
- enum mlx5_ib_uapi_dm_type type;
- int err;
-
- err = uverbs_get_const_default(&type, attrs,
- MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- MLX5_IB_UAPI_DM_TYPE_MEMIC);
- if (err)
- return ERR_PTR(err);
-
- mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
- type, attr->length, attr->alignment);
-
- err = check_dm_type_support(to_mdev(ibdev), type);
- if (err)
- return ERR_PTR(err);
-
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return ERR_PTR(-ENOMEM);
-
- dm->type = type;
-
- switch (type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- err = handle_alloc_dm_memic(context, dm,
- attr,
- attrs);
- break;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_STEERING);
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- err = handle_alloc_dm_sw_icm(context, dm,
- attr, attrs,
- MLX5_SW_ICM_TYPE_HEADER_MODIFY);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
- if (err)
- goto err_free;
-
- return &dm->ibdm;
-
-err_free:
- kfree(dm);
- return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
- struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
- &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
- struct mlx5_ib_dm *dm = to_mdm(ibdm);
- int ret;
-
- switch (dm->type) {
- case MLX5_IB_UAPI_DM_TYPE_MEMIC:
- rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
- return 0;
- case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
- ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
- dm->size, ctx->devx_uid, dm->dev_addr,
- dm->icm_dm.obj_id);
- if (ret)
- return ret;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- kfree(dm);
-
- return 0;
-}
-
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
@@ -2779,7 +2565,7 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
@@ -2795,7 +2581,7 @@ static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe
static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
- u8 port = (eqe->data.port.port >> 4) & 0xf;
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
ibev->element.port_num = port;
@@ -3152,7 +2938,7 @@ static u32 get_core_cap_flags(struct ib_device *ibdev,
return ret;
}
-static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3180,7 +2966,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
@@ -3252,7 +3038,7 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
int err;
@@ -3266,7 +3052,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
{
if (dev->port[port_num].roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
@@ -3300,7 +3086,7 @@ static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
+static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params)
{
@@ -3352,7 +3138,7 @@ static const struct file_operations fops_delay_drop_timeout = {
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
struct mlx5_ib_port *port = &ibdev->port[port_num];
int comps;
int err;
@@ -3398,7 +3184,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
- mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
/* Log an error, still needed to cleanup the pointers and add
* it back to the list.
*/
@@ -3412,14 +3198,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi)
{
- u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
int err;
lockdep_assert_held(&mlx5_ib_multiport_mutex);
spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) {
- mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
+ mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
port_num + 1);
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
return false;
@@ -3455,12 +3241,12 @@ unbind:
static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
struct mlx5_ib_multiport_info *mpi;
int err;
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return 0;
@@ -3523,10 +3309,10 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
{
- int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
port_num + 1);
- int i;
+ u32 i;
if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
return;
@@ -3539,7 +3325,8 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
kfree(dev->port[i].mp.mpi);
dev->port[i].mp.mpi = NULL;
} else {
- mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
+ i + 1);
mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
}
}
@@ -3816,20 +3603,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
&UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
- mlx5_ib_dm,
- UVERBS_OBJECT_DM,
- UVERBS_METHOD_DM_ALLOC,
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
- UVERBS_ATTR_TYPE(u64),
- UA_MANDATORY),
- UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
- UVERBS_ATTR_TYPE(u16),
- UA_OPTIONAL),
- UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
- enum mlx5_ib_uapi_dm_type,
- UA_OPTIONAL));
-
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
mlx5_ib_flow_action,
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
@@ -3851,10 +3624,10 @@ static const struct uapi_definition mlx5_ib_defs[] = {
UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
&mlx5_ib_flow_action),
- UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
@@ -3891,8 +3664,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
- mlx5_ib_internal_fill_odp_caps(dev);
-
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -4032,12 +3803,6 @@ static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
};
-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
- .alloc_dm = mlx5_ib_alloc_dm,
- .dealloc_dm = mlx5_ib_dealloc_dm,
- .reg_dm_mr = mlx5_ib_reg_dm_mr,
-};
-
static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -4160,7 +3925,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num = 0;
+ u32 port_num = 0;
int err;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
@@ -4173,7 +3938,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
/* Register only for native ports */
err = mlx5_add_netdev_notifier(dev, port_num);
- if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
+ if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
/*
* We don't enable ETH interface for
* 1. IB representors
@@ -4197,7 +3962,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
struct mlx5_core_dev *mdev = dev->mdev;
enum rdma_link_layer ll;
int port_type_cap;
- u8 port_num;
+ u32 port_num;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -4710,7 +4475,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;