author     Leon Romanovsky <leonro@nvidia.com>    2021-10-21 08:27:39 +0300
committer  Leon Romanovsky <leonro@nvidia.com>    2021-10-21 08:27:39 +0300
commit     60dd57c7479418e2bc902143eb46a2fdcfeecbbb (patch)
tree       ab6005faf6076a4c93fdfbcdccc1ff4bc044c8ec /drivers/infiniband
parent     b8dfed636fc6239396c3a2ae5f812505906cf215 (diff)
parent     ae0579acde812bc1efd074086ae3bc5eae170f20 (diff)
Merge branch 'mlx5_mkey' into rdma.git for-next
A small series to clean up the mlx5 mkey code across mlx5_core and InfiniBand.

* branch 'mlx5_mkey':
  RDMA/mlx5: Attach ndescs to mlx5_ib_mkey
  RDMA/mlx5: Move struct mlx5_core_mkey to mlx5_ib
  RDMA/mlx5: Replace struct mlx5_core_mkey by u32 key
  RDMA/mlx5: Remove pd from struct mlx5_core_mkey
  RDMA/mlx5: Remove size from struct mlx5_core_mkey
  RDMA/mlx5: Remove iova from struct mlx5_core_mkey

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
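In practice the series drops iova, size and pd from the mkey (struct ib_mr already carries them) and keeps only mlx5 bookkeeping (key, type, ndescs, the ODP waitqueue and refcount) in the new struct mlx5_ib_mkey. Call sites change along these lines (an illustrative sketch distilled from the hunks below, not itself a hunk):

	/* before: attributes duplicated in the core mkey */
	wr->virt_addr = mr->mmkey.iova;
	wr->length    = mr->mmkey.size;

	/* after: read them from ib_mr; ndescs moves into mlx5_ib_mkey */
	wr->virt_addr = mr->ibmr.iova;
	wr->length    = mr->ibmr.length;
	size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;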
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c                  51
-rw-r--r--  drivers/infiniband/core/cma_priv.h              1
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c           8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c        31
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c     13
-rw-r--r--  drivers/infiniband/hw/irdma/cm.c                4
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c               14
-rw-r--r--  drivers/infiniband/hw/irdma/i40iw_if.c          2
-rw-r--r--  drivers/infiniband/hw/irdma/main.h              1
-rw-r--r--  drivers/infiniband/hw/irdma/user.h              2
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c             2
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c             9
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c              13
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.h               2
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h           31
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c                83
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c               38
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c                10
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c           2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib.h          2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c     2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c   16
22 files changed, 194 insertions, 143 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c40791baced5..704ce595542c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
}
}
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
struct rdma_id_private *dev_id_priv;
+ lockdep_assert_held(&lock);
+
/*
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- mutex_lock(&lock);
list_del(&id_priv->list);
while (!list_empty(&id_priv->listen_list)) {
@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
rdma_destroy_id(&dev_id_priv->id);
mutex_lock(&lock);
}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+ mutex_lock(&lock);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
}
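This split follows the usual kernel convention: the underscore-prefixed variant documents (and asserts) that the caller already holds the lock, while the public name is a thin locking wrapper. A generic sketch of the pattern, not taken from this patch:

	static DEFINE_MUTEX(lock);

	static void _do_cleanup(void)
	{
		lockdep_assert_held(&lock);	/* caller must hold `lock` */
		/* ... mutate state protected by the mutex ... */
	}

	static void do_cleanup(void)
	{
		mutex_lock(&lock);
		_do_cleanup();
		mutex_unlock(&lock);
	}

It is this split that lets the err_listen path further down call _cma_cancel_listens() while the mutex is already held, instead of the bare list_del() it used before.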
@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
{
switch (state) {
case RDMA_CM_ADDR_QUERY:
+ /*
+ * We can avoid doing the rdma_addr_cancel() based on state,
+ * only RDMA_CM_ADDR_QUERY has a work that could still execute.
+ * Notice that the addr_handler work could still be exiting
+ * outside this state, however due to the interaction with the
+ * handler_mutex the work is guaranteed not to touch id_priv
+ * during exit.
+ */
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break;
case RDMA_CM_ROUTE_QUERY:
@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
static void destroy_mc(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
ib_sa_free_multicast(mc->sa_mc);
@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
&mgid);
- cma_igmp_send(ndev, &mgid, false);
+
+ if (!send_only)
+ cma_igmp_send(ndev, &mgid, false);
+
dev_put(ndev);
}
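The new send_only check mirrors the join side: a send-only full-member join never issued an IGMP join in the first place, so tearing the group down must not emit an unmatched IGMP leave.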
@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
return 0;
err_listen:
- list_del(&id_priv->list);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
if (to_destroy)
rdma_destroy_id(&to_destroy->id);
@@ -3413,6 +3433,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
+ /*
+ * The FSM can return back to RDMA_CM_ADDR_BOUND after
+ * rdma_resolve_ip() is called, eg through the error
+ * path in addr_handler(). If this happens the existing
+ * request must be canceled before issuing a new one.
+ * Since canceling a request is a bit slow and this
+ * oddball path is rare, keep track once a request has
+ * been issued. The track turns out to be a permanent
+ * state since this is the only cancel as it is
+ * immediately before rdma_resolve_ip().
+ */
+ if (id_priv->used_resolve_ip)
+ rdma_addr_cancel(&id->route.addr.dev_addr);
+ else
+ id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
@@ -3771,9 +3806,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
int ret;
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+ struct sockaddr_in any_in = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+
/* For a well behaved ULP state will be RDMA_CM_IDLE */
- id->route.addr.src_addr.ss_family = AF_INET;
- ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
if (ret)
return ret;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5c463da99845..f92f101ea981 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -91,6 +91,7 @@ struct rdma_id_private {
u8 afonly;
u8 timeout;
u8 min_rnr_timer;
+ u8 used_resolve_ip;
enum ib_gid_type gid_type;
/*
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index e74ddbe46589..15b0cb0f363f 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -876,14 +876,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
u64 completed = atomic64_read(&txq->complete_txreqs);
- dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
- (unsigned long long)txq, q,
+ dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ txq, q,
__netif_subqueue_stopped(dev, txq->q_idx),
atomic_read(&txq->stops),
atomic_read(&txq->no_desc),
atomic_read(&txq->ring_full));
- dd_dev_info(priv->dd, "sde %llx engine %u\n",
- (unsigned long long)txq->sde,
+ dd_dev_info(priv->dd, "sde %p engine %u\n",
+ txq->sde,
txq->sde ? txq->sde->this_idx : 0);
dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1e9c3c5bee68..d763f097599f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
INIT_LIST_HEAD(&hr_cq->rq_list);
}
-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
- struct hns_roce_ib_create_cq *ucmd)
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- if (udata) {
- if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
- hr_cq->cqe_size = ucmd->cqe_size;
- else
- hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
- } else {
+ if (!udata) {
hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+ return 0;
+ }
+
+ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+ if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+ ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid cqe size %u.\n", ucmd->cqe_size);
+ return -EINVAL;
+ }
+
+ hr_cq->cqe_size = ucmd->cqe_size;
+ } else {
+ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
}
+
+ return 0;
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
- set_cqe_size(hr_cq, udata, &ucmd);
+ ret = set_cqe_size(hr_cq, udata, &ucmd);
+ if (ret)
+ return ret;
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
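Two things are going on in set_cqe_size(). First, it uses the standard idiom for extensible user commands: a trailing field is honoured only if the user's buffer is long enough to contain it. A generic sketch of that check:

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		/* new userspace passed the field: validate, then use it */
	} else {
		/* old userspace: fall back to the default CQE size */
	}

Second, the added -EINVAL path matters because the user-supplied value later serves as a raw copy length; see the memcpy(dest, cqe, hr_cq->cqe_size) fix in the next file, which is only safe once cqe_size is pinned to one of the two hardware sizes.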
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 5b9953105752..d5f3faa1627a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3299,7 +3299,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
hr_cq->ib_cq.cqe);
owner_bit = hr_reg_read(dest, CQE_OWNER);
- memcpy(dest, cqe, sizeof(*cqe));
+ memcpy(dest, cqe, hr_cq->cqe_size);
hr_reg_write(dest, CQE_OWNER, owner_bit);
}
}
@@ -4397,7 +4397,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_qp->path_mtu = ib_mtu;
mtu = ib_mtu_enum_to_int(ib_mtu);
- if (WARN_ON(mtu < 0))
+ if (WARN_ON(mtu <= 0))
+ return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+ if (WARN_ON(lp_pktn_ini >= 0xF))
return -EINVAL;
if (attr_mask & IB_QP_PATH_MTU) {
@@ -4405,10 +4410,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_MTU);
}
-#define MAX_LP_MSG_LEN 65536
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
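A worked example of the relocated computation: for mtu = 1024, lp_pktn_ini = ilog2(65536 / 1024) = 6, and 1024 * 2^6 is exactly 64KB. Tightening the guard to mtu <= 0 also rules out a zero divisor in the ilog2() line, and the new lp_pktn_ini >= 0xF check rejects values that would not fit the QPC_LP_PKTN_INI field (apparently 4 bits wide) before anything is written to the QP context.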
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 6b62299abfbb..6dea0a49d171 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
teardown_entry);
attr.qp_state = IB_QPS_ERR;
irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- if (iwdev->reset)
+ if (iwdev->rf->reset)
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 00de5ee9a260..7de525a5ccf8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ qp->flush_code = FLUSH_MW_BIND_ERR;
+ break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
if (irdma_initialize_ieq(iwdev)) {
- iwdev->reset = true;
+ iwdev->rf->reset = true;
rf->gen_ops.request_reset(rf);
}
}
@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
- iwdev->reset);
+ iwdev->rf->reset);
fallthrough;
case ILQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi,
IRDMA_PUDA_RSRC_TYPE_ILQ,
- iwdev->reset);
+ iwdev->rf->reset);
break;
default:
ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index bddf88194d09..d219f64b2c3d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
iwdev = to_iwdev(ibdev);
if (reset)
- iwdev->reset = true;
+ iwdev->rf->reset = true;
iwdev->iw_status = 0;
irdma_port_ibevent(iwdev);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 743d9e143a99..b678fe712447 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -346,7 +346,6 @@ struct irdma_device {
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb:1;
- bool reset:1;
bool iw_ooo:1;
enum init_completion_state init_state;
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ff705f323233..3dcbb1fbf2c6 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
+ FLUSH_MW_BIND_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index e94470991fe0..ac91ea5296db 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2507,7 +2507,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
struct ib_qp_attr attr;
- if (qp->iwdev->reset)
+ if (qp->iwdev->rf->reset)
return;
attr.qp_state = IB_QPS_ERR;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 4fc323402073..7110ebf834f9 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
- if (!iwdev->reset)
- irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
@@ -2035,7 +2034,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
/* Kmode allocations */
int rsize;
- if (entries > rf->max_cqe) {
+ if (entries < 1 || entries > rf->max_cqe) {
err_code = -EINVAL;
goto cq_free_rsrc;
}
@@ -3353,6 +3352,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_LOC_LEN_ERR;
case FLUSH_GENERAL_ERR:
return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IB_WC_MW_BIND_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index e95967aefe78..08b7f6bc56c3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
struct mlx5_ib_dev *dev,
void *in, void *out)
{
- struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
- struct mlx5_core_mkey *mkey;
+ struct mlx5_ib_mkey *mkey = &obj->mkey;
void *mkc;
u8 key;
- mkey = &devx_mr->mmkey;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
key = MLX5_GET(mkc, mkc, mkey_7_0);
mkey->key = mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, out, mkey_index)) | key;
mkey->type = MLX5_MKEY_INDIRECT_DEVX;
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
- devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
init_waitqueue_head(&mkey->wait);
return mlx5r_store_odp_mkey(dev, mkey);
@@ -1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
xa_erase(&obj->ib_dev->odp_mkeys,
- mlx5_base_mkey(obj->devx_mr.mmkey.key)))
+ mlx5_base_mkey(obj->mkey.key)))
/*
* The pagefault_single_data_segment() does commands against
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
+ mlx5r_deref_wait_odp_mkey(&obj->mkey);
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
index 1f69866aed16..ee2213275fd6 100644
--- a/drivers/infiniband/hw/mlx5/devx.h
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -16,7 +16,7 @@ struct devx_obj {
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
u32 flags;
union {
- struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_ib_mkey mkey;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
u32 flow_counter_bulk_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bf20a388eabe..e462e368c353 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -619,6 +619,20 @@ struct mlx5_user_mmap_entry {
u32 page_idx;
};
+enum mlx5_mkey_type {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
+};
+
+struct mlx5_ib_mkey {
+ u32 key;
+ enum mlx5_mkey_type type;
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+};
+
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
@@ -637,7 +651,7 @@ struct mlx5_user_mmap_entry {
struct mlx5_ib_mr {
struct ib_mr ibmr;
- struct mlx5_core_mkey mmkey;
+ struct mlx5_ib_mkey mmkey;
/* User MR data */
struct mlx5_cache_ent *cache_ent;
@@ -659,7 +673,6 @@ struct mlx5_ib_mr {
void *descs_alloc;
dma_addr_t desc_map;
int max_descs;
- int ndescs;
int desc_size;
int access_mode;
@@ -713,13 +726,7 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_mw {
struct ib_mw ibmw;
- struct mlx5_core_mkey mmkey;
- int ndescs;
-};
-
-struct mlx5_ib_devx_mr {
- struct mlx5_core_mkey mmkey;
- int ndescs;
+ struct mlx5_ib_mkey mmkey;
};
struct mlx5_ib_umr_context {
@@ -1579,7 +1586,7 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
}
static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mmkey)
+ struct mlx5_ib_mkey *mmkey)
{
refcount_set(&mmkey->usecount, 1);
@@ -1588,14 +1595,14 @@ static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
}
/* deref an mkey that can participate in ODP flow */
-static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
if (refcount_dec_and_test(&mmkey->usecount))
wake_up(&mmkey->wait);
}
/* deref an mkey that can participate in ODP flow and wait for release */
-static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
mlx5r_deref_odp_mkey(mmkey);
wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
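The usecount/wait pair implements pin-until-idle for mkeys that can be referenced asynchronously by the ODP fault path. Roughly how a fault handler pins one (a simplified sketch of the odp.c flow):

	struct mlx5_ib_mkey *mmkey;

	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
	if (mmkey && refcount_inc_not_zero(&mmkey->usecount)) {
		/* pinned: safe to run commands against this mkey */
	}
	xa_unlock(&dev->odp_mkeys);

	/* ... resolve the fault ... */
	mlx5r_deref_odp_mkey(mmkey);	/* unpin; wakes any waiter at zero */

Teardown does xa_erase() first, then mlx5r_deref_wait_odp_mkey(), which drops the creation reference and sleeps until every in-flight user has unpinned.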
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3be36ebbf67a..14c5564428ab 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -88,9 +88,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void
-assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
@@ -100,17 +99,22 @@ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
mkey->key = key;
}
-static int
-mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
+ int ret;
+
assign_mkey_variant(dev, mkey, in);
- return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
}
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mkey,
+ struct mlx5_ib_mkey *mkey,
struct mlx5_async_ctx *async_ctx,
u32 *in, int inlen, u32 *out, int outlen,
struct mlx5_async_work *context)
@@ -133,7 +137,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@ -260,10 +264,11 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
goto free_in;
}
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
if (err)
goto free_mr;
+ init_waitqueue_head(&mr->mmkey.wait);
mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
@@ -290,7 +295,7 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
kfree(mr);
spin_lock_irq(&ent->lock);
}
@@ -658,7 +663,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -911,12 +916,13 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 length, int access_flags)
+ u64 length, int access_flags, u64 iova)
{
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
mr->access_flags = access_flags;
}
@@ -974,11 +980,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->mmkey.iova = iova;
- mr->mmkey.size = umem->length;
- mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
}
@@ -1087,8 +1090,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
wr->wr.opcode = MLX5_IB_WR_UMR;
wr->pd = mr->ibmr.pd;
wr->mkey = mr->mmkey.key;
- wr->length = mr->mmkey.size;
- wr->virt_addr = mr->mmkey.iova;
+ wr->length = mr->ibmr.length;
+ wr->virt_addr = mr->ibmr.iova;
wr->access_flags = mr->access_flags;
wr->page_shift = mr->page_shift;
wr->xlt_size = sg->length;
@@ -1341,7 +1344,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
mr->mmkey.type = MLX5_MKEY_MR;
mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1388,7 +1391,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- set_mr_fields(dev, mr, length, acc);
+ set_mr_fields(dev, mr, length, acc, start_addr);
return &mr->ibmr;
@@ -1709,7 +1712,6 @@ static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
return err;
mr->access_flags = access_flags;
- mr->mmkey.pd = to_mpd(pd)->pdn;
return 0;
}
@@ -1754,7 +1756,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
if (flags & IB_MR_REREG_PD) {
mr->ibmr.pd = pd;
- mr->mmkey.pd = to_mpd(pd)->pdn;
upd_flags |= MLX5_IB_UPD_XLT_PD;
}
if (flags & IB_MR_REREG_ACCESS) {
@@ -1763,8 +1764,8 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
}
mr->ibmr.length = new_umem->length;
- mr->mmkey.iova = iova;
- mr->mmkey.size = new_umem->length;
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
mr->umem = new_umem;
err = mlx5_ib_update_mr_pas(mr, upd_flags);
@@ -1834,7 +1835,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr->umem = NULL;
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return create_real_mr(new_pd, umem, mr->mmkey.iova,
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
new_access_flags);
}
@@ -2263,9 +2264,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
u32 *in = NULL;
void *mkc;
- int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
struct {
@@ -2310,7 +2311,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
mw->mmkey.type = MLX5_MKEY_MW;
ibmw->rkey = mw->mmkey.key;
- mw->ndescs = ndescs;
+ mw->mmkey.ndescs = ndescs;
resp.response_length =
min(offsetofend(typeof(resp), response_length), udata->outlen);
@@ -2330,7 +2331,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
return 0;
free_mkey:
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
kfree(in);
return err;
@@ -2349,7 +2350,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
*/
mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
- return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
@@ -2406,7 +2407,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
mr->meta_length = 0;
if (data_sg_nents == 1) {
n++;
- mr->ndescs = 1;
+ mr->mmkey.ndescs = 1;
if (data_sg_offset)
sg_offset = *data_sg_offset;
mr->data_length = sg_dma_len(data_sg) - sg_offset;
@@ -2459,7 +2460,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
if (sg_offset_p)
*sg_offset_p = sg_offset;
- mr->ndescs = i;
+ mr->mmkey.ndescs = i;
mr->data_length = mr->ibmr.length;
if (meta_sg_nents) {
@@ -2492,11 +2493,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
}
@@ -2506,11 +2507,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs + mr->meta_ndescs++] =
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
@@ -2526,7 +2527,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2560,7 +2561,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
* metadata offset at the first metadata page
*/
pi_mr->pi_iova = (iova & page_mask) +
- pi_mr->ndescs * ibmr->page_size +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
(pi_mr->ibmr.iova & ~page_mask);
/*
* In order to use one MTT MR for data and metadata, we register
@@ -2591,7 +2592,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->klm_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2626,7 +2627,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
mr->data_length = 0;
mr->data_iova = 0;
mr->meta_ndescs = 0;
@@ -2682,7 +2683,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..086ffb2cb5db 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
mr->parent = imr;
odp->private = mr;
@@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
}
imr->ibmr.pd = &pd->ibpd;
- imr->mmkey.iova = 0;
+ imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
@@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova))
+ if (unlikely(io_virt < mr->ibmr.iova))
return -EFAULT;
if (mr->umem->is_dmabuf)
@@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
if (!odp->is_implicit_odp) {
u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova,
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
(u64)odp->umem.address, &user_va))
return -EFAULT;
if (unlikely(user_va >= ib_umem_end(odp) ||
@@ -788,7 +788,7 @@ struct pf_frame {
int depth;
};
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
@@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
return mmkey->key == key;
}
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
- struct mlx5_ib_mw *mw;
- struct mlx5_ib_devx_mr *devx_mr;
-
- if (mmkey->type == MLX5_MKEY_MW) {
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
- return mw->ndescs;
- }
-
- devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
- mmkey);
- return devx_mr->ndescs;
-}
-
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
- struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
- int ndescs;
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
@@ -885,8 +869,6 @@ next_mr:
case MLX5_MKEY_MW:
case MLX5_MKEY_INDIRECT_DEVX:
- ndescs = get_indirect_num_descs(mmkey);
-
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
@@ -894,7 +876,7 @@ next_mr:
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (ndescs - 2);
+ sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@ -909,14 +891,14 @@ next_mr:
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
if (ret)
goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@ -1703,8 +1685,8 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 8841620af82f..51e48ca9016e 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void)
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
memset(umr, 0, sizeof(*umr));
@@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
struct mlx5_ib_mr *mr,
u32 key, int access)
{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+ int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
@@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
struct mlx5_ib_mr *mr,
struct mlx5_ib_pd *pd)
{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+ int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
dseg->addr = cpu_to_be64(mr->desc_map);
dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
@@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
@@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
/* No UMR, use local_dma_lkey */
pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
- pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
pa_pi_mr.data_length = mr->data_length;
pa_pi_mr.data_iova = mr->data_iova;
if (mr->meta_ndescs) {
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 452e2355d24e..0a3b28142c05 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,7 +403,7 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
- static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
+ static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
.counter = \
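The old assertion type-checked the counter only indirectly, by comparing the address of a member of a NULL qib_ibport against (u64 *)NULL; __same_type() (the kernel's wrapper around __builtin_types_compatible_p()) states the intent directly. A standalone sketch:

	u64 counter;
	static_assert(__same_type(counter, u64));	/* compiles */
	/* static_assert(__same_type(counter, u32)); -- would break the build */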
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index 84dd682d2334..b350081aeb5a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -90,7 +90,7 @@ struct usnic_ib_dev {
struct usnic_ib_vf {
struct usnic_ib_dev *pf;
- spinlock_t lock;
+ struct mutex lock;
struct usnic_vnic *vnic;
unsigned int qp_grp_ref_cnt;
struct usnic_ib_pd *pd;
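Converting vf->lock from a spinlock to a mutex makes the sections it protects sleepable; the QP-group create/destroy calls that run under it (see usnic_ib_verbs.c below) can block, which is illegal under a held spinlock. In a nutshell:

	mutex_lock(&vf->lock);
	buf = kzalloc(len, GFP_KERNEL);	/* may sleep: fine under a mutex,
					 * a might_sleep splat under a spinlock */
	mutex_unlock(&vf->lock);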
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 228e9a36dad0..d346dd48e731 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -572,7 +572,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
vf->pf = pf;
- spin_lock_init(&vf->lock);
+ mutex_init(&vf->lock);
mutex_lock(&pf->usdev_lock);
list_add_tail(&vf->link, &pf->vf_dev_list);
/*
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 06a4e9d4545d..756a83bcff58 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -196,7 +196,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
vf = dev_get_drvdata(dev);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
@@ -208,10 +208,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_uiom_free_dev_list(dev_list);
@@ -220,7 +220,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
/* Try to find resources on an unused vf */
list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (vf->qp_grp_ref_cnt == 0 &&
usnic_vnic_check_room(vnic, res_spec) == 0) {
@@ -228,10 +228,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_info("No free qp grp found on %s\n",
@@ -253,9 +253,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
WARN_ON(qp_grp->state != IB_QPS_RESET);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
usnic_ib_qp_grp_destroy(qp_grp);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)