Diffstat (limited to 'drivers/infiniband/hw')
 drivers/infiniband/hw/hfi1/init.c          | 37
 drivers/infiniband/hw/hfi1/qp.c            |  7
 drivers/infiniband/hw/hfi1/tid_rdma.c      |  5
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 34
 drivers/infiniband/hw/hns/hns_roce_mr.c    |  2
 drivers/infiniband/hw/mlx5/main.c          |  2
 drivers/infiniband/hw/mlx5/odp.c           | 22
 drivers/infiniband/hw/mlx5/qp.c            |  7
 drivers/infiniband/hw/mlx5/srq_cmd.c       |  4
 9 files changed, 88 insertions(+), 32 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5eed4360695f..cb7ad1288821 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -831,6 +831,29 @@ wq_error:
}
/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
+/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
* We can't count on interrupts since we are stopping.
*/
hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- if (ppd->link_wq) {
- destroy_workqueue(ppd->link_wq);
- ppd->link_wq = NULL;
- }
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
}
sdma_exit(dd);
}
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
* clear dma engines, etc.
*/
shutdown_device(dd);
+ destroy_workqueues(dd);
stop_timers(dd);
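
The hfi1 hunks above split workqueue teardown in two: shutdown_device() now only flushes the queues so pending work can still run while the device quiesces, and the actual destroy moves into remove_one() via the new destroy_workqueues(). A minimal sketch of that flush-then-destroy split, with a hypothetical foo_dev standing in for hfi1_devdata:

#include <linux/workqueue.h>

struct foo_dev {
	struct workqueue_struct *wq;
};

/* Shutdown must not free the workqueue: work queued while the device
 * quiesces still needs somewhere to run. */
static void foo_shutdown(struct foo_dev *dev)
{
	if (dev->wq)
		flush_workqueue(dev->wq);	/* drain, but keep usable */
}

/* Only the final remove path tears the workqueue down. */
static void foo_remove(struct foo_dev *dev)
{
	foo_shutdown(dev);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
}
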
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 0c2ae9f7b3e8..be62284e42d9 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
- mtu = OPA_MTU_8192;
+ mtu = (enum ib_mtu)OPA_MTU_8192;
return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 243b4ba0b6f6..facff133139a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
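
Both schedule paths above gain the same guard: when HFI1_SHUTDOWN is set they return true, i.e. they report the send as handled instead of queueing new work onto a workqueue that shutdown is draining. A hedged sketch of the guard, with hypothetical names (FOO_SHUTDOWN plays the role of HFI1_SHUTDOWN):

#include <linux/bits.h>
#include <linux/workqueue.h>

#define FOO_SHUTDOWN	BIT(0)

struct foo_dev {
	unsigned long flags;
	struct workqueue_struct *wq;
};

/* Returns true if the work was queued or deliberately dropped; during
 * shutdown we claim success so callers stop trying to reschedule. */
static bool foo_schedule_send(struct foo_dev *dev, struct work_struct *w)
{
	if (dev->flags & FOO_SHUTDOWN)
		return true;
	return queue_work(dev->wq, w);
}
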
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dd01a51816cc..0618ced45bf8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
- /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+ mtu = get_mtu(ibqp, attr);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+ }
+
+#define MAX_LP_MSG_LEN 65536
+ /* MTU * (2^LP_PKTN_INI) shouldn't be bigger than 64KB */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+ ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else if (attr_mask & IB_QP_PATH_MTU)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
-
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
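
The hns_roce_hw_v2.c hunk derives LP_PKTN_INI from the negotiated MTU instead of max_sq_inline, keeping MTU * 2^LP_PKTN_INI within the 64KB message limit. The arithmetic in isolation (ib_mtu_enum_to_int() is the real verbs helper; foo_lp_pktn_ini is illustrative):

#include <linux/log2.h>
#include <rdma/ib_verbs.h>

#define MAX_LP_MSG_LEN 65536	/* 64KB hardware message limit */

/* e.g. IB_MTU_4096: ilog2(65536 / 4096) = 4, so the HW may chain up
 * to 2^4 = 16 MTU-sized packets into one message. */
static u32 foo_lp_pktn_ini(enum ib_mtu mtu)
{
	return ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu));
}
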
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0e71ebee9e52..6b226a5eb7db 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
- hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
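
The hns_roce_mr.c one-liner matters on kernels built with pages larger than 4KB: HNS_HW_PAGE_SHIFT is a fixed hardware constant (12), while pbl_buf_pg_sz is a shift applied on top of the kernel's base page. A sketch of the difference, assuming pbl_buf_pg_sz is meant to be relative to PAGE_SHIFT:

#include <linux/mm.h>

#define HNS_HW_PAGE_SHIFT 12	/* fixed 4KB hardware page */

/* On a 64KB-page kernel (PAGE_SHIFT == 16) with pbl_buf_pg_sz == 0:
 * old: 1UL << (0 + HNS_HW_PAGE_SHIFT) =  4KB (too small)
 * new: 1UL << (0 + PAGE_SHIFT)        = 64KB (matches the buffer) */
static unsigned long pbl_page_size(unsigned int pbl_buf_pg_sz)
{
	return 1UL << (pbl_buf_pg_sz + PAGE_SHIFT);
}
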
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 343a8b8361e7..6f99ed03d88e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
mdev_port_num);
if (err)
goto out;
- ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
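
The mlx5/main.c change stops trusting the PCAM feature bit and instead probes the PTYS output itself: reading eth_proto_capability with ext == true yields a non-zero mask only when the firmware filled the extended layout. A hedged sketch of that probe-then-select flow, with plain u32 fields standing in for the real PTYS register accessors:

#include <linux/types.h>

struct foo_ptys_out {
	u32 eth_proto_capability;	/* legacy layout */
	u32 ext_eth_proto_capability;	/* zero if FW lacks ext speeds */
	u32 eth_proto_oper;
	u32 ext_eth_proto_oper;
};

/* Probe: a non-zero extended capability mask means the extended
 * fields are valid, whatever the feature-cap bit claims. */
static u32 foo_proto_oper(const struct foo_ptys_out *out)
{
	bool ext = !!out->ext_eth_proto_capability;

	return ext ? out->ext_eth_proto_oper : out->eth_proto_oper;
}
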
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..1ab676b66894 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed; xa_erase() has
+ * prevented new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+ * pagefault_mr() on an implicit child. Due to this, additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
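
The rewritten odp.c comments describe a two-phase drain: wait for num_deferred_work to hit zero, erase every child under xa_lock so destroy_unused_implicit_child_mr() can no longer increment the counter, then wait once more for any destroy that raced in. A condensed sketch of that counter-plus-waitqueue pattern (foo_imr mirrors the relevant mlx5_ib_mr fields):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/xarray.h>

struct foo_imr {
	atomic_t num_deferred_work;
	wait_queue_head_t q_deferred_work;
	struct xarray implicit_children;
};

static void foo_drain_deferred(struct foo_imr *imr)
{
	unsigned long idx;
	void *child;

	/* Phase 1: everything already queued must finish. */
	wait_event(imr->q_deferred_work,
		   !atomic_read(&imr->num_deferred_work));

	/* Erase children under the lock so nothing new can charge the
	 * counter against a child still visible in the xarray. */
	xa_lock(&imr->implicit_children);
	xa_for_each(&imr->implicit_children, idx, child)
		__xa_erase(&imr->implicit_children, idx);
	xa_unlock(&imr->implicit_children);

	/* Phase 2: wait out any concurrent destroy that raced above. */
	wait_event(imr->q_deferred_work,
		   !atomic_read(&imr->num_deferred_work));
}
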
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f939c9b769f0..e050eade97a1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2668,6 +2668,10 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
return (create_flags) ? -EINVAL : 0;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
process_create_flag(dev, &create_flags,
IB_QP_CREATE_INTEGRITY_EN,
MLX5_CAP_GEN(mdev, sho), qp);
@@ -3001,11 +3005,12 @@ destroy_qp:
mlx5_ib_destroy_dct(qp);
} else {
/*
- * The two lines below are temp solution till QP allocation
+ * The lines below are a temp solution till QP allocation
* will be moved to be under IB/core responsibility.
*/
qp->ibqp.send_cq = attr->send_cq;
qp->ibqp.recv_cq = attr->recv_cq;
+ qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
}
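
The second mlx5/qp.c hunk adds qp->ibqp.pd to the fields copied before destroy_qp_common(): on this error path the IB core never populated ibqp, so the driver must fill in whatever the common destroy code dereferences. A hedged sketch of the hazard (destroy_common stands in for destroy_qp_common):

#include <rdma/ib_verbs.h>

static void destroy_common(struct ib_qp *ibqp)
{
	/* ...assumed to dereference ibqp->pd, ibqp->send_cq and
	 * ibqp->recv_cq during teardown... */
}

static void error_unwind(struct ib_qp *ibqp, struct ib_pd *pd,
			 struct ib_qp_init_attr *attr)
{
	ibqp->send_cq = attr->send_cq;
	ibqp->recv_cq = attr->recv_cq;
	ibqp->pd = pd;		/* the new line: avoid a NULL pd deref */
	destroy_common(ibqp);
}
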
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6f5eadc4d183..37aaacebd3f2 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- xa_lock(&table->array);
+ xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
- xa_unlock(&table->array);
+ xa_unlock_irq(&table->array);
return srq;
}
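
The srq_cmd.c switch to xa_lock_irq() disables local interrupts while the table lock is held, presumably because the same lock can also be taken from interrupt context (e.g. SRQ event handling); a plain xa_lock() taken in process context could then deadlock against an IRQ arriving mid-hold. The lookup-and-ref pattern in isolation (foo_srq is hypothetical):

#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct foo_srq {
	refcount_t refcount;
};

static struct foo_srq *foo_get_srq(struct xarray *table, u32 srqn)
{
	struct foo_srq *srq;

	xa_lock_irq(table);	/* IRQ-safe: lock also used in IRQ context */
	srq = xa_load(table, srqn);
	if (srq)
		refcount_inc(&srq->refcount);
	xa_unlock_irq(table);

	return srq;
}
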