Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h           6
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c         76
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h          5
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c             24
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c        134
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h         16
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c        9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h        1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c         17
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h         25
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c                   4
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c                11
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                  10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                42
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h              3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c                  14
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                  34
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c                 15
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c          19
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c               13
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c      4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c     4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c   4
23 files changed, 287 insertions, 203 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ca32057e886f..96f76896488d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -57,8 +57,8 @@
#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
-#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
-#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
@@ -120,7 +120,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
-#define BNXT_RE_FLAG_TASK_IN_PROG 6
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
+ atomic_t sched_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
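
[Note: The BIT() -> BIT_ULL() switch above is more than cosmetic. BIT(n) shifts an unsigned long, which is 32 bits on 32-bit architectures, so BIT(39) is undefined behavior there and BNXT_RE_MAX_MR_SIZE would silently truncate. A simplified sketch of the two kernel macros, not the verbatim headers:

/* simplified forms of the kernel's BIT()/BIT_ULL() */
#define BIT(nr)      (1UL << (nr))   /* 32-bit UL: a shift by 39 is UB   */
#define BIT_ULL(nr)  (1ULL << (nr))  /* always 64-bit: BIT_ULL(39) = 512 GB */
]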
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ae9e9ff54826..8301d7e5fa8c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
- if (dev_attr->is_atomic) {
- ib_attr->atomic_cap = IB_ATOMIC_HCA;
- ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
- }
+ ib_attr->atomic_cap = IB_ATOMIC_NONE;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0;
}
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+ __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->scq->cq_lock, flags);
+ if (qp->rcq != qp->scq)
+ spin_lock(&qp->rcq->cq_lock);
+ else
+ __acquire(&qp->rcq->cq_lock);
+
+ return flags;
+}
+
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ unsigned long flags)
+ __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+ if (qp->rcq != qp->scq)
+ spin_unlock(&qp->rcq->cq_lock);
+ else
+ __release(&qp->rcq->cq_lock);
+ spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ unsigned int flags;
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
return rc;
}
+
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
&rdev->sqp_ah->qplib_ah);
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc;
}
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp);
if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail;
}
qp->qplib_qp.scq = &cq->qplib_cq;
+ qp->scq = cq;
}
if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail;
}
qp->qplib_qp.rcq = &cq->qplib_cq;
+ qp->rcq = cq;
}
if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
- goto fail;
+ goto free_umem;
}
}
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
return &qp->ib_qp;
qp_destroy:
bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+ if (udata) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
fail:
kfree(qp);
return ERR_PTR(rc);
@@ -1568,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
int status;
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
+ unsigned int flags;
u8 nw_type;
qp->qplib_qp.modify_flags = 0;
@@ -1596,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p to flush list\n",
qp);
+ flags = bnxt_re_lock_cqs(qp);
bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
}
if (!qp->sumem &&
qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p out of flush list\n",
qp);
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
}
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2189,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
+ /* Need unconditional fence for local invalidate
+ * opcode to work as expected.
+ */
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
- if (wr->send_flags & IB_SEND_FENCE)
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
@@ -2213,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level + 1;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- if (wr->wr.send_flags & IB_SEND_FENCE)
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ /* Need unconditional fence for reg_mr
+ * opcode to function as expected.
+ */
+
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -3548,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
- dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+ dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
length, BNXT_RE_MAX_MR_SIZE);
return ERR_PTR(-ENOMEM);
}
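
[Note: The new bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() pair funnels every "take both CQ locks" site through one helper, so for a given QP the send-CQ lock is always acquired before the receive-CQ lock, and the second acquisition is skipped (with a sparse __acquire annotation) when both point at the same CQ. A generic sketch of the pattern, with hypothetical names:

/* one fixed lock order for a CQ pair; illustrative only */
static unsigned long lock_cq_pair(spinlock_t *first, spinlock_t *second)
{
	unsigned long flags;

	spin_lock_irqsave(first, flags);   /* outer lock disables IRQs */
	if (second != first)
		spin_lock(second);         /* nested; IRQs already off */
	return flags;
}

Unlock must mirror this in reverse, releasing the inner lock before restoring IRQ flags on the outer one, exactly as bnxt_re_unlock_cqs() does.]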
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 423ebe012f95..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -89,6 +89,8 @@ struct bnxt_re_qp {
/* QP1 */
u32 send_psn;
struct ib_ud_header qp1_hdr;
+ struct bnxt_re_cq *scq;
+ struct bnxt_re_cq *rcq;
};
struct bnxt_re_cq {
@@ -220,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata);
int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
#endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 508d00a5a106..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
mutex_unlock(&bnxt_re_dev_lock);
synchronize_rcu();
- flush_workqueue(bnxt_re_wq);
ib_dealloc_device(&rdev->ibdev);
/* rdev is gone */
@@ -731,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
struct ib_event event;
+ unsigned int flags;
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
memset(&event, 0, sizeof(event));
if (qp->qplib_qp.srq) {
@@ -1417,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
switch (re_work->event) {
case NETDEV_REGISTER:
rc = bnxt_re_ib_reg(rdev);
- if (rc)
+ if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to register with IB: %#x", rc);
+ bnxt_re_remove_one(rdev);
+ bnxt_re_dev_unreg(rdev);
+ }
break;
case NETDEV_UP:
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
@@ -1441,7 +1450,7 @@ static void bnxt_re_task(struct work_struct *work)
break;
}
smp_mb__before_atomic();
- clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+ atomic_dec(&rdev->sched_count);
kfree(re_work);
}
@@ -1503,7 +1512,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
/* netdev notifier will call NETDEV_UNREGISTER again later since
* we are still holding the reference to the netdev
*/
- if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+ if (atomic_read(&rdev->sched_count) > 0)
goto exit;
bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev);
@@ -1523,7 +1532,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
- set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+ atomic_inc(&rdev->sched_count);
queue_work(bnxt_re_wq, &re_work->work);
}
}
@@ -1578,6 +1587,11 @@ static void __exit bnxt_re_mod_exit(void)
*/
list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
+ /*
+ * Flush out any scheduled tasks before destroying the
+ * resources
+ */
+ flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
bnxt_re_remove_one(rdev);
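
[Note: Replacing the single BNXT_RE_FLAG_TASK_IN_PROG bit with an atomic sched_count is what allows several bnxt_re_task() works to be in flight at once: a bit can only say "something is scheduled", so the first work to finish would clear it while a second was still queued. The counting pattern, mirroring the diff:

atomic_inc(&rdev->sched_count);          /* just before queue_work()     */
queue_work(bnxt_re_wq, &re_work->work);
	/* ... */
atomic_dec(&rdev->sched_count);          /* last step of bnxt_re_task()  */

/* NETDEV_UNREGISTER may tear down only once the count drains: */
if (atomic_read(&rdev->sched_count) > 0)
	goto exit;
]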
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 1b0e94697fe3..3a78faba8d91 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
}
}
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
- spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+ spin_lock_irqsave(&qp->scq->flush_lock, *flags);
if (qp->scq == qp->rcq)
- __acquire(&qp->rcq->hwq.lock);
+ __acquire(&qp->rcq->flush_lock);
else
- spin_lock(&qp->rcq->hwq.lock);
+ spin_lock(&qp->rcq->flush_lock);
}
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
- unsigned long *flags)
- __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
if (qp->scq == qp->rcq)
- __release(&qp->rcq->hwq.lock);
+ __release(&qp->rcq->flush_lock);
else
- spin_unlock(&qp->rcq->hwq.lock);
- spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
-}
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- if (qp->scq == qp->rcq)
- buddy_cq = NULL;
- else if (qp->scq == cq)
- buddy_cq = qp->rcq;
- else
- buddy_cq = qp->scq;
- return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __acquires(&buddy_cq->hwq.lock)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __acquire(&cq->hwq.lock);
- else
- spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_cq *cq)
- __releases(&buddy_cq->hwq.lock)
-{
- struct bnxt_qplib_cq *buddy_cq = NULL;
-
- buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
- if (!buddy_cq)
- __release(&cq->hwq.lock);
- else
- spin_unlock(&buddy_cq->hwq.lock);
+ spin_unlock(&qp->rcq->flush_lock);
+ spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
__bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -173,11 +133,11 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
}
}
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
unsigned long flags;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
__clean_cq(qp->scq, (u64)(unsigned long)qp);
qp->sq.hwq.prod = 0;
qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
qp->rq.hwq.cons = 0;
__bnxt_qplib_del_flush_qp(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -283,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
u32 sw_cons, raw_cons;
u16 type;
int budget = nq->budget;
- u64 q_handle;
+ uintptr_t q_handle;
/* Service the NQ until empty */
raw_cons = hwq->cons;
@@ -566,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
/* Configure the request */
req.dpi = cpu_to_le32(srq->dpi->dpi);
- req.srq_handle = cpu_to_le64(srq);
+ req.srq_handle = cpu_to_le64((uintptr_t)srq);
req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
pbl = &srq->hwq.pbl[PBL_LVL_0];
@@ -1419,7 +1379,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp resp;
- unsigned long flags;
u16 cmd_flags = 0;
int rc;
@@ -1437,19 +1396,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
return rc;
}
- /* Must walk the associated CQs to nullified the QP ptr */
- spin_lock_irqsave(&qp->scq->hwq.lock, flags);
-
- __clean_cq(qp->scq, (u64)(unsigned long)qp);
-
- if (qp->rcq && qp->rcq != qp->scq) {
- spin_lock(&qp->rcq->hwq.lock);
- __clean_cq(qp->rcq, (u64)(unsigned long)qp);
- spin_unlock(&qp->rcq->hwq.lock);
- }
-
- spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
+ return 0;
+}
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
bnxt_qplib_free_qp_hdr_buf(res, qp);
bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
kfree(qp->sq.swq);
@@ -1462,7 +1414,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
if (qp->orrq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
- return 0;
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
@@ -2116,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
bnxt_qplib_cancel_phantom_processing(qp);
-
- /* Add qp to flush list of the CQ */
- __bnxt_qplib_add_flush_qp(qp);
}
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2294,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
- bnxt_qplib_lock_buddy_cq(qp, cq);
bnxt_qplib_mark_qp_error(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
} else {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
@@ -2412,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
@@ -2498,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
done:
@@ -2510,11 +2454,9 @@ done:
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
u32 sw_cons, raw_cons;
bool rc = true;
- spin_lock_irqsave(&cq->hwq.lock, flags);
raw_cons = cq->hwq.cons;
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2522,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
/* Check for Valid bit. If the CQE is valid, return false */
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
return rc;
}
@@ -2611,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
}
}
@@ -2728,9 +2667,7 @@ do_rq:
*/
/* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ bnxt_qplib_add_flush_qp(qp);
done:
return rc;
}
@@ -2759,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
u32 budget = num_cqes;
unsigned long flags;
- spin_lock_irqsave(&cq->hwq.lock, flags);
+ spin_lock_irqsave(&cq->flush_lock, flags);
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
dev_dbg(&cq->hwq.pdev->dev,
"QPLIB: FP: Flushing SQ QP= %p",
@@ -2773,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
qp);
__flush_rq(&qp->rq, qp, &cqe, &budget);
}
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
+ spin_unlock_irqrestore(&cq->flush_lock, flags);
return num_cqes - budget;
}
@@ -2782,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
struct cq_base *hw_cqe, **hw_cqe_ptr;
- unsigned long flags;
u32 sw_cons, raw_cons;
int budget, rc = 0;
- spin_lock_irqsave(&cq->hwq.lock, flags);
raw_cons = cq->hwq.cons;
budget = num_cqes;
@@ -2862,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
}
exit:
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
return num_cqes - budget;
}
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->hwq.lock, flags);
if (arm_type)
bnxt_qplib_arm_cq(cq, arm_type);
/* Using cq->arm_state variable to track whether to issue cq handler */
atomic_set(&cq->arm_state, 1);
- spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
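
[Note: The net effect of the qplib_fp.c hunks is a lock split: cq->hwq.lock disappears from the poll/arm fast path (callers now rely on the upper-layer bnxt_re_cq->cq_lock), and a dedicated flush_lock protects only the SQ/RQ flush lists. A condensed sketch of the narrowed critical section, using the list names visible in the diff:

/* only flush-list manipulation takes flush_lock now; condensed, not verbatim */
spin_lock_irqsave(&qp->scq->flush_lock, flags);    /* rcq's taken too, via helper */
list_add_tail(&qp->sq_flush, &qp->scq->sqf_head);  /* SQ entry on the send CQ   */
list_add_tail(&qp->rq_flush, &qp->rcq->rqf_head);  /* RQ entry on the recv CQ   */
spin_unlock_irqrestore(&qp->scq->flush_lock, flags);
]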
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 211b27a8f9e2..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
struct list_head sqf_head, rqf_head;
atomic_t arm_state;
spinlock_t compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+ spinlock_t flush_lock; /* QP flush management */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -478,6 +490,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +515,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
err_event->res_err_state_reason);
if (!qp)
break;
- bnxt_qplib_acquire_cq_locks(qp, &flags);
bnxt_qplib_mark_qp_error(qp);
- bnxt_qplib_release_cq_locks(qp, &flags);
+ rcfw->aeq_handler(rcfw, qp_event, qp);
break;
default:
/* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+ /* Supply (log-base-2-of-host-page-size - base-page-shift)
+ * to bono to adjust the doorbell page sizes.
+ */
+ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+ RCFW_DBR_BASE_PAGE_SHIFT);
/*
* VFs need not setup the HW context area, PF
* shall setup this area for VF. Skipping the
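
[Note: The value written to log2_dbr_pg_size is simply PAGE_SHIFT minus the 4K base shift, telling firmware the host's doorbell page size. A worked example for a 64K-page kernel (illustrative arithmetic):

/* PAGE_SHIFT = 16 (64K pages), RCFW_DBR_BASE_PAGE_SHIFT = 12 (4K base) */
req.log2_dbr_pg_size = cpu_to_le16(16 - 12);  /* == 4 */
/* 4 selects CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K (0x4UL) in roce_hsi.h */

On the common 4K-page configuration the field is 0, matching ..._PG_4K.]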
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
#define RCFW_COMM_SIZE 0x104
#define RCFW_DBR_PCI_BAR_REGION 2
+#define RCFW_DBR_BASE_PAGE_SHIFT 12
#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
do { \
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index c015c1861351..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
/* Device */
-static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
-{
- int rc;
- u16 pcie_ctl2;
-
- rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
- &pcie_ctl2);
- if (rc)
- return false;
- return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
-}
-
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
char *fw_ver)
{
@@ -151,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
- attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = le32_to_cpu(sb->max_gid);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
@@ -165,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
- attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+ attr->is_atomic = 0;
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
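
[Note: The l2_db_size fix is the consumer side of the same doorbell-page assumption: the firmware field counts 4K units, so multiplying by PAGE_SIZE over-reported the L2 doorbell region on large-page kernels. Worked numbers, illustrative:

/* sb->l2_db_space_size == 1, on a 64K-page (PAGE_SHIFT = 16) kernel:
 *   old: (1 + 1) * PAGE_SIZE       = 128K  (wrong)
 *   new: (1 + 1) * (0x01 << 12)    = 8K    (what the firmware meant)
 */
]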
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
- __le16 reserved16;
+ /* This value is (log-base-2-of-DBR-page-size - 12).
+ * 0 for 4KB. HW supported values are enumerated below.
+ */
+ __le16 log2_dbr_pg_size;
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
+ #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
+ CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
__le64 qpc_page_dir;
__le64 mrw_page_dir;
__le64 srq_page_dir;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->dlid_path_bits = 0;
if (is_eth) {
+ wc->slid = 0;
wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
}
}
- wc->slid = be16_to_cpu(cqe->rlid);
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
if (is_eth) {
+ wc->slid = 0;
wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
} else {
+ wc->slid = be16_to_cpu(cqe->rlid);
wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
wc->vlan_id = 0xffff;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
gid_tbl[i].version = 2;
if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
gid_tbl[i].type = 1;
- else
- memset(&gid_tbl[i].gid, 0, 12);
}
}
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
if (!gids) {
ret = -ENOMEM;
} else {
- for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
- memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+ for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+ memcpy(&gids[i].gid,
+ &port_gid_table->gids[i].gid,
+ sizeof(union ib_gid));
+ gids[i].gid_type =
+ port_gid_table->gids[i].gid_type;
+ }
}
}
spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..15457c9569a7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
break;
}
- wc->slid = be16_to_cpu(cqe->slid);
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
wc->dlid_path_bits = cqe->ml_path;
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
}
if (ll != IB_LINK_LAYER_ETHERNET) {
+ wc->slid = be16_to_cpu(cqe->slid);
wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
return;
}
+ wc->slid = 0;
vlan_present = cqe->l4_l3_hdr_type & 0x1;
roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
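
[Note: The resize_user() guard is the classic divide-before-multiply overflow check: entries and cqe_size both come from userspace, so (size_t)ucmd.cqe_size * entries must not wrap before it reaches ib_umem_get(). The general form, as a sketch (newer kernels can use check_mul_overflow() from <linux/overflow.h> instead):

/* a * b wraps size_t  iff  b != 0 && a > SIZE_MAX / b  -- illustrative helper */
static inline bool size_mul_wraps(size_t a, size_t b)
{
	return b != 0 && a > SIZE_MAX / b;
}

if (size_mul_wraps(entries, ucmd.cqe_size))
	return -EINVAL;
]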
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..da091de4e69d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi;
struct mlx5_ib_port *port;
+ if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+ ll != IB_LINK_LAYER_ETHERNET) {
+ if (native_port_num)
+ *native_port_num = ib_port_num;
+ return ibdev->mdev;
+ }
+
if (native_port_num)
*native_port_num = 1;
- if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
- return ibdev->mdev;
-
port = &ibdev->port[ib_port_num - 1];
if (!port)
return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
- u8 port = 0;
+ u8 port = (u8)work->param;
if (mlx5_core_is_mp_slave(work->dev)) {
ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)work->param;
-
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)work->param;
-
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = port;
- if (port < 1 || port > ibdev->num_ports) {
+ if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
goto out;
}
@@ -4863,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
return ib_register_device(&dev->ib_dev, NULL);
}
-static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
- ib_unregister_device(&dev->ib_dev);
+ destroy_umrc_res(dev);
}
-static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
- return create_umr_res(dev);
+ ib_unregister_device(&dev->ib_dev);
}
-static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
- destroy_umrc_res(dev);
+ return create_umr_res(dev);
}
static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -4985,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_BFREG,
mlx5_ib_stage_bfrag_init,
mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ NULL,
+ mlx5_ib_stage_pre_ib_reg_umr_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
- mlx5_ib_stage_umr_res_init,
- mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
mlx5_ib_stage_delay_drop_init,
mlx5_ib_stage_delay_drop_cleanup),
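
[Note: The stage rename encodes an ordering fix: profile stages initialize top-to-bottom and clean up bottom-to-top, so giving the UMR resources an init-only stage after IB_REG and a cleanup-only stage before it guarantees destroy_umrc_res() runs only after ib_unregister_device() has drained users. Schematically, a hedged sketch of the driver's stage loop, not verbatim:

/* init walks the table forward; teardown walks it backward */
for (i = 0; i < MLX5_IB_STAGE_MAX; i++)
	if (profile->stage[i].init && profile->stage[i].init(dev))
		goto unwind;    /* run .cleanup for stages < i, in reverse */

A NULL init or cleanup slot is therefore a positioning tool: it pins an action to exactly one direction of the walk.]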
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 139385129973..a5272499b600 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -739,8 +739,9 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_CONG_DEBUGFS,
MLX5_IB_STAGE_UAR,
MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_IB_REG,
- MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_POST_IB_REG_UMR,
MLX5_IB_STAGE_DELAY_DROP,
MLX5_IB_STAGE_CLASS_ATTR,
MLX5_IB_STAGE_MAX,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..c51c602f06d6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
*umem = ib_umem_get(pd->uobject->context, start, length,
access_flags, 0);
err = PTR_ERR_OR_ZERO(*umem);
- if (err < 0) {
+ if (err) {
+ *umem = NULL;
mlx5_ib_err(dev, "umem get failed (%d)\n", err);
return err;
}
@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
if (err) {
mlx5_ib_warn(dev, "Failed to rereg UMR\n");
ib_umem_release(mr->umem);
+ mr->umem = NULL;
clean_mr(dev, mr);
return err;
}
@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
u32 key = mr->mmkey.key;
err = destroy_mkey(dev, mr);
- kfree(mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
key, err);
return err;
}
- } else {
- mlx5_mr_cache_free(dev, mr);
}
return 0;
@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
atomic_sub(npages, &dev->mdev->priv.reg_pages);
}
+ if (!mr->allocated_from_cache)
+ kfree(mr);
+ else
+ mlx5_mr_cache_free(dev, mr);
+
return 0;
}
@@ -1816,7 +1820,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
- mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1831,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
sg_offset = 0;
}
+ mr->ndescs = i;
if (sg_offset_p)
*sg_offset_p = sg_offset;
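
[Note: The mr.c changes untangle a double-ownership bug: clean_mr() used to kfree(mr), after which the caller could still touch it. Freeing is now centralized at the end of dereg_mr(), keyed off how the MR was allocated, mirroring the diff:

/* single point of release, after all bookkeeping is done */
if (!mr->allocated_from_cache)
	kfree(mr);                    /* directly allocated MR        */
else
	mlx5_mr_cache_free(dev, mr);  /* return it to the MR cache    */

Relatedly, mr->ndescs is now set to the number of KLM descriptors actually populated (i) rather than the requested sg_nents, which the max_descs cap may have reduced.]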
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..a2e1aa86e133 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
ib_umem_release(sq->ubuffer.umem);
}
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
{
u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc)
}
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, void *qpin)
+ struct mlx5_ib_rq *rq, void *qpin,
+ size_t qpinlen)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
void *rqc;
void *wq;
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
- int inlen;
+ size_t rq_pas_size = get_rq_pas_size(qpc);
+ size_t inlen;
int err;
- u32 rq_pas_size = get_rq_pas_size(qpc);
+
+ if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+ return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
}
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- u32 *in,
+ u32 *in, size_t inlen,
struct ib_pd *pd)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in);
+ err = create_raw_packet_qp_rq(dev, rq, in, inlen);
if (err)
goto err_destroy_sq;
@@ -1584,6 +1588,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 uidx = MLX5_IB_DEFAULT_UIDX;
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
+ int mlx5_st;
void *qpc;
u32 *in;
int err;
@@ -1592,6 +1597,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ mlx5_st = to_mlx5_st(init_attr->qp_type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
if (init_attr->rwq_ind_tbl) {
if (!udata)
return -ENOSYS;
@@ -1753,7 +1762,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+ MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1867,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
}
+ if (inlen < 0) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, pd);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
@@ -3095,8 +3109,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
goto out;
if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
- !optab[mlx5_cur][mlx5_new])
+ !optab[mlx5_cur][mlx5_new]) {
+ err = -EINVAL;
goto out;
+ }
op = optab[mlx5_cur][mlx5_new];
optpar = ib_mask_to_mlx5_opt(attr_mask);
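
[Note: Threading qpinlen down to create_raw_packet_qp_rq() turns implicit trust into an explicit bounds check: the PAS array is copied out of the caller's create_qp_in buffer, so the buffer must be proven to hold the header plus rq_pas_size bytes first. The shape of the check, as in the diff:

if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
	return -EINVAL;    /* caller's buffer too small for the PAS copy */

The companion "if (inlen < 0)" test in create_qp_common() catches the signed-overflow case before inlen is ever used as a length.]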
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_srq *srq;
- int desc_size;
- int buf_size;
+ size_t desc_size;
+ size_t buf_size;
int err;
struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return ERR_PTR(-EINVAL);
desc_size = roundup_pow_of_two(desc_size);
- desc_size = max_t(int, 32, desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return ERR_PTR(-EINVAL);
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
- desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
- srq->msrq.max_avail_gather);
+ if (buf_size < desc_size)
+ return ERR_PTR(-EINVAL);
in.type = init_attr->srq_type;
if (pd->uobject)
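
[Note: The SRQ sizing now uses size_t end to end, with a sanity check after each step that could wrap. The failure mode being closed, in outline with illustrative values not taken from the source:

/* with a huge user-supplied max_gs, the old int math could wrap:
 *   desc_size = sizeof(next_seg) + max_gs * sizeof(data_seg);  // wraps
 *   desc_size = roundup_pow_of_two(desc_size);                 // garbage in
 *   buf_size  = srq->msrq.max * desc_size;                     // wraps again
 * each new check ("== 0", "max_gs > desc_size", "buf_size < desc_size")
 * rejects a result that came out smaller than an input it was derived from.
 */
]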
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
}
return -EINVAL;
}
- neigh = dst_neigh_lookup(dst, &dst_in);
-
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
if (neigh) {
rcu_read_lock();
if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
qp = idr_find(&dev->qpidr, conn_param->qpn);
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+ ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+ ntohs(raddr->sin_port));
DP_DEBUG(dev, QEDR_MSG_IWARP,
"Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int rc;
int i;
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
DP_DEBUG(dev, QEDR_MSG_IWARP,
"Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
swqe = (struct rdma_sq_send_wqe_1st *)wqe;
swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qedr_cq *cq = get_qedr_cq(ibcq);
- union rdma_cqe *cqe = cq->latest_cqe;
+ union rdma_cqe *cqe;
u32 old_cons, new_cons;
unsigned long flags;
int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return qedr_gsi_poll_cq(ibcq, num_entries, wc);
spin_lock_irqsave(&cq->cq_lock, flags);
+ cqe = cq->latest_cqe;
old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
while (num_entries && is_valid_cqe(cq, cqe)) {
struct qedr_qp *qp;
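
[Note: Moving the cq->latest_cqe read under cq_lock is a small but real race fix: the pointer is updated elsewhere under that same lock, so sampling it before spin_lock_irqsave() could hand the poll loop a stale CQE pointer. Before and after, compressed for illustration:

/* before: unlocked read, racy against concurrent updaters */
union rdma_cqe *cqe = cq->latest_cqe;
spin_lock_irqsave(&cq->cq_lock, flags);

/* after: read only while holding the lock the writers take */
spin_lock_irqsave(&cq->cq_lock, flags);
cqe = cq->latest_cqe;
]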
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index faa9478c14a6..f95b97646c25 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+ struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd;
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->ibcq.cqe = resp->cqe;
cq->cq_handle = resp->cq_handle;
+ cq_resp.cqn = resp->cq_handle;
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
- if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
pvrdma_destroy_cq(&cq->ibcq);
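
[Note: All three pvrdma changes in this series follow one pattern: instead of copying a bare __u32 handle to userspace, build the full uapi response struct, zero-initialized so no uninitialized kernel stack bytes reach userspace, and copy that. The pattern, taken from the diff and trimmed:

struct pvrdma_create_cq_resp cq_resp = {0};  /* zero-init: nothing stale leaks */

cq_resp.cqn = resp->cq_handle;
if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp)))
	/* unwind and return ERR_PTR(-EINVAL) */;

The same shape appears below for the SRQ (srq_resp.srqn) and PD (pd_resp.pdn) paths.]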
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 5acebb1ef631..af235967a9c2 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+ struct pvrdma_create_srq_resp srq_resp = {0};
struct pvrdma_create_srq ucmd;
unsigned long flags;
int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
}
srq->srq_handle = resp->srqn;
+ srq_resp.srqn = resp->srqn;
spin_lock_irqsave(&dev->srq_tbl_lock, flags);
dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
/* Copy udata back. */
- if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
pvrdma_destroy_srq(&srq->ibsrq);
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 16b96616ef7e..a51463cd2f37 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+ struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
void *ptr;
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
pd->privileged = !context;
pd->pd_handle = resp->pd_handle;
pd->pdn = resp->pd_handle;
+ pd_resp.pdn = resp->pd_handle;
if (context) {
- if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");
pvrdma_dealloc_pd(&pd->ibpd);