Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r-- | drivers/infiniband/hw/hfi1/chip.c          |  7
-rw-r--r-- | drivers/infiniband/hw/hfi1/qp.c            |  7
-rw-r--r-- | drivers/infiniband/hw/hfi1/qp.h            |  3
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 86
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_main.c  |  3
-rw-r--r-- | drivers/infiniband/hw/mlx4/cq.c            |  6
-rw-r--r-- | drivers/infiniband/hw/mlx4/main.c          |  2
-rw-r--r-- | drivers/infiniband/hw/mlx4/mcg.c           |  2
-rw-r--r-- | drivers/infiniband/hw/mlx4/mlx4_ib.h       |  1
-rw-r--r-- | drivers/infiniband/hw/mlx4/qp.c            | 40
-rw-r--r-- | drivers/infiniband/hw/mlx4/srq.c           |  8
-rw-r--r-- | drivers/infiniband/hw/mlx5/mr.c            | 36
-rw-r--r-- | drivers/infiniband/hw/nes/nes_hw.c         |  4
-rw-r--r-- | drivers/infiniband/hw/qib/qib_qp.c         | 15
-rw-r--r-- | drivers/infiniband/hw/qib/qib_verbs.h      |  4
15 files changed, 123 insertions(+), 101 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 	/* clear from the handled mask of the general interrupt */
 	m = isrc / 64;
 	n = isrc % 64;
-	dd->gi_mask[m] &= ~((u64)1 << n);
+	if (likely(m < CCE_NUM_INT_CSRS)) {
+		dd->gi_mask[m] &= ~((u64)1 << n);
+	} else {
+		dd_dev_err(dd, "remap interrupt err\n");
+		return;
+	}
 
 	/* direct the chip source to the given MSI-X interrupt */
 	m = isrc / 8;
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
 		   qp->pid);
 }
 
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv;
 
-	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 
 	priv->owner = qp;
 
-	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
 				   rdi->dparms.node);
 	if (!priv->s_ahg) {
 		kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
 /*
  * Functions provided by hfi1 driver for rdmavt to use
  */
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 unsigned free_all_qps(struct rvt_dev_info *rdi);
 void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			switch (wr->opcode) {
 			case IB_WR_RDMA_READ:
 				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
-				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-					      atomic_wr(wr)->rkey);
+				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				break;
 			case IB_WR_RDMA_WRITE:
 			case IB_WR_RDMA_WRITE_WITH_IMM:
 				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
-				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-					      atomic_wr(wr)->rkey);
+				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+					      rdma_wr(wr)->rkey);
 				break;
 			case IB_WR_SEND:
 			case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	union ib_gid dgid;
 	u64 subnet_prefix;
 	int attr_mask = 0;
-	int i;
+	int i, j;
 	int ret;
+	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 	u8 phy_port;
+	u8 port = 0;
 	u8 sl;
 
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	attr.rnr_retry		= 7;
 	attr.timeout		= 0x12;
 	attr.path_mtu		= IB_MTU_256;
+	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
 	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 	rdma_ah_set_static_rate(&attr.ah_attr, 3);
 
 	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+				(i % HNS_ROCE_MAX_PORTS);
+		sl = i / HNS_ROCE_MAX_PORTS;
+
+		for (j = 0; j < caps->num_ports; j++) {
+			if (hr_dev->iboe.phy_port[j] == phy_port) {
+				queue_en[i] = 1;
+				port = j;
+				break;
+			}
+		}
+
+		if (!queue_en[i])
+			continue;
+
 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
 		if (IS_ERR(free_mr->mr_free_qp[i])) {
 			dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		}
 		hr_qp = free_mr->mr_free_qp[i];
 
-		sl = i / caps->num_ports;
-
-		if (caps->num_ports == HNS_ROCE_MAX_PORTS)
-			phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
-				(i % caps->num_ports);
-		else
-			phy_port = i % caps->num_ports;
-
-		hr_qp->port		= phy_port + 1;
+		hr_qp->port		= port;
 		hr_qp->phy_port		= phy_port;
 		hr_qp->ibqp.qp_type	= IB_QPT_RC;
 		hr_qp->ibqp.device	= &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		hr_qp->ibqp.recv_cq	= cq;
 		hr_qp->ibqp.send_cq	= cq;
 
-		rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
-		rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
-		attr.port_num		= phy_port + 1;
+		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+		rdma_ah_set_sl(&attr.ah_attr, sl);
+		attr.port_num		= port + 1;
 
 		attr.dest_qp_num	= hr_qp->qpn;
 		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
-		       hr_dev->dev_addr[phy_port],
+		       hr_dev->dev_addr[port],
 		       MAC_ADDR_OCTET_NUM);
 
 		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
-		memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
-		memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 		dgid.raw[11] = 0xff;
 		dgid.raw[12] = 0xfe;
 		dgid.raw[8] ^= 2;
 		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-		attr_mask |= IB_QP_PORT;
 
 		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 					    IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+
 		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 		if (ret)
 			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
 	int i;
 	int ret;
-	int ne;
+	int ne = 0;
 
 	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
 	hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+		ne++;
+
 		ret = hns_roce_v1_send_lp_wqe(hr_qp);
 		if (ret) {
 			dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		}
 	}
 
-	ne = HNS_ROCE_V1_RESV_QP;
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
 		if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 			goto free_work;
 		}
 		ne -= ret;
-		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
 	} while (ne && time_before_eq(jiffies, end));
 
 	if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
-	} else {
+	} else { /* RQ conrespond to CQE */
 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 		opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 		old_cnt = roce_get_field(old_send,
 					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+		if (cur_cnt - old_cnt >
+					SDB_ST_CMP_VAL) {
 			success_flags = 1;
-		else {
-			send_ptr = roce_get_field(old_send,
+		} else {
+			send_ptr =
+				roce_get_field(old_send,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
 				   roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	struct hns_roce_dev *hr_dev;
 	struct hns_roce_qp *hr_qp;
 	struct device *dev;
+	unsigned long qpn;
 	int ret;
 
 	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	dev = &hr_dev->pdev->dev;
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 	hr_qp = qp_work_entry->qp;
+	qpn = hr_qp->qpn;
 
-	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
 
 	qp_work_entry->sche_cnt++;
 
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 					 &qp_work_entry->db_wait_stage);
 	if (ret) {
 		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-			hr_qp->qpn);
+			qpn);
 		return;
 	}
 
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
 				    IB_QPS_RESET);
 	if (ret) {
-		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
 		return;
 	}
 
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
 		/* RC QP, release QPN */
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+		hns_roce_release_range_qp(hr_dev, qpn, 1);
 		kfree(hr_qp);
 	} else
 		kfree(hr_to_hr_sqp(hr_qp));
 
 	kfree(qp_work_entry);
 
-	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_bh(&hr_dev->iboe.lock);
-
 	switch (event) {
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		break;
 	}
 
-	spin_unlock_bh(&hr_dev->iboe.lock);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	int err;
 
 	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
 	if (err)
 		goto err_mtt;
 
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
 		if (err)
 			goto err_cq;
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 		 * call to mlx4_ib_vma_close.
 		 */
 		put_task_struct(owning_process);
-		msleep(1);
+		usleep_range(1000, 2000);
 		owning_process = get_pid_task(ibcontext->tgid,
 					      PIDTYPE_PID);
 		if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 		if (!count)
 			break;
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	} while (time_after(end, jiffies));
 
 	flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
 	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
 	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
 	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
-	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
 
 	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
 	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
-			    gfp_t gfp)
+			    struct ib_udata *udata, int sqpn,
+			    struct mlx4_ib_qp **caller_qp)
 {
 	int qpn;
 	int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
 		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
 				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-			sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+			sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
 			if (!sqp)
 				return -ENOMEM;
 			qp = &sqp->qp;
 			qp->pri.vid = 0xFFFF;
 			qp->alt.vid = 0xFFFF;
 		} else {
-			qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+			qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
 			if (!qp)
 				return -ENOMEM;
 			qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 
 		if (qp_has_rq(init_attr)) {
-			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;
 
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
-				   &qp->buf, gfp)) {
+				   &qp->buf)) {
 			memcpy(&init_attr->cap, &backup_cap,
 			       sizeof(backup_cap));
 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 				goto err_db;
 
 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
-					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+					   PAGE_SIZE * 2, &qp->buf)) {
 				err = -ENOMEM;
 				goto err_db;
 			}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
 		if (err)
 			goto err_mtt;
 
 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->sq.wrid)
 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->rq.wrid)
 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 
-	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
 		goto err_qpn;
 
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	int err;
 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 	u16 xrcdn = 0;
-	gfp_t gfp;
 
-	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
-		GFP_NOIO : GFP_KERNEL;
 	/*
 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
 	 * and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 					MLX4_IB_SRIOV_TUNNEL_QP |
 					MLX4_IB_SRIOV_SQP |
 					MLX4_IB_QP_NETIF |
-					MLX4_IB_QP_CREATE_ROCE_V2_GSI |
-					MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
 		return ERR_PTR(-EINVAL);
 
 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 			return ERR_PTR(-EINVAL);
 
 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
-						 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
 						 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
 		     init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_RAW_PACKET:
-		qp = kzalloc(sizeof *qp, gfp);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 		qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_UD:
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
-				       udata, 0, &qp, gfp);
+				       udata, 0, &qp);
 		if (err) {
 			kfree(qp);
 			return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 		}
 
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
-				       sqpn,
-				       &qp, gfp);
+				       sqpn, &qp);
 		if (err)
 			return ERR_PTR(err);
 
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
 		if (err)
 			goto err_srq;
 
 		*srq->db.db = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
-				   GFP_KERNEL)) {
+		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+				   &srq->buf)) {
 			err = -ENOMEM;
 			goto err_db;
 		}
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
 		if (err)
 			goto err_mtt;
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..8ab2f1360a45 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	}
 }
 
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->cache.root);
+	dev->cache.root = NULL;
+}
+
 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 		sprintf(ent->name, "%d", ent->order);
 		ent->dir = debugfs_create_dir(ent->name, cache->root);
 		if (!ent->dir)
-			return -ENOMEM;
+			goto err;
 
 		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
 						 &size_fops);
 		if (!ent->fsize)
-			return -ENOMEM;
+			goto err;
 
 		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
 						  &limit_fops);
 		if (!ent->flimit)
-			return -ENOMEM;
+			goto err;
 
 		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
 					       &ent->cur);
 		if (!ent->fcur)
-			return -ENOMEM;
+			goto err;
 
 		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
 						&ent->miss);
 		if (!ent->fmiss)
-			return -ENOMEM;
+			goto err;
 	}
 
 	return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
+err:
+	mlx5_mr_cache_debugfs_cleanup(dev);
 
-	debugfs_remove_recursive(dev->cache.root);
+	return -ENOMEM;
 }
 
 static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	if (err)
 		mlx5_ib_warn(dev, "cache debugfs failure\n");
 
+	/*
+	 * We don't want to fail driver if debugfs failed to initialize,
+	 * so we are not forwarding error to the user.
+	 */
+
 	return 0;
 }
 
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
-		if (unlikely(i > mr->max_descs))
+		if (unlikely(i >= mr->max_descs))
 			break;
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 			if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) ||
 			    (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
 				int_cnt++;
-			msleep(1);
+			usleep_range(1000, 2000);
 		}
 		if (int_cnt > 1) {
 			spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 						break;
 					}
 				}
-				msleep(1);
+				usleep_range(1000, 2000);
 			}
 		}
 	}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
 
 };
 
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
-			 gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp)
+		  enum ib_qp_type type, u8 port)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 		max_scan = qpt->nmaps - !offset;
 		for (i = 0;;) {
 			if (unlikely(!map->page)) {
-				get_map_page(qpt, map, gfp);
+				get_map_page(qpt, map);
 				if (unlikely(!map->page))
 					break;
 			}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
 	return ib_mtu_enum_to_int(pmtu);
 }
 
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct qib_qp_priv *priv;
 
-	priv = kzalloc(sizeof(*priv), gfp);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 	priv->owner = qp;
 
-	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
 	if (!priv->s_hdr) {
 		kfree(priv);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
  * Functions provided by qib driver for rdmavt to use
  */
 unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_notify_qp_reset(struct rvt_qp *qp);
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp);
+		  enum ib_qp_type type, u8 port);
 void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 #ifdef CONFIG_DEBUG_FS