author      Christoph Hellwig <hch@lst.de>    2015-10-08 09:16:33 +0100
committer   Christoph Hellwig <hch@lst.de>    2015-10-08 11:09:10 +0100
commit      e622f2f4ad2142d2a613a57fb85f8cf737935ef5 (patch)
tree        19fa458bcaacf3f8b2f5e40676f748afc3df1e84 /drivers/infiniband/hw/mthca/mthca_qp.c
parent      b8cab5dab15ff5c2acc3faefdde28919b0341c11 (diff)
download    linux-e622f2f4ad2142d2a613a57fb85f8cf737935ef5.tar.bz2
IB: split struct ib_send_wr
This patch splits up struct ib_send_wr so that all non-trivial verbs
use their own structure, each of which embeds struct ib_send_wr. This
dramatically shrinks the size of a WR for most common operations
(a sketch of the resulting layout follows the size list below):
sizeof(struct ib_send_wr) (old): 96
sizeof(struct ib_send_wr): 48
sizeof(struct ib_rdma_wr): 64
sizeof(struct ib_atomic_wr): 96
sizeof(struct ib_ud_wr): 88
sizeof(struct ib_fast_reg_wr): 88
sizeof(struct ib_bind_mw_wr): 96
sizeof(struct ib_sig_handover_wr): 80
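The pattern behind these numbers is plain embedding plus a container_of()-based
downcast helper, sketched below for the RDMA case. This is only an illustrative
sketch: the authoritative definitions live in include/rdma/ib_verbs.h, the
u64/u32 field types are assumed here, and the field names simply mirror the
rdma_wr(wr)->remote_addr / ->rkey accesses visible in the diff further down.

    /*
     * Illustrative sketch only -- see include/rdma/ib_verbs.h for the real
     * definitions.  Each verb-specific WR embeds the generic struct
     * ib_send_wr as its first member ...
     */
    struct ib_rdma_wr {
            struct ib_send_wr       wr;             /* generic part, posted to the QP */
            u64                     remote_addr;    /* field types assumed */
            u32                     rkey;
    };

    /* ... and drivers downcast the generic pointer in their post_send path. */
    static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
    {
            return container_of(wr, struct ib_rdma_wr, wr);
    }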
And with Sagi's pending MR rework the fast registration WR will also be
down to a reasonable size:
sizeof(struct ib_fastreg_wr): 64
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com> [srp, srpt]
Reviewed-by: Chuck Lever <chuck.lever@oracle.com> [sunrpc]
Tested-by: Haggai Eran <haggaie@mellanox.com>
Tested-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_qp.c | 84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index e354b2f04ad9..35fe506e2cfa 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1476,7 +1476,7 @@ void mthca_free_qp(struct mthca_dev *dev,
 
 /* Create UD header for an MLX send and build a data segment for it */
 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
-                            int ind, struct ib_send_wr *wr,
+                            int ind, struct ib_ud_wr *wr,
                             struct mthca_mlx_seg *mlx,
                             struct mthca_data_seg *data)
 {
@@ -1485,10 +1485,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         u16 pkey;
 
         ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
-                          mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
+                          mthca_ah_grh_present(to_mah(wr->ah)), 0,
                           &sqp->ud_header);
 
-        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
+        err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
         if (err)
                 return err;
         mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
@@ -1499,7 +1499,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         mlx->rlid = sqp->ud_header.lrh.destination_lid;
         mlx->vcrc = 0;
 
-        switch (wr->opcode) {
+        switch (wr->wr.opcode) {
         case IB_WR_SEND:
                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
                 sqp->ud_header.immediate_present = 0;
@@ -1507,7 +1507,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         case IB_WR_SEND_WITH_IMM:
                 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                 sqp->ud_header.immediate_present = 1;
-                sqp->ud_header.immediate_data = wr->ex.imm_data;
+                sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
                 break;
         default:
                 return -EINVAL;
@@ -1516,18 +1516,18 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
         sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
                 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
-        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
         if (!sqp->qp.ibqp.qp_num)
                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                    sqp->pkey_index, &pkey);
         else
                 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-                                   wr->wr.ud.pkey_index, &pkey);
+                                   wr->pkey_index, &pkey);
         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
-                                               sqp->qkey : wr->wr.ud.remote_qkey);
+        sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+                                               sqp->qkey : wr->remote_qkey);
         sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
 
         header_size = ib_ud_header_pack(&sqp->ud_header,
@@ -1569,34 +1569,34 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
 }
 
 static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
-                                           struct ib_send_wr *wr)
+                                           struct ib_atomic_wr *wr)
 {
-        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-                aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-                aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
+        if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+                aseg->swap_add = cpu_to_be64(wr->swap);
+                aseg->compare  = cpu_to_be64(wr->compare_add);
         } else {
-                aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+                aseg->swap_add = cpu_to_be64(wr->compare_add);
                 aseg->compare  = 0;
         }
 
 }
 
 static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
-                             struct ib_send_wr *wr)
+                             struct ib_ud_wr *wr)
 {
-        useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
-        useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
-        useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
-        useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
+        useg->lkey    = cpu_to_be32(to_mah(wr->ah)->key);
+        useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
+        useg->dqpn    = cpu_to_be32(wr->remote_qpn);
+        useg->qkey    = cpu_to_be32(wr->remote_qkey);
 
 }
 
 static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
-                             struct ib_send_wr *wr)
+                             struct ib_ud_wr *wr)
 {
-        memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
-        useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-        useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+        memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
+        useg->dqpn = cpu_to_be32(wr->remote_qpn);
+        useg->qkey = cpu_to_be32(wr->remote_qkey);
 }
 
 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1664,11 +1664,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_ATOMIC_CMP_AND_SWP:
                         case IB_WR_ATOMIC_FETCH_AND_ADD:
-                                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                                              wr->wr.atomic.rkey);
+                                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                                              atomic_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
 
-                                set_atomic_seg(wqe, wr);
+                                set_atomic_seg(wqe, atomic_wr(wr));
                                 wqe += sizeof (struct mthca_atomic_seg);
                                 size += (sizeof (struct mthca_raddr_seg) +
                                          sizeof (struct mthca_atomic_seg)) / 16;
@@ -1677,8 +1677,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
                         case IB_WR_RDMA_READ:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -1694,8 +1694,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -1708,13 +1708,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case UD:
-                        set_tavor_ud_seg(wqe, wr);
+                        set_tavor_ud_seg(wqe, ud_wr(wr));
                         wqe += sizeof (struct mthca_tavor_ud_seg);
                         size += sizeof (struct mthca_tavor_ud_seg) / 16;
                         break;
 
                 case MLX:
-                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                        err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
                                                wqe - sizeof (struct mthca_next_seg),
                                                wqe);
                         if (err) {
@@ -2005,11 +2005,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_ATOMIC_CMP_AND_SWP:
                         case IB_WR_ATOMIC_FETCH_AND_ADD:
-                                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                                              wr->wr.atomic.rkey);
+                                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                                              atomic_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
 
-                                set_atomic_seg(wqe, wr);
+                                set_atomic_seg(wqe, atomic_wr(wr));
                                 wqe += sizeof (struct mthca_atomic_seg);
                                 size += (sizeof (struct mthca_raddr_seg) +
                                          sizeof (struct mthca_atomic_seg)) / 16;
@@ -2018,8 +2018,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         case IB_WR_RDMA_READ:
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -2035,8 +2035,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         switch (wr->opcode) {
                         case IB_WR_RDMA_WRITE:
                         case IB_WR_RDMA_WRITE_WITH_IMM:
-                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                                              wr->wr.rdma.rkey);
+                                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                                              rdma_wr(wr)->rkey);
                                 wqe += sizeof (struct mthca_raddr_seg);
                                 size += sizeof (struct mthca_raddr_seg) / 16;
                                 break;
@@ -2049,13 +2049,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case UD:
-                        set_arbel_ud_seg(wqe, wr);
+                        set_arbel_ud_seg(wqe, ud_wr(wr));
                         wqe += sizeof (struct mthca_arbel_ud_seg);
                         size += sizeof (struct mthca_arbel_ud_seg) / 16;
                         break;
 
                 case MLX:
-                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                        err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
                                                wqe - sizeof (struct mthca_next_seg),
                                                wqe);
                         if (err) {
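For context on the caller side of this change (not part of the mthca diff above),
here is a hedged sketch of how a consumer would build and post an RDMA WRITE with
the split structures; the helper name post_one_rdma_write and its parameters are
hypothetical, only ib_post_send() and the ib_rdma_wr layout come from the series.

    /*
     * Hypothetical caller-side example (not from this patch): fill in the
     * verb-specific ib_rdma_wr and hand its embedded ib_send_wr to
     * ib_post_send().  The provider then recovers the full structure with
     * rdma_wr(), as the mthca changes above do.
     */
    static int post_one_rdma_write(struct ib_qp *qp, struct ib_sge *sge,
                                   u64 remote_addr, u32 rkey)
    {
            struct ib_rdma_wr wr = {
                    .wr = {
                            .opcode     = IB_WR_RDMA_WRITE,
                            .sg_list    = sge,
                            .num_sge    = 1,
                            .send_flags = IB_SEND_SIGNALED,
                    },
                    .remote_addr = remote_addr,
                    .rkey        = rkey,
            };
            struct ib_send_wr *bad_wr;

            /* Only the embedded generic WR crosses the verbs API boundary. */
            return ib_post_send(qp, &wr.wr, &bad_wr);
    }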