From 99f80d2f5fb6d4165186390ecba83952803b667b Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 10 Oct 2016 06:14:39 -0700 Subject: IB/hfi1: Optimize lkey validation structures Profiling shows that the key validation is susceptible to cache line trading when accessing the lkey table. Fix by separating out the read mostly fields from the write fields. In addition the shift amount, which is function of the lkey table size, is precomputed and stored with the table pointer. Since both the shift and table pointer are in the same read mostly cacheline, this saves a cache line in this hot path. Reviewed-by: Sebastian Sanchez Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- include/rdma/rdmavt_mr.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h index 6b3c6c8b6b77..de59de28b6a2 100644 --- a/include/rdma/rdmavt_mr.h +++ b/include/rdma/rdmavt_mr.h @@ -90,11 +90,15 @@ struct rvt_mregion { #define RVT_MAX_LKEY_TABLE_BITS 23 struct rvt_lkey_table { - spinlock_t lock; /* protect changes in this struct */ - u32 next; /* next unused index (speeds search) */ - u32 gen; /* generation count */ + /* read mostly fields */ u32 max; /* size of the table */ + u32 shift; /* lkey/rkey shift */ struct rvt_mregion __rcu **table; + /* writeable fields */ + /* protect changes in this struct */ + spinlock_t lock ____cacheline_aligned_in_smp; + u32 next; /* next unused index (speeds search) */ + u32 gen; /* generation count */ }; /* -- cgit v1.2.3 From be5d740bdc5448a8a542f8aa84d88d65fe88e486 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 17 Oct 2016 04:19:07 -0700 Subject: IB/rdmvat: Organize hot path calldowns into a single cacheline Save a cacheline by having hot path calldowns together. Reviewed-by: Sebastian Sanchez Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- include/rdma/rdma_vt.h | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index e31502107a58..861e23eaebda 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -185,6 +185,27 @@ struct rvt_driver_provided { * check_support() for details. */ + /* hot path calldowns in a single cacheline */ + + /* + * Give the driver a notice that there is send work to do. It is up to + * the driver to generally push the packets out, this just queues the + * work with the driver. There are two variants here. The no_lock + * version requires the s_lock not to be held. The other assumes the + * s_lock is held. + */ + void (*schedule_send)(struct rvt_qp *qp); + void (*schedule_send_no_lock)(struct rvt_qp *qp); + + /* Driver specific work request checking */ + int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe); + + /* + * Sometimes rdmavt needs to kick the driver's send progress. That is + * done by this call back. + */ + void (*do_send)(struct rvt_qp *qp); + /* Passed to ib core registration. Callback to create syfs files */ int (*port_callback)(struct ib_device *, u8, struct kobject *); @@ -222,22 +243,6 @@ struct rvt_driver_provided { */ void (*notify_qp_reset)(struct rvt_qp *qp); - /* - * Give the driver a notice that there is send work to do. It is up to - * the driver to generally push the packets out, this just queues the - * work with the driver. There are two variants here. 
The no_lock - * version requires the s_lock not to be held. The other assumes the - * s_lock is held. - */ - void (*schedule_send)(struct rvt_qp *qp); - void (*schedule_send_no_lock)(struct rvt_qp *qp); - - /* - * Sometimes rdmavt needs to kick the driver's send progress. That is - * done by this call back. - */ - void (*do_send)(struct rvt_qp *qp); - /* * Get a path mtu from the driver based on qp attributes. */ @@ -324,9 +329,6 @@ struct rvt_driver_provided { void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); - /* Driver specific work request checking */ - int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe); - /* Notify driver a mad agent has been created */ void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx); @@ -355,12 +357,12 @@ struct rvt_dev_info { /* post send table */ const struct rvt_operation_params *post_parms; - struct rvt_mregion __rcu *dma_mr; - struct rvt_lkey_table lkey_table; - /* Driver specific helper functions */ struct rvt_driver_provided driver_f; + struct rvt_mregion __rcu *dma_mr; + struct rvt_lkey_table lkey_table; + /* Internal use */ int n_pds_allocated; spinlock_t n_pds_lock; /* Protect pd allocated count */ -- cgit v1.2.3 From 850d8fd765079d223d47de89f1f03ab95d1036cb Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Thu, 10 Nov 2016 11:30:56 +0200 Subject: IB/mlx4: Handle IPv4 header when demultiplexing MAD When MAD arrives to the hypervisor, we need to identify which slave it should be sent by destination GID. When L3 protocol is IPv4 the GRH is replaced by an IPv4 header. This patch detects when IPv4 header needs to be parsed instead of GRH. Fixes: b6ffaeffaea4 ('mlx4: In RoCE allow guests to have multiple GIDS') Signed-off-by: Moni Shoua Signed-off-by: Daniel Jurgens Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/verbs.c | 16 +++++++++------- drivers/infiniband/hw/mlx4/mad.c | 33 ++++++++++++++++++++++++++++++--- include/rdma/ib_verbs.h | 19 +++++++++++++++++++ 3 files changed, 58 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index b7f9f3d95975..98e10c5f2a96 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) } EXPORT_SYMBOL(ib_create_ah); -static int ib_get_header_version(const union rdma_network_hdr *hdr) +int ib_get_rdma_header_version(const union rdma_network_hdr *hdr) { const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; struct iphdr ip4h_checked; @@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr) return 4; return 6; } +EXPORT_SYMBOL(ib_get_rdma_header_version); static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, u8 port_num, @@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device, if (rdma_protocol_ib(device, port_num)) return RDMA_NETWORK_IB; - grh_version = ib_get_header_version((union rdma_network_hdr *)grh); + grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh); if (grh_version == 4) return RDMA_NETWORK_IPV4; @@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num, &context, gid_index); } -static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr, - enum rdma_network_type net_type, - union ib_gid *sgid, union ib_gid *dgid) +int 
ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, + enum rdma_network_type net_type, + union ib_gid *sgid, union ib_gid *dgid) { struct sockaddr_in src_in; struct sockaddr_in dst_in; @@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr, return -EINVAL; } } +EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, const struct ib_wc *wc, const struct ib_grh *grh, @@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, net_type = ib_get_net_type_by_grh(device, port_num, grh); gid_type = ib_network_to_gid_type(net_type); } - ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, - &sgid, &dgid); + ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, + &sgid, &dgid); if (ret) return ret; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 1672907ff219..b8e90130db37 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -39,6 +39,8 @@ #include #include #include +#include +#include #include #include "mlx4_ib.h" @@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave, return -EINVAL; } +static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid, + union ib_gid *dgid) +{ + int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh); + enum rdma_network_type net_type; + + if (version == 4) + net_type = RDMA_NETWORK_IPV4; + else if (version == 6) + net_type = RDMA_NETWORK_IPV6; + else + return -EINVAL; + + return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type, + sgid, dgid); +} + int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type dest_qpt, struct ib_wc *wc, struct ib_grh *grh, struct ib_mad *mad) @@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, memset(&attr, 0, sizeof attr); attr.port_num = port; if (is_eth) { - memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16); + union ib_gid sgid; + + if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid)) + return -EINVAL; attr.ah_flags = IB_AH_GRH; } ah = ib_create_ah(tun_ctx->pd, &attr); @@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, is_eth = 1; if (is_eth) { + union ib_gid dgid; + union ib_gid sgid; + + if (get_gids_from_l3_hdr(grh, &sgid, &dgid)) + return -EINVAL; if (!(wc->wc_flags & IB_WC_GRH)) { mlx4_ib_warn(ibdev, "RoCE grh not present.\n"); return -EINVAL; @@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n"); return -EINVAL; } - err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave); + err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave); if (err && mlx4_is_mf_bonded(dev->dev)) { other_port = (port == 1) ? 
2 : 1; - err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave); + err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave); if (!err) { port = other_port; pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n", diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 5ad43a487745..467a4b476e29 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2580,6 +2580,24 @@ void ib_dealloc_pd(struct ib_pd *pd); */ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +/** + * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header + * work completion. + * @hdr: the L3 header to parse + * @net_type: type of header to parse + * @sgid: place to store source gid + * @dgid: place to store destination gid + */ +int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, + enum rdma_network_type net_type, + union ib_gid *sgid, union ib_gid *dgid); + +/** + * ib_get_rdma_header_version - Get the header version + * @hdr: the L3 header to parse + */ +int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); + /** * ib_init_ah_from_wc - Initializes address handle attributes from a * work completion. @@ -3357,4 +3375,5 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); + #endif /* IB_VERBS_H */ -- cgit v1.2.3 From 4d4099584c2c4dca6c04d78ded4cc81f50cc3634 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 19 Oct 2016 20:13:07 +0300 Subject: IB/hns: Move HNS RoCE user vendor structures This patch moves HNS vendor's specific structures to common UAPI folder which will be visible to all consumers. Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hns/hns_roce_cq.c | 2 +- drivers/infiniband/hw/hns/hns_roce_main.c | 2 +- drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +- drivers/infiniband/hw/hns/hns_roce_user.h | 53 ------------------------------ include/uapi/rdma/Kbuild | 1 + include/uapi/rdma/hns-abi.h | 54 +++++++++++++++++++++++++++++++ 6 files changed, 58 insertions(+), 56 deletions(-) delete mode 100644 drivers/infiniband/hw/hns/hns_roce_user.h create mode 100644 include/uapi/rdma/hns-abi.h (limited to 'include') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index ff9a6a3c826a..589496c8fb9e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -35,7 +35,7 @@ #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" -#include "hns_roce_user.h" +#include #include "hns_roce_common.h" static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index eddb053ff0ba..4953d9cb83a7 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -38,7 +38,7 @@ #include #include "hns_roce_common.h" #include "hns_roce_device.h" -#include "hns_roce_user.h" +#include #include "hns_roce_hem.h" /** diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 4775b5c725a9..f036f32f15d3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -37,7 +37,7 @@ #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" -#include "hns_roce_user.h" +#include #define 
SQP_NUM (2 * HNS_ROCE_MAX_PORTS) diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h deleted file mode 100644 index a28f761a9f65..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_user.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _HNS_ROCE_USER_H -#define _HNS_ROCE_USER_H - -struct hns_roce_ib_create_cq { - __u64 buf_addr; -}; - -struct hns_roce_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; - __u8 log_sq_bb_count; - __u8 log_sq_stride; - __u8 sq_no_prefetch; - __u8 reserved[5]; -}; - -struct hns_roce_ib_alloc_ucontext_resp { - __u32 qp_tab_size; -}; - -#endif /*_HNS_ROCE_USER_H */ diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild index f14ab7ff5fee..b54f10d3c1f7 100644 --- a/include/uapi/rdma/Kbuild +++ b/include/uapi/rdma/Kbuild @@ -14,3 +14,4 @@ header-y += mlx5-abi.h header-y += mthca-abi.h header-y += nes-abi.h header-y += ocrdma-abi.h +header-y += hns-abi.h diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h new file mode 100644 index 000000000000..5d7401963e35 --- /dev/null +++ b/include/uapi/rdma/hns-abi.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef HNS_ABI_USER_H +#define HNS_ABI_USER_H + +#include + +struct hns_roce_ib_create_cq { + __u64 buf_addr; +}; + +struct hns_roce_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u8 log_sq_bb_count; + __u8 log_sq_stride; + __u8 sq_no_prefetch; + __u8 reserved[5]; +}; + +struct hns_roce_ib_alloc_ucontext_resp { + __u32 qp_tab_size; +}; +#endif /* HNS_ABI_USER_H */ -- cgit v1.2.3 From b1226c7db1d997fa6955cd3b54ba333bd0d8a29c Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Sun, 2 Oct 2016 19:10:21 -0700 Subject: vmxnet3: Move PCI Id to pci_ids.h The VMXNet3 PCI Id will be shared with our paravirtual RDMA driver. Moved it to the shared location in pci_ids.h. Suggested-by: Leon Romanovsky Acked-by: Bjorn Helgaas Reviewed-by: Yuval Shaia Signed-off-by: Adit Ranadive Signed-off-by: Doug Ledford --- drivers/net/vmxnet3/vmxnet3_int.h | 3 +-- include/linux/pci_ids.h | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 7dc37a090549..59e077be8829 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -119,9 +119,8 @@ enum { }; /* - * PCI vendor and device IDs. + * Maximum devices supported. */ -#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 #define MAX_ETHERNET_CARDS 10 #define MAX_PCI_PASSTHRU_DEVICE 6 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c58752fe16c4..98bb455302cf 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2251,6 +2251,7 @@ #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 #define PCI_VENDOR_ID_VMWARE 0x15ad +#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0 #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 -- cgit v1.2.3 From e730139b3464cc740c33131c872f7d173744ef11 Mon Sep 17 00:00:00 2001 From: Jakub Pawlak Date: Wed, 7 Dec 2016 19:32:41 -0800 Subject: IB/hfi1: Disable header suppression for short packets For the received packets with payload less or equal 8DWS RxDmaDataFifoRdUncErr is not reported. There is set RHF.EccErr if the header is not suppressed. When such packet is detected on the send side the header suppression mechanism is disabled by clearing SH bit in the packet header. Reviewed-by: Mitko Haralanov Signed-off-by: Jakub Pawlak Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/user_sdma.c | 60 ++++++++++++++++++++++++---------- include/uapi/rdma/hfi/hfi1_user.h | 2 +- 2 files changed, 44 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a761f804111e..663980ef01a8 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -115,6 +115,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 #define KDETH_HCRC_LOWER_MASK 0xff #define AHG_KDETH_INTR_SHIFT 12 +#define AHG_KDETH_SH_SHIFT 13 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4) #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff) @@ -144,8 +145,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12 #define KDETH_OM_LARGE 64 #define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1)) -/* Last packet in the request */ -#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0) +/* Tx request flag bits */ +#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */ +#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */ /* SDMA request flag bits */ #define SDMA_REQ_FOR_THREAD 1 @@ -943,8 +945,13 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) tx->busycount = 0; INIT_LIST_HEAD(&tx->list); + /* + * For the last packet set the ACK request + * and disable header suppression. + */ if (req->seqnum == req->info.npkts - 1) - tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT; + tx->flags |= (TXREQ_FLAGS_REQ_ACK | + TXREQ_FLAGS_REQ_DISABLE_SH); /* * Calculate the payload size - this is min of the fragment @@ -963,11 +970,22 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) } datalen = compute_data_length(req, tx); + + /* + * Disable header suppression for the payload <= 8DWS. + * If there is an uncorrectable error in the receive + * data FIFO when the received payload size is less than + * or equal to 8DWS then the RxDmaDataFifoRdUncErr is + * not reported.There is set RHF.EccErr if the header + * is not suppressed. + */ if (!datalen) { SDMA_DBG(req, "Request has data but pkt len is 0"); ret = -EFAULT; goto free_tx; + } else if (datalen <= 32) { + tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH; } } @@ -990,6 +1008,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) LRH2PBC(lrhlen); tx->hdr.pbc[0] = cpu_to_le16(pbclen); } + ret = check_header_template(req, &tx->hdr, + lrhlen, datalen); + if (ret) + goto free_tx; ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY, sizeof(tx->hdr) + datalen, @@ -1351,7 +1373,7 @@ static int set_txreq_header(struct user_sdma_request *req, req->seqnum)); /* Set ACK request on last packet */ - if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) + if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK)) hdr->bth[2] |= cpu_to_be32(1UL << 31); /* Set the new offset */ @@ -1384,8 +1406,8 @@ static int set_txreq_header(struct user_sdma_request *req, /* Set KDETH.TID based on value for this TID */ KDETH_SET(hdr->kdeth.ver_tid_offset, TID, EXP_TID_GET(tidval, IDX)); - /* Clear KDETH.SH only on the last packet */ - if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) + /* Clear KDETH.SH when DISABLE_SH flag is set */ + if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0); /* * Set the KDETH.OFFSET and KDETH.OM based on size of @@ -1429,7 +1451,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, /* BTH.PSN and BTH.A */ val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) & (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 
0x7fffffff : 0xffffff); - if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) + if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK)) val32 |= 1UL << 31; AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16)); AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff)); @@ -1468,19 +1490,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, AHG_HEADER_SET(req->ahg, diff, 7, 0, 16, ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 | ((req->tidoffset / req->omfactor) & 0x7fff))); - /* KDETH.TIDCtrl, KDETH.TID */ + /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */ val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) | - (EXP_TID_GET(tidval, IDX) & 0x3ff)); - /* Clear KDETH.SH on last packet */ - if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) { - val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset, - INTR) << - AHG_KDETH_INTR_SHIFT); - val &= cpu_to_le16(~(1U << 13)); - AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); + (EXP_TID_GET(tidval, IDX) & 0x3ff)); + + if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) { + val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset, + INTR) << + AHG_KDETH_INTR_SHIFT)); } else { - AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val); + val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ? + cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) : + cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset, + INTR) << + AHG_KDETH_INTR_SHIFT)); } + + AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); } trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index d15e7289d835..587b7360e820 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -75,7 +75,7 @@ * may not be implemented; the user code must deal with this if it * cares, or it must abort after initialization reports the difference. */ -#define HFI1_USER_SWMINOR 2 +#define HFI1_USER_SWMINOR 3 /* * We will encode the major/minor inside a single 32bit version number. -- cgit v1.2.3 From f2dc9cdce83c4aac2f8b6c803b0327df1c7d44a6 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 7 Dec 2016 19:34:06 -0800 Subject: IB/rdmavt: Add a send completion helper This is for use by client drivers to drive send completions into a CQ. A new exported table allows for the mapping of ib_wr_opcode into a ib_wc_opcode. Reviewed-by: Ashutosh Dixit Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 17 +++++++++++++++++ include/rdma/rdmavt_qp.h | 40 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 6500c3b5a89c..9e14addd690c 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -76,6 +76,23 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = { }; EXPORT_SYMBOL(ib_rvt_state_ops); +/* + * Translate ib_wr_opcode into ib_wc_opcode. 
+ */ +const enum ib_wc_opcode ib_rvt_wc_opcode[] = { + [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, + [IB_WR_SEND] = IB_WC_SEND, + [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, + [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, + [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, + [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD, + [IB_WR_SEND_WITH_INV] = IB_WC_SEND, + [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV, + [IB_WR_REG_MR] = IB_WC_REG_MR +}; +EXPORT_SYMBOL(ib_rvt_wc_opcode); + static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, gfp_t gfp) diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 2c5183ef0243..d78e99cf6c11 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -51,6 +51,7 @@ #include #include #include +#include /* * Atomic bit definitions for r_aflags. */ @@ -527,7 +528,44 @@ static inline void rvt_qp_wqe_unreserve( } } -extern const int ib_rvt_state_ops[]; +extern const enum ib_wc_opcode ib_rvt_wc_opcode[]; + +/** + * rvt_qp_swqe_complete() - insert send completion + * @qp - the qp + * @wqe - the send wqe + * @status - completion status + * + * Insert a send completion into the completion + * queue if the qp indicates it should be done. + * + * See IBTA 10.7.3.1 for info on completion + * control. + */ +static inline void rvt_qp_swqe_complete( + struct rvt_qp *qp, + struct rvt_swqe *wqe, + enum ib_wc_status status) +{ + if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) + return; + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || + (wqe->wr.send_flags & IB_SEND_SIGNALED) || + status != IB_WC_SUCCESS) { + struct ib_wc wc; + + memset(&wc, 0, sizeof(wc)); + wc.wr_id = wqe->wr.wr_id; + wc.status = status; + wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode]; + wc.qp = &qp->ibqp; + wc.byte_len = wqe->length; + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, + status != IB_WC_SUCCESS); + } +} + +extern const int ib_rvt_state_ops[]; struct rvt_dev_info; int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); -- cgit v1.2.3 From f6475223b12b3a38298976506edb042e13158798 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 7 Dec 2016 19:34:25 -0800 Subject: IB/rdmavt: Add swqe mr deref helper Add a helper to release mr references held by an swqe. Reviewed-by: Brian Welty Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- include/rdma/rdmavt_qp.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include') diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index d78e99cf6c11..04facda681e0 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -485,6 +485,23 @@ static inline void rvt_put_qp(struct rvt_qp *qp) wake_up(&qp->wait); } +/** + * rvt_put_swqe - drop mr refs held by swqe + * @wqe - the send wqe + * + * This drops any mr references held by the swqe + */ +static inline void rvt_put_swqe(struct rvt_swqe *wqe) +{ + int i; + + for (i = 0; i < wqe->wr.num_sge; i++) { + struct rvt_sge *sge = &wqe->sg_list[i]; + + rvt_put_mr(sge->mr); + } +} + /** * rvt_qp_wqe_reserve - reserve operation * @qp - the rvt qp -- cgit v1.2.3 From 5dc806052a9fc8c44e111646f9a9f436877749d0 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 7 Dec 2016 19:34:37 -0800 Subject: IB/rdmavt, IB/hfi1, IB/qib: Add inlines for mtu division Add rvt_div_round_up_mtu() and rvt_div_mtu() routines to do the computation based on the pmtu and the log_pmtu. Change divides in qib, hfi1 to use the new inlines. 
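For illustration only (the payload length and MTU below are assumed values, not taken from the patch): with qp->pmtu == 4096 and qp->log_pmtu == 12, the new inlines behave as follows.

	/* Hypothetical caller sketch; 9000 is an arbitrary payload length. */
	u32 npkts = rvt_div_round_up_mtu(qp, 9000);	/* (9000 + 4095) >> 12 == 3 */
	u32 nfull = rvt_div_mtu(qp, 9000);		/* 9000 >> 12 == 2 */

Because the pmtu is always a power of two, the shift gives the same result as the open-coded division it replaces.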
Reviewed-by: Kaike Wan Signed-off-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/hw/hfi1/rc.c | 3 +-- drivers/infiniband/hw/qib/qib_rc.c | 3 +-- include/rdma/rdmavt_qp.h | 24 +++++++++++++++++++++++- 3 files changed, 25 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index ddc02237e5f6..9db260fe782a 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -2377,8 +2377,7 @@ send_last: * Update the next expected PSN. We add 1 later * below, so only add the remainder here. */ - if (len > pmtu) - qp->r_psn += (len - 1) / pmtu; + qp->r_psn += rvt_div_mtu(qp, len - 1); } else { e->rdma_sge.mr = NULL; e->rdma_sge.vaddr = NULL; diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e23ee6ce435a..031433cb7206 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -2079,8 +2079,7 @@ send_last: * Update the next expected PSN. We add 1 later * below, so only add the remainder here. */ - if (len > pmtu) - qp->r_psn += (len - 1) / pmtu; + qp->r_psn += rvt_div_mtu(qp, len - 1); } else { e->rdma_sge.mr = NULL; e->rdma_sge.vaddr = NULL; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 04facda681e0..f3dbd157ae5c 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -582,7 +582,29 @@ static inline void rvt_qp_swqe_complete( } } -extern const int ib_rvt_state_ops[]; +/** + * @qp - the qp pair + * @len - the length + * + * Perform a shift based mtu round up divide + */ +static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len) +{ + return (len + qp->pmtu - 1) >> qp->log_pmtu; +} + +/** + * @qp - the qp pair + * @len - the length + * + * Perform a shift based mtu divide + */ +static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len) +{ + return len >> qp->log_pmtu; +} + +extern const int ib_rvt_state_ops[]; struct rvt_dev_info; int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); -- cgit v1.2.3 From c226dc22ec4904340e3e14a536983cda3dbe7914 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 31 Oct 2016 12:15:20 +0200 Subject: net/mlx5: Report multi packet WQE capabilities Multi packet WQE enables sending multiple fix sized packets using a single WQE. The exposed field reports such HW support. Signed-off-by: Bodong Wang Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- include/linux/mlx5/mlx5_ifc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 2632cb2caf10..0779ad2e8f51 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -576,7 +576,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 self_lb_en_modifiable[0x1]; u8 reserved_at_9[0x2]; u8 max_lso_cap[0x5]; - u8 reserved_at_10[0x2]; + u8 multi_pkt_send_wqe[0x2]; u8 wqe_inline_mode[0x2]; u8 rss_ind_tbl_cap[0x4]; u8 reg_umr_sq[0x1]; -- cgit v1.2.3 From 191ded4a4d991acf17207e0b4370fef070bce3e9 Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Mon, 31 Oct 2016 12:15:21 +0200 Subject: IB/mlx5: Report mlx5 multi packet WQE caps during query The capabilities whether hardware support multi packet WQE or not is exposed to user space through query_device by uhw. 
Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 11 +++++++++++ include/uapi/rdma/mlx5-abi.h | 2 ++ 2 files changed, 13 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 46facd30e672..a16207c33333 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -672,6 +672,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); } + if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, + uhw->outlen)) { + resp.mlx5_ib_support_multi_pkt_send_wqes = + MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); + resp.response_length += + sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); + } + + if (field_avail(typeof(resp), reserved, uhw->outlen)) + resp.response_length += sizeof(resp.reserved); + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index f5d0f4e83b59..93d6b9fe8e78 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -129,6 +129,8 @@ struct mlx5_ib_query_device_resp { __u32 response_length; struct mlx5_ib_tso_caps tso_caps; struct mlx5_ib_rss_caps rss_caps; + __u32 mlx5_ib_support_multi_pkt_send_wqes; + __u32 reserved; }; struct mlx5_ib_create_cq { -- cgit v1.2.3 From 7e43a2a5bae39fedaa7cce21d637e0c8d96d8e54 Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Mon, 31 Oct 2016 12:16:44 +0200 Subject: IB/mlx5: Report mlx5 CQE compression caps during query The capabilities include: - Max number of compressed and aggregated CQEs in a single session, while zero means unsupported. - For Responder, there are two formats of mini CQE: mini CQE with Rx hash and mini CQE with checksum. They're mutual exclusive. Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 10 ++++++++++ include/uapi/rdma/mlx5-abi.h | 12 ++++++++++++ 2 files changed, 22 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a16207c33333..2687a93c4af2 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -683,6 +683,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (field_avail(typeof(resp), reserved, uhw->outlen)) resp.response_length += sizeof(resp.reserved); + if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { + resp.cqe_comp_caps.max_num = + MLX5_CAP_GEN(dev->mdev, cqe_compression) ? 
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0; + resp.cqe_comp_caps.supported_format = + MLX5_IB_CQE_RES_FORMAT_HASH | + MLX5_IB_CQE_RES_FORMAT_CSUM; + resp.response_length += sizeof(resp.cqe_comp_caps); + } + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 93d6b9fe8e78..6649d13a2dbb 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -124,11 +124,23 @@ struct mlx5_ib_rss_caps { __u8 reserved[7]; }; +enum mlx5_ib_cqe_comp_res_format { + MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0, + MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1, + MLX5_IB_CQE_RES_RESERVED = 1 << 2, +}; + +struct mlx5_ib_cqe_comp_caps { + __u32 max_num; + __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ +}; + struct mlx5_ib_query_device_resp { __u32 comp_mask; __u32 response_length; struct mlx5_ib_tso_caps tso_caps; struct mlx5_ib_rss_caps rss_caps; + struct mlx5_ib_cqe_comp_caps cqe_comp_caps; __u32 mlx5_ib_support_multi_pkt_send_wqes; __u32 reserved; }; -- cgit v1.2.3 From 1cbe6fc86ccfe05a910be4883da7c7bd28c190fe Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Mon, 31 Oct 2016 12:16:45 +0200 Subject: IB/mlx5: Add support for CQE compressing CQE compressing reduces PCI overhead by coalescing and compressing multiple CQEs into a single merged CQE. Successful compressing improves message rate especially for small packet traffic. CQE compressing is supported for all 64B CQE formats (with certain limitations) generated by RQ/Responder or by SQ/Requestor. Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/cq.c | 30 +++++++++++++++++++++++++++++- include/uapi/rdma/mlx5-abi.h | 4 +++- 2 files changed, 32 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 9e0598b5615f..d72a4367c891 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, int entries, u32 **cqb, int *cqe_size, int *index, int *inlen) { - struct mlx5_ib_create_cq ucmd; + struct mlx5_ib_create_cq ucmd = {}; size_t ucmdlen; int page_shift; __be64 *pas; @@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, *index = to_mucontext(context)->uuari.uars[0].index; + if (ucmd.cqe_comp_en == 1) { + if (unlikely((*cqe_size != 64) || + !MLX5_CAP_GEN(dev->mdev, cqe_compression))) { + err = -EOPNOTSUPP; + mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n", + *cqe_size); + goto err_cqb; + } + + if (unlikely(!ucmd.cqe_comp_res_format || + !(ucmd.cqe_comp_res_format < + MLX5_IB_CQE_RES_RESERVED) || + (ucmd.cqe_comp_res_format & + (ucmd.cqe_comp_res_format - 1)))) { + err = -EOPNOTSUPP; + mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n", + ucmd.cqe_comp_res_format); + goto err_cqb; + } + + MLX5_SET(cqc, cqc, cqe_comp_en, 1); + MLX5_SET(cqc, cqc, mini_cqe_res_format, + ilog2(ucmd.cqe_comp_res_format)); + } + return 0; +err_cqb: + kfree(cqb); + err_db: mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 6649d13a2dbb..b0a41c8dc1fb 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -149,7 +149,9 @@ struct mlx5_ib_create_cq { __u64 buf_addr; __u64 db_addr; __u32 
cqe_size; - __u32 reserved; /* explicit padding (optional on i386) */ + __u8 cqe_comp_en; + __u8 cqe_comp_res_format; + __u16 reserved; /* explicit padding (optional on i386) */ }; struct mlx5_ib_create_cq_resp { -- cgit v1.2.3 From 0dbf3332b7b683db33a385a3ce9baab157e3ff9a Mon Sep 17 00:00:00 2001 From: Moses Reuben Date: Mon, 14 Nov 2016 19:04:47 +0200 Subject: IB/core: Add flow spec tunneling support In order to support tunneling, that can be used by the QP, both struct ib_flow_spec_tunnel and struct ib_flow_tunnel_filter can be used to more IP or UDP based tunneling protocols (e.g NVGRE, GRE, etc). IB_FLOW_SPEC_VXLAN_TUNNEL type flow specification is added to use this functionality and match specific Vxlan packets. In similar to IPv6, we check overflow of the vni value by comparing with the maximum size. Signed-off-by: Moses Reuben Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs_cmd.c | 15 +++++++++++++++ include/rdma/ib_verbs.h | 19 ++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2690c9263ee4..deb06fa3548b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3175,6 +3175,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz); memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz); break; + case IB_FLOW_SPEC_VXLAN_TUNNEL: + ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz); + actual_filter_sz = spec_filter_size(kern_spec_mask, + kern_filter_sz, + ib_filter_sz); + if (actual_filter_sz <= 0) + return -EINVAL; + ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel); + memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz); + memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz); + + if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) || + (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24)) + return -EINVAL; + break; default: return -EINVAL; } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 467a4b476e29..b8213818de89 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1599,7 +1599,8 @@ enum ib_flow_spec_type { IB_FLOW_SPEC_IPV6 = 0x31, /* L4 headers*/ IB_FLOW_SPEC_TCP = 0x40, - IB_FLOW_SPEC_UDP = 0x41 + IB_FLOW_SPEC_UDP = 0x41, + IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 4 @@ -1707,6 +1708,21 @@ struct ib_flow_spec_tcp_udp { struct ib_flow_tcp_udp_filter mask; }; +struct ib_flow_tunnel_filter { + __be32 tunnel_id; + u8 real_sz[0]; +}; + +/* ib_flow_spec_tunnel describes the Vxlan tunnel + * the tunnel_id from val has the vni value + */ +struct ib_flow_spec_tunnel { + enum ib_flow_spec_type type; + u16 size; + struct ib_flow_tunnel_filter val; + struct ib_flow_tunnel_filter mask; +}; + union ib_flow_spec { struct { enum ib_flow_spec_type type; @@ -1717,6 +1733,7 @@ union ib_flow_spec { struct ib_flow_spec_ipv4 ipv4; struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; + struct ib_flow_spec_tunnel tunnel; }; struct ib_flow_attr { -- cgit v1.2.3 From 76bd23b34204cad78f48aec4cef38869a66aa594 Mon Sep 17 00:00:00 2001 From: Moses Reuben Date: Mon, 14 Nov 2016 19:04:48 +0200 Subject: IB/core: Align structure ib_flow_spec_type Aligned the structure ib_flow_spec_type indentation, after adding a new 
definition. Signed-off-by: Moses Reuben Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b8213818de89..4bc748fddcac 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1592,14 +1592,14 @@ enum ib_flow_attr_type { /* Supported steering header types */ enum ib_flow_spec_type { /* L2 headers*/ - IB_FLOW_SPEC_ETH = 0x20, - IB_FLOW_SPEC_IB = 0x22, + IB_FLOW_SPEC_ETH = 0x20, + IB_FLOW_SPEC_IB = 0x22, /* L3 header*/ - IB_FLOW_SPEC_IPV4 = 0x30, - IB_FLOW_SPEC_IPV6 = 0x31, + IB_FLOW_SPEC_IPV4 = 0x30, + IB_FLOW_SPEC_IPV6 = 0x31, /* L4 headers*/ - IB_FLOW_SPEC_TCP = 0x40, - IB_FLOW_SPEC_UDP = 0x41, + IB_FLOW_SPEC_TCP = 0x40, + IB_FLOW_SPEC_UDP = 0x41, IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 -- cgit v1.2.3 From a0cb4c759af12943806564294fa53ab08cb7cf93 Mon Sep 17 00:00:00 2001 From: Moses Reuben Date: Mon, 14 Nov 2016 19:04:49 +0200 Subject: IB/uverbs: Add support for Vxlan protocol Add ib_uverbs_flow_spec_tunnel to define the rule to match Vxlan, the type, size, reserved fields are identical to rest of the protocols, and are used to identify the spec. The tunnel id is the vni value of the Vxlan protocol, and it is used as part of the steering rule, it is limited by the mask. The steering rule configured on the hardware does a match according to vni and other protocols. In the same way as rest of the protocols that we match, the uniq field's of each protocol are represented on the val and the mask. Signed-off-by: Moses Reuben Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- include/uapi/rdma/ib_user_verbs.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include') diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 25225ebbc7d5..90ba5e88ec00 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -908,6 +908,23 @@ struct ib_uverbs_flow_spec_ipv6 { struct ib_uverbs_flow_ipv6_filter mask; }; +struct ib_uverbs_flow_tunnel_filter { + __be32 tunnel_id; +}; + +struct ib_uverbs_flow_spec_tunnel { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_tunnel_filter val; + struct ib_uverbs_flow_tunnel_filter mask; +}; + struct ib_uverbs_flow_attr { __u32 type; __u16 size; -- cgit v1.2.3 From fbf46860b19ddb485f00bef1ad1a43aabc9f71ad Mon Sep 17 00:00:00 2001 From: Moses Reuben Date: Mon, 14 Nov 2016 19:04:51 +0200 Subject: IB/core: Introduce inner flow steering For a tunneled packet which contains external and internal headers, we refer to the external headers as "outer fields" and the internal headers as "inner fields". Example of a tunneled packet: { L2 | L3 | L4 | tunnel header | L2 | L3 | l4 | data } | | | | | | | { outer fields }{ inner fields } This patch introduces a new flag for flow steering rules - IB_FLOW_SPEC_INNER - which specifies that the rule applies to the inner fields, rather than to the outer fields of the packet. 
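A minimal sketch of how a consumer could request inner matching with the new flag (the spec contents are assumed for the example and are not part of the patch):

	/* Hypothetical: match TCP carried in the inner headers of a tunneled packet. */
	union ib_flow_spec spec = {
		.tcp_udp = {
			.type = IB_FLOW_SPEC_TCP | IB_FLOW_SPEC_INNER,
			.size = sizeof(struct ib_flow_spec_tcp_udp),
		},
	};

The outer headers are still matched by ordinary specs without the flag, for example IB_FLOW_SPEC_VXLAN_TUNNEL for the tunnel itself.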
Signed-off-by: Moses Reuben Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs_cmd.c | 4 +++- include/rdma/ib_verbs.h | 17 +++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index deb06fa3548b..d84dcde7a8bb 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3124,8 +3124,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec, kern_spec_val = (void *)kern_spec + sizeof(struct ib_uverbs_flow_spec_hdr); kern_spec_mask = kern_spec_val + kern_filter_sz; + if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL)) + return -EINVAL; - switch (ib_spec->type) { + switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) { case IB_FLOW_SPEC_ETH: ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz); actual_filter_sz = spec_filter_size(kern_spec_mask, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 4bc748fddcac..6d0dd6525e14 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1601,9 +1601,10 @@ enum ib_flow_spec_type { IB_FLOW_SPEC_TCP = 0x40, IB_FLOW_SPEC_UDP = 0x41, IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, + IB_FLOW_SPEC_INNER = 0x100, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 -#define IB_FLOW_SPEC_SUPPORT_LAYERS 4 +#define IB_FLOW_SPEC_SUPPORT_LAYERS 8 /* Flow steering rule priority is set according to it's domain. * Lower domain value means higher priority. @@ -1631,7 +1632,7 @@ struct ib_flow_eth_filter { }; struct ib_flow_spec_eth { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_eth_filter val; struct ib_flow_eth_filter mask; @@ -1645,7 +1646,7 @@ struct ib_flow_ib_filter { }; struct ib_flow_spec_ib { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_ib_filter val; struct ib_flow_ib_filter mask; @@ -1670,7 +1671,7 @@ struct ib_flow_ipv4_filter { }; struct ib_flow_spec_ipv4 { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_ipv4_filter val; struct ib_flow_ipv4_filter mask; @@ -1688,7 +1689,7 @@ struct ib_flow_ipv6_filter { }; struct ib_flow_spec_ipv6 { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_ipv6_filter val; struct ib_flow_ipv6_filter mask; @@ -1702,7 +1703,7 @@ struct ib_flow_tcp_udp_filter { }; struct ib_flow_spec_tcp_udp { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_tcp_udp_filter val; struct ib_flow_tcp_udp_filter mask; @@ -1717,7 +1718,7 @@ struct ib_flow_tunnel_filter { * the tunnel_id from val has the vni value */ struct ib_flow_spec_tunnel { - enum ib_flow_spec_type type; + u32 type; u16 size; struct ib_flow_tunnel_filter val; struct ib_flow_tunnel_filter mask; @@ -1725,7 +1726,7 @@ struct ib_flow_spec_tunnel { union ib_flow_spec { struct { - enum ib_flow_spec_type type; + u32 type; u16 size; }; struct ib_flow_spec_eth eth; -- cgit v1.2.3 From c90ea9d8e51196d9c528e57d9ab09ee7d41f0ba0 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Wed, 23 Nov 2016 08:23:22 +0200 Subject: IB/core: Change ib_resolve_eth_dmac to use it in create AH The function ib_resolve_eth_dmac() requires struct qp_attr * and qp_attr_mask as parameters while the function might be useful to resolve dmac for address handles. This patch changes the signature of the function so it can be used in the flow of creating an address handle. 
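With the reworked signature the helper can be called directly from an address handle path; a minimal hypothetical caller (not part of the patch) would look like:

	/* Hypothetical AH-preparation helper using the new signature. */
	static int example_prepare_ah(struct ib_device *device,
				      struct ib_ah_attr *ah_attr)
	{
		/* No QP, ib_qp_attr or attr_mask is needed any more. */
		return ib_resolve_eth_dmac(device, ah_attr);
	}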
Signed-off-by: Moni Shoua Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/core_priv.h | 3 -- drivers/infiniband/core/uverbs_cmd.c | 8 ++-- drivers/infiniband/core/verbs.c | 84 ++++++++++++++++++------------------ include/rdma/ib_verbs.h | 2 + 4 files changed, 49 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 19d499dcab76..1acc95b3aaa3 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device); void ib_cache_setup(void); void ib_cache_cleanup(void); -int ib_resolve_eth_dmac(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, int *qp_attr_mask); - typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port, struct net_device *idev, void *cookie); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d84dcde7a8bb..77bb15f66414 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2402,9 +2402,11 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; if (qp->real_qp == qp) { - ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask); - if (ret) - goto release_qp; + if (cmd.attr_mask & IB_QP_AV) { + ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); + if (ret) + goto release_qp; + } ret = qp->device->modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); } else { diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 98e10c5f2a96..cacee169cd55 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1198,66 +1198,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, } EXPORT_SYMBOL(ib_modify_qp_is_ok); -int ib_resolve_eth_dmac(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, int *qp_attr_mask) +int ib_resolve_eth_dmac(struct ib_device *device, + struct ib_ah_attr *ah_attr) { int ret = 0; - if (*qp_attr_mask & IB_QP_AV) { - if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) || - qp_attr->ah_attr.port_num > rdma_end_port(qp->device)) - return -EINVAL; - - if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num)) - return 0; - - if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) { - rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, - qp_attr->ah_attr.dmac); - } else { - union ib_gid sgid; - struct ib_gid_attr sgid_attr; - int ifindex; - int hop_limit; - - ret = ib_query_gid(qp->device, - qp_attr->ah_attr.port_num, - qp_attr->ah_attr.grh.sgid_index, - &sgid, &sgid_attr); - - if (ret || !sgid_attr.ndev) { - if (!ret) - ret = -ENXIO; - goto out; - } + if (ah_attr->port_num < rdma_start_port(device) || + ah_attr->port_num > rdma_end_port(device)) + return -EINVAL; - ifindex = sgid_attr.ndev->ifindex; + if (!rdma_cap_eth_ah(device, ah_attr->port_num)) + return 0; - ret = rdma_addr_find_l2_eth_by_grh(&sgid, - &qp_attr->ah_attr.grh.dgid, - qp_attr->ah_attr.dmac, - NULL, &ifindex, &hop_limit); + if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { + rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw, + ah_attr->dmac); + } else { + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + int ifindex; + int hop_limit; + + ret = ib_query_gid(device, + ah_attr->port_num, + ah_attr->grh.sgid_index, + &sgid, &sgid_attr); 
+ + if (ret || !sgid_attr.ndev) { + if (!ret) + ret = -ENXIO; + goto out; + } - dev_put(sgid_attr.ndev); + ifindex = sgid_attr.ndev->ifindex; - qp_attr->ah_attr.grh.hop_limit = hop_limit; - } + ret = rdma_addr_find_l2_eth_by_grh(&sgid, + &ah_attr->grh.dgid, + ah_attr->dmac, + NULL, &ifindex, &hop_limit); + + dev_put(sgid_attr.ndev); + + ah_attr->grh.hop_limit = hop_limit; } out: return ret; } EXPORT_SYMBOL(ib_resolve_eth_dmac); - int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { - int ret; - ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask); - if (ret) - return ret; + if (qp_attr_mask & IB_QP_AV) { + int ret; + + ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr); + if (ret) + return ret; + } return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6d0dd6525e14..0c6f973e407c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3394,4 +3394,6 @@ void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); +int ib_resolve_eth_dmac(struct ib_device *device, + struct ib_ah_attr *ah_attr); #endif /* IB_VERBS_H */ -- cgit v1.2.3 From 6ad279c5a2e55bf2bd100b4222090d4717de88d5 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Wed, 23 Nov 2016 08:23:23 +0200 Subject: IB/mlx5: Report that device has udata response in create_ah To make mlx5 user driver aware of whether kernel driver returns dmac in user data response add a new flag that will be returned back to user-space through alloc_ucontext. Signed-off-by: Moni Shoua Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 3 ++- include/uapi/rdma/mlx5-abi.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index f7a299431e6c..f2ef2a170d9f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1117,7 +1117,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, resp.response_length += sizeof(resp.cqe_version); if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { - resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE; + resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | + MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; resp.response_length += sizeof(resp.cmds_supp_uhw); } diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index b0a41c8dc1fb..ac28729a1245 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask { enum mlx5_user_cmds_supp_uhw { MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, + MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, }; struct mlx5_ib_alloc_ucontext_resp { -- cgit v1.2.3 From 477864c8fcd953e5a988073ca5be18bb7fd93410 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Wed, 23 Nov 2016 08:23:24 +0200 Subject: IB/core: Let create_ah return extended response to user Add struct ib_udata to the signature of create_ah callback that is implemented by IB device drivers. This allows HW drivers to return extra data to the userspace library. This patch prepares the ground for mlx5 driver to resolve destination mac address for a given GID and return it to userspace. This patch was previously submitted by Knut Omang as a part of the patch set to support Oracle's Infiniband HCA (SIF). 
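For a provider the shape of the change is small; a hypothetical driver that ignores the extra data simply grows a third parameter, mirroring what this patch does for cxgb3, cxgb4 and i40iw:

	/* Hypothetical provider callback with the extended signature; a driver
	 * returning data to userspace would fill its response through udata.
	 */
	static struct ib_ah *example_create_ah(struct ib_pd *pd,
					       struct ib_ah_attr *ah_attr,
					       struct ib_udata *udata)
	{
		return ERR_PTR(-ENOSYS);
	}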
Signed-off-by: Knut Omang Signed-off-by: Moni Shoua Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs_cmd.c | 11 ++++++++++- drivers/infiniband/core/verbs.c | 2 +- drivers/infiniband/hw/cxgb3/iwch_provider.c | 3 ++- drivers/infiniband/hw/cxgb4/provider.c | 4 +++- drivers/infiniband/hw/hns/hns_roce_ah.c | 3 ++- drivers/infiniband/hw/hns/hns_roce_device.h | 3 ++- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 4 +++- drivers/infiniband/hw/mlx4/ah.c | 4 +++- drivers/infiniband/hw/mlx4/mlx4_ib.h | 3 ++- drivers/infiniband/hw/mlx5/ah.c | 4 +++- drivers/infiniband/hw/mlx5/mlx5_ib.h | 3 ++- drivers/infiniband/hw/mthca/mthca_provider.c | 4 +++- drivers/infiniband/hw/nes/nes_verbs.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 3 ++- drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 4 +++- drivers/infiniband/hw/qedr/verbs.c | 3 ++- drivers/infiniband/hw/qedr/verbs.h | 3 ++- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 4 +++- drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 4 +++- drivers/infiniband/sw/rxe/rxe_verbs.c | 4 +++- include/rdma/ib_verbs.h | 3 ++- 21 files changed, 58 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 77bb15f66414..cdb935ddab69 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2877,6 +2877,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, struct ib_ah *ah; struct ib_ah_attr attr; int ret; + struct ib_udata udata; if (out_len < sizeof resp) return -ENOSPC; @@ -2884,6 +2885,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + INIT_UDATA(&udata, buf + sizeof(cmd), + (unsigned long)cmd.response + sizeof(resp), + in_len - sizeof(cmd), out_len - sizeof(resp)); + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) return -ENOMEM; @@ -2910,12 +2915,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, memset(&attr.dmac, 0, sizeof(attr.dmac)); memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16); - ah = ib_create_ah(pd, &attr); + ah = pd->device->create_ah(pd, &attr, &udata); + if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto err_put; } + ah->device = pd->device; + ah->pd = pd; + atomic_inc(&pd->usecnt); ah->uobject = uobj; uobj->object = ah; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index cacee169cd55..c976f29f7ad2 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { struct ib_ah *ah; - ah = pd->device->create_ah(pd, ah_attr); + ah = pd->device->create_ah(pd, ah_attr, NULL); if (!IS_ERR(ah)) { ah->device = pd->device; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index cba57bb53dba..9d5fe1853da4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -62,7 +62,8 @@ #include "common.h" static struct ib_ah *iwch_ah_create(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) + struct ib_ah_attr *ah_attr, + struct ib_udata *udata) { return ERR_PTR(-ENOSYS); } diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 645e606a17c5..49b51b7e0fd7 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -59,7 +59,9 @@ module_param(fastreg_support, int, 
0644); MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)"); static struct ib_ah *c4iw_ah_create(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) + struct ib_ah_attr *ah_attr, + struct ib_udata *udata) + { return ERR_PTR(-ENOSYS); } diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 24f79ee39fdf..0ac294db3b29 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -39,7 +39,8 @@ #define HNS_ROCE_VLAN_SL_BIT_MASK 7 #define HNS_ROCE_VLAN_SL_SHIFT 13 -struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr) +struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device); struct device *dev = &hr_dev->pdev->dev; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 341731553a60..470615f7b517 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -667,7 +667,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt, void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap, unsigned long obj, int cnt); -struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata); int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); int hns_roce_destroy_ah(struct ib_ah *ah); diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 6329c971c22f..f03fc1527d70 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2562,7 +2562,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev, * @ah_attr: address handle attributes */ static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd, - struct ib_ah_attr *attr) + struct ib_ah_attr *attr, + struct ib_udata *udata) + { return ERR_PTR(-ENOSYS); } diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 6be7dc320ff7..20c6d17ac8b8 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -126,7 +126,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr return &ah->ibah; } -struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata) + { struct mlx4_ib_ah *ah; struct ib_ah *ret; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 35141f451e5c..7f3d976d81ed 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); -struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata); int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); int mlx4_ib_destroy_ah(struct ib_ah *ah); diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 745efa4cfc71..ecac9ea2c85f 100644 --- 
a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev, return &ah->ibah; } -struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata) + { struct mlx5_ib_ah *ah; struct mlx5_ib_dev *dev = to_mdev(pd->device); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 95937e7d2584..ff05afa864ee 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -739,7 +739,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const void *in_mad, void *response_mad); -struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); +struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata); int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr); int mlx5_ib_destroy_ah(struct ib_ah *ah); struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 358930a41e36..d31708742ba5 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd) } static struct ib_ah *mthca_ah_create(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) + struct ib_ah_attr *ah_attr, + struct ib_udata *udata) + { int err; struct mthca_ah *ah; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index bd69125731c1..0bb857c7e439 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd) /** * nes_create_ah */ -static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata) { return ERR_PTR(-ENOSYS); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 797362a297b2..14d33b0f3950 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, return status; } -struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata) { u32 *ahid_addr; int status; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 3856dd4c7e3d..0704a24b17c8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -50,7 +50,9 @@ enum { OCRDMA_AH_L3_TYPE_MASK = 0x03, OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */ }; -struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); + +struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *, + struct ib_udata *); int ocrdma_destroy_ah(struct ib_ah *); int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); diff --git a/drivers/infiniband/hw/qedr/verbs.c 
b/drivers/infiniband/hw/qedr/verbs.c index a61514296767..ccff6c6e3f33 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2094,7 +2094,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp) return rc; } -struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata) { struct qedr_ah *ah; diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index a9b5e67bb81e..070677ca4d19 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *); int qedr_destroy_qp(struct ib_qp *ibqp); -struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr); +struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata); int qedr_destroy_ah(struct ib_ah *ibah); int qedr_dereg_mr(struct ib_mr *); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index a5bfbba6bbac..fd2a50eb4c91 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context, /* In ib callbacks section - Start of stub funcs */ struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) + struct ib_ah_attr *ah_attr, + struct ib_udata *udata) + { usnic_dbg("\n"); return ERR_PTR(-EPERM); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0d9d2e6a14d5..0ed8e072329e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); int usnic_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, - struct ib_ah_attr *ah_attr); + struct ib_ah_attr *ah_attr, + struct ib_udata *udata); + int usnic_ib_destroy_ah(struct ib_ah *ah); int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 19841c863daf..187d85ccfe58 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr, return err; } -static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) +static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr, + struct ib_udata *udata) + { int err; struct rxe_dev *rxe = to_rdev(ibpd->device); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0c6f973e407c..73417a22ee4d 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1951,7 +1951,8 @@ struct ib_device { struct ib_udata *udata); int (*dealloc_pd)(struct ib_pd *pd); struct ib_ah * (*create_ah)(struct ib_pd *pd, - struct ib_ah_attr *ah_attr); + struct ib_ah_attr *ah_attr, + struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr); int (*query_ah)(struct ib_ah *ah, -- cgit v1.2.3 From 5097e71f3edafad3e7d8d4f9c4a137d9aad0fae2 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Wed, 23 Nov 2016 08:23:25 +0200 Subject: IB/mlx5: Use kernel driver to help userspace 
create ah Resolving a MAC address for a given IP address in userspace is inefficient. This patch lets mlx5 user driver using the kernel driver to resolve the mac and get the answer in the private section of the response. Signed-off-by: Moni Shoua Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/ah.c | 21 +++++++++++++++++++++ include/uapi/rdma/mlx5-abi.h | 6 ++++++ 2 files changed, 27 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index ecac9ea2c85f..d090e96f6f01 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -77,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH)) return ERR_PTR(-EINVAL); + if (ll == IB_LINK_LAYER_ETHERNET && udata) { + int err; + struct mlx5_ib_create_ah_resp resp = {}; + u32 min_resp_len = offsetof(typeof(resp), dmac) + + sizeof(resp.dmac); + + if (udata->outlen < min_resp_len) + return ERR_PTR(-EINVAL); + + resp.response_length = min_resp_len; + + err = ib_resolve_eth_dmac(pd->device, ah_attr); + if (err) + return ERR_PTR(err); + + memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN); + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) + return ERR_PTR(err); + } + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index ac28729a1245..3ebf3db24c34 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -249,6 +249,12 @@ struct mlx5_ib_create_wq { __u32 reserved; }; +struct mlx5_ib_create_ah_resp { + __u32 response_length; + __u8 dmac[ETH_ALEN]; + __u8 reserved[6]; +}; + struct mlx5_ib_create_wq_resp { __u32 response_length; __u32 reserved; -- cgit v1.2.3 From d949167d68b304c0a00331cf33ef49a29b65d85f Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Thu, 1 Dec 2016 13:43:13 +0200 Subject: IB/mlx5: Report mlx5 packet pacing capabilities when querying device Enable mlx5 based hardware to report packet pacing capabilities from kernel to user space. Packet pacing allows to limit the rate to any number between the maximum and minimum, based on user settings. The capabilities are exposed to user space through query_device by uhw. The following capabilities are reported: 1. The maximum and minimum rate limit in kbps supported by packet pacing. 2. Bitmap showing which QP types are supported by packet pacing operation. 
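For orientation only, a hedged sketch of how a consumer of the uhw response might interpret these capabilities; the struct and field names come from the ABI added in this patch, while the helper itself is hypothetical:

/*
 * Sketch only: check the reported packet pacing range (kbps) and
 * whether RAW_PACKET QPs support it, then clamp a requested rate.
 */
static u32 example_clamp_pacing_rate(const struct mlx5_ib_query_device_resp *resp,
				     u32 requested_kbps)
{
	const struct mlx5_packet_pacing_caps *caps = &resp->packet_pacing_caps;

	if (!(caps->supported_qpts & (1 << IB_QPT_RAW_PACKET)))
		return 0;	/* pacing not available for this QP type */

	if (requested_kbps < caps->qp_rate_limit_min)
		return caps->qp_rate_limit_min;
	if (requested_kbps > caps->qp_rate_limit_max)
		return caps->qp_rate_limit_max;
	return requested_kbps;
}
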
Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 13 +++++++++++++ include/uapi/rdma/mlx5-abi.h | 13 +++++++++++++ 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8b013f8b832a..6c194000903d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -693,6 +693,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, resp.response_length += sizeof(resp.cqe_comp_caps); } + if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) { + if (MLX5_CAP_QOS(mdev, packet_pacing) && + MLX5_CAP_GEN(mdev, qos)) { + resp.packet_pacing_caps.qp_rate_limit_max = + MLX5_CAP_QOS(mdev, packet_pacing_max_rate); + resp.packet_pacing_caps.qp_rate_limit_min = + MLX5_CAP_QOS(mdev, packet_pacing_min_rate); + resp.packet_pacing_caps.supported_qpts |= + 1 << IB_QPT_RAW_PACKET; + } + resp.response_length += sizeof(resp.packet_pacing_caps); + } + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 3ebf3db24c34..fae6cdaeb56d 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -136,12 +136,25 @@ struct mlx5_ib_cqe_comp_caps { __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ }; +struct mlx5_packet_pacing_caps { + __u32 qp_rate_limit_min; + __u32 qp_rate_limit_max; /* In kpbs */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u32 reserved; +}; + struct mlx5_ib_query_device_resp { __u32 comp_mask; __u32 response_length; struct mlx5_ib_tso_caps tso_caps; struct mlx5_ib_rss_caps rss_caps; struct mlx5_ib_cqe_comp_caps cqe_comp_caps; + struct mlx5_packet_pacing_caps packet_pacing_caps; __u32 mlx5_ib_support_multi_pkt_send_wqes; __u32 reserved; }; -- cgit v1.2.3 From 528e5a1bd3f0e9b760cb3a1062fce7513712a15d Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Thu, 1 Dec 2016 13:43:14 +0200 Subject: IB/core: Support rate limit for packet pacing Add new member rate_limit to ib_qp_attr which holds the packet pacing rate in kbps, 0 means unlimited. IB_QP_RATE_LIMIT is added to ib_attr_mask and could be used by RAW QPs when changing QP state from RTR to RTS, RTS to RTS. 
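As a hedged usage sketch (ULP-side consumer code, not part of this patch), a RAW_PACKET QP already in RTS could have a pacing rate applied roughly like this:

/* Sketch only: apply a pacing rate, in kbps, via an RTS-to-RTS transition. */
static int example_set_qp_rate_limit(struct ib_qp *qp, u32 rate_kbps)
{
	struct ib_qp_attr attr = {};

	attr.qp_state = IB_QPS_RTS;
	attr.rate_limit = rate_kbps;	/* 0 means unlimited */

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_RATE_LIMIT);
}
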
Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/verbs.c | 2 ++ include/rdma/ib_verbs.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index c976f29f7ad2..71580cc28c9e 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1016,6 +1016,7 @@ static const struct { IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, } } }, @@ -1049,6 +1050,7 @@ static const struct { IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY), + [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT, } }, [IB_QPS_SQD] = { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 73417a22ee4d..8029d2a51f14 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask { IB_QP_RESERVED2 = (1<<22), IB_QP_RESERVED3 = (1<<23), IB_QP_RESERVED4 = (1<<24), + IB_QP_RATE_LIMIT = (1<<25), }; enum ib_qp_state { @@ -1151,6 +1152,7 @@ struct ib_qp_attr { u8 rnr_retry; u8 alt_port_num; u8 alt_timeout; + u32 rate_limit; }; enum ib_wr_opcode { -- cgit v1.2.3 From 189aba99e70030cfb56bd8f199bc5b077a1bc6ff Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Thu, 1 Dec 2016 13:43:15 +0200 Subject: IB/uverbs: Extend modify_qp and support packet pacing An new uverbs command ib_uverbs_ex_modify_qp is added to support more QP attributes. User driver should choose to call the legacy/extended API based on input mask. IB_USER_LAST_QP_ATTR_MASK is added to indicated the maximum bit position which supports legacy ib_uverbs_modify_qp. IB_USER_LEGACY_LAST_QP_ATTR_MASK indicates the maximum bit position which supports ib_uverbs_ex_modify_qp, the value of this mask should be updated if new mask is added later. Along with this change, rate_limit is supported by the extended command, user driver could use it to control packet packing. 
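To make the mask split concrete, here is a hedged sketch of the validation idea (the actual checks appear in the diff below); the helper names are hypothetical:

/*
 * Sketch only: the legacy command accepts attribute bits up to
 * IB_USER_LEGACY_LAST_QP_ATTR_MASK; the extended command accepts
 * bits up to IB_USER_LAST_QP_ATTR_MASK (currently IB_QP_RATE_LIMIT).
 */
static bool example_mask_fits_legacy(u32 attr_mask)
{
	return !(attr_mask & ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1));
}

static bool example_mask_fits_extended(u32 attr_mask)
{
	return !(attr_mask & ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1));
}
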
Signed-off-by: Bodong Wang Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/uverbs.h | 1 + drivers/infiniband/core/uverbs_cmd.c | 192 ++++++++++++++++++++++------------ drivers/infiniband/core/uverbs_main.c | 1 + include/uapi/rdma/ib_user_verbs.h | 21 ++++ 4 files changed, 146 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index df26a741cda6..455034ac994e 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq); IB_UVERBS_DECLARE_EX_CMD(destroy_wq); IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table); IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table); +IB_UVERBS_DECLARE_EX_CMD(modify_qp); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index cdb935ddab69..09b649159e6c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2328,96 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask) } } -ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, - struct ib_device *ib_dev, - const char __user *buf, int in_len, - int out_len) +static int modify_qp(struct ib_uverbs_file *file, + struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata) { - struct ib_uverbs_modify_qp cmd; - struct ib_udata udata; - struct ib_qp *qp; - struct ib_qp_attr *attr; - int ret; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, - out_len); + struct ib_qp_attr *attr; + struct ib_qp *qp; + int ret; attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM; - qp = idr_read_qp(cmd.qp_handle, file->ucontext); + qp = idr_read_qp(cmd->base.qp_handle, file->ucontext); if (!qp) { ret = -EINVAL; goto out; } - attr->qp_state = cmd.qp_state; - attr->cur_qp_state = cmd.cur_qp_state; - attr->path_mtu = cmd.path_mtu; - attr->path_mig_state = cmd.path_mig_state; - attr->qkey = cmd.qkey; - attr->rq_psn = cmd.rq_psn; - attr->sq_psn = cmd.sq_psn; - attr->dest_qp_num = cmd.dest_qp_num; - attr->qp_access_flags = cmd.qp_access_flags; - attr->pkey_index = cmd.pkey_index; - attr->alt_pkey_index = cmd.alt_pkey_index; - attr->en_sqd_async_notify = cmd.en_sqd_async_notify; - attr->max_rd_atomic = cmd.max_rd_atomic; - attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic; - attr->min_rnr_timer = cmd.min_rnr_timer; - attr->port_num = cmd.port_num; - attr->timeout = cmd.timeout; - attr->retry_cnt = cmd.retry_cnt; - attr->rnr_retry = cmd.rnr_retry; - attr->alt_port_num = cmd.alt_port_num; - attr->alt_timeout = cmd.alt_timeout; - - memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16); - attr->ah_attr.grh.flow_label = cmd.dest.flow_label; - attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index; - attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit; - attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class; - attr->ah_attr.dlid = cmd.dest.dlid; - attr->ah_attr.sl = cmd.dest.sl; - attr->ah_attr.src_path_bits = cmd.dest.src_path_bits; - attr->ah_attr.static_rate = cmd.dest.static_rate; - attr->ah_attr.ah_flags = cmd.dest.is_global ? 
IB_AH_GRH : 0; - attr->ah_attr.port_num = cmd.dest.port_num; - - memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16); - attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label; - attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index; - attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit; - attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class; - attr->alt_ah_attr.dlid = cmd.alt_dest.dlid; - attr->alt_ah_attr.sl = cmd.alt_dest.sl; - attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits; - attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate; - attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; - attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; + attr->qp_state = cmd->base.qp_state; + attr->cur_qp_state = cmd->base.cur_qp_state; + attr->path_mtu = cmd->base.path_mtu; + attr->path_mig_state = cmd->base.path_mig_state; + attr->qkey = cmd->base.qkey; + attr->rq_psn = cmd->base.rq_psn; + attr->sq_psn = cmd->base.sq_psn; + attr->dest_qp_num = cmd->base.dest_qp_num; + attr->qp_access_flags = cmd->base.qp_access_flags; + attr->pkey_index = cmd->base.pkey_index; + attr->alt_pkey_index = cmd->base.alt_pkey_index; + attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; + attr->max_rd_atomic = cmd->base.max_rd_atomic; + attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; + attr->min_rnr_timer = cmd->base.min_rnr_timer; + attr->port_num = cmd->base.port_num; + attr->timeout = cmd->base.timeout; + attr->retry_cnt = cmd->base.retry_cnt; + attr->rnr_retry = cmd->base.rnr_retry; + attr->alt_port_num = cmd->base.alt_port_num; + attr->alt_timeout = cmd->base.alt_timeout; + attr->rate_limit = cmd->rate_limit; + + memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16); + attr->ah_attr.grh.flow_label = cmd->base.dest.flow_label; + attr->ah_attr.grh.sgid_index = cmd->base.dest.sgid_index; + attr->ah_attr.grh.hop_limit = cmd->base.dest.hop_limit; + attr->ah_attr.grh.traffic_class = cmd->base.dest.traffic_class; + attr->ah_attr.dlid = cmd->base.dest.dlid; + attr->ah_attr.sl = cmd->base.dest.sl; + attr->ah_attr.src_path_bits = cmd->base.dest.src_path_bits; + attr->ah_attr.static_rate = cmd->base.dest.static_rate; + attr->ah_attr.ah_flags = cmd->base.dest.is_global ? + IB_AH_GRH : 0; + attr->ah_attr.port_num = cmd->base.dest.port_num; + + memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16); + attr->alt_ah_attr.grh.flow_label = cmd->base.alt_dest.flow_label; + attr->alt_ah_attr.grh.sgid_index = cmd->base.alt_dest.sgid_index; + attr->alt_ah_attr.grh.hop_limit = cmd->base.alt_dest.hop_limit; + attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class; + attr->alt_ah_attr.dlid = cmd->base.alt_dest.dlid; + attr->alt_ah_attr.sl = cmd->base.alt_dest.sl; + attr->alt_ah_attr.src_path_bits = cmd->base.alt_dest.src_path_bits; + attr->alt_ah_attr.static_rate = cmd->base.alt_dest.static_rate; + attr->alt_ah_attr.ah_flags = cmd->base.alt_dest.is_global ? 
+ IB_AH_GRH : 0; + attr->alt_ah_attr.port_num = cmd->base.alt_dest.port_num; if (qp->real_qp == qp) { - if (cmd.attr_mask & IB_QP_AV) { + if (cmd->base.attr_mask & IB_QP_AV) { ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); if (ret) goto release_qp; } ret = qp->device->modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + udata); } else { - ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); + ret = ib_modify_qp(qp, attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask)); } - if (ret) - goto release_qp; - - ret = in_len; - release_qp: put_qp_read(qp); @@ -2427,6 +2419,68 @@ out: return ret; } +ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, + struct ib_device *ib_dev, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_ex_modify_qp cmd = {}; + struct ib_udata udata; + int ret; + + if (copy_from_user(&cmd.base, buf, sizeof(cmd.base))) + return -EFAULT; + + if (cmd.base.attr_mask & + ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1)) + return -EOPNOTSUPP; + + INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL, + in_len - sizeof(cmd.base), out_len); + + ret = modify_qp(file, &cmd, &udata); + if (ret) + return ret; + + return in_len; +} + +int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, + struct ib_device *ib_dev, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ + struct ib_uverbs_ex_modify_qp cmd = {}; + int ret; + + /* + * Last bit is reserved for extending the attr_mask by + * using another field. + */ + BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31)); + + if (ucore->inlen < sizeof(cmd.base)) + return -EINVAL; + + ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + if (ret) + return ret; + + if (cmd.base.attr_mask & + ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1)) + return -EOPNOTSUPP; + + if (ucore->inlen > sizeof(cmd)) { + if (ib_is_udata_cleared(ucore, sizeof(cmd), + ucore->inlen - sizeof(cmd))) + return -EOPNOTSUPP; + } + + ret = modify_qp(file, &cmd, uhw); + + return ret; +} + ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, struct ib_device *ib_dev, const char __user *buf, int in_len, diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 0012fa58c105..80839c31c887 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq, [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table, [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table, + [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp, }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 90ba5e88ec00..dfdfe4e92d31 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -37,6 +37,7 @@ #define IB_USER_VERBS_H #include +#include /* * Increment this value if any changes that break userspace ABI @@ -93,6 +94,7 @@ enum { IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ, IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, + IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP, IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 
IB_USER_VERBS_EX_CMD_DESTROY_FLOW, IB_USER_VERBS_EX_CMD_CREATE_WQ, @@ -545,6 +547,14 @@ enum { IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, }; +enum { + IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN +}; + +enum { + IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT +}; + struct ib_uverbs_ex_create_qp { __u64 user_handle; __u32 pd_handle; @@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp { __u64 driver_data[0]; }; +struct ib_uverbs_ex_modify_qp { + struct ib_uverbs_modify_qp base; + __u32 rate_limit; + __u32 reserved; +}; + struct ib_uverbs_modify_qp_resp { }; +struct ib_uverbs_ex_modify_qp_resp { + __u32 comp_mask; + __u32 response_length; +}; + struct ib_uverbs_destroy_qp { __u64 response; __u32 qp_handle; -- cgit v1.2.3 From 9fa240bbfc4200b080c8fad12579659c2c2f36b5 Mon Sep 17 00:00:00 2001 From: Hal Rosenstock Date: Tue, 18 Oct 2016 13:20:29 -0400 Subject: IB/mad: Eliminate redundant SM class version defines for OPA and rename class version define to indicate SM rather than SMP or SMI Signed-off-by: Hal Rosenstock Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 4 ++-- drivers/infiniband/hw/hfi1/mad.c | 12 ++++++------ include/rdma/ib_mad.h | 2 +- include/rdma/opa_smi.h | 2 -- 4 files changed, 9 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 40cbd6bdb73b..bc2a9c2caa3c 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. */ - if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) { + if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { u32 opa_drslid; if ((opa_get_smp_direction(opa_smp) @@ -2167,7 +2167,7 @@ handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && - mad_hdr->class_version == OPA_SMI_CLASS_VERSION) + mad_hdr->class_version == OPA_SM_CLASS_VERSION) return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, response); diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 9487c9bb8920..320e4daa5fc6 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -128,7 +128,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) smp = send_buf->mad; smp->base_version = OPA_MGMT_BASE_VERSION; smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - smp->class_version = OPA_SMI_CLASS_VERSION; + smp->class_version = OPA_SM_CLASS_VERSION; smp->method = IB_MGMT_METHOD_TRAP; ibp->rvp.tid++; smp->tid = cpu_to_be64(ibp->rvp.tid); @@ -343,7 +343,7 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data, ni->port_guid = cpu_to_be64(dd->pport[pidx].guid); ni->base_version = OPA_MGMT_BASE_VERSION; - ni->class_version = OPA_SMI_CLASS_VERSION; + ni->class_version = OPA_SM_CLASS_VERSION; ni->node_type = 1; /* channel adapter */ ni->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ @@ -379,7 +379,7 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, nip->port_guid = cpu_to_be64(dd->pport[pidx].guid); nip->base_version = OPA_MGMT_BASE_VERSION; - nip->class_version = OPA_SMI_CLASS_VERSION; + nip->class_version = OPA_SM_CLASS_VERSION; 
nip->node_type = 1; /* channel adapter */ nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ @@ -2302,7 +2302,7 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; p->base_version = OPA_MGMT_BASE_VERSION; - p->class_version = OPA_SMI_CLASS_VERSION; + p->class_version = OPA_SM_CLASS_VERSION; /* * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. */ @@ -4022,7 +4022,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, am = be32_to_cpu(smp->attr_mod); attr_id = smp->attr_id; - if (smp->class_version != OPA_SMI_CLASS_VERSION) { + if (smp->class_version != OPA_SM_CLASS_VERSION) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)smp); return ret; @@ -4232,7 +4232,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port, *out_mad = *in_mad; - if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) { + if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; return reply((struct ib_mad_hdr *)pmp); } diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index c8a773ffe23b..981214b3790c 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -46,7 +46,7 @@ #define IB_MGMT_BASE_VERSION 1 #define OPA_MGMT_BASE_VERSION 0x80 -#define OPA_SMP_CLASS_VERSION 0x80 +#define OPA_SM_CLASS_VERSION 0x80 /* Management classes */ #define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h index 4a529ef47995..f7896117936e 100644 --- a/include/rdma/opa_smi.h +++ b/include/rdma/opa_smi.h @@ -44,8 +44,6 @@ #define OPA_MAX_SLS 32 #define OPA_MAX_SCS 32 -#define OPA_SMI_CLASS_VERSION 0x80 - #define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF) struct opa_smp { -- cgit v1.2.3 From 77a5db13153906a7e00740b10b2730e53385c5a8 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 26 Oct 2016 12:36:40 -0700 Subject: rdma_cm: add rdma_reject_msg() helper function rdma_reject_msg() returns a pointer to a string message associated with the transport reject reason codes. 
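A hedged sketch of typical ULP usage (not part of this patch, assuming the usual rdma/rdma_cm.h definitions): log the reject reason from an RDMA CM event handler.

/* Sketch only: print a human-readable reason when a connect is rejected. */
static int example_cm_event_handler(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
{
	if (event->event == RDMA_CM_EVENT_REJECTED)
		pr_info("connect rejected: %s (status %d)\n",
			rdma_reject_msg(id, event->status), event->status);

	return 0;
}
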
Reviewed-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Signed-off-by: Steve Wise Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/core/cm.c | 48 ++++++++++++++++++++++++++++++++++++++++++ drivers/infiniband/core/cma.c | 14 ++++++++++++ drivers/infiniband/core/iwcm.c | 21 ++++++++++++++++++ include/rdma/ib_cm.h | 6 ++++++ include/rdma/iw_cm.h | 6 ++++++ include/rdma/rdma_cm.h | 8 +++++++ 6 files changed, 103 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c99525512b34..6c64d0ce64b3 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -57,6 +57,54 @@ MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); +static const char * const ibcm_rej_reason_strs[] = { + [IB_CM_REJ_NO_QP] = "no QP", + [IB_CM_REJ_NO_EEC] = "no EEC", + [IB_CM_REJ_NO_RESOURCES] = "no resources", + [IB_CM_REJ_TIMEOUT] = "timeout", + [IB_CM_REJ_UNSUPPORTED] = "unsupported", + [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID", + [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance", + [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID", + [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type", + [IB_CM_REJ_STALE_CONN] = "stale conn", + [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist", + [IB_CM_REJ_INVALID_GID] = "invalid GID", + [IB_CM_REJ_INVALID_LID] = "invalid LID", + [IB_CM_REJ_INVALID_SL] = "invalid SL", + [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class", + [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit", + [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate", + [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID", + [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID", + [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL", + [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class", + [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit", + [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate", + [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect", + [IB_CM_REJ_PORT_REDIRECT] = "port redirect", + [IB_CM_REJ_INVALID_MTU] = "invalid MTU", + [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources", + [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined", + [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry", + [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID", + [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version", + [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label", + [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label", +}; + +const char *__attribute_const__ ibcm_reject_msg(int reason) +{ + size_t index = reason; + + if (index < ARRAY_SIZE(ibcm_rej_reason_strs) && + ibcm_rej_reason_strs[index]) + return ibcm_rej_reason_strs[index]; + else + return "unrecognized reason"; +} +EXPORT_SYMBOL(ibcm_reject_msg); + static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 36bf50ebb187..877e4dc9e382 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -101,6 +101,20 @@ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) } EXPORT_SYMBOL(rdma_event_msg); +const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, + int reason) +{ + if (rdma_ib_or_roce(id->device, id->port_num)) + return ibcm_reject_msg(reason); + + if (rdma_protocol_iwarp(id->device, id->port_num)) + return 
iwcm_reject_msg(reason); + + WARN_ON_ONCE(1); + return "unrecognized transport"; +} +EXPORT_SYMBOL(rdma_reject_msg); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 5495e22839a7..31661b5c1743 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -59,6 +59,27 @@ MODULE_AUTHOR("Tom Tucker"); MODULE_DESCRIPTION("iWARP CM"); MODULE_LICENSE("Dual BSD/GPL"); +static const char * const iwcm_rej_reason_strs[] = { + [ECONNRESET] = "reset by remote host", + [ECONNREFUSED] = "refused by remote application", + [ETIMEDOUT] = "setup timeout", +}; + +const char *__attribute_const__ iwcm_reject_msg(int reason) +{ + size_t index; + + /* iWARP uses negative errnos */ + index = -reason; + + if (index < ARRAY_SIZE(iwcm_rej_reason_strs) && + iwcm_rej_reason_strs[index]) + return iwcm_rej_reason_strs[index]; + else + return "unrecognized reason"; +} +EXPORT_SYMBOL(iwcm_reject_msg); + static struct ibnl_client_cbs iwcm_nl_cb_table[] = { [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 92a7d85917b4..b49258b16f4e 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -603,4 +603,10 @@ struct ib_cm_sidr_rep_param { int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_param *param); +/** + * ibcm_reject_msg - return a pointer to a reject message string. + * @reason: Value returned in the REJECT event status field. + */ +const char *__attribute_const__ ibcm_reject_msg(int reason); + #endif /* IB_CM_H */ diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h index 6d0065c322b7..5cd7701db148 100644 --- a/include/rdma/iw_cm.h +++ b/include/rdma/iw_cm.h @@ -253,4 +253,10 @@ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt); int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask); +/** + * iwcm_reject_msg - return a pointer to a reject message string. + * @reason: Value returned in the REJECT event status field. + */ +const char *__attribute_const__ iwcm_reject_msg(int reason); + #endif /* IW_CM_H */ diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 81fb1d15e8bb..f11a768be06b 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -388,4 +388,12 @@ int rdma_set_afonly(struct rdma_cm_id *id, int afonly); */ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr); +/** + * rdma_reject_msg - return a pointer to a reject message string. + * @id: Communication identifier that received the REJECT event. + * @reason: Value returned in the REJECT event status field. + */ +const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, + int reason); + #endif /* RDMA_CM_H */ -- cgit v1.2.3 From 5042a73d3e9de7bcc2a31adea08ee95bbce998dc Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 26 Oct 2016 12:36:47 -0700 Subject: rdma_cm: add rdma_is_consumer_reject() helper function Return true if the peer consumer application rejected the connection attempt. 
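A hedged sketch (not from this patch) of how a ULP event handler might tell an application-level reject apart from a transport-generated one, combining this helper with rdma_reject_msg() from the previous patch:

/* Sketch only: decide whether the peer application itself refused us. */
static void example_handle_reject(struct rdma_cm_id *id,
				  struct rdma_cm_event *event)
{
	if (rdma_is_consumer_reject(id, event->status))
		pr_debug("peer application refused the connection\n");
	else
		pr_debug("transport reject: %s\n",
			 rdma_reject_msg(id, event->status));
}
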
Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Steve Wise Reviewed-by: Bart Van Assche Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 13 +++++++++++++ include/rdma/rdma_cm.h | 7 +++++++ 2 files changed, 20 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 877e4dc9e382..bd1d4fe47740 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -115,6 +115,19 @@ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, } EXPORT_SYMBOL(rdma_reject_msg); +bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) +{ + if (rdma_ib_or_roce(id->device, id->port_num)) + return reason == IB_CM_REJ_CONSUMER_DEFINED; + + if (rdma_protocol_iwarp(id->device, id->port_num)) + return reason == -ECONNREFUSED; + + WARN_ON_ONCE(1); + return false; +} +EXPORT_SYMBOL(rdma_is_consumer_reject); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index f11a768be06b..62039c2fd951 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -395,5 +395,12 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr); */ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, int reason); +/** + * rdma_is_consumer_reject - return true if the consumer rejected the connect + * request. + * @id: Communication identifier that received the REJECT event. + * @reason: Value returned in the REJECT event status field. + */ +bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason); #endif /* RDMA_CM_H */ -- cgit v1.2.3 From 5f24410408fd093734ce758f2fe3a66fe543de22 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 26 Oct 2016 12:36:47 -0700 Subject: rdma_cm: add rdma_consumer_reject_data helper function rdma_consumer_reject_data() will return the private data pointer and length if any is available. Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Steve Wise Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 16 ++++++++++++++++ include/rdma/rdma_cm.h | 10 ++++++++++ 2 files changed, 26 insertions(+) (limited to 'include') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index bd1d4fe47740..8c30e3dedebe 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -128,6 +128,22 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) } EXPORT_SYMBOL(rdma_is_consumer_reject); +const void *rdma_consumer_reject_data(struct rdma_cm_id *id, + struct rdma_cm_event *ev, u8 *data_len) +{ + const void *p; + + if (rdma_is_consumer_reject(id, ev->status)) { + *data_len = ev->param.conn.private_data_len; + p = ev->param.conn.private_data; + } else { + *data_len = 0; + p = NULL; + } + return p; +} +EXPORT_SYMBOL(rdma_consumer_reject_data); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 62039c2fd951..d3968b561f86 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -403,4 +403,14 @@ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, */ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason); +/** + * rdma_consumer_reject_data - return the consumer reject private data and + * length, if any. 
+ * @id: Communication identifier that received the REJECT event. + * @ev: RDMA CM reject event. + * @data_len: Pointer to the resulting length of the consumer data. + */ +const void *rdma_consumer_reject_data(struct rdma_cm_id *id, + struct rdma_cm_event *ev, u8 *data_len); + #endif /* RDMA_CM_H */ -- cgit v1.2.3 From 35493294df1a1b06268a4e7b77b7a323772779f3 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Oct 2016 10:51:17 -0600 Subject: rdma UAPI: Use __kernel_sockaddr_storage The kernel side is #ifdef'd to this type, and the UAPI header should use it directly. It has slightly different alignment requirments from the usual user space version. Signed-off-by: Jason Gunthorpe Reviewed-by: Leon Romanovsky Signed-off-by: Doug Ledford --- include/uapi/rdma/rdma_user_cm.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index 01923d463673..d71da36e3cd6 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -110,7 +110,7 @@ struct rdma_ucm_bind { __u32 id; __u16 addr_size; __u16 reserved; - struct sockaddr_storage addr; + struct __kernel_sockaddr_storage addr; }; struct rdma_ucm_resolve_ip { @@ -126,8 +126,8 @@ struct rdma_ucm_resolve_addr { __u16 src_size; __u16 dst_size; __u32 reserved; - struct sockaddr_storage src_addr; - struct sockaddr_storage dst_addr; + struct __kernel_sockaddr_storage src_addr; + struct __kernel_sockaddr_storage dst_addr; }; struct rdma_ucm_resolve_route { @@ -164,8 +164,8 @@ struct rdma_ucm_query_addr_resp { __u16 pkey; __u16 src_size; __u16 dst_size; - struct sockaddr_storage src_addr; - struct sockaddr_storage dst_addr; + struct __kernel_sockaddr_storage src_addr; + struct __kernel_sockaddr_storage dst_addr; }; struct rdma_ucm_query_path_resp { @@ -257,7 +257,7 @@ struct rdma_ucm_join_mcast { __u32 id; __u16 addr_size; __u16 join_flags; - struct sockaddr_storage addr; + struct __kernel_sockaddr_storage addr; }; struct rdma_ucm_get_event { -- cgit v1.2.3 From 29c8d9eba550c6d73d17cc1618a9f5f2a7345aa1 Mon Sep 17 00:00:00 2001 From: Adit Ranadive Date: Sun, 2 Oct 2016 19:10:22 -0700 Subject: IB: Add vmw_pvrdma driver This patch series adds a driver for a paravirtual RDMA device. The device is developed for VMware's Virtual Machines and allows existing RDMA applications to continue to use existing Verbs API when deployed in VMs on ESXi. We recently did a presentation in the OFA Workshop [1] regarding this device. Description and RDMA Support ============================ The virtual device is exposed as a dual function PCIe device. One part is a virtual network device (VMXNet3) which provides networking properties like MAC, IP addresses to the RDMA part of the device. The networking properties are used to register GIDs required by RDMA applications to communicate. These patches add support and the all required infrastructure for letting applications use such a device. We support the mandatory Verbs API as well as the base memory management extensions (Local Inv, Send with Inv and Fast Register Work Requests). We currently support both Reliable Connected and Unreliable Datagram QPs but do not support Shared Receive Queues (SRQs). 
Also, we support the following types of Work Requests: o Send/Receive (with or without Immediate Data) o RDMA Write (with or without Immediate Data) o RDMA Read o Local Invalidate o Send with Invalidate o Fast Register Work Requests This version only adds support for version 1 of RoCE. We will add RoCEv2 support in a future patch. We do support registration of both MAC-based and IP-based GIDs. I have also created a git tree for our user-level driver [2]. Testing ======= We have tested this internally for various types of Guest OS - Red Hat, Centos, Ubuntu 12.04/14.04/16.04, Oracle Enterprise Linux, SLES 12 using backported versions of this driver. The tests included several runs of the performance tests (included with OFED), Intel MPI PingPong benchmark on OpenMPI, krping for FRWRs. Mellanox has been kind enough to test the backported version of the driver internally on their hardware using a VMware provided ESX build. I have also applied and tested this with Doug's k.o/for-4.9 branch (commit 5603910b). Note, that this patch series should be applied all together. I split out the commits so that it may be easier to review. PVRDMA Resources ================ [1] OFA Workshop Presentation - https://openfabrics.org/images/eventpresos/2016presentations/102parardma.pdf [2] Libpvrdma User-level library - http://git.openfabrics.org/?p=~aditr/libpvrdma.git;a=summary Reviewed-by: Jorgen Hansen Reviewed-by: George Zhang Reviewed-by: Aditya Sarwade Reviewed-by: Bryan Tan Reviewed-by: Leon Romanovsky Signed-off-by: Adit Ranadive Signed-off-by: Doug Ledford --- MAINTAINERS | 7 + drivers/infiniband/Kconfig | 1 + drivers/infiniband/hw/Makefile | 1 + drivers/infiniband/hw/vmw_pvrdma/Kconfig | 7 + drivers/infiniband/hw/vmw_pvrdma/Makefile | 3 + drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 474 ++++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c | 119 ++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 425 +++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h | 586 ++++++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c | 127 ++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 1211 ++++++++++++++++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c | 304 +++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 334 ++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 972 ++++++++++++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h | 131 +++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 579 ++++++++++ drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 436 +++++++ include/uapi/rdma/Kbuild | 1 + include/uapi/rdma/vmw_pvrdma-abi.h | 289 +++++ 19 files changed, 6007 insertions(+) create mode 100644 drivers/infiniband/hw/vmw_pvrdma/Kconfig create mode 100644 drivers/infiniband/hw/vmw_pvrdma/Makefile create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c create mode 100644 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h create mode 100644 
include/uapi/rdma/vmw_pvrdma-abi.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index 3d838cf49f81..75a68f079188 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12928,6 +12928,13 @@ S: Maintained F: drivers/scsi/vmw_pvscsi.c F: drivers/scsi/vmw_pvscsi.h +VMWARE PVRDMA DRIVER +M: Adit Ranadive +M: VMware PV-Drivers +L: linux-rdma@vger.kernel.org +S: Maintained +F: drivers/infiniband/hw/vmw_pvrdma/ + VOLTAGE AND CURRENT REGULATOR FRAMEWORK M: Liam Girdwood M: Mark Brown diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index fb3fb89640e5..670917387eda 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -73,6 +73,7 @@ source "drivers/infiniband/hw/mlx4/Kconfig" source "drivers/infiniband/hw/mlx5/Kconfig" source "drivers/infiniband/hw/nes/Kconfig" source "drivers/infiniband/hw/ocrdma/Kconfig" +source "drivers/infiniband/hw/vmw_pvrdma/Kconfig" source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/hw/hns/Kconfig" diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index e7a5ed9f6f3f..ed553de2ca12 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ obj-$(CONFIG_INFINIBAND_NES) += nes/ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/ +obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma/ obj-$(CONFIG_INFINIBAND_USNIC) += usnic/ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/ obj-$(CONFIG_INFINIBAND_HNS) += hns/ diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig new file mode 100644 index 000000000000..5a9790ac0ede --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig @@ -0,0 +1,7 @@ +config INFINIBAND_VMWARE_PVRDMA + tristate "VMware Paravirtualized RDMA Driver" + depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3 + ---help--- + This driver provides low-level support for VMware Paravirtual + RDMA adapter. It interacts with the VMXNet3 driver to provide + Ethernet capabilities. diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile new file mode 100644 index 000000000000..0194ed19f542 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o + +vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h new file mode 100644 index 000000000000..71e1d55d69d6 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. 
+ * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PVRDMA_H__ +#define __PVRDMA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pvrdma_ring.h" +#include "pvrdma_dev_api.h" +#include "pvrdma_verbs.h" + +/* NOT the same as BIT_MASK(). */ +#define PVRDMA_MASK(n) ((n << 1) - 1) + +/* + * VMware PVRDMA PCI device id. + */ +#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 + +struct pvrdma_dev; + +struct pvrdma_page_dir { + dma_addr_t dir_dma; + u64 *dir; + int ntables; + u64 **tables; + u64 npages; + void **pages; +}; + +struct pvrdma_cq { + struct ib_cq ibcq; + int offset; + spinlock_t cq_lock; /* Poll lock. */ + struct pvrdma_uar_map *uar; + struct ib_umem *umem; + struct pvrdma_ring_state *ring_state; + struct pvrdma_page_dir pdir; + u32 cq_handle; + bool is_kernel; + atomic_t refcnt; + wait_queue_head_t wait; +}; + +struct pvrdma_id_table { + u32 last; + u32 top; + u32 max; + u32 mask; + spinlock_t lock; /* Table lock. */ + unsigned long *table; +}; + +struct pvrdma_uar_map { + unsigned long pfn; + void __iomem *map; + int index; +}; + +struct pvrdma_uar_table { + struct pvrdma_id_table tbl; + int size; +}; + +struct pvrdma_ucontext { + struct ib_ucontext ibucontext; + struct pvrdma_dev *dev; + struct pvrdma_uar_map uar; + u64 ctx_handle; +}; + +struct pvrdma_pd { + struct ib_pd ibpd; + u32 pdn; + u32 pd_handle; + int privileged; +}; + +struct pvrdma_mr { + u32 mr_handle; + u64 iova; + u64 size; +}; + +struct pvrdma_user_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct pvrdma_mr mmr; + struct pvrdma_page_dir pdir; + u64 *pages; + u32 npages; + u32 max_pages; + u32 page_shift; +}; + +struct pvrdma_wq { + struct pvrdma_ring *ring; + spinlock_t lock; /* Work queue lock. 
*/ + int wqe_cnt; + int wqe_size; + int max_sg; + int offset; +}; + +struct pvrdma_ah { + struct ib_ah ibah; + struct pvrdma_av av; +}; + +struct pvrdma_qp { + struct ib_qp ibqp; + u32 qp_handle; + u32 qkey; + struct pvrdma_wq sq; + struct pvrdma_wq rq; + struct ib_umem *rumem; + struct ib_umem *sumem; + struct pvrdma_page_dir pdir; + int npages; + int npages_send; + int npages_recv; + u32 flags; + u8 port; + u8 state; + bool is_kernel; + struct mutex mutex; /* QP state mutex. */ + atomic_t refcnt; + wait_queue_head_t wait; +}; + +struct pvrdma_dev { + /* PCI device-related information. */ + struct ib_device ib_dev; + struct pci_dev *pdev; + void __iomem *regs; + struct pvrdma_device_shared_region *dsr; /* Shared region pointer */ + dma_addr_t dsrbase; /* Shared region base address */ + void *cmd_slot; + void *resp_slot; + unsigned long flags; + struct list_head device_link; + + /* Locking and interrupt information. */ + spinlock_t cmd_lock; /* Command lock. */ + struct semaphore cmd_sema; + struct completion cmd_done; + struct { + enum pvrdma_intr_type type; /* Intr type */ + struct msix_entry msix_entry[PVRDMA_MAX_INTERRUPTS]; + irq_handler_t handler[PVRDMA_MAX_INTERRUPTS]; + u8 enabled[PVRDMA_MAX_INTERRUPTS]; + u8 size; + } intr; + + /* RDMA-related device information. */ + union ib_gid *sgid_tbl; + struct pvrdma_ring_state *async_ring_state; + struct pvrdma_page_dir async_pdir; + struct pvrdma_ring_state *cq_ring_state; + struct pvrdma_page_dir cq_pdir; + struct pvrdma_cq **cq_tbl; + spinlock_t cq_tbl_lock; + struct pvrdma_qp **qp_tbl; + spinlock_t qp_tbl_lock; + struct pvrdma_uar_table uar_table; + struct pvrdma_uar_map driver_uar; + __be64 sys_image_guid; + spinlock_t desc_lock; /* Device modification lock. */ + u32 port_cap_mask; + struct mutex port_mutex; /* Port modification mutex. */ + bool ib_active; + atomic_t num_qps; + atomic_t num_cqs; + atomic_t num_pds; + atomic_t num_ahs; + + /* Network device information. 
*/ + struct net_device *netdev; + struct notifier_block nb_netdev; +}; + +struct pvrdma_netdevice_work { + struct work_struct work; + struct net_device *event_netdev; + unsigned long event; +}; + +static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct pvrdma_dev, ib_dev); +} + +static inline struct +pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct pvrdma_ucontext, ibucontext); +} + +static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct pvrdma_pd, ibpd); +} + +static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct pvrdma_cq, ibcq); +} + +static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct pvrdma_user_mr, ibmr); +} + +static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct pvrdma_qp, ibqp); +} + +static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah) +{ + return container_of(ibah, struct pvrdma_ah, ibah); +} + +static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val) +{ + writel(cpu_to_le32(val), dev->regs + reg); +} + +static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg) +{ + return le32_to_cpu(readl(dev->regs + reg)); +} + +static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val) +{ + writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET); +} + +static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val) +{ + writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET); +} + +static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir, + u64 offset) +{ + return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE); +} + +static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu) +{ + return (enum pvrdma_mtu)mtu; +} + +static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu) +{ + return (enum ib_mtu)mtu; +} + +static inline enum pvrdma_port_state ib_port_state_to_pvrdma( + enum ib_port_state state) +{ + return (enum pvrdma_port_state)state; +} + +static inline enum ib_port_state pvrdma_port_state_to_ib( + enum pvrdma_port_state state) +{ + return (enum ib_port_state)state; +} + +static inline int ib_port_cap_flags_to_pvrdma(int flags) +{ + return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX); +} + +static inline int pvrdma_port_cap_flags_to_ib(int flags) +{ + return flags; +} + +static inline enum pvrdma_port_width ib_port_width_to_pvrdma( + enum ib_port_width width) +{ + return (enum pvrdma_port_width)width; +} + +static inline enum ib_port_width pvrdma_port_width_to_ib( + enum pvrdma_port_width width) +{ + return (enum ib_port_width)width; +} + +static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma( + enum ib_port_speed speed) +{ + return (enum pvrdma_port_speed)speed; +} + +static inline enum ib_port_speed pvrdma_port_speed_to_ib( + enum pvrdma_port_speed speed) +{ + return (enum ib_port_speed)speed; +} + +static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask) +{ + return attr_mask; +} + +static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask) +{ + return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX); +} + +static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma( + enum ib_mig_state state) +{ + return (enum pvrdma_mig_state)state; +} + +static inline enum ib_mig_state pvrdma_mig_state_to_ib( + enum pvrdma_mig_state state) +{ + return (enum ib_mig_state)state; +} + +static 
inline int ib_access_flags_to_pvrdma(int flags) +{ + return flags; +} + +static inline int pvrdma_access_flags_to_ib(int flags) +{ + return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX); +} + +static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type) +{ + return (enum pvrdma_qp_type)type; +} + +static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type) +{ + return (enum ib_qp_type)type; +} + +static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state) +{ + return (enum pvrdma_qp_state)state; +} + +static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) +{ + return (enum ib_qp_state)state; +} + +static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) +{ + return (enum pvrdma_wr_opcode)op; +} + +static inline enum ib_wc_status pvrdma_wc_status_to_ib( + enum pvrdma_wc_status status) +{ + return (enum ib_wc_status)status; +} + +static inline int pvrdma_wc_opcode_to_ib(int opcode) +{ + return opcode; +} + +static inline int pvrdma_wc_flags_to_ib(int flags) +{ + return flags; +} + +static inline int ib_send_flags_to_pvrdma(int flags) +{ + return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX); +} + +void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, + const struct pvrdma_qp_cap *src); +void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, + const struct ib_qp_cap *src); +void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src); +void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src); +void pvrdma_global_route_to_ib(struct ib_global_route *dst, + const struct pvrdma_global_route *src); +void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst, + const struct ib_global_route *src); +void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst, + const struct pvrdma_ah_attr *src); +void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst, + const struct ib_ah_attr *src); + +int pvrdma_uar_table_init(struct pvrdma_dev *dev); +void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev); + +int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar); +void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar); + +void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq); + +int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir, + u64 npages, bool alloc_pages); +void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev, + struct pvrdma_page_dir *pdir); +int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx, + dma_addr_t daddr); +int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir, + struct ib_umem *umem, u64 offset); +dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx); +int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir, + u64 *page_list, int num_pages); + +int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *rsp, unsigned resp_code); + +#endif /* __PVRDMA_H__ */ diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c new file mode 100644 index 000000000000..4a78c537d8a1 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. 
This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "pvrdma.h" + +#define PVRDMA_CMD_TIMEOUT 10000 /* ms */ + +static inline int pvrdma_cmd_recv(struct pvrdma_dev *dev, + union pvrdma_cmd_resp *resp, + unsigned resp_code) +{ + int err; + + dev_dbg(&dev->pdev->dev, "receive response from device\n"); + + err = wait_for_completion_interruptible_timeout(&dev->cmd_done, + msecs_to_jiffies(PVRDMA_CMD_TIMEOUT)); + if (err == 0 || err == -ERESTARTSYS) { + dev_warn(&dev->pdev->dev, + "completion timeout or interrupted\n"); + return -ETIMEDOUT; + } + + spin_lock(&dev->cmd_lock); + memcpy(resp, dev->resp_slot, sizeof(*resp)); + spin_unlock(&dev->cmd_lock); + + if (resp->hdr.ack != resp_code) { + dev_warn(&dev->pdev->dev, + "unknown response %#x expected %#x\n", + resp->hdr.ack, resp_code); + return -EFAULT; + } + + return 0; +} + +int +pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req, + union pvrdma_cmd_resp *resp, unsigned resp_code) +{ + int err; + + dev_dbg(&dev->pdev->dev, "post request to device\n"); + + /* Serializiation */ + down(&dev->cmd_sema); + + BUILD_BUG_ON(sizeof(union pvrdma_cmd_req) != + sizeof(struct pvrdma_cmd_modify_qp)); + + spin_lock(&dev->cmd_lock); + memcpy(dev->cmd_slot, req, sizeof(*req)); + spin_unlock(&dev->cmd_lock); + + init_completion(&dev->cmd_done); + pvrdma_write_reg(dev, PVRDMA_REG_REQUEST, 0); + + /* Make sure the request is written before reading status. 
*/ + mb(); + + err = pvrdma_read_reg(dev, PVRDMA_REG_ERR); + if (err == 0) { + if (resp != NULL) + err = pvrdma_cmd_recv(dev, resp, resp_code); + } else { + dev_warn(&dev->pdev->dev, + "failed to write request error reg: %d\n", err); + err = -EFAULT; + } + + up(&dev->cmd_sema); + + return err; +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c new file mode 100644 index 000000000000..e429ca5b16aa --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include "pvrdma.h" + +/** + * pvrdma_req_notify_cq - request notification for a completion queue + * @ibcq: the completion queue + * @notify_flags: notification flags + * + * @return: 0 for success. + */ +int pvrdma_req_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct pvrdma_dev *dev = to_vdev(ibcq->device); + struct pvrdma_cq *cq = to_vcq(ibcq); + u32 val = cq->cq_handle; + + val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
+ PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; + + pvrdma_write_uar_cq(dev, val); + + return 0; +} + +/** + * pvrdma_create_cq - create completion queue + * @ibdev: the device + * @attr: completion queue attributes + * @context: user context + * @udata: user data + * + * @return: ib_cq completion queue pointer on success, + * otherwise returns negative errno. + */ +struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + int entries = attr->cqe; + struct pvrdma_dev *dev = to_vdev(ibdev); + struct pvrdma_cq *cq; + int ret; + int npages; + unsigned long flags; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_cq *cmd = &req.create_cq; + struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; + struct pvrdma_create_cq ucmd; + + BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); + + entries = roundup_pow_of_two(entries); + if (entries < 1 || entries > dev->dsr->caps.max_cqe) + return ERR_PTR(-EINVAL); + + if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq)) + return ERR_PTR(-ENOMEM); + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + atomic_dec(&dev->num_cqs); + return ERR_PTR(-ENOMEM); + } + + cq->ibcq.cqe = entries; + + if (context) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + ret = -EFAULT; + goto err_cq; + } + + cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(cq->umem)) { + ret = PTR_ERR(cq->umem); + goto err_cq; + } + + npages = ib_umem_page_count(cq->umem); + } else { + cq->is_kernel = true; + + /* One extra page for shared ring state */ + npages = 1 + (entries * sizeof(struct pvrdma_cqe) + + PAGE_SIZE - 1) / PAGE_SIZE; + + /* Skip header page. */ + cq->offset = PAGE_SIZE; + } + + if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) { + dev_warn(&dev->pdev->dev, + "overflow pages in completion queue\n"); + ret = -EINVAL; + goto err_umem; + } + + ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel); + if (ret) { + dev_warn(&dev->pdev->dev, + "could not allocate page directory\n"); + goto err_umem; + } + + /* Ring state is always the first page. Set in library for user cq. */ + if (cq->is_kernel) + cq->ring_state = cq->pdir.pages[0]; + else + pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); + + atomic_set(&cq->refcnt, 1); + init_waitqueue_head(&cq->wait); + spin_lock_init(&cq->cq_lock); + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ; + cmd->nchunks = npages; + cmd->ctx_handle = (context) ? + (u64)to_vucontext(context)->ctx_handle : 0; + cmd->cqe = entries; + cmd->pdir_dma = cq->pdir.dir_dma; + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not create completion queue, error: %d\n", ret); + goto err_page_dir; + } + + cq->ibcq.cqe = resp->cqe; + cq->cq_handle = resp->cq_handle; + spin_lock_irqsave(&dev->cq_tbl_lock, flags); + dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; + spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); + + if (context) { + cq->uar = &(to_vucontext(context)->uar); + + /* Copy udata back. 
*/ + if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { + dev_warn(&dev->pdev->dev, + "failed to copy back udata\n"); + pvrdma_destroy_cq(&cq->ibcq); + return ERR_PTR(-EINVAL); + } + } + + return &cq->ibcq; + +err_page_dir: + pvrdma_page_dir_cleanup(dev, &cq->pdir); +err_umem: + if (context) + ib_umem_release(cq->umem); +err_cq: + atomic_dec(&dev->num_cqs); + kfree(cq); + + return ERR_PTR(ret); +} + +static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq) +{ + atomic_dec(&cq->refcnt); + wait_event(cq->wait, !atomic_read(&cq->refcnt)); + + if (!cq->is_kernel) + ib_umem_release(cq->umem); + + pvrdma_page_dir_cleanup(dev, &cq->pdir); + kfree(cq); +} + +/** + * pvrdma_destroy_cq - destroy completion queue + * @cq: the completion queue to destroy. + * + * @return: 0 for success. + */ +int pvrdma_destroy_cq(struct ib_cq *cq) +{ + struct pvrdma_cq *vcq = to_vcq(cq); + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq; + struct pvrdma_dev *dev = to_vdev(cq->device); + unsigned long flags; + int ret; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ; + cmd->cq_handle = vcq->cq_handle; + + ret = pvrdma_cmd_post(dev, &req, NULL, 0); + if (ret < 0) + dev_warn(&dev->pdev->dev, + "could not destroy completion queue, error: %d\n", + ret); + + /* free cq's resources */ + spin_lock_irqsave(&dev->cq_tbl_lock, flags); + dev->cq_tbl[vcq->cq_handle] = NULL; + spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); + + pvrdma_free_cq(dev, vcq); + atomic_dec(&dev->num_cqs); + + return ret; +} + +/** + * pvrdma_modify_cq - modify the CQ moderation parameters + * @ibcq: the CQ to modify + * @cq_count: number of CQEs that will trigger an event + * @cq_period: max period of time in usec before triggering an event + * + * @return: -EOPNOTSUPP as CQ resize is not supported. + */ +int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) +{ + return -EOPNOTSUPP; +} + +static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i) +{ + return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr( + &cq->pdir, + cq->offset + + sizeof(struct pvrdma_cqe) * i); +} + +void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq) +{ + int head; + int has_data; + + if (!cq->is_kernel) + return; + + /* Lock held */ + has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, + cq->ibcq.cqe, &head); + if (unlikely(has_data > 0)) { + int items; + int curr; + int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail, + cq->ibcq.cqe); + struct pvrdma_cqe *cqe; + struct pvrdma_cqe *curr_cqe; + + items = (tail > head) ? 
(tail - head) : + (cq->ibcq.cqe - head + tail); + curr = --tail; + while (items-- > 0) { + if (curr < 0) + curr = cq->ibcq.cqe - 1; + if (tail < 0) + tail = cq->ibcq.cqe - 1; + curr_cqe = get_cqe(cq, curr); + if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) { + if (curr != tail) { + cqe = get_cqe(cq, tail); + *cqe = *curr_cqe; + } + tail--; + } else { + pvrdma_idx_ring_inc( + &cq->ring_state->rx.cons_head, + cq->ibcq.cqe); + } + curr--; + } + } +} + +static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp, + struct ib_wc *wc) +{ + struct pvrdma_dev *dev = to_vdev(cq->ibcq.device); + int has_data; + unsigned int head; + bool tried = false; + struct pvrdma_cqe *cqe; + +retry: + has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, + cq->ibcq.cqe, &head); + if (has_data == 0) { + if (tried) + return -EAGAIN; + + pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL); + + tried = true; + goto retry; + } else if (has_data == PVRDMA_INVALID_IDX) { + dev_err(&dev->pdev->dev, "CQ ring state invalid\n"); + return -EAGAIN; + } + + cqe = get_cqe(cq, head); + + /* Ensure cqe is valid. */ + rmb(); + if (dev->qp_tbl[cqe->qp & 0xffff]) + *cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff]; + else + return -EAGAIN; + + wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode); + wc->status = pvrdma_wc_status_to_ib(cqe->status); + wc->wr_id = cqe->wr_id; + wc->qp = &(*cur_qp)->ibqp; + wc->byte_len = cqe->byte_len; + wc->ex.imm_data = cqe->imm_data; + wc->src_qp = cqe->src_qp; + wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags); + wc->pkey_index = cqe->pkey_index; + wc->slid = cqe->slid; + wc->sl = cqe->sl; + wc->dlid_path_bits = cqe->dlid_path_bits; + wc->port_num = cqe->port_num; + wc->vendor_err = 0; + + /* Update shared ring state */ + pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe); + + return 0; +} + +/** + * pvrdma_poll_cq - poll for work completion queue entries + * @ibcq: completion queue + * @num_entries: the maximum number of entries + * @entry: pointer to work completion array + * + * @return: number of polled completion entries + */ +int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct pvrdma_cq *cq = to_vcq(ibcq); + struct pvrdma_qp *cur_qp = NULL; + unsigned long flags; + int npolled; + + if (num_entries < 1 || wc == NULL) + return 0; + + spin_lock_irqsave(&cq->cq_lock, flags); + for (npolled = 0; npolled < num_entries; ++npolled) { + if (pvrdma_poll_one(cq, &cur_qp, wc + npolled)) + break; + } + + spin_unlock_irqrestore(&cq->cq_lock, flags); + + /* Ensure we do not return errors from poll_cq */ + return npolled; +} + +/** + * pvrdma_resize_cq - resize CQ + * @ibcq: the completion queue + * @entries: CQ entries + * @udata: user data + * + * @return: -EOPNOTSUPP as CQ resize is not supported. + */ +int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) +{ + return -EOPNOTSUPP; +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h new file mode 100644 index 000000000000..c06768635d65 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. 
This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PVRDMA_DEV_API_H__ +#define __PVRDMA_DEV_API_H__ + +#include + +#include "pvrdma_verbs.h" + +#define PVRDMA_VERSION 17 +#define PVRDMA_BOARD_ID 1 +#define PVRDMA_REV_ID 1 + +/* + * Masks and accessors for page directory, which is a two-level lookup: + * page directory -> page table -> page. Only one directory for now, but we + * could expand that easily. 9 bits for tables, 9 bits for pages, gives one + * gigabyte for memory regions and so forth. + */ + +#define PVRDMA_PDIR_SHIFT 18 +#define PVRDMA_PTABLE_SHIFT 9 +#define PVRDMA_PAGE_DIR_DIR(x) (((x) >> PVRDMA_PDIR_SHIFT) & 0x1) +#define PVRDMA_PAGE_DIR_TABLE(x) (((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff) +#define PVRDMA_PAGE_DIR_PAGE(x) ((x) & 0x1ff) +#define PVRDMA_PAGE_DIR_MAX_PAGES (1 * 512 * 512) +#define PVRDMA_MAX_FAST_REG_PAGES 128 + +/* + * Max MSI-X vectors. + */ + +#define PVRDMA_MAX_INTERRUPTS 3 + +/* Register offsets within PCI resource on BAR1. */ +#define PVRDMA_REG_VERSION 0x00 /* R: Version of device. */ +#define PVRDMA_REG_DSRLOW 0x04 /* W: Device shared region low PA. */ +#define PVRDMA_REG_DSRHIGH 0x08 /* W: Device shared region high PA. */ +#define PVRDMA_REG_CTL 0x0c /* W: PVRDMA_DEVICE_CTL */ +#define PVRDMA_REG_REQUEST 0x10 /* W: Indicate device request. */ +#define PVRDMA_REG_ERR 0x14 /* R: Device error. */ +#define PVRDMA_REG_ICR 0x18 /* R: Interrupt cause. */ +#define PVRDMA_REG_IMR 0x1c /* R/W: Interrupt mask. */ +#define PVRDMA_REG_MACL 0x20 /* R/W: MAC address low. */ +#define PVRDMA_REG_MACH 0x24 /* R/W: MAC address high. */ + +/* Object flags. */ +#define PVRDMA_CQ_FLAG_ARMED_SOL BIT(0) /* Armed for solicited-only. */ +#define PVRDMA_CQ_FLAG_ARMED BIT(1) /* Armed. 
*/ +#define PVRDMA_MR_FLAG_DMA BIT(0) /* DMA region. */ +#define PVRDMA_MR_FLAG_FRMR BIT(1) /* Fast reg memory region. */ + +/* + * Atomic operation capability (masked versions are extended atomic + * operations. + */ + +#define PVRDMA_ATOMIC_OP_COMP_SWAP BIT(0) /* Compare and swap. */ +#define PVRDMA_ATOMIC_OP_FETCH_ADD BIT(1) /* Fetch and add. */ +#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP BIT(2) /* Masked compare and swap. */ +#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD BIT(3) /* Masked fetch and add. */ + +/* + * Base Memory Management Extension flags to support Fast Reg Memory Regions + * and Fast Reg Work Requests. Each flag represents a verb operation and we + * must support all of them to qualify for the BMME device cap. + */ + +#define PVRDMA_BMME_FLAG_LOCAL_INV BIT(0) /* Local Invalidate. */ +#define PVRDMA_BMME_FLAG_REMOTE_INV BIT(1) /* Remote Invalidate. */ +#define PVRDMA_BMME_FLAG_FAST_REG_WR BIT(2) /* Fast Reg Work Request. */ + +/* + * GID types. The interpretation of the gid_types bit field in the device + * capabilities will depend on the device mode. For now, the device only + * supports RoCE as mode, so only the different GID types for RoCE are + * defined. + */ + +#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0) +#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1) + +enum pvrdma_pci_resource { + PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */ + PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */ + PVRDMA_PCI_RESOURCE_UAR, /* BAR2: UAR pages, MMIO, 64-bit. */ + PVRDMA_PCI_RESOURCE_LAST, /* Last. */ +}; + +enum pvrdma_device_ctl { + PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ + PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ + PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ +}; + +enum pvrdma_intr_vector { + PVRDMA_INTR_VECTOR_RESPONSE, /* Command response. */ + PVRDMA_INTR_VECTOR_ASYNC, /* Async events. */ + PVRDMA_INTR_VECTOR_CQ, /* CQ notification. */ + /* Additional CQ notification vectors. */ +}; + +enum pvrdma_intr_cause { + PVRDMA_INTR_CAUSE_RESPONSE = (1 << PVRDMA_INTR_VECTOR_RESPONSE), + PVRDMA_INTR_CAUSE_ASYNC = (1 << PVRDMA_INTR_VECTOR_ASYNC), + PVRDMA_INTR_CAUSE_CQ = (1 << PVRDMA_INTR_VECTOR_CQ), +}; + +enum pvrdma_intr_type { + PVRDMA_INTR_TYPE_INTX, /* Legacy. */ + PVRDMA_INTR_TYPE_MSI, /* MSI. */ + PVRDMA_INTR_TYPE_MSIX, /* MSI-X. */ +}; + +enum pvrdma_gos_bits { + PVRDMA_GOS_BITS_UNK, /* Unknown. */ + PVRDMA_GOS_BITS_32, /* 32-bit. */ + PVRDMA_GOS_BITS_64, /* 64-bit. */ +}; + +enum pvrdma_gos_type { + PVRDMA_GOS_TYPE_UNK, /* Unknown. */ + PVRDMA_GOS_TYPE_LINUX, /* Linux. */ +}; + +enum pvrdma_device_mode { + PVRDMA_DEVICE_MODE_ROCE, /* RoCE. */ + PVRDMA_DEVICE_MODE_IWARP, /* iWarp. */ + PVRDMA_DEVICE_MODE_IB, /* InfiniBand. */ +}; + +struct pvrdma_gos_info { + u32 gos_bits:2; /* W: PVRDMA_GOS_BITS_ */ + u32 gos_type:4; /* W: PVRDMA_GOS_TYPE_ */ + u32 gos_ver:16; /* W: Guest OS version. */ + u32 gos_misc:10; /* W: Other. */ + u32 pad; /* Pad to 8-byte alignment. */ +}; + +struct pvrdma_device_caps { + u64 fw_ver; /* R: Query device. */ + __be64 node_guid; + __be64 sys_image_guid; + u64 max_mr_size; + u64 page_size_cap; + u64 atomic_arg_sizes; /* EX verbs. */ + u32 ex_comp_mask; /* EX verbs. */ + u32 device_cap_flags2; /* EX verbs. */ + u32 max_fa_bit_boundary; /* EX verbs. */ + u32 log_max_atomic_inline_arg; /* EX verbs. 
*/ + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u32 max_qp; + u32 max_qp_wr; + u32 device_cap_flags; + u32 max_sge; + u32 max_sge_rd; + u32 max_cq; + u32 max_cqe; + u32 max_mr; + u32 max_pd; + u32 max_qp_rd_atom; + u32 max_ee_rd_atom; + u32 max_res_rd_atom; + u32 max_qp_init_rd_atom; + u32 max_ee_init_rd_atom; + u32 max_ee; + u32 max_rdd; + u32 max_mw; + u32 max_raw_ipv6_qp; + u32 max_raw_ethy_qp; + u32 max_mcast_grp; + u32 max_mcast_qp_attach; + u32 max_total_mcast_qp_attach; + u32 max_ah; + u32 max_fmr; + u32 max_map_per_fmr; + u32 max_srq; + u32 max_srq_wr; + u32 max_srq_sge; + u32 max_uar; + u32 gid_tbl_len; + u16 max_pkeys; + u8 local_ca_ack_delay; + u8 phys_port_cnt; + u8 mode; /* PVRDMA_DEVICE_MODE_ */ + u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */ + u8 bmme_flags; /* FRWR Mem Mgmt Extensions */ + u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */ + u8 reserved[4]; +}; + +struct pvrdma_ring_page_info { + u32 num_pages; /* Num pages incl. header. */ + u32 reserved; /* Reserved. */ + u64 pdir_dma; /* Page directory PA. */ +}; + +#pragma pack(push, 1) + +struct pvrdma_device_shared_region { + u32 driver_version; /* W: Driver version. */ + u32 pad; /* Pad to 8-byte align. */ + struct pvrdma_gos_info gos_info; /* W: Guest OS information. */ + u64 cmd_slot_dma; /* W: Command slot address. */ + u64 resp_slot_dma; /* W: Response slot address. */ + struct pvrdma_ring_page_info async_ring_pages; + /* W: Async ring page info. */ + struct pvrdma_ring_page_info cq_ring_pages; + /* W: CQ ring page info. */ + u32 uar_pfn; /* W: UAR pageframe. */ + u32 pad2; /* Pad to 8-byte align. */ + struct pvrdma_device_caps caps; /* R: Device capabilities. */ +}; + +#pragma pack(pop) + +/* Event types. Currently a 1:1 mapping with enum ib_event. */ +enum pvrdma_eqe_type { + PVRDMA_EVENT_CQ_ERR, + PVRDMA_EVENT_QP_FATAL, + PVRDMA_EVENT_QP_REQ_ERR, + PVRDMA_EVENT_QP_ACCESS_ERR, + PVRDMA_EVENT_COMM_EST, + PVRDMA_EVENT_SQ_DRAINED, + PVRDMA_EVENT_PATH_MIG, + PVRDMA_EVENT_PATH_MIG_ERR, + PVRDMA_EVENT_DEVICE_FATAL, + PVRDMA_EVENT_PORT_ACTIVE, + PVRDMA_EVENT_PORT_ERR, + PVRDMA_EVENT_LID_CHANGE, + PVRDMA_EVENT_PKEY_CHANGE, + PVRDMA_EVENT_SM_CHANGE, + PVRDMA_EVENT_SRQ_ERR, + PVRDMA_EVENT_SRQ_LIMIT_REACHED, + PVRDMA_EVENT_QP_LAST_WQE_REACHED, + PVRDMA_EVENT_CLIENT_REREGISTER, + PVRDMA_EVENT_GID_CHANGE, +}; + +/* Event queue element. */ +struct pvrdma_eqe { + u32 type; /* Event type. */ + u32 info; /* Handle, other. */ +}; + +/* CQ notification queue element. 
*/ +struct pvrdma_cqne { + u32 info; /* Handle */ +}; + +enum { + PVRDMA_CMD_FIRST, + PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST, + PVRDMA_CMD_QUERY_PKEY, + PVRDMA_CMD_CREATE_PD, + PVRDMA_CMD_DESTROY_PD, + PVRDMA_CMD_CREATE_MR, + PVRDMA_CMD_DESTROY_MR, + PVRDMA_CMD_CREATE_CQ, + PVRDMA_CMD_RESIZE_CQ, + PVRDMA_CMD_DESTROY_CQ, + PVRDMA_CMD_CREATE_QP, + PVRDMA_CMD_MODIFY_QP, + PVRDMA_CMD_QUERY_QP, + PVRDMA_CMD_DESTROY_QP, + PVRDMA_CMD_CREATE_UC, + PVRDMA_CMD_DESTROY_UC, + PVRDMA_CMD_CREATE_BIND, + PVRDMA_CMD_DESTROY_BIND, + PVRDMA_CMD_MAX, +}; + +enum { + PVRDMA_CMD_FIRST_RESP = (1 << 31), + PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP, + PVRDMA_CMD_QUERY_PKEY_RESP, + PVRDMA_CMD_CREATE_PD_RESP, + PVRDMA_CMD_DESTROY_PD_RESP_NOOP, + PVRDMA_CMD_CREATE_MR_RESP, + PVRDMA_CMD_DESTROY_MR_RESP_NOOP, + PVRDMA_CMD_CREATE_CQ_RESP, + PVRDMA_CMD_RESIZE_CQ_RESP, + PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, + PVRDMA_CMD_CREATE_QP_RESP, + PVRDMA_CMD_MODIFY_QP_RESP, + PVRDMA_CMD_QUERY_QP_RESP, + PVRDMA_CMD_DESTROY_QP_RESP, + PVRDMA_CMD_CREATE_UC_RESP, + PVRDMA_CMD_DESTROY_UC_RESP_NOOP, + PVRDMA_CMD_CREATE_BIND_RESP_NOOP, + PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, + PVRDMA_CMD_MAX_RESP, +}; + +struct pvrdma_cmd_hdr { + u64 response; /* Key for response lookup. */ + u32 cmd; /* PVRDMA_CMD_ */ + u32 reserved; /* Reserved. */ +}; + +struct pvrdma_cmd_resp_hdr { + u64 response; /* From cmd hdr. */ + u32 ack; /* PVRDMA_CMD_XXX_RESP */ + u8 err; /* Error. */ + u8 reserved[3]; /* Reserved. */ +}; + +struct pvrdma_cmd_query_port { + struct pvrdma_cmd_hdr hdr; + u8 port_num; + u8 reserved[7]; +}; + +struct pvrdma_cmd_query_port_resp { + struct pvrdma_cmd_resp_hdr hdr; + struct pvrdma_port_attr attrs; +}; + +struct pvrdma_cmd_query_pkey { + struct pvrdma_cmd_hdr hdr; + u8 port_num; + u8 index; + u8 reserved[6]; +}; + +struct pvrdma_cmd_query_pkey_resp { + struct pvrdma_cmd_resp_hdr hdr; + u16 pkey; + u8 reserved[6]; +}; + +struct pvrdma_cmd_create_uc { + struct pvrdma_cmd_hdr hdr; + u32 pfn; /* UAR page frame number */ + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_uc_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 ctx_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_destroy_uc { + struct pvrdma_cmd_hdr hdr; + u32 ctx_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_pd { + struct pvrdma_cmd_hdr hdr; + u32 ctx_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_pd_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 pd_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_destroy_pd { + struct pvrdma_cmd_hdr hdr; + u32 pd_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_mr { + struct pvrdma_cmd_hdr hdr; + u64 start; + u64 length; + u64 pdir_dma; + u32 pd_handle; + u32 access_flags; + u32 flags; + u32 nchunks; +}; + +struct pvrdma_cmd_create_mr_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 mr_handle; + u32 lkey; + u32 rkey; + u8 reserved[4]; +}; + +struct pvrdma_cmd_destroy_mr { + struct pvrdma_cmd_hdr hdr; + u32 mr_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_cq { + struct pvrdma_cmd_hdr hdr; + u64 pdir_dma; + u32 ctx_handle; + u32 cqe; + u32 nchunks; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_cq_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 cq_handle; + u32 cqe; +}; + +struct pvrdma_cmd_resize_cq { + struct pvrdma_cmd_hdr hdr; + u32 cq_handle; + u32 cqe; +}; + +struct pvrdma_cmd_resize_cq_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 cqe; + u8 reserved[4]; +}; + +struct pvrdma_cmd_destroy_cq { + struct pvrdma_cmd_hdr hdr; + u32 cq_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_qp 
{ + struct pvrdma_cmd_hdr hdr; + u64 pdir_dma; + u32 pd_handle; + u32 send_cq_handle; + u32 recv_cq_handle; + u32 srq_handle; + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; + u32 lkey; + u32 access_flags; + u16 total_chunks; + u16 send_chunks; + u16 max_atomic_arg; + u8 sq_sig_all; + u8 qp_type; + u8 is_srq; + u8 reserved[3]; +}; + +struct pvrdma_cmd_create_qp_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 qpn; + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; +}; + +struct pvrdma_cmd_modify_qp { + struct pvrdma_cmd_hdr hdr; + u32 qp_handle; + u32 attr_mask; + struct pvrdma_qp_attr attrs; +}; + +struct pvrdma_cmd_query_qp { + struct pvrdma_cmd_hdr hdr; + u32 qp_handle; + u32 attr_mask; +}; + +struct pvrdma_cmd_query_qp_resp { + struct pvrdma_cmd_resp_hdr hdr; + struct pvrdma_qp_attr attrs; +}; + +struct pvrdma_cmd_destroy_qp { + struct pvrdma_cmd_hdr hdr; + u32 qp_handle; + u8 reserved[4]; +}; + +struct pvrdma_cmd_destroy_qp_resp { + struct pvrdma_cmd_resp_hdr hdr; + u32 events_reported; + u8 reserved[4]; +}; + +struct pvrdma_cmd_create_bind { + struct pvrdma_cmd_hdr hdr; + u32 mtu; + u32 vlan; + u32 index; + u8 new_gid[16]; + u8 gid_type; + u8 reserved[3]; +}; + +struct pvrdma_cmd_destroy_bind { + struct pvrdma_cmd_hdr hdr; + u32 index; + u8 dest_gid[16]; + u8 reserved[4]; +}; + +union pvrdma_cmd_req { + struct pvrdma_cmd_hdr hdr; + struct pvrdma_cmd_query_port query_port; + struct pvrdma_cmd_query_pkey query_pkey; + struct pvrdma_cmd_create_uc create_uc; + struct pvrdma_cmd_destroy_uc destroy_uc; + struct pvrdma_cmd_create_pd create_pd; + struct pvrdma_cmd_destroy_pd destroy_pd; + struct pvrdma_cmd_create_mr create_mr; + struct pvrdma_cmd_destroy_mr destroy_mr; + struct pvrdma_cmd_create_cq create_cq; + struct pvrdma_cmd_resize_cq resize_cq; + struct pvrdma_cmd_destroy_cq destroy_cq; + struct pvrdma_cmd_create_qp create_qp; + struct pvrdma_cmd_modify_qp modify_qp; + struct pvrdma_cmd_query_qp query_qp; + struct pvrdma_cmd_destroy_qp destroy_qp; + struct pvrdma_cmd_create_bind create_bind; + struct pvrdma_cmd_destroy_bind destroy_bind; +}; + +union pvrdma_cmd_resp { + struct pvrdma_cmd_resp_hdr hdr; + struct pvrdma_cmd_query_port_resp query_port_resp; + struct pvrdma_cmd_query_pkey_resp query_pkey_resp; + struct pvrdma_cmd_create_uc_resp create_uc_resp; + struct pvrdma_cmd_create_pd_resp create_pd_resp; + struct pvrdma_cmd_create_mr_resp create_mr_resp; + struct pvrdma_cmd_create_cq_resp create_cq_resp; + struct pvrdma_cmd_resize_cq_resp resize_cq_resp; + struct pvrdma_cmd_create_qp_resp create_qp_resp; + struct pvrdma_cmd_query_qp_resp query_qp_resp; + struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp; +}; + +#endif /* __PVRDMA_DEV_API_H__ */ diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c new file mode 100644 index 000000000000..bf51357ea3aa --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +#include "pvrdma.h" + +int pvrdma_uar_table_init(struct pvrdma_dev *dev) +{ + u32 num = dev->dsr->caps.max_uar; + u32 mask = num - 1; + struct pvrdma_id_table *tbl = &dev->uar_table.tbl; + + if (!is_power_of_2(num)) + return -EINVAL; + + tbl->last = 0; + tbl->top = 0; + tbl->max = num; + tbl->mask = mask; + spin_lock_init(&tbl->lock); + tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); + if (!tbl->table) + return -ENOMEM; + + /* 0th UAR is taken by the device. 
*/ + set_bit(0, tbl->table); + + return 0; +} + +void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev) +{ + struct pvrdma_id_table *tbl = &dev->uar_table.tbl; + + kfree(tbl->table); +} + +int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) +{ + struct pvrdma_id_table *tbl; + unsigned long flags; + u32 obj; + + tbl = &dev->uar_table.tbl; + + spin_lock_irqsave(&tbl->lock, flags); + obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last); + if (obj >= tbl->max) { + tbl->top = (tbl->top + tbl->max) & tbl->mask; + obj = find_first_zero_bit(tbl->table, tbl->max); + } + + if (obj >= tbl->max) { + spin_unlock_irqrestore(&tbl->lock, flags); + return -ENOMEM; + } + + set_bit(obj, tbl->table); + obj |= tbl->top; + + spin_unlock_irqrestore(&tbl->lock, flags); + + uar->index = obj; + uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >> + PAGE_SHIFT) + uar->index; + + return 0; +} + +void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) +{ + struct pvrdma_id_table *tbl = &dev->uar_table.tbl; + unsigned long flags; + u32 obj; + + obj = uar->index & (tbl->max - 1); + spin_lock_irqsave(&tbl->lock, flags); + clear_bit(obj, tbl->table); + tbl->last = min(tbl->last, obj); + tbl->top = (tbl->top + tbl->max) & tbl->mask; + spin_unlock_irqrestore(&tbl->lock, flags); +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c new file mode 100644 index 000000000000..231a1ce1f4be --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -0,0 +1,1211 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pvrdma.h" + +#define DRV_NAME "vmw_pvrdma" +#define DRV_VERSION "1.0.0.0-k" + +static DEFINE_MUTEX(pvrdma_device_list_lock); +static LIST_HEAD(pvrdma_device_list); +static struct workqueue_struct *event_wq; + +static int pvrdma_add_gid(struct ib_device *ibdev, + u8 port_num, + unsigned int index, + const union ib_gid *gid, + const struct ib_gid_attr *attr, + void **context); +static int pvrdma_del_gid(struct ib_device *ibdev, + u8 port_num, + unsigned int index, + void **context); + + +static ssize_t show_hca(struct device *device, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION); +} + +static ssize_t show_rev(struct device *device, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", PVRDMA_REV_ID); +} + +static ssize_t show_board(struct device *device, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", PVRDMA_BOARD_ID); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); + +static struct device_attribute *pvrdma_class_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type, + &dev_attr_board_id +}; + +static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str, + size_t str_len) +{ + struct pvrdma_dev *dev = + container_of(device, struct pvrdma_dev, ib_dev); + snprintf(str, str_len, "%d.%d.%d\n", + (int) (dev->dsr->caps.fw_ver >> 32), + (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff, + (int) dev->dsr->caps.fw_ver & 0xffff); +} + +static int pvrdma_init_device(struct pvrdma_dev *dev) +{ + /* Initialize some device related stuff */ + spin_lock_init(&dev->cmd_lock); + sema_init(&dev->cmd_sema, 1); + atomic_set(&dev->num_qps, 0); + atomic_set(&dev->num_cqs, 0); + atomic_set(&dev->num_pds, 0); + atomic_set(&dev->num_ahs, 0); + + return 0; +} + +static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = pvrdma_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + return 0; +} + +static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev, + u8 port_num) +{ + struct net_device *netdev; + struct pvrdma_dev *dev = to_vdev(ibdev); + + if (port_num != 1) + return NULL; + + rcu_read_lock(); + netdev = dev->netdev; + if (netdev) + dev_hold(netdev); + rcu_read_unlock(); + + return netdev; +} + +static int pvrdma_register_device(struct pvrdma_dev *dev) +{ + int ret = -1; + int i = 0; + + strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX); + dev->ib_dev.node_guid = dev->dsr->caps.node_guid; + 
dev->sys_image_guid = dev->dsr->caps.sys_image_guid; + dev->flags = 0; + dev->ib_dev.owner = THIS_MODULE; + dev->ib_dev.num_comp_vectors = 1; + dev->ib_dev.dma_device = &dev->pdev->dev; + dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION; + dev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_SEND) | + (1ull << IB_USER_VERBS_CMD_POST_RECV) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH); + + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt; + + dev->ib_dev.query_device = pvrdma_query_device; + dev->ib_dev.query_port = pvrdma_query_port; + dev->ib_dev.query_gid = pvrdma_query_gid; + dev->ib_dev.query_pkey = pvrdma_query_pkey; + dev->ib_dev.modify_port = pvrdma_modify_port; + dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext; + dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext; + dev->ib_dev.mmap = pvrdma_mmap; + dev->ib_dev.alloc_pd = pvrdma_alloc_pd; + dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd; + dev->ib_dev.create_ah = pvrdma_create_ah; + dev->ib_dev.destroy_ah = pvrdma_destroy_ah; + dev->ib_dev.create_qp = pvrdma_create_qp; + dev->ib_dev.modify_qp = pvrdma_modify_qp; + dev->ib_dev.query_qp = pvrdma_query_qp; + dev->ib_dev.destroy_qp = pvrdma_destroy_qp; + dev->ib_dev.post_send = pvrdma_post_send; + dev->ib_dev.post_recv = pvrdma_post_recv; + dev->ib_dev.create_cq = pvrdma_create_cq; + dev->ib_dev.modify_cq = pvrdma_modify_cq; + dev->ib_dev.resize_cq = pvrdma_resize_cq; + dev->ib_dev.destroy_cq = pvrdma_destroy_cq; + dev->ib_dev.poll_cq = pvrdma_poll_cq; + dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq; + dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr; + dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr; + dev->ib_dev.dereg_mr = pvrdma_dereg_mr; + dev->ib_dev.alloc_mr = pvrdma_alloc_mr; + dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg; + dev->ib_dev.add_gid = pvrdma_add_gid; + dev->ib_dev.del_gid = pvrdma_del_gid; + dev->ib_dev.get_netdev = pvrdma_get_netdev; + dev->ib_dev.get_port_immutable = pvrdma_port_immutable; + dev->ib_dev.get_link_layer = pvrdma_port_link_layer; + dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str; + + mutex_init(&dev->port_mutex); + spin_lock_init(&dev->desc_lock); + + dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *), + GFP_KERNEL); + if (!dev->cq_tbl) + return ret; + spin_lock_init(&dev->cq_tbl_lock); + + dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *), + GFP_KERNEL); + if (!dev->qp_tbl) + goto err_cq_free; + spin_lock_init(&dev->qp_tbl_lock); + + ret = ib_register_device(&dev->ib_dev, NULL); + if (ret) + goto err_qp_free; + + for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) { + ret = device_create_file(&dev->ib_dev.dev, + pvrdma_class_attributes[i]); + if (ret) + goto err_class; + } + + dev->ib_active = true; + + return 0; + +err_class: + 
ib_unregister_device(&dev->ib_dev); +err_qp_free: + kfree(dev->qp_tbl); +err_cq_free: + kfree(dev->cq_tbl); + + return ret; +} + +static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id) +{ + u32 icr = PVRDMA_INTR_CAUSE_RESPONSE; + struct pvrdma_dev *dev = dev_id; + + dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n"); + + if (dev->intr.type != PVRDMA_INTR_TYPE_MSIX) { + /* Legacy intr */ + icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR); + if (icr == 0) + return IRQ_NONE; + } + + if (icr == PVRDMA_INTR_CAUSE_RESPONSE) + complete(&dev->cmd_done); + + return IRQ_HANDLED; +} + +static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type) +{ + struct pvrdma_qp *qp; + unsigned long flags; + + spin_lock_irqsave(&dev->qp_tbl_lock, flags); + qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp]; + if (qp) + atomic_inc(&qp->refcnt); + spin_unlock_irqrestore(&dev->qp_tbl_lock, flags); + + if (qp && qp->ibqp.event_handler) { + struct ib_qp *ibqp = &qp->ibqp; + struct ib_event e; + + e.device = ibqp->device; + e.element.qp = ibqp; + e.event = type; /* 1:1 mapping for now. */ + ibqp->event_handler(&e, ibqp->qp_context); + } + if (qp) { + atomic_dec(&qp->refcnt); + if (atomic_read(&qp->refcnt) == 0) + wake_up(&qp->wait); + } +} + +static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type) +{ + struct pvrdma_cq *cq; + unsigned long flags; + + spin_lock_irqsave(&dev->cq_tbl_lock, flags); + cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq]; + if (cq) + atomic_inc(&cq->refcnt); + spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); + + if (cq && cq->ibcq.event_handler) { + struct ib_cq *ibcq = &cq->ibcq; + struct ib_event e; + + e.device = ibcq->device; + e.element.cq = ibcq; + e.event = type; /* 1:1 mapping for now. */ + ibcq->event_handler(&e, ibcq->cq_context); + } + if (cq) { + atomic_dec(&cq->refcnt); + if (atomic_read(&cq->refcnt) == 0) + wake_up(&cq->wait); + } +} + +static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port, + enum ib_event_type event) +{ + struct ib_event ib_event; + + memset(&ib_event, 0, sizeof(ib_event)); + ib_event.device = &dev->ib_dev; + ib_event.element.port_num = port; + ib_event.event = event; + ib_dispatch_event(&ib_event); +} + +static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type) +{ + if (port < 1 || port > dev->dsr->caps.phys_port_cnt) { + dev_warn(&dev->pdev->dev, "event on port %d\n", port); + return; + } + + pvrdma_dispatch_event(dev, port, type); +} + +static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i) +{ + return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr( + &dev->async_pdir, + PAGE_SIZE + + sizeof(struct pvrdma_eqe) * i); +} + +static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id) +{ + struct pvrdma_dev *dev = dev_id; + struct pvrdma_ring *ring = &dev->async_ring_state->rx; + int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) * + PAGE_SIZE / sizeof(struct pvrdma_eqe); + unsigned int head; + + dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n"); + + /* + * Don't process events until the IB device is registered. Otherwise + * we'll try to ib_dispatch_event() on an invalid device. 
+ */ + if (!dev->ib_active) + return IRQ_HANDLED; + + while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) { + struct pvrdma_eqe *eqe; + + eqe = get_eqe(dev, head); + + switch (eqe->type) { + case PVRDMA_EVENT_QP_FATAL: + case PVRDMA_EVENT_QP_REQ_ERR: + case PVRDMA_EVENT_QP_ACCESS_ERR: + case PVRDMA_EVENT_COMM_EST: + case PVRDMA_EVENT_SQ_DRAINED: + case PVRDMA_EVENT_PATH_MIG: + case PVRDMA_EVENT_PATH_MIG_ERR: + case PVRDMA_EVENT_QP_LAST_WQE_REACHED: + pvrdma_qp_event(dev, eqe->info, eqe->type); + break; + + case PVRDMA_EVENT_CQ_ERR: + pvrdma_cq_event(dev, eqe->info, eqe->type); + break; + + case PVRDMA_EVENT_SRQ_ERR: + case PVRDMA_EVENT_SRQ_LIMIT_REACHED: + break; + + case PVRDMA_EVENT_PORT_ACTIVE: + case PVRDMA_EVENT_PORT_ERR: + case PVRDMA_EVENT_LID_CHANGE: + case PVRDMA_EVENT_PKEY_CHANGE: + case PVRDMA_EVENT_SM_CHANGE: + case PVRDMA_EVENT_CLIENT_REREGISTER: + case PVRDMA_EVENT_GID_CHANGE: + pvrdma_dev_event(dev, eqe->info, eqe->type); + break; + + case PVRDMA_EVENT_DEVICE_FATAL: + pvrdma_dev_event(dev, 1, eqe->type); + break; + + default: + break; + } + + pvrdma_idx_ring_inc(&ring->cons_head, ring_slots); + } + + return IRQ_HANDLED; +} + +static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev, + unsigned int i) +{ + return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr( + &dev->cq_pdir, + PAGE_SIZE + + sizeof(struct pvrdma_cqne) * i); +} + +static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id) +{ + struct pvrdma_dev *dev = dev_id; + struct pvrdma_ring *ring = &dev->cq_ring_state->rx; + int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE / + sizeof(struct pvrdma_cqne); + unsigned int head; + unsigned long flags; + + dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n"); + + while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) { + struct pvrdma_cqne *cqne; + struct pvrdma_cq *cq; + + cqne = get_cqne(dev, head); + spin_lock_irqsave(&dev->cq_tbl_lock, flags); + cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq]; + if (cq) + atomic_inc(&cq->refcnt); + spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); + + if (cq && cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); + if (cq) { + atomic_dec(&cq->refcnt); + if (atomic_read(&cq->refcnt)) + wake_up(&cq->wait); + } + pvrdma_idx_ring_inc(&ring->cons_head, ring_slots); + } + + return IRQ_HANDLED; +} + +static void pvrdma_disable_msi_all(struct pvrdma_dev *dev) +{ + if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) + pci_disable_msix(dev->pdev); + else if (dev->intr.type == PVRDMA_INTR_TYPE_MSI) + pci_disable_msi(dev->pdev); +} + +static void pvrdma_free_irq(struct pvrdma_dev *dev) +{ + int i; + + dev_dbg(&dev->pdev->dev, "freeing interrupts\n"); + + if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) { + for (i = 0; i < dev->intr.size; i++) { + if (dev->intr.enabled[i]) { + free_irq(dev->intr.msix_entry[i].vector, dev); + dev->intr.enabled[i] = 0; + } + } + } else if (dev->intr.type == PVRDMA_INTR_TYPE_INTX || + dev->intr.type == PVRDMA_INTR_TYPE_MSI) { + free_irq(dev->pdev->irq, dev); + } +} + +static void pvrdma_enable_intrs(struct pvrdma_dev *dev) +{ + dev_dbg(&dev->pdev->dev, "enable interrupts\n"); + pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0); +} + +static void pvrdma_disable_intrs(struct pvrdma_dev *dev) +{ + dev_dbg(&dev->pdev->dev, "disable interrupts\n"); + pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0); +} + +static int pvrdma_enable_msix(struct pci_dev *pdev, struct pvrdma_dev *dev) +{ + int i; + int ret; + + for (i = 0; i < PVRDMA_MAX_INTERRUPTS; i++) { + 
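		/*
		 * Vector 0 services command responses, vector 1 the async
		 * event ring, and every remaining vector a CQ notification
		 * ring; each entry gets its handler assigned up front.
		 */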
dev->intr.msix_entry[i].entry = i; + dev->intr.msix_entry[i].vector = i; + + switch (i) { + case 0: + /* CMD ring handler */ + dev->intr.handler[i] = pvrdma_intr0_handler; + break; + case 1: + /* Async event ring handler */ + dev->intr.handler[i] = pvrdma_intr1_handler; + break; + default: + /* Completion queue handler */ + dev->intr.handler[i] = pvrdma_intrx_handler; + break; + } + } + + ret = pci_enable_msix(pdev, dev->intr.msix_entry, + PVRDMA_MAX_INTERRUPTS); + if (!ret) { + dev->intr.type = PVRDMA_INTR_TYPE_MSIX; + dev->intr.size = PVRDMA_MAX_INTERRUPTS; + } else if (ret > 0) { + ret = pci_enable_msix(pdev, dev->intr.msix_entry, ret); + if (!ret) { + dev->intr.type = PVRDMA_INTR_TYPE_MSIX; + dev->intr.size = ret; + } else { + dev->intr.size = 0; + } + } + + dev_dbg(&pdev->dev, "using interrupt type %d, size %d\n", + dev->intr.type, dev->intr.size); + + return ret; +} + +static int pvrdma_alloc_intrs(struct pvrdma_dev *dev) +{ + int ret = 0; + int i; + + if (pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX) && + pvrdma_enable_msix(dev->pdev, dev)) { + /* Try MSI */ + ret = pci_enable_msi(dev->pdev); + if (!ret) { + dev->intr.type = PVRDMA_INTR_TYPE_MSI; + } else { + /* Legacy INTR */ + dev->intr.type = PVRDMA_INTR_TYPE_INTX; + } + } + + /* Request First IRQ */ + switch (dev->intr.type) { + case PVRDMA_INTR_TYPE_INTX: + case PVRDMA_INTR_TYPE_MSI: + ret = request_irq(dev->pdev->irq, pvrdma_intr0_handler, + IRQF_SHARED, DRV_NAME, dev); + if (ret) { + dev_err(&dev->pdev->dev, + "failed to request interrupt\n"); + goto disable_msi; + } + break; + case PVRDMA_INTR_TYPE_MSIX: + ret = request_irq(dev->intr.msix_entry[0].vector, + pvrdma_intr0_handler, 0, DRV_NAME, dev); + if (ret) { + dev_err(&dev->pdev->dev, + "failed to request interrupt 0\n"); + goto disable_msi; + } + dev->intr.enabled[0] = 1; + break; + default: + /* Not reached */ + break; + } + + /* For MSIX: request intr for each vector */ + if (dev->intr.size > 1) { + ret = request_irq(dev->intr.msix_entry[1].vector, + pvrdma_intr1_handler, 0, DRV_NAME, dev); + if (ret) { + dev_err(&dev->pdev->dev, + "failed to request interrupt 1\n"); + goto free_irq; + } + dev->intr.enabled[1] = 1; + + for (i = 2; i < dev->intr.size; i++) { + ret = request_irq(dev->intr.msix_entry[i].vector, + pvrdma_intrx_handler, 0, + DRV_NAME, dev); + if (ret) { + dev_err(&dev->pdev->dev, + "failed to request interrupt %d\n", i); + goto free_irq; + } + dev->intr.enabled[i] = 1; + } + } + + return 0; + +free_irq: + pvrdma_free_irq(dev); +disable_msi: + pvrdma_disable_msi_all(dev); + return ret; +} + +static void pvrdma_free_slots(struct pvrdma_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (dev->resp_slot) + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot, + dev->dsr->resp_slot_dma); + if (dev->cmd_slot) + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot, + dev->dsr->cmd_slot_dma); +} + +static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev, + const union ib_gid *gid, + int index) +{ + int ret; + union pvrdma_cmd_req req; + struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind; + + if (!dev->sgid_tbl) { + dev_warn(&dev->pdev->dev, "sgid table not initialized\n"); + return -EINVAL; + } + + memset(cmd_bind, 0, sizeof(*cmd_bind)); + cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND; + memcpy(cmd_bind->new_gid, gid->raw, 16); + cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024); + cmd_bind->vlan = 0xfff; + cmd_bind->index = index; + cmd_bind->gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V1; + + ret = pvrdma_cmd_post(dev, &req, NULL, 0); + if (ret < 0) { + 
dev_warn(&dev->pdev->dev, + "could not create binding, error: %d\n", ret); + return -EFAULT; + } + memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid)); + return 0; +} + +static int pvrdma_add_gid(struct ib_device *ibdev, + u8 port_num, + unsigned int index, + const union ib_gid *gid, + const struct ib_gid_attr *attr, + void **context) +{ + struct pvrdma_dev *dev = to_vdev(ibdev); + + return pvrdma_add_gid_at_index(dev, gid, index); +} + +static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index) +{ + int ret; + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind; + + /* Update sgid table. */ + if (!dev->sgid_tbl) { + dev_warn(&dev->pdev->dev, "sgid table not initialized\n"); + return -EINVAL; + } + + memset(cmd_dest, 0, sizeof(*cmd_dest)); + cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND; + memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16); + cmd_dest->index = index; + + ret = pvrdma_cmd_post(dev, &req, NULL, 0); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not destroy binding, error: %d\n", ret); + return ret; + } + memset(&dev->sgid_tbl[index], 0, 16); + return 0; +} + +static int pvrdma_del_gid(struct ib_device *ibdev, + u8 port_num, + unsigned int index, + void **context) +{ + struct pvrdma_dev *dev = to_vdev(ibdev); + + dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s", + index, dev->netdev->name); + + return pvrdma_del_gid_at_index(dev, index); +} + +static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev, + unsigned long event) +{ + switch (event) { + case NETDEV_REBOOT: + case NETDEV_DOWN: + pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); + break; + case NETDEV_UP: + pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + break; + default: + dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", + event, dev->ib_dev.name); + break; + } +} + +static void pvrdma_netdevice_event_work(struct work_struct *work) +{ + struct pvrdma_netdevice_work *netdev_work; + struct pvrdma_dev *dev; + + netdev_work = container_of(work, struct pvrdma_netdevice_work, work); + + mutex_lock(&pvrdma_device_list_lock); + list_for_each_entry(dev, &pvrdma_device_list, device_link) { + if (dev->netdev == netdev_work->event_netdev) { + pvrdma_netdevice_event_handle(dev, netdev_work->event); + break; + } + } + mutex_unlock(&pvrdma_device_list_lock); + + kfree(netdev_work); +} + +static int pvrdma_netdevice_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr); + struct pvrdma_netdevice_work *netdev_work; + + netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC); + if (!netdev_work) + return NOTIFY_BAD; + + INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work); + netdev_work->event_netdev = event_netdev; + netdev_work->event = event; + queue_work(event_wq, &netdev_work->work); + + return NOTIFY_DONE; +} + +static int pvrdma_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct pci_dev *pdev_net; + struct pvrdma_dev *dev; + int ret; + unsigned long start; + unsigned long len; + unsigned int version; + dma_addr_t slot_dma = 0; + + dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev)); + + /* Allocate zero-out device */ + dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev)); + if (!dev) { + dev_err(&pdev->dev, "failed to allocate IB device\n"); + return -ENOMEM; + } + + mutex_lock(&pvrdma_device_list_lock); + list_add(&dev->device_link, &pvrdma_device_list); + mutex_unlock(&pvrdma_device_list_lock); + + 
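	/*
	 * The device is put on pvrdma_device_list first;
	 * pvrdma_netdevice_event_work() walks this list to match netdev
	 * notifier events back to their pvrdma device.
	 */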
ret = pvrdma_init_device(dev); + if (ret) + goto err_free_device; + + dev->pdev = pdev; + pci_set_drvdata(pdev, dev); + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_free_device; + } + + dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n", + pci_resource_flags(pdev, 0)); + dev_dbg(&pdev->dev, "PCI resource len %#llx\n", + (unsigned long long)pci_resource_len(pdev, 0)); + dev_dbg(&pdev->dev, "PCI resource start %#llx\n", + (unsigned long long)pci_resource_start(pdev, 0)); + dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n", + pci_resource_flags(pdev, 1)); + dev_dbg(&pdev->dev, "PCI resource len %#llx\n", + (unsigned long long)pci_resource_len(pdev, 1)); + dev_dbg(&pdev->dev, "PCI resource start %#llx\n", + (unsigned long long)pci_resource_start(pdev, 1)); + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "PCI BAR region not MMIO\n"); + ret = -ENOMEM; + goto err_free_device; + } + + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "cannot request PCI resources\n"); + goto err_disable_pdev; + } + + /* Enable 64-Bit DMA */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret != 0) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); + goto err_free_resource; + } + } else { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); + goto err_free_resource; + } + } + + pci_set_master(pdev); + + /* Map register space */ + start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG); + len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG); + dev->regs = ioremap(start, len); + if (!dev->regs) { + dev_err(&pdev->dev, "register mapping failed\n"); + ret = -ENOMEM; + goto err_free_resource; + } + + /* Setup per-device UAR. */ + dev->driver_uar.index = 0; + dev->driver_uar.pfn = + pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >> + PAGE_SHIFT; + dev->driver_uar.map = + ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + if (!dev->driver_uar.map) { + dev_err(&pdev->dev, "failed to remap UAR pages\n"); + ret = -ENOMEM; + goto err_unmap_regs; + } + + version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION); + dev_info(&pdev->dev, "device version %d, driver version %d\n", + version, PVRDMA_VERSION); + if (version < PVRDMA_VERSION) { + dev_err(&pdev->dev, "incompatible device version\n"); + goto err_uar_unmap; + } + + dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr), + &dev->dsrbase, GFP_KERNEL); + if (!dev->dsr) { + dev_err(&pdev->dev, "failed to allocate shared region\n"); + ret = -ENOMEM; + goto err_uar_unmap; + } + + /* Setup the shared region */ + memset(dev->dsr, 0, sizeof(*dev->dsr)); + dev->dsr->driver_version = PVRDMA_VERSION; + dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ? + PVRDMA_GOS_BITS_32 : + PVRDMA_GOS_BITS_64; + dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX; + dev->dsr->gos_info.gos_ver = 1; + dev->dsr->uar_pfn = dev->driver_uar.pfn; + + /* Command slot. */ + dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &slot_dma, GFP_KERNEL); + if (!dev->cmd_slot) { + ret = -ENOMEM; + goto err_free_dsr; + } + + dev->dsr->cmd_slot_dma = (u64)slot_dma; + + /* Response slot. 
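	 * Like the command slot above: a page-sized coherent buffer whose
	 * DMA address is published to the device through the DSR.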
*/ + dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &slot_dma, GFP_KERNEL); + if (!dev->resp_slot) { + ret = -ENOMEM; + goto err_free_slots; + } + + dev->dsr->resp_slot_dma = (u64)slot_dma; + + /* Async event ring */ + dev->dsr->async_ring_pages.num_pages = 4; + ret = pvrdma_page_dir_init(dev, &dev->async_pdir, + dev->dsr->async_ring_pages.num_pages, true); + if (ret) + goto err_free_slots; + dev->async_ring_state = dev->async_pdir.pages[0]; + dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; + + /* CQ notification ring */ + dev->dsr->cq_ring_pages.num_pages = 4; + ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, + dev->dsr->cq_ring_pages.num_pages, true); + if (ret) + goto err_free_async_ring; + dev->cq_ring_state = dev->cq_pdir.pages[0]; + dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma; + + /* + * Write the PA of the shared region to the device. The writes must be + * ordered such that the high bits are written last. When the writes + * complete, the device will have filled out the capabilities. + */ + + pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase); + pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH, + (u32)((u64)(dev->dsrbase) >> 32)); + + /* Make sure the write is complete before reading status. */ + mb(); + + /* Currently, the driver only supports RoCE mode. */ + if (dev->dsr->caps.mode != PVRDMA_DEVICE_MODE_ROCE) { + dev_err(&pdev->dev, "unsupported transport %d\n", + dev->dsr->caps.mode); + ret = -EFAULT; + goto err_free_cq_ring; + } + + /* Currently, the driver only supports RoCE V1. */ + if (!(dev->dsr->caps.gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1)) { + dev_err(&pdev->dev, "driver needs RoCE v1 support\n"); + ret = -EFAULT; + goto err_free_cq_ring; + } + + /* Paired vmxnet3 will have same bus, slot. But func will be 0 */ + pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + if (!pdev_net) { + dev_err(&pdev->dev, "failed to find paired net device\n"); + ret = -ENODEV; + goto err_free_cq_ring; + } + + if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE || + pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) { + dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n"); + pci_dev_put(pdev_net); + ret = -ENODEV; + goto err_free_cq_ring; + } + + dev->netdev = pci_get_drvdata(pdev_net); + pci_dev_put(pdev_net); + if (!dev->netdev) { + dev_err(&pdev->dev, "failed to get vmxnet3 device\n"); + ret = -ENODEV; + goto err_free_cq_ring; + } + + dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name); + + /* Interrupt setup */ + ret = pvrdma_alloc_intrs(dev); + if (ret) { + dev_err(&pdev->dev, "failed to allocate interrupts\n"); + ret = -ENOMEM; + goto err_netdevice; + } + + /* Allocate UAR table. */ + ret = pvrdma_uar_table_init(dev); + if (ret) { + dev_err(&pdev->dev, "failed to allocate UAR table\n"); + ret = -ENOMEM; + goto err_free_intrs; + } + + /* Allocate GID table */ + dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len, + sizeof(union ib_gid), GFP_KERNEL); + if (!dev->sgid_tbl) { + ret = -ENOMEM; + goto err_free_uar_table; + } + dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len); + + pvrdma_enable_intrs(dev); + + /* Activate pvrdma device */ + pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE); + + /* Make sure the write is complete before reading status. 
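	 * The PVRDMA_DEVICE_CTL_ACTIVATE write above has to reach the device
	 * before PVRDMA_REG_ERR is read back below.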
*/ + mb(); + + /* Check if device was successfully activated */ + ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR); + if (ret != 0) { + dev_err(&pdev->dev, "failed to activate device\n"); + ret = -EFAULT; + goto err_disable_intr; + } + + /* Register IB device */ + ret = pvrdma_register_device(dev); + if (ret) { + dev_err(&pdev->dev, "failed to register IB device\n"); + goto err_disable_intr; + } + + dev->nb_netdev.notifier_call = pvrdma_netdevice_event; + ret = register_netdevice_notifier(&dev->nb_netdev); + if (ret) { + dev_err(&pdev->dev, "failed to register netdevice events\n"); + goto err_unreg_ibdev; + } + + dev_info(&pdev->dev, "attached to device\n"); + return 0; + +err_unreg_ibdev: + ib_unregister_device(&dev->ib_dev); +err_disable_intr: + pvrdma_disable_intrs(dev); + kfree(dev->sgid_tbl); +err_free_uar_table: + pvrdma_uar_table_cleanup(dev); +err_free_intrs: + pvrdma_free_irq(dev); + pvrdma_disable_msi_all(dev); +err_netdevice: + unregister_netdevice_notifier(&dev->nb_netdev); +err_free_cq_ring: + pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); +err_free_async_ring: + pvrdma_page_dir_cleanup(dev, &dev->async_pdir); +err_free_slots: + pvrdma_free_slots(dev); +err_free_dsr: + dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr, + dev->dsrbase); +err_uar_unmap: + iounmap(dev->driver_uar.map); +err_unmap_regs: + iounmap(dev->regs); +err_free_resource: + pci_release_regions(pdev); +err_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +err_free_device: + mutex_lock(&pvrdma_device_list_lock); + list_del(&dev->device_link); + mutex_unlock(&pvrdma_device_list_lock); + ib_dealloc_device(&dev->ib_dev); + return ret; +} + +static void pvrdma_pci_remove(struct pci_dev *pdev) +{ + struct pvrdma_dev *dev = pci_get_drvdata(pdev); + + if (!dev) + return; + + dev_info(&pdev->dev, "detaching from device\n"); + + unregister_netdevice_notifier(&dev->nb_netdev); + dev->nb_netdev.notifier_call = NULL; + + flush_workqueue(event_wq); + + /* Unregister ib device */ + ib_unregister_device(&dev->ib_dev); + + mutex_lock(&pvrdma_device_list_lock); + list_del(&dev->device_link); + mutex_unlock(&pvrdma_device_list_lock); + + pvrdma_disable_intrs(dev); + pvrdma_free_irq(dev); + pvrdma_disable_msi_all(dev); + + /* Deactivate pvrdma device */ + pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET); + pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); + pvrdma_page_dir_cleanup(dev, &dev->async_pdir); + pvrdma_free_slots(dev); + + iounmap(dev->regs); + kfree(dev->sgid_tbl); + kfree(dev->cq_tbl); + kfree(dev->qp_tbl); + pvrdma_uar_table_cleanup(dev); + iounmap(dev->driver_uar.map); + + ib_dealloc_device(&dev->ib_dev); + + /* Free pci resources */ + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_device_id pvrdma_pci_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(pci, pvrdma_pci_table); + +static struct pci_driver pvrdma_driver = { + .name = DRV_NAME, + .id_table = pvrdma_pci_table, + .probe = pvrdma_pci_probe, + .remove = pvrdma_pci_remove, +}; + +static int __init pvrdma_init(void) +{ + int err; + + event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM); + if (!event_wq) + return -ENOMEM; + + err = pci_register_driver(&pvrdma_driver); + if (err) + destroy_workqueue(event_wq); + + return err; +} + +static void __exit pvrdma_cleanup(void) +{ + pci_unregister_driver(&pvrdma_driver); + + destroy_workqueue(event_wq); +} + +module_init(pvrdma_init); 
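/*
 * The ordered event workqueue is created before the PCI driver is
 * registered so that netdevice notifier work can always be queued, and it
 * is destroyed only after the driver has been unregistered.
 */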
+module_exit(pvrdma_cleanup); + +MODULE_AUTHOR("VMware, Inc"); +MODULE_DESCRIPTION("VMware Paravirtual RDMA driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c new file mode 100644 index 000000000000..948b5ccd2a70 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include "pvrdma.h" + +int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir, + u64 npages, bool alloc_pages) +{ + u64 i; + + if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) + return -EINVAL; + + memset(pdir, 0, sizeof(*pdir)); + + pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &pdir->dir_dma, GFP_KERNEL); + if (!pdir->dir) + goto err; + + pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; + pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables), + GFP_KERNEL); + if (!pdir->tables) + goto err; + + for (i = 0; i < pdir->ntables; i++) { + pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, + (dma_addr_t *)&pdir->dir[i], + GFP_KERNEL); + if (!pdir->tables[i]) + goto err; + } + + pdir->npages = npages; + + if (alloc_pages) { + pdir->pages = kcalloc(npages, sizeof(*pdir->pages), + GFP_KERNEL); + if (!pdir->pages) + goto err; + + for (i = 0; i < pdir->npages; i++) { + dma_addr_t page_dma; + + pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev, + PAGE_SIZE, + &page_dma, + GFP_KERNEL); + if (!pdir->pages[i]) + goto err; + + pvrdma_page_dir_insert_dma(pdir, i, page_dma); + } + } + + return 0; + +err: + pvrdma_page_dir_cleanup(dev, pdir); + + return -ENOMEM; +} + +static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx) +{ + return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)]; +} + +dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx) +{ + return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)]; +} + +static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev, + struct pvrdma_page_dir *pdir) +{ + if (pdir->pages) { + u64 i; + + for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { + dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i); + + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + pdir->pages[i], page_dma); + } + + kfree(pdir->pages); + } +} + +static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev, + struct pvrdma_page_dir *pdir) +{ + if (pdir->tables) { + int i; + + pvrdma_page_dir_cleanup_pages(dev, pdir); + + for (i = 0; i < pdir->ntables; i++) { + u64 *table = pdir->tables[i]; + + if (table) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + table, pdir->dir[i]); + } + + kfree(pdir->tables); + } +} + +void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev, + struct pvrdma_page_dir *pdir) +{ + if (pdir->dir) { + pvrdma_page_dir_cleanup_tables(dev, pdir); + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + pdir->dir, pdir->dir_dma); + } +} + +int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx, + dma_addr_t daddr) +{ + u64 *table; + + if (idx >= pdir->npages) + return -EINVAL; + + table = pvrdma_page_dir_table(pdir, idx); + table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr; + + return 0; +} + +int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir, + struct ib_umem *umem, u64 offset) +{ + u64 i = offset; + int j, entry; + int ret = 0, len = 0; + struct scatterlist *sg; + + if (offset >= pdir->npages) + return -EINVAL; + + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { + len = sg_dma_len(sg) >> PAGE_SHIFT; + for (j = 0; j < len; j++) { + dma_addr_t addr = sg_dma_address(sg) + + umem->page_size * j; + + ret = pvrdma_page_dir_insert_dma(pdir, i, addr); + if (ret) + goto exit; + + i++; + } + } + +exit: + return ret; +} + +int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir, + u64 *page_list, + int num_pages) +{ + int i; + int ret; + + if (num_pages > pdir->npages) + return -EINVAL; + + for (i = 0; i < 
num_pages; i++) { + ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]); + if (ret) + return ret; + } + + return 0; +} + +void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src) +{ + dst->max_send_wr = src->max_send_wr; + dst->max_recv_wr = src->max_recv_wr; + dst->max_send_sge = src->max_send_sge; + dst->max_recv_sge = src->max_recv_sge; + dst->max_inline_data = src->max_inline_data; +} + +void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src) +{ + dst->max_send_wr = src->max_send_wr; + dst->max_recv_wr = src->max_recv_wr; + dst->max_send_sge = src->max_send_sge; + dst->max_recv_sge = src->max_recv_sge; + dst->max_inline_data = src->max_inline_data; +} + +void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src) +{ + BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid)); + memcpy(dst, src, sizeof(*src)); +} + +void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src) +{ + BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid)); + memcpy(dst, src, sizeof(*src)); +} + +void pvrdma_global_route_to_ib(struct ib_global_route *dst, + const struct pvrdma_global_route *src) +{ + pvrdma_gid_to_ib(&dst->dgid, &src->dgid); + dst->flow_label = src->flow_label; + dst->sgid_index = src->sgid_index; + dst->hop_limit = src->hop_limit; + dst->traffic_class = src->traffic_class; +} + +void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst, + const struct ib_global_route *src) +{ + ib_gid_to_pvrdma(&dst->dgid, &src->dgid); + dst->flow_label = src->flow_label; + dst->sgid_index = src->sgid_index; + dst->hop_limit = src->hop_limit; + dst->traffic_class = src->traffic_class; +} + +void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst, + const struct pvrdma_ah_attr *src) +{ + pvrdma_global_route_to_ib(&dst->grh, &src->grh); + dst->dlid = src->dlid; + dst->sl = src->sl; + dst->src_path_bits = src->src_path_bits; + dst->static_rate = src->static_rate; + dst->ah_flags = src->ah_flags; + dst->port_num = src->port_num; + memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac)); +} + +void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst, + const struct ib_ah_attr *src) +{ + ib_global_route_to_pvrdma(&dst->grh, &src->grh); + dst->dlid = src->dlid; + dst->sl = src->sl; + dst->src_path_bits = src->src_path_bits; + dst->static_rate = src->static_rate; + dst->ah_flags = src->ah_flags; + dst->port_num = src->port_num; + memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac)); +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c new file mode 100644 index 000000000000..8519f3212e52 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. 
+ * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "pvrdma.h" + +/** + * pvrdma_get_dma_mr - get a DMA memory region + * @pd: protection domain + * @acc: access flags + * + * @return: ib_mr pointer on success, otherwise returns an errno. + */ +struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct pvrdma_dev *dev = to_vdev(pd->device); + struct pvrdma_user_mr *mr; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_mr *cmd = &req.create_mr; + struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp; + int ret; + + /* Support only LOCAL_WRITE flag for DMA MRs */ + if (acc & ~IB_ACCESS_LOCAL_WRITE) { + dev_warn(&dev->pdev->dev, + "unsupported dma mr access flags %#x\n", acc); + return ERR_PTR(-EOPNOTSUPP); + } + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR; + cmd->pd_handle = to_vpd(pd)->pd_handle; + cmd->access_flags = acc; + cmd->flags = PVRDMA_MR_FLAG_DMA; + + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not get DMA mem region, error: %d\n", ret); + kfree(mr); + return ERR_PTR(ret); + } + + mr->mmr.mr_handle = resp->mr_handle; + mr->ibmr.lkey = resp->lkey; + mr->ibmr.rkey = resp->rkey; + + return &mr->ibmr; +} + +/** + * pvrdma_reg_user_mr - register a userspace memory region + * @pd: protection domain + * @start: starting address + * @length: length of region + * @virt_addr: I/O virtual address + * @access_flags: access flags for memory region + * @udata: user data + * + * @return: ib_mr pointer on success, otherwise returns an errno. 
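 *
 * The user buffer is pinned with ib_umem_get() and its DMA addresses are
 * loaded into a page directory whose root is passed to the device in the
 * CREATE_MR command.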
+ */ +struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct pvrdma_dev *dev = to_vdev(pd->device); + struct pvrdma_user_mr *mr = NULL; + struct ib_umem *umem; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_mr *cmd = &req.create_mr; + struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp; + int nchunks; + int ret; + int entry; + struct scatterlist *sg; + + if (length == 0 || length > dev->dsr->caps.max_mr_size) { + dev_warn(&dev->pdev->dev, "invalid mem region length\n"); + return ERR_PTR(-EINVAL); + } + + umem = ib_umem_get(pd->uobject->context, start, + length, access_flags, 0); + if (IS_ERR(umem)) { + dev_warn(&dev->pdev->dev, + "could not get umem for mem region\n"); + return ERR_CAST(umem); + } + + nchunks = 0; + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) + nchunks += sg_dma_len(sg) >> PAGE_SHIFT; + + if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) { + dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n", + nchunks); + ret = -EINVAL; + goto err_umem; + } + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + ret = -ENOMEM; + goto err_umem; + } + + mr->mmr.iova = virt_addr; + mr->mmr.size = length; + mr->umem = umem; + + ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false); + if (ret) { + dev_warn(&dev->pdev->dev, + "could not allocate page directory\n"); + goto err_umem; + } + + ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); + if (ret) + goto err_pdir; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR; + cmd->start = start; + cmd->length = length; + cmd->pd_handle = to_vpd(pd)->pd_handle; + cmd->access_flags = access_flags; + cmd->nchunks = nchunks; + cmd->pdir_dma = mr->pdir.dir_dma; + + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not register mem region, error: %d\n", ret); + goto err_pdir; + } + + mr->mmr.mr_handle = resp->mr_handle; + mr->ibmr.lkey = resp->lkey; + mr->ibmr.rkey = resp->rkey; + + return &mr->ibmr; + +err_pdir: + pvrdma_page_dir_cleanup(dev, &mr->pdir); +err_umem: + ib_umem_release(umem); + kfree(mr); + + return ERR_PTR(ret); +} + +/** + * pvrdma_alloc_mr - allocate a memory region + * @pd: protection domain + * @mr_type: type of memory region + * @max_num_sg: maximum number of pages + * + * @return: ib_mr pointer on success, otherwise returns an errno. 
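 *
 * Only IB_MR_TYPE_MEM_REG is supported and max_num_sg is capped at
 * PVRDMA_MAX_FAST_REG_PAGES; the region is created with the
 * PVRDMA_MR_FLAG_FRMR flag set.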
+ */ +struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ + struct pvrdma_dev *dev = to_vdev(pd->device); + struct pvrdma_user_mr *mr; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_mr *cmd = &req.create_mr; + struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp; + int size = max_num_sg * sizeof(u64); + int ret; + + if (mr_type != IB_MR_TYPE_MEM_REG || + max_num_sg > PVRDMA_MAX_FAST_REG_PAGES) + return ERR_PTR(-EINVAL); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->pages = kzalloc(size, GFP_KERNEL); + if (!mr->pages) { + ret = -ENOMEM; + goto freemr; + } + + ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false); + if (ret) { + dev_warn(&dev->pdev->dev, + "failed to allocate page dir for mr\n"); + ret = -ENOMEM; + goto freepages; + } + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR; + cmd->pd_handle = to_vpd(pd)->pd_handle; + cmd->access_flags = 0; + cmd->flags = PVRDMA_MR_FLAG_FRMR; + cmd->nchunks = max_num_sg; + + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not create FR mem region, error: %d\n", ret); + goto freepdir; + } + + mr->max_pages = max_num_sg; + mr->mmr.mr_handle = resp->mr_handle; + mr->ibmr.lkey = resp->lkey; + mr->ibmr.rkey = resp->rkey; + mr->page_shift = PAGE_SHIFT; + mr->umem = NULL; + + return &mr->ibmr; + +freepdir: + pvrdma_page_dir_cleanup(dev, &mr->pdir); +freepages: + kfree(mr->pages); +freemr: + kfree(mr); + return ERR_PTR(ret); +} + +/** + * pvrdma_dereg_mr - deregister a memory region + * @ibmr: memory region + * + * @return: 0 on success. + */ +int pvrdma_dereg_mr(struct ib_mr *ibmr) +{ + struct pvrdma_user_mr *mr = to_vmr(ibmr); + struct pvrdma_dev *dev = to_vdev(ibmr->device); + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr; + int ret; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR; + cmd->mr_handle = mr->mmr.mr_handle; + ret = pvrdma_cmd_post(dev, &req, NULL, 0); + if (ret < 0) + dev_warn(&dev->pdev->dev, + "could not deregister mem region, error: %d\n", ret); + + pvrdma_page_dir_cleanup(dev, &mr->pdir); + if (mr->umem) + ib_umem_release(mr->umem); + + kfree(mr->pages); + kfree(mr); + + return 0; +} + +static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct pvrdma_user_mr *mr = to_vmr(ibmr); + + if (mr->npages == mr->max_pages) + return -ENOMEM; + + mr->pages[mr->npages++] = addr; + return 0; +} + +int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset) +{ + struct pvrdma_user_mr *mr = to_vmr(ibmr); + struct pvrdma_dev *dev = to_vdev(ibmr->device); + int ret; + + mr->npages = 0; + + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page); + if (ret < 0) + dev_warn(&dev->pdev->dev, "could not map sg to pages\n"); + + return ret; +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c new file mode 100644 index 000000000000..c8c01e558125 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -0,0 +1,972 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. 
This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "pvrdma.h" + +static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, + struct pvrdma_cq **recv_cq) +{ + *send_cq = to_vcq(qp->ibqp.send_cq); + *recv_cq = to_vcq(qp->ibqp.recv_cq); +} + +static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq, + unsigned long *scq_flags, + unsigned long *rcq_flags) + __acquires(scq->cq_lock) __acquires(rcq->cq_lock) +{ + if (scq == rcq) { + spin_lock_irqsave(&scq->cq_lock, *scq_flags); + __acquire(rcq->cq_lock); + } else if (scq->cq_handle < rcq->cq_handle) { + spin_lock_irqsave(&scq->cq_lock, *scq_flags); + spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags, + SINGLE_DEPTH_NESTING); + } else { + spin_lock_irqsave(&rcq->cq_lock, *rcq_flags); + spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags, + SINGLE_DEPTH_NESTING); + } +} + +static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq, + unsigned long *scq_flags, + unsigned long *rcq_flags) + __releases(scq->cq_lock) __releases(rcq->cq_lock) +{ + if (scq == rcq) { + __release(rcq->cq_lock); + spin_unlock_irqrestore(&scq->cq_lock, *scq_flags); + } else if (scq->cq_handle < rcq->cq_handle) { + spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags); + spin_unlock_irqrestore(&scq->cq_lock, *scq_flags); + } else { + spin_unlock_irqrestore(&scq->cq_lock, *scq_flags); + spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags); + } +} + +static void pvrdma_reset_qp(struct pvrdma_qp *qp) +{ + struct pvrdma_cq *scq, *rcq; + unsigned long scq_flags, rcq_flags; + + /* Clean up cqes */ + get_cqs(qp, &scq, &rcq); + pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags); + + _pvrdma_flush_cqe(qp, scq); + if (scq != rcq) + _pvrdma_flush_cqe(qp, rcq); + + pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags); + + /* + * Reset queuepair. The checks are because usermode queuepairs won't + * have kernel ringstates. 
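	 * Only kernel-created QPs set rq.ring/sq.ring, hence the NULL
	 * checks; the producer and consumer indices are simply rewound
	 * to zero.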
+ */ + if (qp->rq.ring) { + atomic_set(&qp->rq.ring->cons_head, 0); + atomic_set(&qp->rq.ring->prod_tail, 0); + } + if (qp->sq.ring) { + atomic_set(&qp->sq.ring->cons_head, 0); + atomic_set(&qp->sq.ring->prod_tail, 0); + } +} + +static int pvrdma_set_rq_size(struct pvrdma_dev *dev, + struct ib_qp_cap *req_cap, + struct pvrdma_qp *qp) +{ + if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr || + req_cap->max_recv_sge > dev->dsr->caps.max_sge) { + dev_warn(&dev->pdev->dev, "recv queue size invalid\n"); + return -EINVAL; + } + + qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr)); + qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge)); + + /* Write back */ + req_cap->max_recv_wr = qp->rq.wqe_cnt; + req_cap->max_recv_sge = qp->rq.max_sg; + + qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) + + sizeof(struct pvrdma_sge) * + qp->rq.max_sg); + qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) / + PAGE_SIZE; + + return 0; +} + +static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, + enum ib_qp_type type, struct pvrdma_qp *qp) +{ + if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr || + req_cap->max_send_sge > dev->dsr->caps.max_sge) { + dev_warn(&dev->pdev->dev, "send queue size invalid\n"); + return -EINVAL; + } + + qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr)); + qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge)); + + /* Write back */ + req_cap->max_send_wr = qp->sq.wqe_cnt; + req_cap->max_send_sge = qp->sq.max_sg; + + qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) + + sizeof(struct pvrdma_sge) * + qp->sq.max_sg); + /* Note: one extra page for the header. */ + qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + + PAGE_SIZE - 1) / PAGE_SIZE; + + return 0; +} + +/** + * pvrdma_create_qp - create queue pair + * @pd: protection domain + * @init_attr: queue pair attributes + * @udata: user data + * + * @return: the ib_qp pointer on success, otherwise returns an errno. 
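 *
 * Only RC, UD and GSI queue pairs are accepted. GSI QPs can only be created
 * from the kernel on a valid port; user-space QPs supply their own send and
 * receive buffers, which are pinned and handed to the device through a page
 * directory.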
+ */ +struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct pvrdma_qp *qp = NULL; + struct pvrdma_dev *dev = to_vdev(pd->device); + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_qp *cmd = &req.create_qp; + struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp; + struct pvrdma_create_qp ucmd; + unsigned long flags; + int ret; + + if (init_attr->create_flags) { + dev_warn(&dev->pdev->dev, + "invalid create queuepair flags %#x\n", + init_attr->create_flags); + return ERR_PTR(-EINVAL); + } + + if (init_attr->qp_type != IB_QPT_RC && + init_attr->qp_type != IB_QPT_UD && + init_attr->qp_type != IB_QPT_GSI) { + dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n", + init_attr->qp_type); + return ERR_PTR(-EINVAL); + } + + if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp)) + return ERR_PTR(-ENOMEM); + + switch (init_attr->qp_type) { + case IB_QPT_GSI: + if (init_attr->port_num == 0 || + init_attr->port_num > pd->device->phys_port_cnt || + udata) { + dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n"); + ret = -EINVAL; + goto err_qp; + } + /* fall through */ + case IB_QPT_RC: + case IB_QPT_UD: + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + ret = -ENOMEM; + goto err_qp; + } + + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + mutex_init(&qp->mutex); + atomic_set(&qp->refcnt, 1); + init_waitqueue_head(&qp->wait); + + qp->state = IB_QPS_RESET; + + if (pd->uobject && udata) { + dev_dbg(&dev->pdev->dev, + "create queuepair from user space\n"); + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + ret = -EFAULT; + goto err_qp; + } + + /* set qp->sq.wqe_cnt, shift, buf_size.. */ + qp->rumem = ib_umem_get(pd->uobject->context, + ucmd.rbuf_addr, + ucmd.rbuf_size, 0, 0); + if (IS_ERR(qp->rumem)) { + ret = PTR_ERR(qp->rumem); + goto err_qp; + } + + qp->sumem = ib_umem_get(pd->uobject->context, + ucmd.sbuf_addr, + ucmd.sbuf_size, 0, 0); + if (IS_ERR(qp->sumem)) { + ib_umem_release(qp->rumem); + ret = PTR_ERR(qp->sumem); + goto err_qp; + } + + qp->npages_send = ib_umem_page_count(qp->sumem); + qp->npages_recv = ib_umem_page_count(qp->rumem); + qp->npages = qp->npages_send + qp->npages_recv; + } else { + qp->is_kernel = true; + + ret = pvrdma_set_sq_size(to_vdev(pd->device), + &init_attr->cap, + init_attr->qp_type, qp); + if (ret) + goto err_qp; + + ret = pvrdma_set_rq_size(to_vdev(pd->device), + &init_attr->cap, qp); + if (ret) + goto err_qp; + + qp->npages = qp->npages_send + qp->npages_recv; + + /* Skip header page. */ + qp->sq.offset = PAGE_SIZE; + + /* Recv queue pages are after send pages. */ + qp->rq.offset = qp->npages_send * PAGE_SIZE; + } + + if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { + dev_warn(&dev->pdev->dev, + "overflow pages in queuepair\n"); + ret = -EINVAL; + goto err_umem; + } + + ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages, + qp->is_kernel); + if (ret) { + dev_warn(&dev->pdev->dev, + "could not allocate page directory\n"); + goto err_umem; + } + + if (!qp->is_kernel) { + pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0); + pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem, + qp->npages_send); + } else { + /* Ring state is always the first page. 
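			 * The send-ring state sits at the start of that page,
			 * with the receive-ring state immediately after it.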
*/ + qp->sq.ring = qp->pdir.pages[0]; + qp->rq.ring = &qp->sq.ring[1]; + } + break; + default: + ret = -EINVAL; + goto err_qp; + } + + /* Not supported */ + init_attr->cap.max_inline_data = 0; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP; + cmd->pd_handle = to_vpd(pd)->pd_handle; + cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; + cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle; + cmd->max_send_wr = init_attr->cap.max_send_wr; + cmd->max_recv_wr = init_attr->cap.max_recv_wr; + cmd->max_send_sge = init_attr->cap.max_send_sge; + cmd->max_recv_sge = init_attr->cap.max_recv_sge; + cmd->max_inline_data = init_attr->cap.max_inline_data; + cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; + cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); + cmd->access_flags = IB_ACCESS_LOCAL_WRITE; + cmd->total_chunks = qp->npages; + cmd->send_chunks = qp->npages_send - 1; + cmd->pdir_dma = qp->pdir.dir_dma; + + dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", + cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge, + cmd->max_recv_sge); + + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not create queuepair, error: %d\n", ret); + goto err_pdir; + } + + /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */ + qp->qp_handle = resp->qpn; + qp->port = init_attr->port_num; + qp->ibqp.qp_num = resp->qpn; + spin_lock_irqsave(&dev->qp_tbl_lock, flags); + dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp; + spin_unlock_irqrestore(&dev->qp_tbl_lock, flags); + + return &qp->ibqp; + +err_pdir: + pvrdma_page_dir_cleanup(dev, &qp->pdir); +err_umem: + if (pd->uobject && udata) { + if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } +err_qp: + kfree(qp); + atomic_dec(&dev->num_qps); + + return ERR_PTR(ret); +} + +static void pvrdma_free_qp(struct pvrdma_qp *qp) +{ + struct pvrdma_dev *dev = to_vdev(qp->ibqp.device); + struct pvrdma_cq *scq; + struct pvrdma_cq *rcq; + unsigned long flags, scq_flags, rcq_flags; + + /* In case cq is polling */ + get_cqs(qp, &scq, &rcq); + pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags); + + _pvrdma_flush_cqe(qp, scq); + if (scq != rcq) + _pvrdma_flush_cqe(qp, rcq); + + spin_lock_irqsave(&dev->qp_tbl_lock, flags); + dev->qp_tbl[qp->qp_handle] = NULL; + spin_unlock_irqrestore(&dev->qp_tbl_lock, flags); + + pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags); + + atomic_dec(&qp->refcnt); + wait_event(qp->wait, !atomic_read(&qp->refcnt)); + + pvrdma_page_dir_cleanup(dev, &qp->pdir); + + kfree(qp); + + atomic_dec(&dev->num_qps); +} + +/** + * pvrdma_destroy_qp - destroy a queue pair + * @qp: the queue pair to destroy + * + * @return: 0 on success. 
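 *
 * A failed DESTROY_QP command is only logged; the local queue pair state is
 * torn down and 0 is returned regardless.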
+ */ +int pvrdma_destroy_qp(struct ib_qp *qp) +{ + struct pvrdma_qp *vqp = to_vqp(qp); + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp; + int ret; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP; + cmd->qp_handle = vqp->qp_handle; + + ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0); + if (ret < 0) + dev_warn(&to_vdev(qp->device)->pdev->dev, + "destroy queuepair failed, error: %d\n", ret); + + pvrdma_free_qp(vqp); + + return 0; +} + +/** + * pvrdma_modify_qp - modify queue pair attributes + * @ibqp: the queue pair + * @attr: the new queue pair's attributes + * @attr_mask: attributes mask + * @udata: user data + * + * @returns 0 on success, otherwise returns an errno. + */ +int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct pvrdma_dev *dev = to_vdev(ibqp->device); + struct pvrdma_qp *qp = to_vqp(ibqp); + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp; + int cur_state, next_state; + int ret; + + /* Sanity checking. Should need lock here */ + mutex_lock(&qp->mutex); + cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state : + qp->state; + next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state; + + if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type, + attr_mask, IB_LINK_LAYER_ETHERNET)) { + ret = -EINVAL; + goto out; + } + + if (attr_mask & IB_QP_PORT) { + if (attr->port_num == 0 || + attr->port_num > ibqp->device->phys_port_cnt) { + ret = -EINVAL; + goto out; + } + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + if (attr->min_rnr_timer > 31) { + ret = -EINVAL; + goto out; + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + if (attr->pkey_index >= dev->dsr->caps.max_pkeys) { + ret = -EINVAL; + goto out; + } + } + + if (attr_mask & IB_QP_QKEY) + qp->qkey = attr->qkey; + + if (cur_state == next_state && cur_state == IB_QPS_RESET) { + ret = 0; + goto out; + } + + qp->state = next_state; + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP; + cmd->qp_handle = qp->qp_handle; + cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask); + cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state); + cmd->attrs.cur_qp_state = + ib_qp_state_to_pvrdma(attr->cur_qp_state); + cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu); + cmd->attrs.path_mig_state = + ib_mig_state_to_pvrdma(attr->path_mig_state); + cmd->attrs.qkey = attr->qkey; + cmd->attrs.rq_psn = attr->rq_psn; + cmd->attrs.sq_psn = attr->sq_psn; + cmd->attrs.dest_qp_num = attr->dest_qp_num; + cmd->attrs.qp_access_flags = + ib_access_flags_to_pvrdma(attr->qp_access_flags); + cmd->attrs.pkey_index = attr->pkey_index; + cmd->attrs.alt_pkey_index = attr->alt_pkey_index; + cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify; + cmd->attrs.sq_draining = attr->sq_draining; + cmd->attrs.max_rd_atomic = attr->max_rd_atomic; + cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic; + cmd->attrs.min_rnr_timer = attr->min_rnr_timer; + cmd->attrs.port_num = attr->port_num; + cmd->attrs.timeout = attr->timeout; + cmd->attrs.retry_cnt = attr->retry_cnt; + cmd->attrs.rnr_retry = attr->rnr_retry; + cmd->attrs.alt_port_num = attr->alt_port_num; + cmd->attrs.alt_timeout = attr->alt_timeout; + ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap); + ib_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr); + ib_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr); + + ret = pvrdma_cmd_post(dev, &req, &rsp, 
PVRDMA_CMD_MODIFY_QP_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not modify queuepair, error: %d\n", ret); + } else if (rsp.hdr.err > 0) { + dev_warn(&dev->pdev->dev, + "cannot modify queuepair, error: %d\n", rsp.hdr.err); + ret = -EINVAL; + } + + if (ret == 0 && next_state == IB_QPS_RESET) + pvrdma_reset_qp(qp); + +out: + mutex_unlock(&qp->mutex); + + return ret; +} + +static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) +{ + return pvrdma_page_dir_get_ptr(&qp->pdir, + qp->sq.offset + n * qp->sq.wqe_size); +} + +static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) +{ + return pvrdma_page_dir_get_ptr(&qp->pdir, + qp->rq.offset + n * qp->rq.wqe_size); +} + +static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr) +{ + struct pvrdma_user_mr *mr = to_vmr(wr->mr); + + wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova; + wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma; + wqe_hdr->wr.fast_reg.page_shift = mr->page_shift; + wqe_hdr->wr.fast_reg.page_list_len = mr->npages; + wqe_hdr->wr.fast_reg.length = mr->ibmr.length; + wqe_hdr->wr.fast_reg.access_flags = wr->access; + wqe_hdr->wr.fast_reg.rkey = wr->key; + + return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages, + mr->npages); +} + +/** + * pvrdma_post_send - post send work request entries on a QP + * @ibqp: the QP + * @wr: work request list to post + * @bad_wr: the first bad WR returned + * + * @return: 0 on success, otherwise errno returned. + */ +int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct pvrdma_qp *qp = to_vqp(ibqp); + struct pvrdma_dev *dev = to_vdev(ibqp->device); + unsigned long flags; + struct pvrdma_sq_wqe_hdr *wqe_hdr; + struct pvrdma_sge *sge; + int i, index; + int nreq; + int ret; + + /* + * In states lower than RTS, we can fail immediately. In other states, + * just post and let the device figure it out. + */ + if (qp->state < IB_QPS_RTS) { + *bad_wr = wr; + return -EINVAL; + } + + spin_lock_irqsave(&qp->sq.lock, flags); + + index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); + for (nreq = 0; wr; nreq++, wr = wr->next) { + unsigned int tail; + + if (unlikely(!pvrdma_idx_ring_has_space( + qp->sq.ring, qp->sq.wqe_cnt, &tail))) { + dev_warn_ratelimited(&dev->pdev->dev, + "send queue is full\n"); + *bad_wr = wr; + ret = -ENOMEM; + goto out; + } + + if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) { + dev_warn_ratelimited(&dev->pdev->dev, + "send SGE overflow\n"); + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + + if (unlikely(wr->opcode < 0)) { + dev_warn_ratelimited(&dev->pdev->dev, + "invalid send opcode\n"); + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + + /* + * Only support UD, RC. + * Need to check opcode table for thorough checking. 
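		 * An 'x' below marks an opcode that is valid on the given
		 * transport: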
+ * opcode _UD _UC _RC + * _SEND x x x + * _SEND_WITH_IMM x x x + * _RDMA_WRITE x x + * _RDMA_WRITE_WITH_IMM x x + * _LOCAL_INV x x + * _SEND_WITH_INV x x + * _RDMA_READ x + * _ATOMIC_CMP_AND_SWP x + * _ATOMIC_FETCH_AND_ADD x + * _MASK_ATOMIC_CMP_AND_SWP x + * _MASK_ATOMIC_FETCH_AND_ADD x + * _REG_MR x + * + */ + if (qp->ibqp.qp_type != IB_QPT_UD && + qp->ibqp.qp_type != IB_QPT_RC && + wr->opcode != IB_WR_SEND) { + dev_warn_ratelimited(&dev->pdev->dev, + "unsupported queuepair type\n"); + *bad_wr = wr; + ret = -EINVAL; + goto out; + } else if (qp->ibqp.qp_type == IB_QPT_UD || + qp->ibqp.qp_type == IB_QPT_GSI) { + if (wr->opcode != IB_WR_SEND && + wr->opcode != IB_WR_SEND_WITH_IMM) { + dev_warn_ratelimited(&dev->pdev->dev, + "invalid send opcode\n"); + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + } + + wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); + memset(wqe_hdr, 0, sizeof(*wqe_hdr)); + wqe_hdr->wr_id = wr->wr_id; + wqe_hdr->num_sge = wr->num_sge; + wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode); + wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags); + if (wr->opcode == IB_WR_SEND_WITH_IMM || + wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) + wqe_hdr->ex.imm_data = wr->ex.imm_data; + + switch (qp->ibqp.qp_type) { + case IB_QPT_GSI: + case IB_QPT_UD: + if (unlikely(!ud_wr(wr)->ah)) { + dev_warn_ratelimited(&dev->pdev->dev, + "invalid address handle\n"); + *bad_wr = wr; + ret = -EINVAL; + goto out; + } + + /* + * Use qkey from qp context if high order bit set, + * otherwise from work request. + */ + wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn; + wqe_hdr->wr.ud.remote_qkey = + ud_wr(wr)->remote_qkey & 0x80000000 ? + qp->qkey : ud_wr(wr)->remote_qkey; + wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av; + + break; + case IB_QPT_RC: + switch (wr->opcode) { + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe_hdr->wr.rdma.remote_addr = + rdma_wr(wr)->remote_addr; + wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey; + break; + case IB_WR_LOCAL_INV: + case IB_WR_SEND_WITH_INV: + wqe_hdr->ex.invalidate_rkey = + wr->ex.invalidate_rkey; + break; + case IB_WR_ATOMIC_CMP_AND_SWP: + case IB_WR_ATOMIC_FETCH_AND_ADD: + wqe_hdr->wr.atomic.remote_addr = + atomic_wr(wr)->remote_addr; + wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey; + wqe_hdr->wr.atomic.compare_add = + atomic_wr(wr)->compare_add; + if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) + wqe_hdr->wr.atomic.swap = + atomic_wr(wr)->swap; + break; + case IB_WR_REG_MR: + ret = set_reg_seg(wqe_hdr, reg_wr(wr)); + if (ret < 0) { + dev_warn_ratelimited(&dev->pdev->dev, + "Failed to set fast register work request\n"); + *bad_wr = wr; + goto out; + } + break; + default: + break; + } + + break; + default: + dev_warn_ratelimited(&dev->pdev->dev, + "invalid queuepair type\n"); + ret = -EINVAL; + *bad_wr = wr; + goto out; + } + + sge = (struct pvrdma_sge *)(wqe_hdr + 1); + for (i = 0; i < wr->num_sge; i++) { + /* Need to check wqe_size 0 or max size */ + sge->addr = wr->sg_list[i].addr; + sge->length = wr->sg_list[i].length; + sge->lkey = wr->sg_list[i].lkey; + sge++; + } + + /* Make sure wqe is written before index update */ + smp_wmb(); + + index++; + if (unlikely(index >= qp->sq.wqe_cnt)) + index = 0; + /* Update shared sq ring */ + pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, + qp->sq.wqe_cnt); + } + + ret = 0; + +out: + spin_unlock_irqrestore(&qp->sq.lock, flags); + + if (!ret) + pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle); + + return ret; +} + +/** + * pvrdma_post_receive - post 
receive work request entries on a QP + * @ibqp: the QP + * @wr: the work request list to post + * @bad_wr: the first bad WR returned + * + * @return: 0 on success, otherwise errno returned. + */ +int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct pvrdma_dev *dev = to_vdev(ibqp->device); + unsigned long flags; + struct pvrdma_qp *qp = to_vqp(ibqp); + struct pvrdma_rq_wqe_hdr *wqe_hdr; + struct pvrdma_sge *sge; + int index, nreq; + int ret = 0; + int i; + + /* + * In the RESET state, we can fail immediately. For other states, + * just post and let the device figure it out. + */ + if (qp->state == IB_QPS_RESET) { + *bad_wr = wr; + return -EINVAL; + } + + spin_lock_irqsave(&qp->rq.lock, flags); + + index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); + for (nreq = 0; wr; nreq++, wr = wr->next) { + unsigned int tail; + + if (unlikely(wr->num_sge > qp->rq.max_sg || + wr->num_sge < 0)) { + ret = -EINVAL; + *bad_wr = wr; + dev_warn_ratelimited(&dev->pdev->dev, + "recv SGE overflow\n"); + goto out; + } + + if (unlikely(!pvrdma_idx_ring_has_space( + qp->rq.ring, qp->rq.wqe_cnt, &tail))) { + ret = -ENOMEM; + *bad_wr = wr; + dev_warn_ratelimited(&dev->pdev->dev, + "recv queue full\n"); + goto out; + } + + wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); + wqe_hdr->wr_id = wr->wr_id; + wqe_hdr->num_sge = wr->num_sge; + wqe_hdr->total_len = 0; + + sge = (struct pvrdma_sge *)(wqe_hdr + 1); + for (i = 0; i < wr->num_sge; i++) { + sge->addr = wr->sg_list[i].addr; + sge->length = wr->sg_list[i].length; + sge->lkey = wr->sg_list[i].lkey; + sge++; + } + + /* Make sure wqe is written before index update */ + smp_wmb(); + + index++; + if (unlikely(index >= qp->rq.wqe_cnt)) + index = 0; + /* Update shared rq ring */ + pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, + qp->rq.wqe_cnt); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle); + + return ret; + +out: + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return ret; +} + +/** + * pvrdma_query_qp - query a queue pair's attributes + * @ibqp: the queue pair to query + * @attr: the queue pair's attributes + * @attr_mask: attributes mask + * @init_attr: initial queue pair attributes + * + * @returns 0 on success, otherwise returns an errno. 
+ */ +int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_qp_init_attr *init_attr) +{ + struct pvrdma_dev *dev = to_vdev(ibqp->device); + struct pvrdma_qp *qp = to_vqp(ibqp); + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_query_qp *cmd = &req.query_qp; + struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp; + int ret = 0; + + mutex_lock(&qp->mutex); + + if (qp->state == IB_QPS_RESET) { + attr->qp_state = IB_QPS_RESET; + goto out; + } + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP; + cmd->qp_handle = qp->qp_handle; + cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask); + + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "could not query queuepair, error: %d\n", ret); + goto out; + } + + attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state); + attr->cur_qp_state = + pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state); + attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu); + attr->path_mig_state = + pvrdma_mig_state_to_ib(resp->attrs.path_mig_state); + attr->qkey = resp->attrs.qkey; + attr->rq_psn = resp->attrs.rq_psn; + attr->sq_psn = resp->attrs.sq_psn; + attr->dest_qp_num = resp->attrs.dest_qp_num; + attr->qp_access_flags = + pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags); + attr->pkey_index = resp->attrs.pkey_index; + attr->alt_pkey_index = resp->attrs.alt_pkey_index; + attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify; + attr->sq_draining = resp->attrs.sq_draining; + attr->max_rd_atomic = resp->attrs.max_rd_atomic; + attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic; + attr->min_rnr_timer = resp->attrs.min_rnr_timer; + attr->port_num = resp->attrs.port_num; + attr->timeout = resp->attrs.timeout; + attr->retry_cnt = resp->attrs.retry_cnt; + attr->rnr_retry = resp->attrs.rnr_retry; + attr->alt_port_num = resp->attrs.alt_port_num; + attr->alt_timeout = resp->attrs.alt_timeout; + pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap); + pvrdma_ah_attr_to_ib(&attr->ah_attr, &resp->attrs.ah_attr); + pvrdma_ah_attr_to_ib(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr); + + qp->state = attr->qp_state; + + ret = 0; + +out: + attr->cur_qp_state = attr->qp_state; + + init_attr->event_handler = qp->ibqp.event_handler; + init_attr->qp_context = qp->ibqp.qp_context; + init_attr->send_cq = qp->ibqp.send_cq; + init_attr->recv_cq = qp->ibqp.recv_cq; + init_attr->srq = qp->ibqp.srq; + init_attr->xrcd = NULL; + init_attr->cap = attr->cap; + init_attr->sq_sig_type = 0; + init_attr->qp_type = qp->ibqp.qp_type; + init_attr->create_flags = 0; + init_attr->port_num = qp->port; + + mutex_unlock(&qp->mutex); + return ret; +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h new file mode 100644 index 000000000000..ed9022a91a1d --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PVRDMA_RING_H__ +#define __PVRDMA_RING_H__ + +#include + +#define PVRDMA_INVALID_IDX -1 /* Invalid index. */ + +struct pvrdma_ring { + atomic_t prod_tail; /* Producer tail. */ + atomic_t cons_head; /* Consumer head. */ +}; + +struct pvrdma_ring_state { + struct pvrdma_ring tx; /* Tx ring. */ + struct pvrdma_ring rx; /* Rx ring. */ +}; + +static inline int pvrdma_idx_valid(__u32 idx, __u32 max_elems) +{ + /* Generates fewer instructions than a less-than. */ + return (idx & ~((max_elems << 1) - 1)) == 0; +} + +static inline __s32 pvrdma_idx(atomic_t *var, __u32 max_elems) +{ + const unsigned int idx = atomic_read(var); + + if (pvrdma_idx_valid(idx, max_elems)) + return idx & (max_elems - 1); + return PVRDMA_INVALID_IDX; +} + +static inline void pvrdma_idx_ring_inc(atomic_t *var, __u32 max_elems) +{ + __u32 idx = atomic_read(var) + 1; /* Increment. */ + + idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. 
*/ + atomic_set(var, idx); +} + +static inline __s32 pvrdma_idx_ring_has_space(const struct pvrdma_ring *r, + __u32 max_elems, __u32 *out_tail) +{ + const __u32 tail = atomic_read(&r->prod_tail); + const __u32 head = atomic_read(&r->cons_head); + + if (pvrdma_idx_valid(tail, max_elems) && + pvrdma_idx_valid(head, max_elems)) { + *out_tail = tail & (max_elems - 1); + return tail != (head ^ max_elems); + } + return PVRDMA_INVALID_IDX; +} + +static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r, + __u32 max_elems, __u32 *out_head) +{ + const __u32 tail = atomic_read(&r->prod_tail); + const __u32 head = atomic_read(&r->cons_head); + + if (pvrdma_idx_valid(tail, max_elems) && + pvrdma_idx_valid(head, max_elems)) { + *out_head = head & (max_elems - 1); + return tail != head; + } + return PVRDMA_INVALID_IDX; +} + +static inline bool pvrdma_idx_ring_is_valid_idx(const struct pvrdma_ring *r, + __u32 max_elems, __u32 *idx) +{ + const __u32 tail = atomic_read(&r->prod_tail); + const __u32 head = atomic_read(&r->cons_head); + + if (pvrdma_idx_valid(tail, max_elems) && + pvrdma_idx_valid(head, max_elems) && + pvrdma_idx_valid(*idx, max_elems)) { + if (tail > head && (*idx < tail && *idx >= head)) + return true; + else if (head > tail && (*idx >= head || *idx < tail)) + return true; + } + return false; +} + +#endif /* __PVRDMA_RING_H__ */ diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c new file mode 100644 index 000000000000..54891370d18a --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "pvrdma.h" + +/** + * pvrdma_query_device - query device + * @ibdev: the device to query + * @props: the device properties + * @uhw: user data + * + * @return: 0 on success, otherwise negative errno + */ +int pvrdma_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *uhw) +{ + struct pvrdma_dev *dev = to_vdev(ibdev); + + if (uhw->inlen || uhw->outlen) + return -EINVAL; + + memset(props, 0, sizeof(*props)); + + props->fw_ver = dev->dsr->caps.fw_ver; + props->sys_image_guid = dev->dsr->caps.sys_image_guid; + props->max_mr_size = dev->dsr->caps.max_mr_size; + props->page_size_cap = dev->dsr->caps.page_size_cap; + props->vendor_id = dev->dsr->caps.vendor_id; + props->vendor_part_id = dev->pdev->device; + props->hw_ver = dev->dsr->caps.hw_ver; + props->max_qp = dev->dsr->caps.max_qp; + props->max_qp_wr = dev->dsr->caps.max_qp_wr; + props->device_cap_flags = dev->dsr->caps.device_cap_flags; + props->max_sge = dev->dsr->caps.max_sge; + props->max_cq = dev->dsr->caps.max_cq; + props->max_cqe = dev->dsr->caps.max_cqe; + props->max_mr = dev->dsr->caps.max_mr; + props->max_pd = dev->dsr->caps.max_pd; + props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom; + props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom; + props->atomic_cap = + dev->dsr->caps.atomic_ops & + (PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ? 
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = props->atomic_cap; + props->max_ah = dev->dsr->caps.max_ah; + props->max_pkeys = dev->dsr->caps.max_pkeys; + props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay; + if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) && + (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) && + (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) { + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + } + + return 0; +} + +/** + * pvrdma_query_port - query device port attributes + * @ibdev: the device to query + * @port: the port number + * @props: the device properties + * + * @return: 0 on success, otherwise negative errno + */ +int pvrdma_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct pvrdma_dev *dev = to_vdev(ibdev); + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_query_port *cmd = &req.query_port; + struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp; + int err; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT; + cmd->port_num = port; + + err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP); + if (err < 0) { + dev_warn(&dev->pdev->dev, + "could not query port, error: %d\n", err); + return err; + } + + memset(props, 0, sizeof(*props)); + + props->state = pvrdma_port_state_to_ib(resp->attrs.state); + props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu); + props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu); + props->gid_tbl_len = resp->attrs.gid_tbl_len; + props->port_cap_flags = + pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags); + props->max_msg_sz = resp->attrs.max_msg_sz; + props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr; + props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr; + props->pkey_tbl_len = resp->attrs.pkey_tbl_len; + props->lid = resp->attrs.lid; + props->sm_lid = resp->attrs.sm_lid; + props->lmc = resp->attrs.lmc; + props->max_vl_num = resp->attrs.max_vl_num; + props->sm_sl = resp->attrs.sm_sl; + props->subnet_timeout = resp->attrs.subnet_timeout; + props->init_type_reply = resp->attrs.init_type_reply; + props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width); + props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed); + props->phys_state = resp->attrs.phys_state; + + return 0; +} + +/** + * pvrdma_query_gid - query device gid + * @ibdev: the device to query + * @port: the port number + * @index: the index + * @gid: the device gid value + * + * @return: 0 on success, otherwise negative errno + */ +int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct pvrdma_dev *dev = to_vdev(ibdev); + + if (index >= dev->dsr->caps.gid_tbl_len) + return -EINVAL; + + memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid)); + + return 0; +} + +/** + * pvrdma_query_pkey - query device port's P_Key table + * @ibdev: the device to query + * @port: the port number + * @index: the index + * @pkey: the device P_Key value + * + * @return: 0 on success, otherwise negative errno + */ +int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + int err = 0; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY; + cmd->port_num = port; + cmd->index = index; + + err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp, + PVRDMA_CMD_QUERY_PKEY_RESP); + if (err < 0) { 
+ dev_warn(&to_vdev(ibdev)->pdev->dev, + "could not query pkey, error: %d\n", err); + return err; + } + + *pkey = rsp.query_pkey_resp.pkey; + + return 0; +} + +enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev, + u8 port) +{ + return IB_LINK_LAYER_ETHERNET; +} + +int pvrdma_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + unsigned long flags; + + if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | + IB_DEVICE_MODIFY_NODE_DESC)) { + dev_warn(&to_vdev(ibdev)->pdev->dev, + "unsupported device modify mask %#x\n", mask); + return -EOPNOTSUPP; + } + + if (mask & IB_DEVICE_MODIFY_NODE_DESC) { + spin_lock_irqsave(&to_vdev(ibdev)->desc_lock, flags); + memcpy(ibdev->node_desc, props->node_desc, 64); + spin_unlock_irqrestore(&to_vdev(ibdev)->desc_lock, flags); + } + + if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { + mutex_lock(&to_vdev(ibdev)->port_mutex); + to_vdev(ibdev)->sys_image_guid = + cpu_to_be64(props->sys_image_guid); + mutex_unlock(&to_vdev(ibdev)->port_mutex); + } + + return 0; +} + +/** + * pvrdma_modify_port - modify device port attributes + * @ibdev: the device to modify + * @port: the port number + * @mask: attributes to modify + * @props: the device properties + * + * @return: 0 on success, otherwise negative errno + */ +int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, + struct ib_port_modify *props) +{ + struct ib_port_attr attr; + struct pvrdma_dev *vdev = to_vdev(ibdev); + int ret; + + if (mask & ~IB_PORT_SHUTDOWN) { + dev_warn(&vdev->pdev->dev, + "unsupported port modify mask %#x\n", mask); + return -EOPNOTSUPP; + } + + mutex_lock(&vdev->port_mutex); + ret = pvrdma_query_port(ibdev, port, &attr); + if (ret) + goto out; + + vdev->port_cap_mask |= props->set_port_cap_mask; + vdev->port_cap_mask &= ~props->clr_port_cap_mask; + + if (mask & IB_PORT_SHUTDOWN) + vdev->ib_active = false; + +out: + mutex_unlock(&vdev->port_mutex); + return ret; +} + +/** + * pvrdma_alloc_ucontext - allocate ucontext + * @ibdev: the IB device + * @udata: user data + * + * @return: the ib_ucontext pointer on success, otherwise errno. 
+ */ +struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct pvrdma_dev *vdev = to_vdev(ibdev); + struct pvrdma_ucontext *context; + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_uc *cmd = &req.create_uc; + struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; + struct pvrdma_alloc_ucontext_resp uresp; + int ret; + void *ptr; + + if (!vdev->ib_active) + return ERR_PTR(-EAGAIN); + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + context->dev = vdev; + ret = pvrdma_uar_alloc(vdev, &context->uar); + if (ret) { + kfree(context); + return ERR_PTR(-ENOMEM); + } + + /* get ctx_handle from host */ + memset(cmd, 0, sizeof(*cmd)); + cmd->pfn = context->uar.pfn; + cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC; + ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP); + if (ret < 0) { + dev_warn(&vdev->pdev->dev, + "could not create ucontext, error: %d\n", ret); + ptr = ERR_PTR(ret); + goto err; + } + + context->ctx_handle = resp->ctx_handle; + + /* copy back to user */ + uresp.qp_tab_size = vdev->dsr->caps.max_qp; + ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (ret) { + pvrdma_uar_free(vdev, &context->uar); + context->ibucontext.device = ibdev; + pvrdma_dealloc_ucontext(&context->ibucontext); + return ERR_PTR(-EFAULT); + } + + return &context->ibucontext; + +err: + pvrdma_uar_free(vdev, &context->uar); + kfree(context); + return ptr; +} + +/** + * pvrdma_dealloc_ucontext - deallocate ucontext + * @ibcontext: the ucontext + * + * @return: 0 on success, otherwise errno. + */ +int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct pvrdma_ucontext *context = to_vucontext(ibcontext); + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc; + int ret; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC; + cmd->ctx_handle = context->ctx_handle; + + ret = pvrdma_cmd_post(context->dev, &req, NULL, 0); + if (ret < 0) + dev_warn(&context->dev->pdev->dev, + "destroy ucontext failed, error: %d\n", ret); + + /* Free the UAR even if the device command failed */ + pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar); + kfree(context); + + return ret; +} + +/** + * pvrdma_mmap - create mmap region + * @ibcontext: the user context + * @vma: the VMA + * + * @return: 0 on success, otherwise errno. + */ +int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct pvrdma_ucontext *context = to_vucontext(ibcontext); + unsigned long start = vma->vm_start; + unsigned long size = vma->vm_end - vma->vm_start; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + + dev_dbg(&context->dev->pdev->dev, "create mmap region\n"); + + if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) { + dev_warn(&context->dev->pdev->dev, + "invalid params for mmap region\n"); + return -EINVAL; + } + + /* Map UAR to kernel space, VM_LOCKED? */ + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, start, context->uar.pfn, size, + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/** + * pvrdma_alloc_pd - allocate protection domain + * @ibdev: the IB device + * @context: user context + * @udata: user data + * + * @return: the ib_pd protection domain pointer on success, otherwise errno. 
+ */ +struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct pvrdma_pd *pd; + struct pvrdma_dev *dev = to_vdev(ibdev); + union pvrdma_cmd_req req; + union pvrdma_cmd_resp rsp; + struct pvrdma_cmd_create_pd *cmd = &req.create_pd; + struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; + int ret; + void *ptr; + + /* Check allowed max pds */ + if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd)) + return ERR_PTR(-ENOMEM); + + pd = kmalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) { + ptr = ERR_PTR(-ENOMEM); + goto err; + } + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD; + cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0; + ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP); + if (ret < 0) { + dev_warn(&dev->pdev->dev, + "failed to allocate protection domain, error: %d\n", + ret); + ptr = ERR_PTR(ret); + goto freepd; + } + + pd->privileged = !context; + pd->pd_handle = resp->pd_handle; + pd->pdn = resp->pd_handle; + + if (context) { + if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { + dev_warn(&dev->pdev->dev, + "failed to copy back protection domain\n"); + pvrdma_dealloc_pd(&pd->ibpd); + return ERR_PTR(-EFAULT); + } + } + + /* u32 pd handle */ + return &pd->ibpd; + +freepd: + kfree(pd); +err: + atomic_dec(&dev->num_pds); + return ptr; +} + +/** + * pvrdma_dealloc_pd - deallocate protection domain + * @pd: the protection domain to be released + * + * @return: 0 on success, otherwise errno. + */ +int pvrdma_dealloc_pd(struct ib_pd *pd) +{ + struct pvrdma_dev *dev = to_vdev(pd->device); + union pvrdma_cmd_req req; + struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd; + int ret; + + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD; + cmd->pd_handle = to_vpd(pd)->pd_handle; + + ret = pvrdma_cmd_post(dev, &req, NULL, 0); + if (ret) + dev_warn(&dev->pdev->dev, + "could not dealloc protection domain, error: %d\n", + ret); + + kfree(to_vpd(pd)); + atomic_dec(&dev->num_pds); + + return 0; +} + +/** + * pvrdma_create_ah - create an address handle + * @pd: the protection domain + * @ah_attr: the attributes of the AH + * @udata: user data blob + * + * @return: the ib_ah pointer on success, otherwise errno. 
+ */ +struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata) +{ + struct pvrdma_dev *dev = to_vdev(pd->device); + struct pvrdma_ah *ah; + enum rdma_link_layer ll; + + if (!(ah_attr->ah_flags & IB_AH_GRH)) + return ERR_PTR(-EINVAL); + + ll = rdma_port_get_link_layer(pd->device, ah_attr->port_num); + + if (ll != IB_LINK_LAYER_ETHERNET || + rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) + return ERR_PTR(-EINVAL); + + if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah)) + return ERR_PTR(-ENOMEM); + + ah = kzalloc(sizeof(*ah), GFP_KERNEL); + if (!ah) { + atomic_dec(&dev->num_ahs); + return ERR_PTR(-ENOMEM); + } + + ah->av.port_pd = to_vpd(pd)->pd_handle | (ah_attr->port_num << 24); + ah->av.src_path_bits = ah_attr->src_path_bits; + ah->av.src_path_bits |= 0x80; + ah->av.gid_index = ah_attr->grh.sgid_index; + ah->av.hop_limit = ah_attr->grh.hop_limit; + ah->av.sl_tclass_flowlabel = (ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label; + memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16); + memcpy(ah->av.dmac, ah_attr->dmac, 6); + + ah->ibah.device = pd->device; + ah->ibah.pd = pd; + ah->ibah.uobject = NULL; + + return &ah->ibah; +} + +/** + * pvrdma_destroy_ah - destroy an address handle + * @ah: the address handle to destroyed + * + * @return: 0 on success. + */ +int pvrdma_destroy_ah(struct ib_ah *ah) +{ + struct pvrdma_dev *dev = to_vdev(ah->device); + + kfree(to_vah(ah)); + atomic_dec(&dev->num_ahs); + + return 0; +} diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h new file mode 100644 index 000000000000..bfbe96b56255 --- /dev/null +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PVRDMA_VERBS_H__ +#define __PVRDMA_VERBS_H__ + +#include + +union pvrdma_gid { + u8 raw[16]; + struct { + __be64 subnet_prefix; + __be64 interface_id; + } global; +}; + +enum pvrdma_link_layer { + PVRDMA_LINK_LAYER_UNSPECIFIED, + PVRDMA_LINK_LAYER_INFINIBAND, + PVRDMA_LINK_LAYER_ETHERNET, +}; + +enum pvrdma_mtu { + PVRDMA_MTU_256 = 1, + PVRDMA_MTU_512 = 2, + PVRDMA_MTU_1024 = 3, + PVRDMA_MTU_2048 = 4, + PVRDMA_MTU_4096 = 5, +}; + +static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu) +{ + switch (mtu) { + case PVRDMA_MTU_256: return 256; + case PVRDMA_MTU_512: return 512; + case PVRDMA_MTU_1024: return 1024; + case PVRDMA_MTU_2048: return 2048; + case PVRDMA_MTU_4096: return 4096; + default: return -1; + } +} + +static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu) +{ + switch (mtu) { + case 256: return PVRDMA_MTU_256; + case 512: return PVRDMA_MTU_512; + case 1024: return PVRDMA_MTU_1024; + case 2048: return PVRDMA_MTU_2048; + case 4096: + default: return PVRDMA_MTU_4096; + } +} + +enum pvrdma_port_state { + PVRDMA_PORT_NOP = 0, + PVRDMA_PORT_DOWN = 1, + PVRDMA_PORT_INIT = 2, + PVRDMA_PORT_ARMED = 3, + PVRDMA_PORT_ACTIVE = 4, + PVRDMA_PORT_ACTIVE_DEFER = 5, +}; + +enum pvrdma_port_cap_flags { + PVRDMA_PORT_SM = 1 << 1, + PVRDMA_PORT_NOTICE_SUP = 1 << 2, + PVRDMA_PORT_TRAP_SUP = 1 << 3, + PVRDMA_PORT_OPT_IPD_SUP = 1 << 4, + PVRDMA_PORT_AUTO_MIGR_SUP = 1 << 5, + PVRDMA_PORT_SL_MAP_SUP = 1 << 6, + PVRDMA_PORT_MKEY_NVRAM = 1 << 7, + PVRDMA_PORT_PKEY_NVRAM = 1 << 8, + PVRDMA_PORT_LED_INFO_SUP = 1 << 9, + PVRDMA_PORT_SM_DISABLED = 1 << 10, + PVRDMA_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, + PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, + PVRDMA_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, + PVRDMA_PORT_CM_SUP = 1 << 16, + PVRDMA_PORT_SNMP_TUNNEL_SUP = 1 << 17, + PVRDMA_PORT_REINIT_SUP = 1 << 18, + PVRDMA_PORT_DEVICE_MGMT_SUP = 1 << 19, + PVRDMA_PORT_VENDOR_CLASS_SUP = 1 << 20, + PVRDMA_PORT_DR_NOTICE_SUP = 1 << 21, + PVRDMA_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, + PVRDMA_PORT_BOOT_MGMT_SUP = 1 << 23, + PVRDMA_PORT_LINK_LATENCY_SUP = 1 << 24, + PVRDMA_PORT_CLIENT_REG_SUP = 1 << 25, + PVRDMA_PORT_IP_BASED_GIDS = 1 << 26, + PVRDMA_PORT_CAP_FLAGS_MAX = PVRDMA_PORT_IP_BASED_GIDS, +}; + +enum pvrdma_port_width { + PVRDMA_WIDTH_1X = 1, + PVRDMA_WIDTH_4X = 2, + PVRDMA_WIDTH_8X = 4, + PVRDMA_WIDTH_12X = 8, +}; + +static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width) +{ + switch (width) { + case PVRDMA_WIDTH_1X: return 1; + case PVRDMA_WIDTH_4X: return 4; + case PVRDMA_WIDTH_8X: return 8; + case PVRDMA_WIDTH_12X: return 12; + default: return -1; + } +} + +enum pvrdma_port_speed { + PVRDMA_SPEED_SDR = 1, + PVRDMA_SPEED_DDR = 2, + PVRDMA_SPEED_QDR = 4, + PVRDMA_SPEED_FDR10 = 8, + PVRDMA_SPEED_FDR = 16, + PVRDMA_SPEED_EDR = 32, +}; + +struct pvrdma_port_attr { + enum pvrdma_port_state state; + enum pvrdma_mtu max_mtu; + enum pvrdma_mtu active_mtu; + u32 gid_tbl_len; + u32 port_cap_flags; + u32 max_msg_sz; + u32 bad_pkey_cntr; + u32 
qkey_viol_cntr; + u16 pkey_tbl_len; + u16 lid; + u16 sm_lid; + u8 lmc; + u8 max_vl_num; + u8 sm_sl; + u8 subnet_timeout; + u8 init_type_reply; + u8 active_width; + u8 active_speed; + u8 phys_state; + u8 reserved[2]; +}; + +struct pvrdma_global_route { + union pvrdma_gid dgid; + u32 flow_label; + u8 sgid_index; + u8 hop_limit; + u8 traffic_class; + u8 reserved; +}; + +struct pvrdma_grh { + __be32 version_tclass_flow; + __be16 paylen; + u8 next_hdr; + u8 hop_limit; + union pvrdma_gid sgid; + union pvrdma_gid dgid; +}; + +enum pvrdma_ah_flags { + PVRDMA_AH_GRH = 1, +}; + +enum pvrdma_rate { + PVRDMA_RATE_PORT_CURRENT = 0, + PVRDMA_RATE_2_5_GBPS = 2, + PVRDMA_RATE_5_GBPS = 5, + PVRDMA_RATE_10_GBPS = 3, + PVRDMA_RATE_20_GBPS = 6, + PVRDMA_RATE_30_GBPS = 4, + PVRDMA_RATE_40_GBPS = 7, + PVRDMA_RATE_60_GBPS = 8, + PVRDMA_RATE_80_GBPS = 9, + PVRDMA_RATE_120_GBPS = 10, + PVRDMA_RATE_14_GBPS = 11, + PVRDMA_RATE_56_GBPS = 12, + PVRDMA_RATE_112_GBPS = 13, + PVRDMA_RATE_168_GBPS = 14, + PVRDMA_RATE_25_GBPS = 15, + PVRDMA_RATE_100_GBPS = 16, + PVRDMA_RATE_200_GBPS = 17, + PVRDMA_RATE_300_GBPS = 18, +}; + +struct pvrdma_ah_attr { + struct pvrdma_global_route grh; + u16 dlid; + u16 vlan_id; + u8 sl; + u8 src_path_bits; + u8 static_rate; + u8 ah_flags; + u8 port_num; + u8 dmac[6]; + u8 reserved; +}; + +enum pvrdma_cq_notify_flags { + PVRDMA_CQ_SOLICITED = 1 << 0, + PVRDMA_CQ_NEXT_COMP = 1 << 1, + PVRDMA_CQ_SOLICITED_MASK = PVRDMA_CQ_SOLICITED | + PVRDMA_CQ_NEXT_COMP, + PVRDMA_CQ_REPORT_MISSED_EVENTS = 1 << 2, +}; + +struct pvrdma_qp_cap { + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; + u32 reserved; +}; + +enum pvrdma_sig_type { + PVRDMA_SIGNAL_ALL_WR, + PVRDMA_SIGNAL_REQ_WR, +}; + +enum pvrdma_qp_type { + PVRDMA_QPT_SMI, + PVRDMA_QPT_GSI, + PVRDMA_QPT_RC, + PVRDMA_QPT_UC, + PVRDMA_QPT_UD, + PVRDMA_QPT_RAW_IPV6, + PVRDMA_QPT_RAW_ETHERTYPE, + PVRDMA_QPT_RAW_PACKET = 8, + PVRDMA_QPT_XRC_INI = 9, + PVRDMA_QPT_XRC_TGT, + PVRDMA_QPT_MAX, +}; + +enum pvrdma_qp_create_flags { + PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO = 1 << 0, + PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, +}; + +enum pvrdma_qp_attr_mask { + PVRDMA_QP_STATE = 1 << 0, + PVRDMA_QP_CUR_STATE = 1 << 1, + PVRDMA_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2, + PVRDMA_QP_ACCESS_FLAGS = 1 << 3, + PVRDMA_QP_PKEY_INDEX = 1 << 4, + PVRDMA_QP_PORT = 1 << 5, + PVRDMA_QP_QKEY = 1 << 6, + PVRDMA_QP_AV = 1 << 7, + PVRDMA_QP_PATH_MTU = 1 << 8, + PVRDMA_QP_TIMEOUT = 1 << 9, + PVRDMA_QP_RETRY_CNT = 1 << 10, + PVRDMA_QP_RNR_RETRY = 1 << 11, + PVRDMA_QP_RQ_PSN = 1 << 12, + PVRDMA_QP_MAX_QP_RD_ATOMIC = 1 << 13, + PVRDMA_QP_ALT_PATH = 1 << 14, + PVRDMA_QP_MIN_RNR_TIMER = 1 << 15, + PVRDMA_QP_SQ_PSN = 1 << 16, + PVRDMA_QP_MAX_DEST_RD_ATOMIC = 1 << 17, + PVRDMA_QP_PATH_MIG_STATE = 1 << 18, + PVRDMA_QP_CAP = 1 << 19, + PVRDMA_QP_DEST_QPN = 1 << 20, + PVRDMA_QP_ATTR_MASK_MAX = PVRDMA_QP_DEST_QPN, +}; + +enum pvrdma_qp_state { + PVRDMA_QPS_RESET, + PVRDMA_QPS_INIT, + PVRDMA_QPS_RTR, + PVRDMA_QPS_RTS, + PVRDMA_QPS_SQD, + PVRDMA_QPS_SQE, + PVRDMA_QPS_ERR, +}; + +enum pvrdma_mig_state { + PVRDMA_MIG_MIGRATED, + PVRDMA_MIG_REARM, + PVRDMA_MIG_ARMED, +}; + +enum pvrdma_mw_type { + PVRDMA_MW_TYPE_1 = 1, + PVRDMA_MW_TYPE_2 = 2, +}; + +struct pvrdma_qp_attr { + enum pvrdma_qp_state qp_state; + enum pvrdma_qp_state cur_qp_state; + enum pvrdma_mtu path_mtu; + enum pvrdma_mig_state path_mig_state; + u32 qkey; + u32 rq_psn; + u32 sq_psn; + u32 dest_qp_num; + u32 qp_access_flags; + u16 pkey_index; + u16 alt_pkey_index; + u8 en_sqd_async_notify; 
+ u8 sq_draining; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 min_rnr_timer; + u8 port_num; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 alt_port_num; + u8 alt_timeout; + u8 reserved[5]; + struct pvrdma_qp_cap cap; + struct pvrdma_ah_attr ah_attr; + struct pvrdma_ah_attr alt_ah_attr; +}; + +enum pvrdma_send_flags { + PVRDMA_SEND_FENCE = 1 << 0, + PVRDMA_SEND_SIGNALED = 1 << 1, + PVRDMA_SEND_SOLICITED = 1 << 2, + PVRDMA_SEND_INLINE = 1 << 3, + PVRDMA_SEND_IP_CSUM = 1 << 4, + PVRDMA_SEND_FLAGS_MAX = PVRDMA_SEND_IP_CSUM, +}; + +enum pvrdma_access_flags { + PVRDMA_ACCESS_LOCAL_WRITE = 1 << 0, + PVRDMA_ACCESS_REMOTE_WRITE = 1 << 1, + PVRDMA_ACCESS_REMOTE_READ = 1 << 2, + PVRDMA_ACCESS_REMOTE_ATOMIC = 1 << 3, + PVRDMA_ACCESS_MW_BIND = 1 << 4, + PVRDMA_ZERO_BASED = 1 << 5, + PVRDMA_ACCESS_ON_DEMAND = 1 << 6, + PVRDMA_ACCESS_FLAGS_MAX = PVRDMA_ACCESS_ON_DEMAND, +}; + +int pvrdma_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata); +int pvrdma_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +int pvrdma_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid); +int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, + u16 index, u16 *pkey); +enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev, + u8 port); +int pvrdma_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props); +int pvrdma_modify_port(struct ib_device *ibdev, u8 port, + int mask, struct ib_port_modify *props); +int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); +struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata); +int pvrdma_dealloc_ucontext(struct ib_ucontext *context); +struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +int pvrdma_dealloc_pd(struct ib_pd *ibpd); +struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc); +struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int pvrdma_dereg_mr(struct ib_mr *mr); +struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset); +int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); +int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, + struct ib_udata *udata); +struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata); +int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, + struct ib_udata *udata); +int pvrdma_destroy_cq(struct ib_cq *cq); +int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); +struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_udata *udata); +int pvrdma_destroy_ah(struct ib_ah *ah); +struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); +int pvrdma_destroy_qp(struct ib_qp *qp); +int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct 
ib_send_wr **bad_wr); +int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); + +#endif /* __PVRDMA_VERBS_H__ */ diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild index f14ab7ff5fee..6a8a934c540c 100644 --- a/include/uapi/rdma/Kbuild +++ b/include/uapi/rdma/Kbuild @@ -14,3 +14,4 @@ header-y += mlx5-abi.h header-y += mthca-abi.h header-y += nes-abi.h header-y += ocrdma-abi.h +header-y += vmw_pvrdma-abi.h diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h new file mode 100644 index 000000000000..5016abc9ee97 --- /dev/null +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2012-2016 VMware, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of EITHER the GNU General Public License + * version 2 as published by the Free Software Foundation or the BSD + * 2-Clause License. This program is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License version 2 for more details at + * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html. + * + * You should have received a copy of the GNU General Public License + * along with this program available in the file COPYING in the main + * directory of this source tree. + * + * The BSD 2-Clause License + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __VMW_PVRDMA_ABI_H__ +#define __VMW_PVRDMA_ABI_H__ + +#include + +#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */ +#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */ +#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */ +#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */ +#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */ +#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */ +#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */ +#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */ +#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. 
*/ + +enum pvrdma_wr_opcode { + PVRDMA_WR_RDMA_WRITE, + PVRDMA_WR_RDMA_WRITE_WITH_IMM, + PVRDMA_WR_SEND, + PVRDMA_WR_SEND_WITH_IMM, + PVRDMA_WR_RDMA_READ, + PVRDMA_WR_ATOMIC_CMP_AND_SWP, + PVRDMA_WR_ATOMIC_FETCH_AND_ADD, + PVRDMA_WR_LSO, + PVRDMA_WR_SEND_WITH_INV, + PVRDMA_WR_RDMA_READ_WITH_INV, + PVRDMA_WR_LOCAL_INV, + PVRDMA_WR_FAST_REG_MR, + PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP, + PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, + PVRDMA_WR_BIND_MW, + PVRDMA_WR_REG_SIG_MR, +}; + +enum pvrdma_wc_status { + PVRDMA_WC_SUCCESS, + PVRDMA_WC_LOC_LEN_ERR, + PVRDMA_WC_LOC_QP_OP_ERR, + PVRDMA_WC_LOC_EEC_OP_ERR, + PVRDMA_WC_LOC_PROT_ERR, + PVRDMA_WC_WR_FLUSH_ERR, + PVRDMA_WC_MW_BIND_ERR, + PVRDMA_WC_BAD_RESP_ERR, + PVRDMA_WC_LOC_ACCESS_ERR, + PVRDMA_WC_REM_INV_REQ_ERR, + PVRDMA_WC_REM_ACCESS_ERR, + PVRDMA_WC_REM_OP_ERR, + PVRDMA_WC_RETRY_EXC_ERR, + PVRDMA_WC_RNR_RETRY_EXC_ERR, + PVRDMA_WC_LOC_RDD_VIOL_ERR, + PVRDMA_WC_REM_INV_RD_REQ_ERR, + PVRDMA_WC_REM_ABORT_ERR, + PVRDMA_WC_INV_EECN_ERR, + PVRDMA_WC_INV_EEC_STATE_ERR, + PVRDMA_WC_FATAL_ERR, + PVRDMA_WC_RESP_TIMEOUT_ERR, + PVRDMA_WC_GENERAL_ERR, +}; + +enum pvrdma_wc_opcode { + PVRDMA_WC_SEND, + PVRDMA_WC_RDMA_WRITE, + PVRDMA_WC_RDMA_READ, + PVRDMA_WC_COMP_SWAP, + PVRDMA_WC_FETCH_ADD, + PVRDMA_WC_BIND_MW, + PVRDMA_WC_LSO, + PVRDMA_WC_LOCAL_INV, + PVRDMA_WC_FAST_REG_MR, + PVRDMA_WC_MASKED_COMP_SWAP, + PVRDMA_WC_MASKED_FETCH_ADD, + PVRDMA_WC_RECV = 1 << 7, + PVRDMA_WC_RECV_RDMA_WITH_IMM, +}; + +enum pvrdma_wc_flags { + PVRDMA_WC_GRH = 1 << 0, + PVRDMA_WC_WITH_IMM = 1 << 1, + PVRDMA_WC_WITH_INVALIDATE = 1 << 2, + PVRDMA_WC_IP_CSUM_OK = 1 << 3, + PVRDMA_WC_WITH_SMAC = 1 << 4, + PVRDMA_WC_WITH_VLAN = 1 << 5, + PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_VLAN, +}; + +struct pvrdma_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 reserved; +}; + +struct pvrdma_alloc_pd_resp { + __u32 pdn; + __u32 reserved; +}; + +struct pvrdma_create_cq { + __u64 buf_addr; + __u32 buf_size; + __u32 reserved; +}; + +struct pvrdma_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct pvrdma_resize_cq { + __u64 buf_addr; + __u32 buf_size; + __u32 reserved; +}; + +struct pvrdma_create_srq { + __u64 buf_addr; +}; + +struct pvrdma_create_srq_resp { + __u32 srqn; + __u32 reserved; +}; + +struct pvrdma_create_qp { + __u64 rbuf_addr; + __u64 sbuf_addr; + __u32 rbuf_size; + __u32 sbuf_size; + __u64 qp_addr; +}; + +/* PVRDMA masked atomic compare and swap */ +struct pvrdma_ex_cmp_swap { + __u64 swap_val; + __u64 compare_val; + __u64 swap_mask; + __u64 compare_mask; +}; + +/* PVRDMA masked atomic fetch and add */ +struct pvrdma_ex_fetch_add { + __u64 add_val; + __u64 field_boundary; +}; + +/* PVRDMA address vector. */ +struct pvrdma_av { + __u32 port_pd; + __u32 sl_tclass_flowlabel; + __u8 dgid[16]; + __u8 src_path_bits; + __u8 gid_index; + __u8 stat_rate; + __u8 hop_limit; + __u8 dmac[6]; + __u8 reserved[6]; +}; + +/* PVRDMA scatter/gather entry */ +struct pvrdma_sge { + __u64 addr; + __u32 length; + __u32 lkey; +}; + +/* PVRDMA receive queue work request */ +struct pvrdma_rq_wqe_hdr { + __u64 wr_id; /* wr id */ + __u32 num_sge; /* size of s/g array */ + __u32 total_len; /* reserved */ +}; +/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. 
*/ + +/* PVRDMA send queue work request */ +struct pvrdma_sq_wqe_hdr { + __u64 wr_id; /* wr id */ + __u32 num_sge; /* size of s/g array */ + __u32 total_len; /* reserved */ + __u32 opcode; /* operation type */ + __u32 send_flags; /* wr flags */ + union { + __u32 imm_data; + __u32 invalidate_rkey; + } ex; + __u32 reserved; + union { + struct { + __u64 remote_addr; + __u32 rkey; + __u8 reserved[4]; + } rdma; + struct { + __u64 remote_addr; + __u64 compare_add; + __u64 swap; + __u32 rkey; + __u32 reserved; + } atomic; + struct { + __u64 remote_addr; + __u32 log_arg_sz; + __u32 rkey; + union { + struct pvrdma_ex_cmp_swap cmp_swap; + struct pvrdma_ex_fetch_add fetch_add; + } wr_data; + } masked_atomics; + struct { + __u64 iova_start; + __u64 pl_pdir_dma; + __u32 page_shift; + __u32 page_list_len; + __u32 length; + __u32 access_flags; + __u32 rkey; + } fast_reg; + struct { + __u32 remote_qpn; + __u32 remote_qkey; + struct pvrdma_av av; + } ud; + } wr; +}; +/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */ + +/* Completion queue element. */ +struct pvrdma_cqe { + __u64 wr_id; + __u64 qp; + __u32 opcode; + __u32 status; + __u32 byte_len; + __u32 imm_data; + __u32 src_qp; + __u32 wc_flags; + __u32 vendor_err; + __u16 pkey_index; + __u16 slid; + __u8 sl; + __u8 dlid_path_bits; + __u8 port_num; + __u8 smac[6]; + __u8 reserved2[7]; /* Pad to next power of 2 (64). */ +}; + +#endif /* __VMW_PVRDMA_ABI_H__ */ -- cgit v1.2.3
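The index helpers introduced in pvrdma_ring.h above rely on indices that run over [0, 2 * max_elems) rather than [0, max_elems): the extra high-order bit acts as a generation flag that flips each time an index wraps, so a full ring (tail == head ^ max_elems) can be told apart from an empty one (tail == head) without a separate element count. The stand-alone sketch below only illustrates that scheme; it is not driver code, the demo_* names are hypothetical, and it assumes max_elems is a power of two, mirroring the logic of pvrdma_idx_ring_has_space(), pvrdma_idx_ring_has_data() and pvrdma_idx_ring_inc().

/*
 * Minimal stand-alone sketch of the pvrdma ring-index scheme, assuming
 * max_elems is a power of two. The demo_* names are hypothetical and only
 * mirror the logic of pvrdma_idx_ring_has_space()/_has_data()/_inc().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t prod_tail;	/* producer index in [0, 2 * max_elems) */
	uint32_t cons_head;	/* consumer index in [0, 2 * max_elems) */
	uint32_t max_elems;	/* ring size, power of two */
};

/* Ring is full when the two indices differ only in the generation bit. */
static bool demo_has_space(const struct demo_ring *r)
{
	return r->prod_tail != (r->cons_head ^ r->max_elems);
}

/* Ring has data when producer and consumer indices differ at all. */
static bool demo_has_data(const struct demo_ring *r)
{
	return r->prod_tail != r->cons_head;
}

/* Advance an index modulo 2 * max_elems, flipping the generation bit on wrap. */
static void demo_inc(uint32_t *idx, uint32_t max_elems)
{
	*idx = (*idx + 1) & ((max_elems << 1) - 1);
}

int main(void)
{
	struct demo_ring r = { .prod_tail = 0, .cons_head = 0, .max_elems = 4 };
	unsigned int i;

	/* Fill the ring completely: after max_elems pushes it reports full. */
	for (i = 0; i < r.max_elems && demo_has_space(&r); i++)
		demo_inc(&r.prod_tail, r.max_elems);

	printf("pushed %u, has_space=%d, has_data=%d\n",
	       i, demo_has_space(&r), demo_has_data(&r));

	/* Drain it again: the ring reports empty but has space. */
	while (demo_has_data(&r))
		demo_inc(&r.cons_head, r.max_elems);

	printf("after drain, has_space=%d, has_data=%d\n",
	       demo_has_space(&r), demo_has_data(&r));
	return 0;
}

The slot actually addressed in the shared page is idx & (max_elems - 1), which is what pvrdma_idx() returns before the WQE header is written in pvrdma_post_send() and pvrdma_post_recv().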