author     Doug Ledford <dledford@redhat.com>    2016-12-14 14:44:25 -0500
committer  Doug Ledford <dledford@redhat.com>    2016-12-14 14:44:25 -0500
commit     86ef0beaa0bdbec70d4261977b8b4a100fe54bfe (patch)
tree       d2c1b52a7a6e493bada10dbdbcd2f8511baccc66 /include
parent     253f8b22e0ad643edafd75e831e5c765732877f5 (diff)
parent     7ceb740c540dde362b3055ad92c6a38009eb7a83 (diff)
Merge branch 'mlx' into merge-test
Diffstat (limited to 'include')
-rw-r--r--   include/linux/mlx5/mlx5_ifc.h       |  2
-rw-r--r--   include/rdma/ib_verbs.h             | 70
-rw-r--r--   include/uapi/rdma/ib_user_verbs.h   | 38
-rw-r--r--   include/uapi/rdma/mlx5-abi.h        | 38

4 files changed, 132 insertions(+), 16 deletions(-)
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 2632cb2caf10..0779ad2e8f51 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -576,7 +576,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         self_lb_en_modifiable[0x1];
 	u8         reserved_at_9[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_at_10[0x2];
+	u8         multi_pkt_send_wqe[0x2];
 	u8         wqe_inline_mode[0x2];
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5ad43a487745..8029d2a51f14 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
 	IB_QP_RESERVED2			= (1<<22),
 	IB_QP_RESERVED3			= (1<<23),
 	IB_QP_RESERVED4			= (1<<24),
+	IB_QP_RATE_LIMIT		= (1<<25),
 };
 
 enum ib_qp_state {
@@ -1151,6 +1152,7 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
+	u32			rate_limit;
 };
 
 enum ib_wr_opcode {
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
 /* Supported steering header types */
 enum ib_flow_spec_type {
 	/* L2 headers*/
-	IB_FLOW_SPEC_ETH	= 0x20,
-	IB_FLOW_SPEC_IB		= 0x22,
+	IB_FLOW_SPEC_ETH		= 0x20,
+	IB_FLOW_SPEC_IB			= 0x22,
 	/* L3 header*/
-	IB_FLOW_SPEC_IPV4	= 0x30,
-	IB_FLOW_SPEC_IPV6	= 0x31,
+	IB_FLOW_SPEC_IPV4		= 0x30,
+	IB_FLOW_SPEC_IPV6		= 0x31,
 	/* L4 headers*/
-	IB_FLOW_SPEC_TCP	= 0x40,
-	IB_FLOW_SPEC_UDP	= 0x41
+	IB_FLOW_SPEC_TCP		= 0x40,
+	IB_FLOW_SPEC_UDP		= 0x41,
+	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
+	IB_FLOW_SPEC_INNER		= 0x100,
 };
 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
 
 /* Flow steering rule priority is set according to it's domain.
  * Lower domain value means higher priority.
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
 };
 
 struct ib_flow_spec_eth {
-	enum ib_flow_spec_type type;
+	u32			  type;
 	u16			  size;
 	struct ib_flow_eth_filter val;
 	struct ib_flow_eth_filter mask;
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
 };
 
 struct ib_flow_spec_ib {
-	enum ib_flow_spec_type type;
+	u32			 type;
 	u16			 size;
 	struct ib_flow_ib_filter val;
 	struct ib_flow_ib_filter mask;
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
 };
 
 struct ib_flow_spec_ipv4 {
-	enum ib_flow_spec_type type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv4_filter val;
 	struct ib_flow_ipv4_filter mask;
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
 };
 
 struct ib_flow_spec_ipv6 {
-	enum ib_flow_spec_type type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv6_filter val;
 	struct ib_flow_ipv6_filter mask;
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
 };
 
 struct ib_flow_spec_tcp_udp {
-	enum ib_flow_spec_type type;
+	u32			      type;
 	u16			      size;
 	struct ib_flow_tcp_udp_filter val;
 	struct ib_flow_tcp_udp_filter mask;
 };
 
+struct ib_flow_tunnel_filter {
+	__be32	tunnel_id;
+	u8	real_sz[0];
+};
+
+/* ib_flow_spec_tunnel describes the Vxlan tunnel
+ * the tunnel_id from val has the vni value
+ */
+struct ib_flow_spec_tunnel {
+	u32			      type;
+	u16			      size;
+	struct ib_flow_tunnel_filter  val;
+	struct ib_flow_tunnel_filter  mask;
+};
+
 union ib_flow_spec {
 	struct {
-		enum ib_flow_spec_type	type;
+		u32			type;
 		u16			size;
 	};
 	struct ib_flow_spec_eth		eth;
@@ -1717,6 +1736,7 @@ union ib_flow_spec {
 	struct ib_flow_spec_ipv4	ipv4;
 	struct ib_flow_spec_tcp_udp	tcp_udp;
 	struct ib_flow_spec_ipv6	ipv6;
+	struct ib_flow_spec_tunnel	tunnel;
 };
 
 struct ib_flow_attr {
@@ -1933,7 +1953,8 @@ struct ib_device {
 						      struct ib_udata *udata);
 	int			   (*dealloc_pd)(struct ib_pd *pd);
 	struct ib_ah *		   (*create_ah)(struct ib_pd *pd,
-						struct ib_ah_attr *ah_attr);
+						struct ib_ah_attr *ah_attr,
+						struct ib_udata *udata);
 	int			   (*modify_ah)(struct ib_ah *ah,
 						struct ib_ah_attr *ah_attr);
 	int			   (*query_ah)(struct ib_ah *ah,
@@ -2581,6 +2602,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
 
 /**
+ * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
+ * work completion.
+ * @hdr: the L3 header to parse
+ * @net_type: type of header to parse
+ * @sgid: place to store source gid
+ * @dgid: place to store destination gid
+ */
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid);
+
+/**
+ * ib_get_rdma_header_version - Get the header version
+ * @hdr: the L3 header to parse
+ */
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
+
+/**
  * ib_init_ah_from_wc - Initializes address handle attributes from a
  * work completion.
  * @device: Device on which the received message arrived.
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
 void ib_drain_rq(struct ib_qp *qp);
 void ib_drain_sq(struct ib_qp *qp);
 void ib_drain_qp(struct ib_qp *qp);
+
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr);
 #endif /* IB_VERBS_H */
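
The ib_verbs.h hunks above surface two consumer-facing features: per-QP packet pacing (the new IB_QP_RATE_LIMIT attribute-mask bit plus the rate_limit field in struct ib_qp_attr) and VXLAN tunnel steering (IB_FLOW_SPEC_VXLAN_TUNNEL with struct ib_flow_spec_tunnel, whose val.tunnel_id carries the VNI). A minimal kernel-side sketch of a consumer using both follows; the pre-created RAW_PACKET QP, the port number, the 1 Gb/s rate and the VNI value are illustrative assumptions, not taken from this merge.

    #include <rdma/ib_verbs.h>

    /* Sketch: cap an existing RAW_PACKET QP and steer VXLAN VNI 42 to
     * it.  QP/PD setup and teardown are assumed to happen elsewhere.
     */
    static int pace_and_steer_vxlan(struct ib_qp *qp)
    {
    	struct ib_qp_attr attr = {
    		.rate_limit = 1000000,	/* new u32 field; kbps for mlx5 */
    	};
    	struct {
    		struct ib_flow_attr	   attr;
    		struct ib_flow_spec_tunnel tunnel;
    	} flow = {
    		.attr = {
    			.type	      = IB_FLOW_ATTR_NORMAL,
    			.size	      = sizeof(flow),	/* attr + specs */
    			.num_of_specs = 1,
    			.port	      = 1,		/* assumed port */
    		},
    		.tunnel = {
    			.type = IB_FLOW_SPEC_VXLAN_TUNNEL,
    			.size = sizeof(struct ib_flow_spec_tunnel),
    			.val.tunnel_id	= cpu_to_be32(42),	 /* the VNI */
    			.mask.tunnel_id = cpu_to_be32(0xffffff), /* VNI is 24 bits */
    		},
    	};
    	struct ib_flow *flow_rule;
    	int ret;

    	/* IB_QP_RATE_LIMIT is the new attr_mask bit (1<<25). */
    	ret = ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
    	if (ret)
    		return ret;

    	flow_rule = ib_create_flow(qp, &flow.attr, IB_FLOW_DOMAIN_USER);
    	return PTR_ERR_OR_ZERO(flow_rule);
    }

The other ib_verbs.h additions support the same merge from below: the udata parameter on create_ah and the exported ib_resolve_eth_dmac() let a driver resolve the L2 destination MAC in the kernel and hand it back to userspace (see mlx5_ib_create_ah_resp in the mlx5-abi.h diff further down).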
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25225ebbc7d5..dfdfe4e92d31 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -37,6 +37,7 @@
 #define IB_USER_VERBS_H
 
 #include <linux/types.h>
+#include <rdma/ib_verbs.h>
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -93,6 +94,7 @@ enum {
 	IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
 	IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
 	IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
+	IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
 	IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
 	IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 	IB_USER_VERBS_EX_CMD_CREATE_WQ,
@@ -545,6 +547,14 @@ enum {
 	IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
 };
 
+enum {
+	IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+};
+
+enum {
+	IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+};
+
 struct ib_uverbs_ex_create_qp {
 	__u64 user_handle;
 	__u32 pd_handle;
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_ex_modify_qp {
+	struct ib_uverbs_modify_qp base;
+	__u32	rate_limit;
+	__u32	reserved;
+};
+
 struct ib_uverbs_modify_qp_resp {
 };
 
+struct ib_uverbs_ex_modify_qp_resp {
+	__u32  comp_mask;
+	__u32  response_length;
+};
+
 struct ib_uverbs_destroy_qp {
 	__u64 response;
 	__u32 qp_handle;
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
 	struct ib_uverbs_flow_ipv6_filter mask;
 };
 
+struct ib_uverbs_flow_tunnel_filter {
+	__be32 tunnel_id;
+};
+
+struct ib_uverbs_flow_spec_tunnel {
+	union {
+		struct ib_uverbs_flow_spec_hdr hdr;
+		struct {
+			__u32 type;
+			__u16 size;
+			__u16 reserved;
+		};
+	};
+	struct ib_uverbs_flow_tunnel_filter val;
+	struct ib_uverbs_flow_tunnel_filter mask;
+};
+
 struct ib_uverbs_flow_attr {
 	__u32 type;
 	__u16 size;
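
On the uverbs ABI side, the extended MODIFY_QP command embeds the legacy struct ib_uverbs_modify_qp and appends rate_limit; attribute-mask bits above IB_USER_LEGACY_LAST_QP_ATTR_MASK can only travel through this extended form. Below is a schematic of how such a payload might be populated, written against the kernel-visible headers (note that the new #include <rdma/ib_verbs.h> is what makes the IB_QP_* mask values visible here). The helper name and values are hypothetical, and the extended-command header plus the write() plumbing that frame the payload are omitted.

    #include <linux/string.h>
    #include <rdma/ib_user_verbs.h>	/* now pulls in rdma/ib_verbs.h */

    static void fill_ex_modify_qp(struct ib_uverbs_ex_modify_qp *cmd,
    			      __u32 qp_handle)
    {
    	memset(cmd, 0, sizeof(*cmd));
    	cmd->base.qp_handle = qp_handle;	/* from a CREATE_QP response */
    	/* Bits past IB_USER_LEGACY_LAST_QP_ATTR_MASK (IB_QP_DEST_QPN)
    	 * do not fit in the legacy command; IB_QP_RATE_LIMIT is one.
    	 */
    	cmd->base.attr_mask = IB_QP_RATE_LIMIT;
    	cmd->rate_limit     = 100000;		/* assumed units: kbps */
    }

The paired struct ib_uverbs_ex_modify_qp_resp carries the usual comp_mask/response_length pair of the extended verbs, so the kernel can grow the response later without breaking older binaries.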
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index f5d0f4e83b59..fae6cdaeb56d 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
 
 enum mlx5_user_cmds_supp_uhw {
 	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
 };
 
 struct mlx5_ib_alloc_ucontext_resp {
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
 	__u8 reserved[7];
 };
 
+enum mlx5_ib_cqe_comp_res_format {
+	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
+	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
+	MLX5_IB_CQE_RES_RESERVED	= 1 << 2,
+};
+
+struct mlx5_ib_cqe_comp_caps {
+	__u32 max_num;
+	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
+};
+
+struct mlx5_packet_pacing_caps {
+	__u32 qp_rate_limit_min;
+	__u32 qp_rate_limit_max; /* In kpbs */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+	__u32 reserved;
+};
+
 struct mlx5_ib_query_device_resp {
 	__u32	comp_mask;
 	__u32	response_length;
 	struct	mlx5_ib_tso_caps tso_caps;
 	struct	mlx5_ib_rss_caps rss_caps;
+	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
+	struct	mlx5_packet_pacing_caps packet_pacing_caps;
+	__u32	mlx5_ib_support_multi_pkt_send_wqes;
+	__u32	reserved;
 };
 
 struct mlx5_ib_create_cq {
 	__u64 buf_addr;
 	__u64 db_addr;
 	__u32 cqe_size;
-	__u32 reserved; /* explicit padding (optional on i386) */
+	__u8  cqe_comp_en;
+	__u8  cqe_comp_res_format;
+	__u16 reserved; /* explicit padding (optional on i386) */
 };
 
 struct mlx5_ib_create_cq_resp {
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
 	__u32 reserved;
 };
 
+struct mlx5_ib_create_ah_resp {
+	__u32	response_length;
+	__u8	dmac[ETH_ALEN];
+	__u8	reserved[6];
+};
+
 struct mlx5_ib_create_wq_resp {
 	__u32	response_length;
 	__u32	reserved;
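
The new mlx5 ABI fields are all discoverable: the CQE-compression and packet-pacing capability blocks in the query-device response are only valid when response_length covers them, and the create-CQ change carves its two new bytes out of the former reserved padding so the command size is unchanged. A sketch of how a consumer of this vendor channel might act on that follows; the helper names are hypothetical and the HASH residue format is an arbitrary example.

    #include <rdma/mlx5-abi.h>

    /* Gate packet pacing on the reported caps.  qpt is a value from
     * enum ib_qp_type, e.g. IB_QPT_RAW_PACKET, per the comment in the
     * diff above.
     */
    static int rate_limit_supported(const struct mlx5_ib_query_device_resp *resp,
    				int qpt)
    {
    	const struct mlx5_packet_pacing_caps *pp = &resp->packet_pacing_caps;

    	/* Older kernels write a shorter response; the pacing caps are
    	 * only meaningful if response_length says they were filled in.
    	 */
    	if (resp->response_length < sizeof(*resp))
    		return 0;

    	return pp->qp_rate_limit_max > 0 &&
    	       (pp->supported_qpts & (1 << qpt));
    }

    /* Opt a CQ into compressed CQEs at creation time. */
    static void request_cqe_compression(struct mlx5_ib_create_cq *cmd)
    {
    	cmd->cqe_comp_en	 = 1;
    	cmd->cqe_comp_res_format = MLX5_IB_CQE_RES_FORMAT_HASH;
    }

The mlx5_ib_support_multi_pkt_send_wqes field in the same response pairs with the multi_pkt_send_wqe capability bit that the mlx5_ifc.h hunk at the top of this diff exposes from firmware.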