author    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-21 09:22:08 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-21 09:22:08 -0800
commit    52aef8183fbedb0232b20127b089e85e7aa095e3 (patch)
tree      4c06b3351284f222a9cd825e6f1ea8543e7656c2 /include
parent    28c006c1f09ea92d4f2585a087a188955ce3f64c (diff)
parent    fd02e8038eb943755e8727a0ea193c037a51714f (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (47 commits)
  IB/mthca: Query SRQ srq_limit fixes
  IPoIB: Get rid of useless test of queue length
  IB/mthca: Correct reported SRQ size in MemFree case.
  IB/mad: Fix oopsable race on device removal
  IB/srp: Coverity fix to srp_parse_options()
  IB/mthca: Coverity fix to mthca_init_eq_table()
  IB: Coverity fixes to sysfs.c
  IPoIB: Move ipoib_ib_dev_flush() to ipoib workqueue
  IPoIB: Fix build now that neighbour destructor is in neigh_params
  IB/uverbs: Use correct alt_pkey_index in modify QP
  IB/umad: Add support for large RMPP transfers
  IB/srp: Add SCSI host attributes to show target port
  IB/cm: Check cm_id state before handling a REP
  IB/mthca: Update firmware versions
  IB/mthca: Optimize large messages on Sinai HCAs
  IB/uverbs: Fix query QP return of sq_sig_all
  IB: Fix modify QP checking of "current QP state" attribute
  IPoIB: Fix multicast race between canceling and completing
  IPoIB: Clean up if posting receives fails
  IB/mthca: Use an enum for HCA page size
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/rdma/ib_fmr_pool.h     2
-rw-r--r--  include/rdma/ib_mad.h         48
-rw-r--r--  include/rdma/ib_user_verbs.h  79
-rw-r--r--  include/rdma/ib_verbs.h       38
4 files changed, 139 insertions, 28 deletions
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index 86b7e93f198b..4ace54cd0cce 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -43,6 +43,7 @@ struct ib_fmr_pool;
/**
* struct ib_fmr_pool_param - Parameters for creating FMR pool
* @max_pages_per_fmr:Maximum number of pages per map request.
+ * @page_shift: Log2 of sizeof "pages" mapped by this fmr
* @access:Access flags for FMRs in pool.
* @pool_size:Number of FMRs to allocate for pool.
* @dirty_watermark:Flush is triggered when @dirty_watermark dirty
@@ -55,6 +56,7 @@ struct ib_fmr_pool;
*/
struct ib_fmr_pool_param {
int max_pages_per_fmr;
+ int page_shift;
enum ib_access_flags access;
int pool_size;
int dirty_watermark;
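The new page_shift field replaces the pool's previously hard-wired page granularity. A minimal sketch of a consumer filling in the updated ib_fmr_pool_param follows; the protection domain pd, the sizes, and the access flags are illustrative assumptions, not part of this diff.

#include <linux/err.h>
#include <rdma/ib_fmr_pool.h>

/* Sketch: create an FMR pool that maps PAGE_SIZE-sized pages.
 * Assumes a valid struct ib_pd *pd obtained elsewhere. */
static struct ib_fmr_pool *create_pool_example(struct ib_pd *pd)
{
        struct ib_fmr_pool_param params = {
                .max_pages_per_fmr = 64,
                .page_shift        = PAGE_SHIFT,  /* new: log2 of the mapped page size */
                .access            = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
                .pool_size         = 32,
                .dirty_watermark   = 8,
                .cache             = 1,
        };

        return ib_create_fmr_pool(pd, &params);  /* ERR_PTR() on failure */
}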
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 2c133506742b..51ab8eddb295 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -33,7 +33,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
+ * $Id: ib_mad.h 5596 2006-03-03 01:00:07Z sean.hefty $
*/
#if !defined( IB_MAD_H )
@@ -208,15 +208,23 @@ struct ib_class_port_info
/**
* ib_mad_send_buf - MAD data buffer and work request for sends.
* @next: A pointer used to chain together MADs for posting.
- * @mad: References an allocated MAD data buffer.
+ * @mad: References an allocated MAD data buffer for MADs that do not have
+ * RMPP active. For MADs using RMPP, references the common and management
+ * class specific headers.
* @mad_agent: MAD agent that allocated the buffer.
* @ah: The address handle to use when sending the MAD.
* @context: User-controlled context fields.
+ * @hdr_len: Indicates the size of the data header of the MAD. This length
+ * includes the common MAD, RMPP, and class specific headers.
+ * @data_len: Indicates the total size of user-transferred data.
+ * @seg_count: The number of RMPP segments allocated for this send.
+ * @seg_size: Size of each RMPP segment.
* @timeout_ms: Time to wait for a response.
* @retries: Number of times to retry a request for a response.
*
* Users are responsible for initializing the MAD buffer itself, with the
- * exception of specifying the payload length field in any RMPP MAD.
+ * exception of any RMPP header. Additional segment buffer space allocated
+ * beyond data_len is padding.
*/
struct ib_mad_send_buf {
struct ib_mad_send_buf *next;
@@ -224,6 +232,10 @@ struct ib_mad_send_buf {
struct ib_mad_agent *mad_agent;
struct ib_ah *ah;
void *context[2];
+ int hdr_len;
+ int data_len;
+ int seg_count;
+ int seg_size;
int timeout_ms;
int retries;
};
@@ -299,7 +311,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
* @mad_recv_wc: Received work completion information on the received MAD.
*
* MADs received in response to a send request operation will be handed to
- * the user after the send operation completes. All data buffers given
+ * the user before the send operation completes. All data buffers given
* to registered agents through this routine are owned by the receiving
* client, except for snooping agents. Clients snooping MADs should not
* modify the data referenced by @mad_recv_wc.
@@ -485,17 +497,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
struct ib_mad_send_buf **bad_send_buf);
-/**
- * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
- * @mad_recv_wc: Work completion information for a received MAD.
- * @buf: User-provided data buffer to receive the coalesced buffers. The
- * referenced buffer should be at least the size of the mad_len specified
- * by @mad_recv_wc.
- *
- * This call copies a chain of received MAD segments into a single data buffer,
- * removing duplicated headers.
- */
-void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
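With ib_coalesce_recv_mad() gone, a client that wants the whole reassembled MAD walks the receive buffers itself. A sketch, assuming the existing recv_buf/rmpp_list layout of struct ib_mad_recv_wc (every segment, including the first, is linked on rmpp_list); handle_segment() is a hypothetical per-segment consumer.

#include <linux/list.h>
#include <rdma/ib_mad.h>

/* Sketch: process each reassembled RMPP segment in place instead of
 * coalescing into one flat buffer.  handle_segment() is hypothetical. */
static void recv_handler_example(struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_recv_buf *seg;

        list_for_each_entry(seg, &mad_recv_wc->rmpp_list, list)
                handle_segment(seg->mad, mad_recv_wc->mad_len);

        ib_free_recv_mad(mad_recv_wc);  /* releases every buffer on the list */
}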
/**
* ib_free_recv_mad - Returns data buffers used to receive a MAD.
@@ -590,9 +591,10 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
* with an initialized work request structure. Users may modify the returned
* MAD data buffer before posting the send.
*
- * The returned data buffer will be cleared. Users are responsible for
- * initializing the common MAD and any class specific headers. If @rmpp_active
- * is set, the RMPP header will be initialized for sending.
+ * The returned MAD header, class specific headers, and any padding will be
+ * cleared. Users are responsible for initializing the common MAD header,
+ * any class specific header, and MAD data area.
+ * If @rmpp_active is set, the RMPP header will be initialized for sending.
*/
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index,
@@ -601,6 +603,16 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
gfp_t gfp_mask);
/**
+ * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
+ * @send_buf: Previously allocated send data buffer.
+ * @seg_num: number of segment to return
+ *
+ * This routine returns a pointer to the data buffer of an RMPP MAD.
+ * Users must provide synchronization to @send_buf around this call.
+ */
+void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
+
+/**
* ib_free_send_mad - Returns data buffers used to send a MAD.
* @send_buf: Previously allocated send data buffer.
*/
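Putting the new fields together, a sender of a large RMPP MAD allocates the buffer once and copies its payload segment by segment through ib_get_rmpp_segment(); segments are addressed 1..seg_count. The agent, address handle, header length, and payload below are assumptions for the sketch.

#include <linux/err.h>
#include <rdma/ib_mad.h>

/* Sketch: post a large RMPP send.  Assumes a registered MAD agent, a valid
 * address handle, and that the caller fills the common/class headers. */
static int rmpp_send_example(struct ib_mad_agent *agent, struct ib_ah *ah,
                             u32 remote_qpn, u16 pkey_index,
                             int hdr_len, void *payload, int payload_len)
{
        struct ib_mad_send_buf *msg;
        u8 *src = payload;
        int seg, left;

        msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                                 1 /* rmpp_active */, hdr_len, payload_len,
                                 GFP_KERNEL);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->ah = ah;
        /* Fill the common MAD and class specific headers at msg->mad here;
         * set msg->timeout_ms/msg->retries if a response is expected. */

        /* Copy the payload one RMPP segment at a time. */
        for (seg = 1, left = msg->data_len; left > 0;
             seg++, left -= msg->seg_size, src += msg->seg_size)
                memcpy(ib_get_rmpp_segment(msg, seg), src,
                       min(left, msg->seg_size));

        return ib_post_send_mad(msg, NULL);
}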
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 5ff1490c08db..338ed4333063 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -1,7 +1,8 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -43,7 +44,7 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define IB_USER_VERBS_ABI_VERSION 4
+#define IB_USER_VERBS_ABI_VERSION 6
enum {
IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -265,6 +266,17 @@ struct ib_uverbs_create_cq_resp {
__u32 cqe;
};
+struct ib_uverbs_resize_cq {
+ __u64 response;
+ __u32 cq_handle;
+ __u32 cqe;
+ __u64 driver_data[0];
+};
+
+struct ib_uverbs_resize_cq_resp {
+ __u32 cqe;
+};
+
struct ib_uverbs_poll_cq {
__u64 response;
__u32 cq_handle;
@@ -338,6 +350,7 @@ struct ib_uverbs_create_qp_resp {
__u32 max_send_sge;
__u32 max_recv_sge;
__u32 max_inline_data;
+ __u32 reserved;
};
/*
@@ -359,6 +372,47 @@ struct ib_uverbs_qp_dest {
__u8 port_num;
};
+struct ib_uverbs_query_qp {
+ __u64 response;
+ __u32 qp_handle;
+ __u32 attr_mask;
+ __u64 driver_data[0];
+};
+
+struct ib_uverbs_query_qp_resp {
+ struct ib_uverbs_qp_dest dest;
+ struct ib_uverbs_qp_dest alt_dest;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+ __u32 qkey;
+ __u32 rq_psn;
+ __u32 sq_psn;
+ __u32 dest_qp_num;
+ __u32 qp_access_flags;
+ __u16 pkey_index;
+ __u16 alt_pkey_index;
+ __u8 qp_state;
+ __u8 cur_qp_state;
+ __u8 path_mtu;
+ __u8 path_mig_state;
+ __u8 en_sqd_async_notify;
+ __u8 max_rd_atomic;
+ __u8 max_dest_rd_atomic;
+ __u8 min_rnr_timer;
+ __u8 port_num;
+ __u8 timeout;
+ __u8 retry_cnt;
+ __u8 rnr_retry;
+ __u8 alt_port_num;
+ __u8 alt_timeout;
+ __u8 sq_sig_all;
+ __u8 reserved[5];
+ __u64 driver_data[0];
+};
+
struct ib_uverbs_modify_qp {
struct ib_uverbs_qp_dest dest;
struct ib_uverbs_qp_dest alt_dest;
@@ -415,7 +469,7 @@ struct ib_uverbs_sge {
};
struct ib_uverbs_send_wr {
- __u64 wr_id;
+ __u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -489,7 +543,7 @@ struct ib_uverbs_post_srq_recv_resp {
struct ib_uverbs_global_route {
__u8 dgid[16];
- __u32 flow_label;
+ __u32 flow_label;
__u8 sgid_index;
__u8 hop_limit;
__u8 traffic_class;
@@ -551,6 +605,9 @@ struct ib_uverbs_create_srq {
struct ib_uverbs_create_srq_resp {
__u32 srq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 reserved;
};
struct ib_uverbs_modify_srq {
@@ -561,6 +618,20 @@ struct ib_uverbs_modify_srq {
__u64 driver_data[0];
};
+struct ib_uverbs_query_srq {
+ __u64 response;
+ __u32 srq_handle;
+ __u32 reserved;
+ __u64 driver_data[0];
+};
+
+struct ib_uverbs_query_srq_resp {
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 srq_limit;
+ __u32 reserved;
+};
+
struct ib_uverbs_destroy_srq {
__u64 response;
__u32 srq_handle;
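For reference, this is roughly how a userspace verbs library could drive the new resize-CQ command through the uverbs write() interface. The open uverbs fd and CQ handle are assumptions of the sketch, as is the omission of the ABI-version handshake and driver_data; a real library ships its own copy of this ABI header. The opcode comes from the command enum near the top of this file.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rdma/ib_user_verbs.h>

/* Sketch: issue IB_USER_VERBS_CMD_RESIZE_CQ and read back the CQ size
 * the kernel actually granted. */
static int resize_cq_example(int fd, uint32_t cq_handle, uint32_t new_cqe)
{
        struct {
                struct ib_uverbs_cmd_hdr   hdr;
                struct ib_uverbs_resize_cq cmd;
        } req;
        struct ib_uverbs_resize_cq_resp resp;

        memset(&req, 0, sizeof req);
        req.hdr.command   = IB_USER_VERBS_CMD_RESIZE_CQ;
        req.hdr.in_words  = sizeof req / 4;
        req.hdr.out_words = sizeof resp / 4;
        req.cmd.response  = (uintptr_t) &resp;
        req.cmd.cq_handle = cq_handle;
        req.cmd.cqe       = new_cqe;

        if (write(fd, &req, sizeof req) != sizeof req)
                return -1;

        return resp.cqe;        /* number of entries actually allocated */
}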
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 22fc886b9695..c1ad6273ac6c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -5,7 +5,7 @@
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -222,11 +222,13 @@ struct ib_port_attr {
};
enum ib_device_modify_flags {
- IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
+ IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
+ IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};
struct ib_device_modify {
u64 sys_image_guid;
+ char node_desc[64];
};
enum ib_port_modify_flags {
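The new flag and node_desc field let the node description be changed at runtime, e.g. so a management tool can give the node a meaningful name. A minimal sketch, assuming a valid struct ib_device pointer:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch: update the node description via the new modify flag. */
static int set_node_desc_example(struct ib_device *device, const char *desc)
{
        struct ib_device_modify mod;

        memset(&mod, 0, sizeof mod);
        strncpy(mod.node_desc, desc, sizeof mod.node_desc);

        return ib_modify_device(device, IB_DEVICE_MODIFY_NODE_DESC, &mod);
}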
@@ -649,7 +651,7 @@ struct ib_mw_bind {
struct ib_fmr_attr {
int max_pages;
int max_maps;
- u8 page_size;
+ u8 page_shift;
};
struct ib_ucontext {
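The same page_size-to-page_shift rename shows up for plain FMRs. A sketch of allocating one against the updated ib_fmr_attr; pd, the counts, and the access flags are illustrative.

#include <rdma/ib_verbs.h>

/* Sketch: allocate an FMR; the attribute now carries a shift, not a size. */
static struct ib_fmr *alloc_fmr_example(struct ib_pd *pd)
{
        struct ib_fmr_attr attr = {
                .max_pages  = 64,
                .max_maps   = 32,
                .page_shift = PAGE_SHIFT,       /* was: .page_size */
        };

        return ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
                            &attr);
}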
@@ -880,7 +882,8 @@ struct ib_device {
struct ib_ucontext *context,
struct ib_udata *udata);
int (*destroy_cq)(struct ib_cq *cq);
- int (*resize_cq)(struct ib_cq *cq, int cqe);
+ int (*resize_cq)(struct ib_cq *cq, int cqe,
+ struct ib_udata *udata);
int (*poll_cq)(struct ib_cq *cq, int num_entries,
struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
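The extra ib_udata argument gives a driver's resize_cq access to provider-specific data passed down from userspace. The sketch below is hypothetical throughout: mydrv_resize_cq, the mydrv_resize_cq_cmd layout, and mydrv_do_resize stand in for whatever a real driver (mthca, for instance) defines.

#include <rdma/ib_verbs.h>

/* Hypothetical driver method: pull a provider-private command from udata
 * when the call originates in userspace, then do the real work. */
struct mydrv_resize_cq_cmd {
        __u32 lkey;             /* e.g. lkey of the new, larger CQ buffer */
        __u32 reserved;
};

static int mydrv_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct mydrv_resize_cq_cmd ucmd;

        if (udata && ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        return mydrv_do_resize(ibcq, cqe, udata ? &ucmd : NULL);
}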
@@ -950,6 +953,7 @@ struct ib_device {
u64 uverbs_cmd_mask;
int uverbs_abi_ver;
+ char node_desc[64];
__be64 node_guid;
u8 node_type;
u8 phys_port_cnt;
@@ -986,6 +990,24 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
+/**
+ * ib_modify_qp_is_ok - Check that the supplied attribute mask
+ * contains all required attributes and no attributes not allowed for
+ * the given QP state transition.
+ * @cur_state: Current QP state
+ * @next_state: Next QP state
+ * @type: QP type
+ * @mask: Mask of supplied QP attributes
+ *
+ * This function is a helper function that a low-level driver's
+ * modify_qp method can use to validate the consumer's input. It
+ * checks that cur_state and next_state are valid QP states, that a
+ * transition from cur_state to next_state is allowed by the IB spec,
+ * and that the attribute mask supplied is allowed for the transition.
+ */
+int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask);
+
int ib_register_event_handler (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);
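A low-level driver's modify_qp method would typically call the new helper before touching hardware. In this sketch, mydrv_query_state() and mydrv_hw_modify() are hypothetical driver internals; the cur_qp_state handling follows the "current QP state" fix in this merge.

#include <rdma/ib_verbs.h>

/* Sketch: validate a QP state transition with ib_modify_qp_is_ok(). */
static int mydrv_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                           int attr_mask)
{
        enum ib_qp_state cur_state, new_state;

        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state
                                                : mydrv_query_state(ibqp);
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
                return -EINVAL;

        return mydrv_hw_modify(ibqp, attr, attr_mask, cur_state, new_state);
}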
@@ -1078,7 +1100,9 @@ int ib_destroy_ah(struct ib_ah *ah);
* ib_create_srq - Creates a SRQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
*
* srq_attr->max_wr and srq_attr->max_sge are read to determine the
* requested size of the SRQ, and set to the actual values allocated
@@ -1137,7 +1161,9 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
* ib_create_qp - Creates a QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.
- * @qp_init_attr: A list of initial attributes required to create the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
*/
struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr);
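Because the init attributes are now written back, callers can size their own bookkeeping from what the device actually granted rather than from what they asked for. A sketch, assuming a valid PD and CQ; the capability numbers and the RC QP type are only examples.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Sketch: create an RC QP and read back the granted capabilities. */
static struct ib_qp *create_qp_example(struct ib_pd *pd, struct ib_cq *cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq     = cq,
                .recv_cq     = cq,
                .cap         = {
                        .max_send_wr  = 64,
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type     = IB_QPT_RC,
        };
        struct ib_qp *qp = ib_create_qp(pd, &init_attr);

        if (!IS_ERR(qp))
                /* init_attr.cap now holds the actual values allocated. */
                printk(KERN_INFO "QP 0x%x: %u send WRs granted\n",
                       qp->qp_num, init_attr.cap.max_send_wr);
        return qp;
}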