author    David S. Miller <davem@davemloft.net>  2012-10-09 13:14:32 -0400
committer David S. Miller <davem@davemloft.net>  2012-10-09 13:14:32 -0400
commit    8dd9117cc7a021ced1c5cf177e2d44dd92b88617 (patch)
tree      cad990f58f9ec6d400226dda86718fc10781416e /drivers/infiniband
parent    16e310ae6ed352c4963b1f2413fcd88fa693eeda (diff)
parent    547b1e81afe3119f7daf702cc03b158495535a25 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Pulled mainline in order to get the UAPI infrastructure already merged
before I pull in David Howells's UAPI trees for networking.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c                 |   6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c      |   4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c               |   5
-rw-r--r--  drivers/infiniband/hw/nes/nes.h               |   3
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         |  16
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c      |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h          |   4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c       |  31
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c     |  31
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h      |  17
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c      | 130
12 files changed, 148 insertions(+), 103 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 4fff27a7e37c..a7568c34a1aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2648,8 +2648,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.responder_resources = conn_param->responder_resources;
req.initiator_depth = conn_param->initiator_depth;
req.flow_control = conn_param->flow_control;
- req.retry_count = conn_param->retry_count;
- req.rnr_retry_count = conn_param->rnr_retry_count;
+ req.retry_count = min_t(u8, 7, conn_param->retry_count);
+ req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2770,7 +2770,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.initiator_depth = conn_param->initiator_depth;
rep.failover_accepted = 0;
rep.flow_control = conn_param->flow_control;
- rep.rnr_retry_count = conn_param->rnr_retry_count;
+ rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
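Note: the clamps above exist because the IB CM REQ/REP wire format carries
retry_count and rnr_retry_count as 3-bit fields, so values above 7 cannot be
encoded. A minimal sketch of the same guard (cap_retry_count() is a
hypothetical helper, not part of this patch):

#include <linux/kernel.h>	/* min_t(), u8 */

/* Clamp a user-supplied retry count to the 3-bit range the CM can carry. */
static inline u8 cap_retry_count(u8 requested)
{
	return min_t(u8, 7, requested);	/* same idiom as the hunk above */
}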
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 45ee89b65c23..1a1d5d99fcf9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -117,7 +117,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
physical = galpas->user.fw_handle;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
- /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+ /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
vma->vm_page_prot);
if (unlikely(ret)) {
@@ -139,7 +139,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
u64 start, ofs;
struct page *page;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
start = vma->vm_start;
for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 736d9edbdbe7..3eb7e454849b 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1225,7 +1225,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &ipath_file_vm_ops;
- vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
ret = 1;
bail:
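Note: the ehca and ipath hunks above (and the qib hunk below) track the
mainline retirement of VM_RESERVED, whose remaining semantics are now spelled
out explicitly as VM_DONTEXPAND plus VM_DONTDUMP. A hedged sketch of the
resulting mmap-handler idiom (foo_mmap() is a hypothetical handler, not code
from this diff):

#include <linux/fs.h>
#include <linux/mm.h>

static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND	/* VMA may not grow via mremap() */
		      |  VM_DONTDUMP;	/* and is excluded from core dumps */
	return 0;
}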
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 7140199f562e..748db2d3e465 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -79,11 +79,6 @@ int disable_mpa_crc = 0;
module_param(disable_mpa_crc, int, 0644);
MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
-unsigned int send_first = 0;
-module_param(send_first, int, 0644);
-MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
-
-
unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
module_param(nes_drv_opt, int, 0644);
MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 0da62b904d00..5cac29e6bc1c 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -57,7 +57,7 @@
#define QUEUE_DISCONNECTS
#define DRV_NAME "iw_nes"
-#define DRV_VERSION "1.5.0.0"
+#define DRV_VERSION "1.5.0.1"
#define PFX DRV_NAME ": "
/*
@@ -172,7 +172,6 @@ extern int interrupt_mod_interval;
extern int nes_if_count;
extern int mpa_version;
extern int disable_mpa_crc;
-extern unsigned int send_first;
extern unsigned int nes_drv_opt;
extern unsigned int nes_debug_level;
extern unsigned int wqm_quanta;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 1dadcf388c02..cd0ecb215cca 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3006,6 +3006,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (nesqp->hw_iwarp_state) {
case NES_AEQE_IWARP_STATE_CLOSING:
next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ break;
case NES_AEQE_IWARP_STATE_TERMINATE:
next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
break;
@@ -3068,18 +3069,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
nesqp->ibqp_state = attr->qp_state;
- if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
- (u32)NES_CQP_QP_IWARP_STATE_RTS) &&
- ((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
- (u32)NES_CQP_QP_IWARP_STATE_RTS)) {
- nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
- nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
- nesqp->iwarp_state);
- } else {
- nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
- nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
- nesqp->iwarp_state);
- }
+ nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+ nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+ nesqp->iwarp_state);
}
if (attr_mask & IB_QP_ACCESS_FLAGS) {
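Note: the break added in this hunk matters because the CLOSING case previously
fell through and immediately overwrote next_iwarp_state with the TERMINATE
value. A self-contained reduction of the bug and its fix (all names are
illustrative, not the driver's):

#include <stdio.h>

enum hw_state  { HW_CLOSING, HW_TERMINATE };
enum cqp_state { CQP_CLOSING, CQP_TERMINATE };

static enum cqp_state next_state(enum hw_state s)
{
	enum cqp_state next = CQP_CLOSING;

	switch (s) {
	case HW_CLOSING:
		next = CQP_CLOSING;
		break;	/* the fix: without it, control falls through... */
	case HW_TERMINATE:
		next = CQP_TERMINATE;	/* ...and clobbers 'next' here */
		break;
	}
	return next;
}

int main(void)
{
	printf("%d\n", next_state(HW_CLOSING));	/* 0, i.e. CQP_CLOSING */
	return 0;
}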
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index faa44cb08071..959a5c4ff812 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -971,7 +971,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
ret = 1;
bail:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 196eb52f0035..07ca6fd5546b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -535,14 +535,14 @@ void ipoib_drain_cq(struct net_device *dev);
void ipoib_set_ethtool_ops(struct net_device *dev);
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
#define IPOIB_FLAGS_RC 0x80
#define IPOIB_FLAGS_UC 0x40
/* We don't support UC connections at the moment */
#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
extern int ipoib_max_conn_qp;
static inline int ipoib_cm_admin_enabled(struct net_device *dev)
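Note: hoisting these macros above the #ifdef appears to be what lets
ipoib_set_mode(), relocated to the always-built ipoib_main.c below, keep using
IPOIB_CM_SUPPORTED() when CONFIG_INFINIBAND_IPOIB_CM is disabled. The
dependency, sketched (mirrors the hunk above; not new code):

/* Shared by CM and non-CM builds, so it sits outside the config guard. */
#define IPOIB_FLAGS_RC		0x80
#define IPOIB_CM_SUPPORTED(ha)	(ha[0] & (IPOIB_FLAGS_RC))

#ifdef CONFIG_INFINIBAND_IPOIB_CM
/* CM-only declarations follow here... */
#endif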
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 175581cf478c..72ae63f0072d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,37 +1448,6 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
return sprintf(buf, "datagram\n");
}
-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
-
- /* flush paths if we switch modes so that connections are restarted */
- if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
- set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
- ipoib_warn(priv, "enabling connected mode "
- "will cause multicast packet drops\n");
- netdev_update_features(dev);
- rtnl_unlock();
- priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
- ipoib_flush_paths(dev);
- rtnl_lock();
- return 0;
- }
-
- if (!strcmp(buf, "datagram\n")) {
- clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
- netdev_update_features(dev);
- dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
- rtnl_unlock();
- ipoib_flush_paths(dev);
- rtnl_lock();
- return 0;
- }
-
- return -EINVAL;
-}
-
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d576c7aad89d..6fdc9e78da0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,6 +215,37 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ /* flush paths if we switch modes so that connections are restarted */
+ if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+ set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ ipoib_warn(priv, "enabling connected mode "
+ "will cause multicast packet drops\n");
+ netdev_update_features(dev);
+ rtnl_unlock();
+ priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+ ipoib_flush_paths(dev);
+ rtnl_lock();
+ return 0;
+ }
+
+ if (!strcmp(buf, "datagram\n")) {
+ clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+ netdev_update_features(dev);
+ dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ rtnl_unlock();
+ ipoib_flush_paths(dev);
+ rtnl_lock();
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 296be431a0e9..ef7d3be46c31 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -177,6 +177,7 @@ struct iser_data_buf {
/* fwd declarations */
struct iser_device;
+struct iser_cq_desc;
struct iscsi_iser_conn;
struct iscsi_iser_task;
struct iscsi_endpoint;
@@ -226,16 +227,21 @@ struct iser_rx_desc {
char pad[ISER_RX_PAD_SIZE];
} __attribute__((packed));
+#define ISER_MAX_CQ 4
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_cq *rx_cq;
- struct ib_cq *tx_cq;
+ struct ib_cq *rx_cq[ISER_MAX_CQ];
+ struct ib_cq *tx_cq[ISER_MAX_CQ];
struct ib_mr *mr;
- struct tasklet_struct cq_tasklet;
+ struct tasklet_struct cq_tasklet[ISER_MAX_CQ];
struct ib_event_handler event_handler;
struct list_head ig_list; /* entry in ig devices list */
int refcount;
+ int cq_active_qps[ISER_MAX_CQ];
+ int cqs_used;
+ struct iser_cq_desc *cq_desc;
};
struct iser_conn {
@@ -287,6 +293,11 @@ struct iser_page_vec {
int data_size;
};
+struct iser_cq_desc {
+ struct iser_device *device;
+ int cq_index;
+};
+
struct iser_global {
struct mutex device_list_mutex;/* */
struct list_head device_list; /* all iSER devices */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2dddabd8fcf9..95a49affee44 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -70,32 +70,50 @@ static void iser_event_handler(struct ib_event_handler *handler,
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
+ int i, j;
+ struct iser_cq_desc *cq_desc;
+
+ device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+ iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+ device->ib_device->name, device->ib_device->num_comp_vectors);
+
+ device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+ GFP_KERNEL);
+ if (device->cq_desc == NULL)
+ goto cq_desc_err;
+ cq_desc = device->cq_desc;
+
device->pd = ib_alloc_pd(device->ib_device);
if (IS_ERR(device->pd))
goto pd_err;
- device->rx_cq = ib_create_cq(device->ib_device,
- iser_cq_callback,
- iser_cq_event_callback,
- (void *)device,
- ISER_MAX_RX_CQ_LEN, 0);
- if (IS_ERR(device->rx_cq))
- goto rx_cq_err;
+ for (i = 0; i < device->cqs_used; i++) {
+ cq_desc[i].device = device;
+ cq_desc[i].cq_index = i;
+
+ device->rx_cq[i] = ib_create_cq(device->ib_device,
+ iser_cq_callback,
+ iser_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_RX_CQ_LEN, i);
+ if (IS_ERR(device->rx_cq[i]))
+ goto cq_err;
- device->tx_cq = ib_create_cq(device->ib_device,
- NULL, iser_cq_event_callback,
- (void *)device,
- ISER_MAX_TX_CQ_LEN, 0);
+ device->tx_cq[i] = ib_create_cq(device->ib_device,
+ NULL, iser_cq_event_callback,
+ (void *)&cq_desc[i],
+ ISER_MAX_TX_CQ_LEN, i);
- if (IS_ERR(device->tx_cq))
- goto tx_cq_err;
+ if (IS_ERR(device->tx_cq[i]))
+ goto cq_err;
- if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
- goto cq_arm_err;
+ if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+ goto cq_err;
- tasklet_init(&device->cq_tasklet,
- iser_cq_tasklet_fn,
- (unsigned long)device);
+ tasklet_init(&device->cq_tasklet[i],
+ iser_cq_tasklet_fn,
+ (unsigned long)&cq_desc[i]);
+ }
device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +131,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
handler_err:
ib_dereg_mr(device->mr);
dma_mr_err:
- tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
- ib_destroy_cq(device->tx_cq);
-tx_cq_err:
- ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+ for (j = 0; j < device->cqs_used; j++)
+ tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+ for (j = 0; j < i; j++) {
+ if (device->tx_cq[j])
+ ib_destroy_cq(device->tx_cq[j]);
+ if (device->rx_cq[j])
+ ib_destroy_cq(device->rx_cq[j]);
+ }
ib_dealloc_pd(device->pd);
pd_err:
+ kfree(device->cq_desc);
+cq_desc_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
@@ -131,18 +154,24 @@ pd_err:
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
+ int i;
BUG_ON(device->mr == NULL);
- tasklet_kill(&device->cq_tasklet);
+ for (i = 0; i < device->cqs_used; i++) {
+ tasklet_kill(&device->cq_tasklet[i]);
+ (void)ib_destroy_cq(device->tx_cq[i]);
+ (void)ib_destroy_cq(device->rx_cq[i]);
+ device->tx_cq[i] = NULL;
+ device->rx_cq[i] = NULL;
+ }
+
(void)ib_unregister_event_handler(&device->event_handler);
(void)ib_dereg_mr(device->mr);
- (void)ib_destroy_cq(device->tx_cq);
- (void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
+ kfree(device->cq_desc);
+
device->mr = NULL;
- device->tx_cq = NULL;
- device->rx_cq = NULL;
device->pd = NULL;
}
@@ -157,6 +186,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
struct ib_qp_init_attr init_attr;
int req_err, resp_err, ret = -ENOMEM;
struct ib_fmr_pool_param params;
+ int index, min_index = 0;
BUG_ON(ib_conn->device == NULL);
@@ -220,10 +250,20 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
memset(&init_attr, 0, sizeof init_attr);
+ mutex_lock(&ig.connlist_mutex);
+ /* select the CQ with the minimal number of usages */
+ for (index = 0; index < device->cqs_used; index++)
+ if (device->cq_active_qps[index] <
+ device->cq_active_qps[min_index])
+ min_index = index;
+ device->cq_active_qps[min_index]++;
+ mutex_unlock(&ig.connlist_mutex);
+ iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = device->tx_cq;
- init_attr.recv_cq = device->rx_cq;
+ init_attr.send_cq = device->tx_cq[min_index];
+ init_attr.recv_cq = device->rx_cq[min_index];
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
@@ -252,6 +292,7 @@ out_err:
*/
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
+ int cq_index;
BUG_ON(ib_conn == NULL);
iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
@@ -262,9 +303,12 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
if (ib_conn->fmr_pool != NULL)
ib_destroy_fmr_pool(ib_conn->fmr_pool);
- if (ib_conn->qp != NULL)
- rdma_destroy_qp(ib_conn->cma_id);
+ if (ib_conn->qp != NULL) {
+ cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+ ib_conn->device->cq_active_qps[cq_index]--;
+ rdma_destroy_qp(ib_conn->cma_id);
+ }
/* if cma handler context, the caller acts s.t the cma destroy the id */
if (ib_conn->cma_id != NULL && can_destroy_id)
rdma_destroy_id(ib_conn->cma_id);
@@ -791,9 +835,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
}
}
-static int iser_drain_tx_cq(struct iser_device *device)
+static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
{
- struct ib_cq *cq = device->tx_cq;
+ struct ib_cq *cq = device->tx_cq[cq_index];
struct ib_wc wc;
struct iser_tx_desc *tx_desc;
struct iser_conn *ib_conn;
@@ -822,8 +866,10 @@ static int iser_drain_tx_cq(struct iser_device *device)
static void iser_cq_tasklet_fn(unsigned long data)
{
- struct iser_device *device = (struct iser_device *)data;
- struct ib_cq *cq = device->rx_cq;
+ struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+ struct iser_device *device = cq_desc->device;
+ int cq_index = cq_desc->cq_index;
+ struct ib_cq *cq = device->rx_cq[cq_index];
struct ib_wc wc;
struct iser_rx_desc *desc;
unsigned long xfer_len;
@@ -851,19 +897,21 @@ static void iser_cq_tasklet_fn(unsigned long data)
}
completed_rx++;
if (!(completed_rx & 63))
- completed_tx += iser_drain_tx_cq(device);
+ completed_tx += iser_drain_tx_cq(device, cq_index);
}
/* #warning "it is assumed here that arming CQ only once its empty" *
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- completed_tx += iser_drain_tx_cq(device);
+ completed_tx += iser_drain_tx_cq(device, cq_index);
iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
- struct iser_device *device = (struct iser_device *)cq_context;
+ struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+ struct iser_device *device = cq_desc->device;
+ int cq_index = cq_desc->cq_index;
- tasklet_schedule(&device->cq_tasklet);
+ tasklet_schedule(&device->cq_tasklet[cq_index]);
}