author     Doug Ledford <dledford@redhat.com>  2017-10-18 13:07:13 -0400
committer  Doug Ledford <dledford@redhat.com>  2017-10-18 13:07:13 -0400
commit     754137a769ac8f13cd6c0e1bc4fc2fa768d3da63 (patch)
tree       0a39eda714b8b0bb3522654fb326e499fbe552e7 /drivers/infiniband/ulp
parent     e980b44134c89afb65176e70aaf293d608002e83 (diff)
parent     4c532d6ce14bb3fb4e1cb2d29fafdd7d6bded51c (diff)
Merge branch 'for-next-early' into for-next

The early for-next branch was based on v4.14-rc2, while the shared pull
request I got from Mellanox used a v4.14-rc4 base. I'm making the branch
that was the shared Mellanox pull request the new for-next branch and
merging the early for-next branch into it.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c                |  10
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c                |   8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c              |   4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c               |   2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c                |  14
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c       |  42
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h       |  22
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h    |   7
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c      |  44
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c        |   1
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c  |  22
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c                    |  90
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h                    |   3
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c                  | 176
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h                  |   9
15 files changed, 324 insertions(+), 130 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7774654c2ccb..7500c28eac6b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -594,9 +594,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
- ipoib_dbg(priv, "cm recv error "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
@@ -829,11 +829,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
wc->status == IB_WC_RETRY_EXC_ERR)
ipoib_dbg(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else
ipoib_warn(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
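An aside on the format-string change above, not part of the patch itself: %#x requests printf's alternate form, which prefixes non-zero values with 0x but prints a bare 0 when the value is zero, whereas the old literal 0x%x always shows the prefix. A minimal userspace illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int vend_err = 0x2a;

            printf("vend_err %#x\n", vend_err); /* "vend_err 0x2a" */
            printf("vend_err %#x\n", 0u);       /* "vend_err 0" - no 0x for zero */
            printf("vend_err 0x%x\n", 0u);      /* "vend_err 0x0" */
            return 0;
    }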
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe690f82af29..bcb8d501618b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -192,8 +192,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
@@ -414,8 +414,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
- ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
if (!qp_work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index dcc77014018d..827461b10a01 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,7 +51,6 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
@@ -2314,7 +2313,8 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- kfree(dev_list);
+ pr_err("Failed to init port, removing it\n");
+ ipoib_remove_one(device, dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 55a73b0ed4c6..56b7240a3fc3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1146,7 +1146,7 @@ void iser_err_comp(struct ib_wc *wc, const char *type)
if (wc->status != IB_WC_WR_FLUSH_ERR) {
struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
- iser_err("%s failure: %s (%d) vend_err %x\n", type,
+ iser_err("%s failure: %s (%d) vend_err %#x\n", type,
ib_wc_status_msg(wc->status), wc->status,
wc->vendor_err);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ceabdb85df8b..720dfb3a1ac2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -788,10 +788,11 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
* the rdma cm id
*/
return 1;
- case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ /* fall through */
+ case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
break;
@@ -1569,9 +1570,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /*
- * Fall-through
- */
+ /* fall through */
default:
iscsit_release_cmd(cmd);
break;
@@ -1749,8 +1748,9 @@ isert_do_control_comp(struct work_struct *work)
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- case ISTATE_SEND_REJECT: /* FALLTHRU */
- case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
+ /* fall through */
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
ib_dev, false);
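A note on the comment churn above: the /* fall through */ annotations are the marker GCC's -Wimplicit-fallthrough heuristics recognize, so deliberate fall-throughs stay warning-free once that option is enabled, while an unannotated case that drops into the next one draws a warning. A self-contained sketch of the annotated pattern (names invented):

    /* Build with: cc -Wimplicit-fallthrough -c fallthrough.c */
    enum i_state { SEND_TASKMGTRSP, SEND_REJECT, SEND_TEXTRSP, SENT_STATUS };

    static void tmr_post_handler(void) { /* state-specific work */ }

    static enum i_state complete(enum i_state s)
    {
            switch (s) {
            case SEND_TASKMGTRSP:
                    tmr_post_handler();
                    /* fall through */
            case SEND_REJECT:
            case SEND_TEXTRSP:
                    s = SENT_STATUS;        /* shared completion path */
                    break;
            default:
                    break;
            }
            return s;
    }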
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index afa938bd26d6..4be3aef40bd2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
rcu_assign_pointer(adapter->mactbl, NULL);
synchronize_rcu();
opa_vnic_free_mac_tbl(mactbl);
+ adapter->info.vport.mac_tbl_digest = 0;
mutex_unlock(&adapter->mactbl_lock);
}
@@ -405,6 +406,42 @@ u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
return vl;
}
+/* opa_vnic_get_rc - return the routing control */
+static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ u8 proto, rout_ctrl;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
+ break;
+ default:
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
+ }
+
+ return rout_ctrl;
+}
+
/* opa_vnic_calc_entropy - calculate the packet entropy */
u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
@@ -447,7 +484,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
struct __opa_veswport_info *info = &adapter->info;
struct opa_vnic_skb_mdata *mdata;
- u8 def_port, sc, entropy, *hdr;
+ u8 def_port, sc, rc, entropy, *hdr;
u16 len, l4_hdr;
u32 dlid;
@@ -458,6 +495,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
sc = opa_vnic_get_sc(info, skb);
+ rc = opa_vnic_get_rc(info, skb);
l4_hdr = info->vesw.vesw_id;
mdata = skb_push(skb, sizeof(*mdata));
@@ -470,6 +508,6 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
}
opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
- info->vesw.pkey, entropy, sc, 0,
+ info->vesw.pkey, entropy, sc, rc,
OPA_VNIC_L4_ETHR, l4_hdr);
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index 4c434b9dd84c..e4c9bf2ef7e2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -103,6 +103,17 @@
#define OPA_VNIC_ETH_LINK_UP 1
#define OPA_VNIC_ETH_LINK_DOWN 2
+/* routing control */
+#define OPA_VNIC_ENCAP_RC_DEFAULT 0
+#define OPA_VNIC_ENCAP_RC_IPV4 4
+#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8
+#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12
+#define OPA_VNIC_ENCAP_RC_IPV6 16
+#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20
+#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24
+
+#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)
+
/**
* struct opa_vesw_info - OPA vnic switch information
* @fabric_id: 10-bit fabric id
@@ -111,8 +122,8 @@
* @pkey: partition key
* @u_mcast_dlid: unknown multicast dlid
* @u_ucast_dlid: array of unknown unicast dlids
- * @eth_mtu: MTUs for each vlan PCP
- * @eth_mtu_non_vlan: MTU for non vlan packets
+ * @rc: routing control
+ * @eth_mtu: Ethernet MTU
*/
struct opa_vesw_info {
__be16 fabric_id;
@@ -128,9 +139,10 @@ struct opa_vesw_info {
__be32 u_mcast_dlid;
__be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- __be16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- __be16 eth_mtu_non_vlan;
+ __be32 rc;
+
+ u8 rsvd3[56];
+ __be16 eth_mtu;
u8 rsvd4[2];
} __packed;
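To make the new layout concrete (my sketch, not from the patch): rc is a 32-bit word holding one 3-bit routing-control value per traffic class, each at the bit offset named above, and OPA_VNIC_ENCAP_RC_EXT(w, b) shifts the word right by OPA_VNIC_ENCAP_RC_<b> and masks with 0x7. A standalone check with a hypothetical rc value:

    #include <assert.h>
    #include <stdint.h>

    #define OPA_VNIC_ENCAP_RC_DEFAULT   0
    #define OPA_VNIC_ENCAP_RC_IPV4_TCP  12
    #define OPA_VNIC_ENCAP_RC_IPV6_UDP  24

    #define OPA_VNIC_ENCAP_RC_EXT(w, b) \
            (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)

    int main(void)
    {
            /* Hypothetical rc word: 5 for IPv4/TCP, 3 for IPv6/UDP, 1 default. */
            uint32_t rc = (5u << OPA_VNIC_ENCAP_RC_IPV4_TCP) |
                          (3u << OPA_VNIC_ENCAP_RC_IPV6_UDP) |
                          (1u << OPA_VNIC_ENCAP_RC_DEFAULT);

            assert(OPA_VNIC_ENCAP_RC_EXT(rc, IPV4_TCP) == 5);
            assert(OPA_VNIC_ENCAP_RC_EXT(rc, IPV6_UDP) == 3);
            assert(OPA_VNIC_ENCAP_RC_EXT(rc, DEFAULT) == 1);
            return 0;
    }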
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index ca29e6d5aedc..afd95f432262 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -89,9 +89,10 @@ struct __opa_vesw_info {
u32 u_mcast_dlid;
u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- u16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- u16 eth_mtu_non_vlan;
+ u32 rc;
+
+ u8 rsvd3[56];
+ u16 eth_mtu;
u8 rsvd4[2];
} __packed;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 1a3c25364b64..ce57e0f10289 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -112,6 +112,27 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
return rc;
}
+static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+
+ mutex_lock(&adapter->lock);
+ /* Operational state can only be DROP_ALL or FORWARDING */
+ if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
+ info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ } else {
+ info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ }
+
+ if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
+ netif_dormant_off(adapter->netdev);
+ else
+ netif_dormant_on(adapter->netdev);
+ mutex_unlock(&adapter->lock);
+}
+
/* opa_vnic_process_vema_config - process vema configuration updates */
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
{
@@ -130,7 +151,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
memcpy(saddr.sa_data, info->vport.base_mac_addr,
ARRAY_SIZE(info->vport.base_mac_addr));
mutex_lock(&adapter->lock);
- eth_mac_addr(netdev, &saddr);
+ eth_commit_mac_addr_change(netdev, &saddr);
memcpy(adapter->vema_mac_addr,
info->vport.base_mac_addr, ETH_ALEN);
mutex_unlock(&adapter->lock);
@@ -140,7 +161,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
/* Handle MTU limit change */
rtnl_lock();
- netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu_non_vlan,
+ netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
netdev->min_mtu);
if (netdev->mtu > netdev->max_mtu)
dev_set_mtu(netdev, netdev->max_mtu);
@@ -164,14 +185,8 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
OPA_VNIC_INVALID_PORT;
- /* Operational state can only be DROP_ALL or FORWARDING */
- if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING) {
- info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
- netif_dormant_off(netdev);
- } else {
- info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
- netif_dormant_on(netdev);
- }
+ /* update state */
+ opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
}
/*
@@ -183,6 +198,7 @@ static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
}
/* opa_vnic_set_mac_addr - change mac address */
@@ -268,8 +284,8 @@ static int opa_netdev_open(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, true);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
@@ -287,8 +303,8 @@ static int opa_netdev_close(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, false);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
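The helper introduced above folds two inputs into one rule: the virtual port forwards only when the EM-configured state is FORWARDING and the Ethernet interface is actually up; every other combination drops and reports link-down. A toy model of that decision (constant values assumed for illustration; see opa_vnic_encap.h for the real ones):

    #include <assert.h>
    #include <stdbool.h>

    enum { STATE_DROP_ALL = 1, STATE_FORWARDING = 3 };   /* assumed values */

    static int oper_state(int config_state, bool netdev_up)
    {
            return (config_state == STATE_FORWARDING && netdev_up) ?
                    STATE_FORWARDING : STATE_DROP_ALL;
    }

    int main(void)
    {
            assert(oper_state(STATE_FORWARDING, true)  == STATE_FORWARDING);
            assert(oper_state(STATE_FORWARDING, false) == STATE_DROP_ALL);
            assert(oper_state(STATE_DROP_ALL,   true)  == STATE_DROP_ALL);
            return 0;
    }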
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 21f0b481edcc..4b615c1451e7 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -186,6 +186,7 @@ static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}
/**
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index c2733964379c..868b5aec1537 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -176,11 +176,10 @@ void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
- memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- info->eth_mtu[i] = cpu_to_be16(src->eth_mtu[i]);
+ info->rc = cpu_to_be32(src->rc);
- info->eth_mtu_non_vlan = cpu_to_be16(src->eth_mtu_non_vlan);
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+ info->eth_mtu = cpu_to_be16(src->eth_mtu);
memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
}
@@ -211,11 +210,10 @@ void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
- memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- dst->eth_mtu[i] = be16_to_cpu(info->eth_mtu[i]);
+ dst->rc = be32_to_cpu(info->rc);
- dst->eth_mtu_non_vlan = be16_to_cpu(info->eth_mtu_non_vlan);
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+ dst->eth_mtu = be16_to_cpu(info->eth_mtu);
memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
}
@@ -348,7 +346,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
struct opa_veswport_iface_macs *macs)
{
- u16 start_idx, tot_macs, num_macs, idx = 0, count = 0;
+ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
struct netdev_hw_addr *ha;
start_idx = be16_to_cpu(macs->start_idx);
@@ -359,8 +357,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
/* Do not include EM specified MAC address */
if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
- ARRAY_SIZE(adapter->info.vport.base_mac_addr)))
+ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
+ em_macs++;
continue;
+ }
if (start_idx > idx++)
continue;
@@ -383,7 +383,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
}
tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
- netdev_uc_count(adapter->netdev);
+ netdev_uc_count(adapter->netdev) - em_macs;
macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
macs->num_macs_in_msg = cpu_to_be16(count);
macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fa5ccdb3bb2a..972d4b3c5223 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -464,20 +464,20 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
/**
* srp_destroy_qp() - destroy an RDMA queue pair
- * @qp: RDMA queue pair.
+ * @ch: SRP RDMA channel.
*
* Drain the qp before destroying it. This avoids that the receive
* completion handler can access the queue pair while it is
* being destroyed.
*/
-static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
+static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
spin_lock_irq(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
spin_unlock_irq(&ch->lock);
- ib_drain_qp(qp);
- ib_destroy_qp(qp);
+ ib_drain_qp(ch->qp);
+ ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
@@ -550,7 +550,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
}
if (ch->qp)
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
if (ch->recv_cq)
ib_free_cq(ch->recv_cq);
if (ch->send_cq)
@@ -617,7 +617,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
ib_destroy_fmr_pool(ch->fmr_pool);
}
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
ib_free_cq(ch->send_cq);
ib_free_cq(ch->recv_cq);
@@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
- int ret;
+ int ret = -ENODEV;
ch->path.numb_path = 1;
init_completion(&ch->done);
+ /*
+ * Avoid that the SCSI host can be removed by srp_remove_target()
+ * before srp_path_rec_completion() is called.
+ */
+ if (!scsi_host_get(target->scsi_host))
+ goto out;
+
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
@@ -684,18 +691,41 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->path_query);
- if (ch->path_query_id < 0)
- return ch->path_query_id;
+ ret = ch->path_query_id;
+ if (ret < 0)
+ goto put;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
- return ret;
+ goto put;
- if (ch->status < 0)
+ ret = ch->status;
+ if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n");
- return ch->status;
+put:
+ scsi_host_put(target->scsi_host);
+
+out:
+ return ret;
+}
+
+static u8 srp_get_subnet_timeout(struct srp_host *host)
+{
+ struct ib_port_attr attr;
+ int ret;
+ u8 subnet_timeout = 18;
+
+ ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
+ if (ret == 0)
+ subnet_timeout = attr.subnet_timeout;
+
+ if (unlikely(subnet_timeout < 15))
+ pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
+ dev_name(&host->srp_dev->dev->dev), subnet_timeout);
+
+ return subnet_timeout;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
@@ -706,6 +736,9 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
struct srp_login_req priv;
} *req = NULL;
int status;
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
@@ -728,8 +761,8 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* module parameters if anyone cared about setting them.
*/
req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = 20;
- req->param.local_cm_response_timeout = 20;
+ req->param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->param.local_cm_response_timeout = subnet_timeout + 2;
req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
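A gloss on the two hunks above (my reading): IB expresses the subnet timeout and the CM response timeouts as exponents of 4.096 µs, so deriving the CM values as subnet_timeout + 2 budgets four subnet timeouts per CM response instead of a fixed encoding of 20. A quick conversion sketch; the helper name is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* IB encodes these timeouts as an exponent t meaning 4.096 us * 2^t,
     * so "subnet_timeout + 2" is four times the subnet timeout. With the
     * driver's fallback default of 18 this reproduces the old hard-coded 20. */
    static uint64_t ib_timeout_ns(unsigned int t)
    {
            return 4096ull << t;    /* 4.096 us == 4096 ns */
    }

    int main(void)
    {
            unsigned int subnet_timeout = 18;

            printf("subnet timeout     : %.0f ms\n",
                   ib_timeout_ns(subnet_timeout) / 1e6);
            printf("CM response timeout: %.0f ms\n",
                   ib_timeout_ns(subnet_timeout + 2) / 1e6);
            return 0;
    }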
@@ -1279,7 +1312,6 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
@@ -1295,9 +1327,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
if (state->npages == 0)
return 0;
- if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (state->npages == 1 && target->global_rkey) {
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- pd->unsafe_global_rkey);
+ target->global_rkey);
goto reset_state;
}
@@ -1337,7 +1369,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
@@ -1353,12 +1384,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (sg_nents == 1 && target->global_rkey) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
- pd->unsafe_global_rkey);
+ target->global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
@@ -1520,7 +1551,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
- target->pd->unsafe_global_rkey);
+ target->global_rkey);
}
return 0;
@@ -1618,7 +1649,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
- struct ib_pd *pd = target->pd;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count, ret;
@@ -1654,7 +1684,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (count == 1 && target->global_rkey) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1664,7 +1694,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(pd->unsafe_global_rkey);
+ buf->key = cpu_to_be32(target->global_rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1735,14 +1765,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
- if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (!target->global_rkey) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
- idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
+ idb_rkey = cpu_to_be32(target->global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -2403,7 +2433,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, htonl(ntohs(cpi->redirect_lid)));
+ sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
ch->path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
@@ -3318,8 +3348,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
- target->pd = host->srp_dev->pd;
target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_rkey = host->srp_dev->global_rkey;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3638,6 +3668,10 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
+ WARN_ON_ONCE(srp_dev->global_rkey == 0);
+ }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ab9077b81d5a..a814f5ef16f9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,6 +90,7 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
+ u32 global_rkey;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
@@ -179,7 +180,7 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
- struct ib_pd *pd;
+ u32 global_rkey;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9e8e9220f816..304855b9b537 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -295,6 +295,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
{
struct srpt_device *sdev = sport->sdev;
struct ib_dm_ioc_profile *iocp;
+ int send_queue_depth;
iocp = (struct ib_dm_ioc_profile *)mad->data;
@@ -310,6 +311,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
return;
}
+ if (sdev->use_srq)
+ send_queue_depth = sdev->srq_size;
+ else
+ send_queue_depth = min(SRPT_RQ_SIZE,
+ sdev->device->attrs.max_qp_wr);
+
memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
@@ -322,7 +329,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
- iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
@@ -686,6 +693,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
{
int i;
+ if (!ioctx_ring)
+ return;
+
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
kfree(ioctx_ring);
@@ -757,7 +767,7 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
/**
* srpt_post_recv() - Post an IB receive request.
*/
-static int srpt_post_recv(struct srpt_device *sdev,
+static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
@@ -766,7 +776,7 @@ static int srpt_post_recv(struct srpt_device *sdev,
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma;
list.length = srp_max_req_size;
- list.lkey = sdev->pd->local_dma_lkey;
+ list.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_recv_done;
wr.wr_cqe = &ioctx->ioctx.cqe;
@@ -774,7 +784,10 @@ static int srpt_post_recv(struct srpt_device *sdev,
wr.sg_list = &list;
wr.num_sge = 1;
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ if (sdev->use_srq)
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ else
+ return ib_post_recv(ch->qp, &wr, &bad_wr);
}
/**
@@ -1517,7 +1530,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
break;
}
- srpt_post_recv(ch->sport->sdev, recv_ioctx);
+ srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
return;
out_wait:
@@ -1616,7 +1629,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
u32 srp_sq_size = sport->port_attrib.srp_sq_size;
- int ret;
+ int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1640,7 +1653,6 @@ retry:
= (void(*)(struct ib_event *, void*))srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
- qp_init->srq = sdev->srq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
/*
@@ -1650,10 +1662,16 @@ retry:
* both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
+ if (sdev->use_srq) {
+ qp_init->srq = sdev->srq;
+ } else {
+ qp_init->cap.max_recv_wr = ch->rq_size;
+ qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
+ }
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
@@ -1669,6 +1687,10 @@ retry:
goto err_destroy_cq;
}
+ if (!sdev->use_srq)
+ for (i = 0; i < ch->rq_size; i++)
+ srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
+
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
@@ -1818,6 +1840,10 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_size, DMA_TO_DEVICE);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
mutex_lock(&sdev->mutex);
list_del_init(&ch->list);
if (ch->release_done)
@@ -1953,10 +1979,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->cm_id = cm_id;
cm_id->context = ch;
/*
- * Avoid QUEUE_FULL conditions by limiting the number of buffers used
- * for the SRP protocol to the command queue size.
+ * ch->rq_size should be at least as large as the initiator queue
+ * depth to avoid that the initiator driver has to report QUEUE_FULL
+ * to the SCSI mid-layer.
*/
- ch->rq_size = SRPT_RQ_SIZE;
+ ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
@@ -1974,6 +2001,19 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->ioctx_ring[i]->ch = ch;
list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
}
+ if (!sdev->use_srq) {
+ ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_recv_ring[0]),
+ srp_max_req_size,
+ DMA_FROM_DEVICE);
+ if (!ch->ioctx_recv_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
+ rej->reason =
+ cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ goto free_ring;
+ }
+ }
ret = srpt_create_ch_ib(ch);
if (ret) {
@@ -1981,7 +2021,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
- goto free_ring;
+ goto free_recv_ring;
}
ret = srpt_ch_qp_rtr(ch, ch->qp);
@@ -2072,6 +2112,11 @@ release_channel:
destroy_ib:
srpt_destroy_ch_ib(ch);
+free_recv_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ ch->sport->sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
@@ -2342,7 +2387,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
sge.addr = ioctx->ioctx.dma;
sge.length = resp_len;
- sge.lkey = sdev->pd->local_dma_lkey;
+ sge.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_send_done;
send_wr.next = NULL;
@@ -2490,6 +2535,8 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->pd))
goto free_dev;
+ sdev->lkey = sdev->pd->local_dma_lkey;
+
sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
srq_attr.event_handler = srpt_srq_event;
@@ -2499,20 +2546,38 @@ static void srpt_add_one(struct ib_device *device)
srq_attr.attr.srq_limit = 0;
srq_attr.srq_type = IB_SRQT_BASIC;
- sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
- if (IS_ERR(sdev->srq))
- goto err_pd;
+ sdev->srq = sdev->port[0].port_attrib.use_srq ?
+ ib_create_srq(sdev->pd, &srq_attr) : ERR_PTR(-ENOTSUPP);
+ if (IS_ERR(sdev->srq)) {
+ pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(sdev->srq));
+
+ /* SRQ not supported. */
+ sdev->use_srq = false;
+ } else {
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n",
+ sdev->srq_size, sdev->device->attrs.max_srq_wr,
+ device->name);
+
+ sdev->use_srq = true;
- pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
- __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
- device->name);
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size,
+ DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring)
+ goto err_pd;
+
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
if (!srpt_service_guid)
srpt_service_guid = be64_to_cpu(device->node_guid);
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id))
- goto err_srq;
+ goto err_ring;
/* print out target login information */
pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
@@ -2532,16 +2597,6 @@ static void srpt_add_one(struct ib_device *device)
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
- sdev->ioctx_ring = (struct srpt_recv_ioctx **)
- srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
- sizeof(*sdev->ioctx_ring[0]),
- srp_max_req_size, DMA_FROM_DEVICE);
- if (!sdev->ioctx_ring)
- goto err_event;
-
- for (i = 0; i < sdev->srq_size; ++i)
- srpt_post_recv(sdev, sdev->ioctx_ring[i]);
-
WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
@@ -2551,12 +2606,13 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
sdev->device->name, i);
- goto err_ring;
+ goto err_event;
}
}
@@ -2569,16 +2625,16 @@ out:
pr_debug("added %s.\n", device->name);
return;
-err_ring:
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size,
- DMA_FROM_DEVICE);
err_event:
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
ib_destroy_cm_id(sdev->cm_id);
-err_srq:
- ib_destroy_srq(sdev->srq);
+err_ring:
+ if (sdev->use_srq)
+ ib_destroy_srq(sdev->srq);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size,
+ DMA_FROM_DEVICE);
err_pd:
ib_dealloc_pd(sdev->pd);
free_dev:
@@ -2622,12 +2678,12 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&srpt_dev_lock);
srpt_release_sdev(sdev);
- ib_destroy_srq(sdev->srq);
- ib_dealloc_pd(sdev->pd);
-
+ if (sdev->use_srq)
+ ib_destroy_srq(sdev->srq);
srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
- sdev->ioctx_ring = NULL;
+ ib_dealloc_pd(sdev->pd);
+
kfree(sdev);
}
@@ -2777,7 +2833,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
const char *p;
unsigned len, count, leading_zero_bytes;
- int ret, rc;
+ int ret;
p = name;
if (strncasecmp(p, "0x", 2) == 0)
@@ -2789,10 +2845,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
- if (rc < 0)
- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
- ret = 0;
+ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (ret < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
return ret;
}
@@ -2926,14 +2981,43 @@ static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
return count;
}
+static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sprintf(page, "%d\n", sport->port_attrib.use_srq);
+}
+
+static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val != !!val)
+ return -EINVAL;
+ sport->port_attrib.use_srq = val;
+
+ return count;
+}
+
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
&srpt_tpg_attrib_attr_srp_max_rdma_size,
&srpt_tpg_attrib_attr_srp_max_rsp_size,
&srpt_tpg_attrib_attr_srp_sq_size,
+ &srpt_tpg_attrib_attr_use_srq,
NULL,
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 1b817e51b84b..673387d365a3 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -252,6 +252,7 @@ enum rdma_ch_state {
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
* @ioctx_ring: Send ring.
+ * @ioctx_recv_ring: Receive I/O context ring.
* @list: Node for insertion in the srpt_device.rch_list list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
@@ -281,6 +282,7 @@ struct srpt_rdma_ch {
struct list_head free_list;
enum rdma_ch_state state;
struct srpt_send_ioctx **ioctx_ring;
+ struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
struct se_session *sess;
@@ -295,11 +297,13 @@ struct srpt_rdma_ch {
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size.
+ * @use_srq: Whether or not to use SRQ.
*/
struct srpt_port_attrib {
u32 srp_max_rdma_size;
u32 srp_max_rsp_size;
u32 srp_sq_size;
+ bool use_srq;
};
/**
@@ -343,10 +347,11 @@ struct srpt_port {
* struct srpt_device - Information associated by SRPT with a single HCA.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
- * @mr: L_Key (local key) with write access to all local memory.
+ * @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
* @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
* @ch_releaseQ: Enables waiting for removal from rch_list.
@@ -358,9 +363,11 @@ struct srpt_port {
struct srpt_device {
struct ib_device *device;
struct ib_pd *pd;
+ u32 lkey;
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
struct list_head rch_list;
wait_queue_head_t ch_releaseQ;