Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                 |   3
-rw-r--r--  drivers/infiniband/core/cma.c                |  18
-rw-r--r--  drivers/infiniband/core/counters.c           |   4
-rw-r--r--  drivers/infiniband/core/cq.c                 |  14
-rw-r--r--  drivers/infiniband/core/mad.c                |   3
-rw-r--r--  drivers/infiniband/core/rdma_core.c          |  42
-rw-r--r--  drivers/infiniband/core/sa_query.c           |  38
-rw-r--r--  drivers/infiniband/core/ucma.c               |   4
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c        |   1
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c         |  19
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c            |  37
-rw-r--r--  drivers/infiniband/hw/hfi1/iowait.h          |   2
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib.h           |   6
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c        | 104
-rw-r--r--  drivers/infiniband/hw/hfi1/netdev_rx.c       |   2
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c              |   7
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c        |   5
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h     |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h  |   7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c   |   4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c   |  51
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c      |   7
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c            |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c             |  27
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c              |  84
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c             |   8
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c         |   4
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c      |  13
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c            |  39
-rw-r--r--  drivers/infiniband/sw/rdmavt/rc.c            |   4
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c         |   3
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c        |   3
32 files changed, 340 insertions(+), 227 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 9ce787e37e22..dc0558b23158 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
struct cm_work *work)
+ __releases(&cm_id_priv->lock)
{
bool immediate;
@@ -3675,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
+ spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
+ spin_unlock_irq(&cm.lock);
return 0;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3d7cc9f0f3d4..c30cf5307ce3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;
+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
}
+ mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
+ mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
+ lockdep_assert_held(&lock);
+
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list;
int ret;
+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);
+ lockdep_assert_held(&lock);
+
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;
+ lockdep_assert_held(&lock);
+
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
unsigned short snum;
int ret;
+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 2257d7f7810f..738d1faf4bba 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
return ret;
}
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
{
struct ib_device *dev = counter->device;
struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
if (!port_counter->hstats)
return;
+ rdma_counter_query_stats(counter);
+
for (i = 0; i < counter->stats->num_counters; i++)
port_counter->hstats->value[i] += counter->stats->value[i];
}
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 655795bfa0ee..513825e424bf 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+ if (!cq->dim)
+ return;
+
+ cancel_work_sync(&cq->dim->work);
+ kfree(cq->dim);
+}
+
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;
out_destroy_cq:
+ rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
+ rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
- if (cq->dim)
- cancel_work_sync(&cq->dim->work);
- kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 186e0d652e8b..a09f8e3c7f3f 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
+ kfree(mad_priv);
ret = -ENOMEM;
break;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 38de4942c682..6d3ed7c6e19e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -470,40 +470,46 @@ static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+ const struct uverbs_obj_fd_type *fd_type;
int new_fd;
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj, *ret;
struct file *filp;
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
- fd_type->fops->release != &uverbs_async_event_release))
- return ERR_PTR(-EINVAL);
+ fd_type->fops->release != &uverbs_async_event_release)) {
+ ret = ERR_PTR(-EINVAL);
+ goto err_fd;
+ }
new_fd = get_unused_fd_flags(O_CLOEXEC);
- if (new_fd < 0)
- return ERR_PTR(new_fd);
-
- uobj = alloc_uobj(attrs, obj);
- if (IS_ERR(uobj))
+ if (new_fd < 0) {
+ ret = ERR_PTR(new_fd);
goto err_fd;
+ }
/* Note that uverbs_uobject_fd_release() is called during abort */
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags);
if (IS_ERR(filp)) {
- uverbs_uobject_put(uobj);
- uobj = ERR_CAST(filp);
- goto err_fd;
+ ret = ERR_CAST(filp);
+ goto err_getfile;
}
uobj->object = filp;
uobj->id = new_fd;
return uobj;
-err_fd:
+err_getfile:
put_unused_fd(new_fd);
- return uobj;
+err_fd:
+ uverbs_uobject_put(uobj);
+ return ret;
}
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
@@ -643,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
- /* alloc_commit consumes the uobj kref */
- uobj->uapi_object->type_class->alloc_commit(uobj);
-
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@@ -655,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
+ /* alloc_commit consumes the uobj kref */
+ uobj->uapi_object->type_class->alloc_commit(uobj);
+
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a2ed09a3c714..8c930bf1df89 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
return len;
}
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
struct ib_sa_mad *mad;
int len;
+ unsigned long flags;
+ unsigned long delay;
+ gfp_t gfp_flag;
+ int ret;
+
+ INIT_LIST_HEAD(&query->list);
+ query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
mad = query->mad_buf->mad;
len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+ gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+ GFP_NOWAIT;
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
- unsigned long flags;
- unsigned long delay;
- int ret;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
- INIT_LIST_HEAD(&query->list);
- query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+ if (ret)
+ goto out;
- /* Put the request on the list first.*/
- spin_lock_irqsave(&ib_nl_request_lock, flags);
+ /* Put the request on the list.*/
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
query->timeout = delay + jiffies;
list_add_tail(&query->list, &ib_nl_request_list);
/* Start the timeout if this is the only request */
if (ib_nl_request_list.next == &query->list)
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- ret = ib_nl_send_msg(query, gfp_mask);
- if (ret) {
- ret = -EIO;
- /* Remove the request */
- spin_lock_irqsave(&ib_nl_request_lock, flags);
- list_del(&query->list);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- }
+out:
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
return ret;
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5b87eee8ccc8..d03dacaef788 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
+ if (in_len < offsetofend(typeof(cmd), reserved))
+ return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 08313f7c73bc..7dd082441333 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 4633a0ce1a8c..2ced236e1553 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5eed4360695f..cb7ad1288821 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -831,6 +831,29 @@ wq_error:
}
/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
+/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
* We can't count on interrupts since we are stopping.
*/
hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- if (ppd->link_wq) {
- destroy_workqueue(ppd->link_wq);
- ppd->link_wq = NULL;
- }
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
}
sdma_exit(dd);
}
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
* clear dma engines, etc.
*/
shutdown_device(dd);
+ destroy_workqueues(dd);
stop_timers(dd);
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 07847cb72169..d580aa17ae37 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
* @wait_head: the wait queue
*
* This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
* buffer) is run out.
*/
static inline void iowait_queue(bool pkts_sent, struct iowait *w,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 185c9b02c974..b8c9d0a003fb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
struct sdma_engine *sde;
struct list_head tx_list;
u64 sent_txreqs;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 883cb9d48022..9df292b51a05 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
return sent - completed;
}
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs)) >=
- min_t(unsigned int, txq->priv->netdev->tx_queue_len,
- txq->tx_ring.max_items - 1)))
+ return hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_dec_and_test(&txq->stops))
+ netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ ++txq->sent_txreqs;
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+ !atomic_xchg(&txq->ring_full, 1))
+ hfi1_ipoib_stop_txq(txq);
+}
+
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
struct net_device *dev = txq->priv->netdev;
- /* If the queue is already running just return */
- if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
- return;
-
/* If shutting down just return as queue state is irrelevant */
if (unlikely(dev->reg_state != NETREG_REGISTERED))
return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* Use the minimum of the current tx_queue_len or the rings max txreqs
* to protect against ring overflow.
*/
- if (hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs))
- < min_t(unsigned int, dev->tx_queue_len,
- txq->tx_ring.max_items) >> 1)
- netif_wake_subqueue(dev, txq->q_idx);
+ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+ atomic_xchg(&txq->ring_full, 0))
+ hfi1_ipoib_wake_txq(txq);
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
if (unlikely(!tx))
return ERR_PTR(-ENOMEM);
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
tx->priv = priv;
tx->txq = txp->txq;
tx->skb = skb;
+ INIT_LIST_HEAD(&tx->txreq.list);
hfi1_ipoib_build_ib_tx_headers(tx, txp);
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
+tx_ok:
trace_sdma_output_ibhdr(tx->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
txq->pkts_sent = false;
- if (ret == -EBUSY) {
- list_add_tail(&tx->txreq.list, &txq->tx_list);
-
- trace_sdma_output_ibhdr(tx->priv->dd,
- &tx->sdma_hdr.hdr,
- ib_is_sc5(txp->flow.sc5));
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
-
- if (ret == -ECOMM) {
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
+ if (ret == -EBUSY || ret == -ECOMM)
+ goto tx_ok;
sdma_txclean(priv->dd, &tx->txreq);
dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
struct ipoib_txreq *tx;
/* Has the flow change ? */
- if (txq->flow.as_int != txp->flow.as_int)
- (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+ if (txq->flow.as_int != txp->flow.as_int) {
+ int ret;
+
+ ret = hfi1_ipoib_flush_tx_list(dev, txq);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY)
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
if (IS_ERR(tx)) {
int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
return -EAGAIN;
}
- netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
- if (list_empty(&txq->wait.list))
+ if (list_empty(&txreq->list))
+ /* came from non-list submit */
+ list_add_tail(&txreq->list, &txq->tx_list);
+ if (list_empty(&txq->wait.list)) {
+ if (!atomic_xchg(&txq->no_desc, 1))
+ hfi1_ipoib_stop_txq(txq);
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
struct net_device *dev = txq->priv->netdev;
if (likely(dev->reg_state == NETREG_REGISTERED) &&
- likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- netif_wake_subqueue(dev, txq->q_idx);
+ if (atomic_xchg(&txq->no_desc, 0))
+ hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
atomic64_set(&txq->complete_txreqs, 0);
+ atomic_set(&txq->stops, 0);
+ atomic_set(&txq->ring_full, 0);
+ atomic_set(&txq->no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
atomic64_inc(complete_txreqs);
}
- if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
"txq %d not empty found %llu requests\n",
txq->q_idx,
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 63688e85e8da..6d263c9749b3 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
{
if (dd->dummy_netdev) {
dd_dev_info(dd, "hfi1 netdev freed\n");
- free_netdev(dd->dummy_netdev);
+ kfree(dd->dummy_netdev);
dd->dummy_netdev = NULL;
}
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 0c2ae9f7b3e8..be62284e42d9 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
- mtu = OPA_MTU_8192;
+ mtu = (enum ib_mtu)OPA_MTU_8192;
return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 243b4ba0b6f6..facff133139a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((dd->flags & HFI1_SHUTDOWN))
+ return true;
return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index bfa6e081cb56..d2d526c5a756 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->mr = NULL;
tx->sde = priv->s_sde;
tx->psc = priv->s_sendcontext;
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a77fa6730b2d..479fa557993e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -898,13 +898,14 @@ struct hns_roce_hw {
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
- int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx);
+ int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
- int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index d02207cd30df..cf39f560b800 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
val);
}
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c597d7281629..0618ced45bf8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
+ hw_resetting = ops->get_cmdq_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
return 0;
}
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
}
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
mr->iova = iova;
mr->size = size;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
return ret;
}
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
@@ -3953,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3964,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4061,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
- /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+ mtu = get_mtu(ibqp, attr);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+ }
+
+#define MAX_LP_MSG_LEN 65536
+ /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+ ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else if (attr_mask & IB_QP_PATH_MTU)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
-
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 4c0bbb12770d..6b226a5eb7db 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
- hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+ mtpt_idx);
else
- ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 343a8b8361e7..6f99ed03d88e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
mdev_port_num);
if (err)
goto out;
- ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..77dca1e05bba 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed, xa_erase() prevented
+ * new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+ * pagefault_mr() on an implicit child. Due to this additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
@@ -1781,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
- work->num_sge = i - 1;
- if (i)
- destroy_prefetch_work(work);
+ work->num_sge = i;
return false;
}
@@ -1849,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 81bf6b975e0e..1225b8d77510 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1766,15 +1766,14 @@ err:
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;
- if (ucmd)
- allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;
- mutex_init(&qp->mutex);
-
if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1862,7 +1859,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
- if (MLX5_CAP_GEN(mdev, ece_support))
+ if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
- configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2341,18 +2336,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
unsigned long flags;
int err;
- if (qp->ibqp.rwq_ind_tbl) {
+ if (qp->is_rss) {
destroy_rss_raw_qp_tir(dev, qp);
return;
}
- base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ base = (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
- &qp->raw_packet_qp.rq.base :
- &qp->trans_qp.base;
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+ if (qp->type != IB_QPT_RAW_PACKET &&
!(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp, NULL);
@@ -2368,8 +2363,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
base->mqp.qpn);
}
- get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
- &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2386,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
- if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
- * We don't return error if this flag was provided,
- * and mlx5 doesn't have right capability.
- */
- *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ * We don't return error if these flags were provided,
+ * and mlx5 doesn't have right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
+ default:
+ break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2668,6 +2670,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
return (create_flags) ? -EINVAL : 0;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
process_create_flag(dev, &create_flags,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2873,7 +2882,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
static int check_ucmd_data(struct mlx5_ib_dev *dev,
struct mlx5_create_qp_params *params)
{
- struct ib_qp_init_attr *attr = params->attr;
struct ib_udata *udata = params->udata;
size_t size, last;
int ret;
@@ -2885,14 +2893,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
*/
last = sizeof(struct mlx5_ib_create_qp_rss);
else
- /* IB_QPT_RAW_PACKET doesn't have ECE data */
- switch (attr->qp_type) {
- case IB_QPT_RAW_PACKET:
- last = offsetof(struct mlx5_ib_create_qp, ece_options);
- break;
- default:
- last = offsetof(struct mlx5_ib_create_qp, reserved);
- }
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
if (udata->inlen <= last)
return 0;
@@ -2907,7 +2908,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
if (!ret)
mlx5_ib_dbg(
dev,
- "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
udata->inlen, params->ucmd_size, last, size);
return ret ? 0 : -EINVAL;
}
@@ -2964,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}
+ mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
@@ -3002,10 +3004,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
return &qp->ibqp;
destroy_qp:
- if (qp->type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT) {
mlx5_ib_destroy_dct(qp);
- else
+ } else {
+ /*
+ * These lines below are a temporary solution until QP allocation
+ * is moved under IB/core responsibility.
+ */
+ qp->ibqp.send_cq = attr->send_cq;
+ qp->ibqp.recv_cq = attr->recv_cq;
+ qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
+ }
+
qp = NULL;
free_qp:
kfree(qp);
@@ -4162,8 +4173,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (udata->outlen < min_resp_len)
return -EINVAL;
- resp.response_length = min_resp_len;
-
/*
* If we don't have enough space for the ECE options,
* simply indicate it with resp.response_length.
@@ -4384,8 +4393,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
MLX5_GET(ads, path, src_addr_index),
MLX5_GET(ads, path, hop_limit),
MLX5_GET(ads, path, tclass));
- memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
- MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
}
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index c19d91d6dce8..7c3968ef9cd1 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
int ece = 0;
switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
ece = MLX5_GET(init2rtr_qp_out, out, ece);
break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
case MLX5_CMD_OP_RTS2RTS_QP:
ece = MLX5_GET(rts2rts_qp_out, out, ece);
break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
default:
break;
}
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
break;
default:
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6f5eadc4d183..37aaacebd3f2 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- xa_lock(&table->array);
+ xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
- xa_unlock(&table->array);
+ xa_unlock_irq(&table->array);
return srq;
}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 792eecd206b6..97fc7dd353b0 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply have valid private data;
+ * for the rest of the events this may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request.
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 511b72809e14..332a8ba94b81 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_tail_ack_queue = 0;
qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
- if (qp->r_rq.kwq)
- qp->r_rq.kwq->count = qp->r_rq.size;
qp->r_sge.num_sge = 0;
atomic_set(&qp->s_reserved_used, 0);
}
@@ -1204,7 +1202,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) {
ret = (ERR_PTR(err));
- goto bail_driver_priv;
+ goto bail_rq_rvt;
}
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1312,11 @@ bail_qpn:
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- rvt_free_rq(&qp->r_rq);
free_ud_wq_attr(qp);
+bail_rq_rvt:
+ rvt_free_rq(&qp->r_rq);
+
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
@@ -2365,31 +2365,6 @@ bad_lkey:
}
/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
- u32 count;
-
- count = head;
-
- if (count >= rq->size)
- count = 0;
- if (count < tail)
- count += rq->size - tail;
- else
- count -= tail;
-
- return count;
-}
-
-/**
* get_rvt_head - get head indices of the circular buffer
* @rq: data structure for request queue entry
* @ip: the QP
@@ -2463,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
head = get_rvt_head(rq, ip);
- kwq->count = get_count(rq, tail, head);
+ kwq->count = rvt_get_rq_count(rq, head, tail);
}
if (unlikely(kwq->count == 0)) {
ret = 0;
@@ -2498,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
* the number of remaining WQEs.
*/
if (kwq->count < srq->limit) {
- kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+ kwq->count =
+ rvt_get_rq_count(rq,
+ get_rvt_head(rq, ip), tail);
if (kwq->count < srq->limit) {
struct ib_event ev;
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 977906cc0d11..c58735f4c94a 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
+ credits = rvt_get_rq_count(&qp->r_rq, head, tail);
}
/*
* Binary search the credit table to find the code to
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index a0b8cc643c5c..ed60c9e4643e 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
static int dev_id = 1;
int rv;
+ sdev->vendor_part_id = dev_id++;
+
rv = ib_register_device(base_dev, name);
if (rv) {
pr_warn("siw: device registration error %d\n", rv);
return rv;
}
- sdev->vendor_part_id = dev_id++;
siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 650520244ed0..7271d705f4b0 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
break;
bytes = min(bytes, len);
- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+ bytes) {
copied += bytes;
offset += bytes;
len -= bytes;