author     Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 20:01:30 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 20:01:30 -0800
commit     c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree       9830baf38832769e1cf621708889111bbe3c93df /drivers/net/wireless/ath/wil6210/rx_reorder.c
parent     29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent     9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was wrong,
      and this pull actually adds an extra commit on top of the branch I'm
      pulling to fix that up, so that the pre-merge state is ok. - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation. From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers. From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms. From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remote checksum offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Diffstat (limited to 'drivers/net/wireless/ath/wil6210/rx_reorder.c')
-rw-r--r--   drivers/net/wireless/ath/wil6210/rx_reorder.c | 277
1 file changed, 266 insertions(+), 11 deletions(-)
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 489cb73d139b..ca10dcf0986e 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -89,7 +89,9 @@ static void wil_reorder_release(struct wil6210_priv *wil,
}
}
+/* called in NAPI context */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct net_device *ndev = wil_to_ndev(wil);
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
@@ -97,22 +99,26 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
int cid = wil_rxdesc_cid(d);
int mid = wil_rxdesc_mid(d);
u16 seq = wil_rxdesc_seq(d);
+ int mcast = wil_rxdesc_mcast(d);
struct wil_sta_info *sta = &wil->sta[cid];
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
- unsigned long flags;
- wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
- mid, cid, tid, seq);
+ wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
+ mid, cid, tid, seq, mcast);
- spin_lock_irqsave(&sta->tid_rx_lock, flags);
+ if (unlikely(mcast)) {
+ wil_netif_rx_any(skb, ndev);
+ return;
+ }
+
+ spin_lock(&sta->tid_rx_lock);
r = sta->tid_rx[tid];
if (!r) {
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
wil_netif_rx_any(skb, ndev);
- return;
+ goto out;
}
hseq = r->head_seq_num;
@@ -121,13 +127,24 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
* reported, and data Rx, few packets may be pass up before reorder
* buffer get allocated. Catch up by pretending SSN is what we
* see in the 1-st Rx packet
+ *
+ * Another scenario, Rx get delayed and we got packet from before
+ * BACK. Pass it to the stack and wait.
*/
if (r->first_time) {
r->first_time = false;
if (seq != r->head_seq_num) {
- wil_err(wil, "Error: 1-st frame with wrong sequence"
- " %d, should be %d. Fixing...\n", seq,
- r->head_seq_num);
+ if (seq_less(seq, r->head_seq_num)) {
+ wil_err(wil,
+ "Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
+ seq, r->head_seq_num);
+ r->first_time = true;
+ wil_netif_rx_any(skb, ndev);
+ goto out;
+ }
+ wil_err(wil,
+ "Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
+ seq, r->head_seq_num);
r->head_seq_num = seq;
r->ssn = seq;
}
@@ -179,7 +196,7 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
wil_reorder_release(wil, r);
out:
- spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
+ spin_unlock(&sta->tid_rx_lock);
}
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
@@ -219,3 +236,241 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
kfree(r->reorder_time);
kfree(r);
}
+
+/* ADDBA processing */
+static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
+{
+ u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
+ (mtu_max + WIL_MAX_MPDU_OVERHEAD));
+
+ if (!req_agg_wsize)
+ return max_agg_size;
+
+ return min(max_agg_size, req_agg_wsize);
+}
+
+/* Block Ack - Rx side (recipient */
+int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
+ u8 dialog_token, __le16 ba_param_set,
+ __le16 ba_timeout, __le16 ba_seq_ctrl)
+{
+ struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->cidxtid = cidxtid;
+ req->dialog_token = dialog_token;
+ req->ba_param_set = le16_to_cpu(ba_param_set);
+ req->ba_timeout = le16_to_cpu(ba_timeout);
+ req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
+
+ mutex_lock(&wil->back_rx_mutex);
+ list_add_tail(&req->list, &wil->back_rx_pending);
+ mutex_unlock(&wil->back_rx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_rx_worker);
+
+ return 0;
+}
+
+static void wil_back_rx_handle(struct wil6210_priv *wil,
+ struct wil_back_rx *req)
+__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
+{
+ struct wil_sta_info *sta;
+ u8 cid, tid;
+ u16 agg_wsize = 0;
+ /* bit 0: A-MSDU supported
+ * bit 1: policy (should be 0 for us)
+ * bits 2..5: TID
+ * bits 6..15: buffer size
+ */
+ u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
+ bool agg_amsdu = !!(req->ba_param_set & BIT(0));
+ int ba_policy = req->ba_param_set & BIT(1);
+ u16 agg_timeout = req->ba_timeout;
+ u16 status = WLAN_STATUS_SUCCESS;
+ u16 ssn = req->ba_seq_ctrl >> 4;
+ struct wil_tid_ampdu_rx *r;
+ int rc;
+
+ might_sleep();
+ parse_cidxtid(req->cidxtid, &cid, &tid);
+
+ /* sanity checks */
+ if (cid >= WIL6210_MAX_CID) {
+ wil_err(wil, "BACK: invalid CID %d\n", cid);
+ return;
+ }
+
+ sta = &wil->sta[cid];
+ if (sta->status != wil_sta_connected) {
+ wil_err(wil, "BACK: CID %d not connected\n", cid);
+ return;
+ }
+
+ wil_dbg_wmi(wil,
+ "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
+ cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+
+ /* apply policies */
+ if (ba_policy) {
+ wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
+ status = WLAN_STATUS_INVALID_QOS_PARAM;
+ }
+ if (status == WLAN_STATUS_SUCCESS)
+ agg_wsize = wil_agg_size(wil, req_agg_wsize);
+
+ rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+ agg_amsdu, agg_wsize, agg_timeout);
+ if (rc || (status != WLAN_STATUS_SUCCESS))
+ return;
+
+ /* apply */
+ r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
+ spin_lock_bh(&sta->tid_rx_lock);
+ wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
+ sta->tid_rx[tid] = r;
+ spin_unlock_bh(&sta->tid_rx_lock);
+}
+
+void wil_back_rx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_rx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+}
+
+/* Retrieve next ADDBA request from the pending list */
+static struct list_head *next_back_rx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_rx_mutex);
+
+ if (!list_empty(&wil->back_rx_pending)) {
+ ret = wil->back_rx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_rx_mutex);
+
+ return ret;
+}
+
+void wil_back_rx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_rx_worker);
+ struct wil_back_rx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_rx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_rx, list);
+
+ wil_back_rx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+/* BACK - Tx (originator) side */
+static void wil_back_tx_handle(struct wil6210_priv *wil,
+ struct wil_back_tx *req)
+{
+ struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
+ int rc;
+
+ if (txdata->addba_in_progress) {
+ wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
+ req->ringid);
+ return;
+ }
+ if (txdata->agg_wsize) {
+ wil_dbg_misc(wil,
+ "ADDBA for vring[%d] already established wsize %d\n",
+ req->ringid, txdata->agg_wsize);
+ return;
+ }
+ txdata->addba_in_progress = true;
+ rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
+ if (rc)
+ txdata->addba_in_progress = false;
+}
+
+static struct list_head *next_back_tx(struct wil6210_priv *wil)
+{
+ struct list_head *ret = NULL;
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ if (!list_empty(&wil->back_tx_pending)) {
+ ret = wil->back_tx_pending.next;
+ list_del(ret);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+
+ return ret;
+}
+
+void wil_back_tx_worker(struct work_struct *work)
+{
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ back_tx_worker);
+ struct wil_back_tx *evt;
+ struct list_head *lh;
+
+ while ((lh = next_back_tx(wil)) != NULL) {
+ evt = list_entry(lh, struct wil_back_tx, list);
+
+ wil_back_tx_handle(wil, evt);
+ kfree(evt);
+ }
+}
+
+void wil_back_tx_flush(struct wil6210_priv *wil)
+{
+ struct wil_back_tx *evt, *t;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->back_tx_mutex);
+
+ list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
+ list_del(&evt->list);
+ kfree(evt);
+ }
+
+ mutex_unlock(&wil->back_tx_mutex);
+}
+
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
+{
+ struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
+
+ if (!req)
+ return -ENOMEM;
+
+ req->ringid = ringid;
+ req->agg_wsize = wil_agg_size(wil, wsize);
+ req->agg_timeout = 0;
+
+ mutex_lock(&wil->back_tx_mutex);
+ list_add_tail(&req->list, &wil->back_tx_pending);
+ mutex_unlock(&wil->back_tx_mutex);
+
+ queue_work(wil->wq_service, &wil->back_tx_worker);
+
+ return 0;
+}
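
For illustration only (not part of the patch above): a minimal, self-contained C sketch of how the ADDBA "BA parameter set" field is decoded and how the requested aggregation window is clamped, mirroring the bit layout documented in wil_back_rx_handle() and the logic of wil_agg_size(). MAX_AGG_WSIZE, GET_BITS() and the hw_limit argument are stand-ins for the driver's WIL_MAX_AGG_WSIZE, WIL_GET_BITS() and the limit derived from WIL_MAX_AMPDU_SIZE / (mtu_max + WIL_MAX_MPDU_OVERHEAD).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for WIL_MAX_AGG_WSIZE */
#define MAX_AGG_WSIZE	64

/* Extract bits b..e (inclusive) of x; stand-in for WIL_GET_BITS() */
#define GET_BITS(x, b, e)	(((x) >> (b)) & ((1u << ((e) - (b) + 1)) - 1))

/* Mirror of wil_agg_size(): a requested size of 0 means "no preference",
 * otherwise clamp the request to the supported maximum.
 */
static uint16_t agg_size(uint16_t req_wsize, uint16_t hw_limit)
{
	uint16_t max = hw_limit < MAX_AGG_WSIZE ? hw_limit : MAX_AGG_WSIZE;

	if (!req_wsize)
		return max;
	return req_wsize < max ? req_wsize : max;
}

int main(void)
{
	uint16_t ba_param_set = 0x1004;	/* example ADDBA "BA parameter set" */
	int amsdu  = ba_param_set & 0x1;		/* bit 0: A-MSDU supported */
	int policy = (ba_param_set >> 1) & 0x1;		/* bit 1: BA policy */
	int tid    = GET_BITS(ba_param_set, 2, 5);	/* bits 2..5: TID */
	int wsize  = GET_BITS(ba_param_set, 6, 15);	/* bits 6..15: buffer size */

	/* With 0x1004: A-MSDU off, policy 0, TID 1, requested window 64,
	 * which a hardware limit of 25 clamps down to 25.
	 */
	printf("amsdu %d policy %d tid %d req wsize %d -> agg wsize %d\n",
	       amsdu, policy, tid, wsize, agg_size((uint16_t)wsize, 25));
	return 0;
}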