| author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-10 20:01:30 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-02-10 20:01:30 -0800 |
| commit | c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch) | |
| tree | 9830baf38832769e1cf621708889111bbe3c93df /drivers/net/wireless/ath/ath10k/htt_tx.c | |
| parent | 29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff) | |
| parent | 9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff) | |
| download | linux-c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb.tar.bz2 | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) More iov_iter conversion work from Al Viro.
[ The "crypto: switch af_alg_make_sg() to iov_iter" commit was
wrong, and this pull actually adds an extra commit on top of the
branch I'm pulling to fix that up, so that the pre-merge state is
ok. - Linus ]
2) Various optimizations to the ipv4 forwarding information base trie
lookup implementation. From Alexander Duyck.
3) Remove sock_iocb altogether, from Christoph Hellwig.
4) Allow congestion control algorithm selection via routing metrics.
From Daniel Borkmann.
5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.
6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.
7) Add xmit_more support to r8169, e1000, and e1000e drivers. From
Florian Westphal.
8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.
9) Add BPF packet actions to packet scheduler, from Jiri Pirko.
10) Add support for unique flow IDs to openvswitch, from Joe Stringer.
11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
Kwok.
12) More sanely handle out-of-window dupacks, which can result in
serious ACK storms. From Neal Cardwell.
13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
Patrick McHardy, and Thomas Graf.
14) Support xmit_more in be2net, from Sathya Perla.
15) Group Policy extensions for vxlan, from Thomas Graf.
16) Remote checksum offload support for vxlan, from Tom Herbert.
17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From
Vlad Yasevich.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
crypto: fix af_alg_make_sg() conversion to iov_iter
ipv4: Namespecify TCP PMTU mechanism
i40e: Fix for stats init function call in Rx setup
tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
ipv6: Make __ipv6_select_ident static
ipv6: Fix fragment id assignment on LE arches.
bridge: Fix inability to add non-vlan fdb entry
net: Mellanox: Delete unnecessary checks before the function call "vunmap"
cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
ethtool: rename reserved1 member in ethtool_drvinfo for expansion ROM version
net: dsa: Remove redundant phy_attach()
IB/mlx4: Reset flow support for IB kernel ULPs
IB/mlx4: Always use the correct port for mirrored multicast attachments
net/bonding: Fix potential bad memory access during bonding events
tipc: remove tipc_snprintf
tipc: nl compat add noop and remove legacy nl framework
tipc: convert legacy nl stats show to nl compat
tipc: convert legacy nl net id get to nl compat
tipc: convert legacy nl net id set to nl compat
...
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/htt_tx.c')
-rw-r--r-- | drivers/net/wireless/ath/ath10k/htt_tx.c | 99 |
1 file changed, 38 insertions(+), 61 deletions(-)
```diff
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 4bc51d8a14a3..cbd2bc9e6202 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -56,21 +56,18 @@ exit:
 	return ret;
 }
 
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
-	int msdu_id;
+	int ret;
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-				      htt->max_num_pending_tx);
-	if (msdu_id == htt->max_num_pending_tx)
-		return -ENOBUFS;
+	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
-	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-	__set_bit(msdu_id, htt->used_msdu_ids);
-	return msdu_id;
+	return ret;
 }
 
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
@@ -79,79 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	if (!test_bit(msdu_id, htt->used_msdu_ids))
-		ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
-			    msdu_id);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
-	__clear_bit(msdu_id, htt->used_msdu_ids);
+
+	idr_remove(&htt->pending_tx, msdu_id);
 }
 
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
 
-	spin_lock_init(&htt->tx_lock);
-
-	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
-		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
-	else
-		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
-
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
 
-	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
-				  htt->max_num_pending_tx, GFP_KERNEL);
-	if (!htt->pending_tx)
-		return -ENOMEM;
-
-	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
-				     BITS_TO_LONGS(htt->max_num_pending_tx),
-				     GFP_KERNEL);
-	if (!htt->used_msdu_ids) {
-		kfree(htt->pending_tx);
-		return -ENOMEM;
-	}
+	spin_lock_init(&htt->tx_lock);
+	idr_init(&htt->pending_tx);
 
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		kfree(htt->used_msdu_ids);
-		kfree(htt->pending_tx);
+		idr_destroy(&htt->pending_tx);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 {
-	struct ath10k *ar = htt->ar;
+	struct ath10k *ar = ctx;
+	struct ath10k_htt *htt = &ar->htt;
 	struct htt_tx_done tx_done = {0};
-	int msdu_id;
-
-	spin_lock_bh(&htt->tx_lock);
-	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
-		if (!test_bit(msdu_id, htt->used_msdu_ids))
-			continue;
 
-		ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
-			   msdu_id);
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
 
-		tx_done.discard = 1;
-		tx_done.msdu_id = msdu_id;
+	tx_done.discard = 1;
+	tx_done.msdu_id = msdu_id;
 
-		ath10k_txrx_tx_unref(htt, &tx_done);
-	}
+	spin_lock_bh(&htt->tx_lock);
+	ath10k_txrx_tx_unref(htt, &tx_done);
 	spin_unlock_bh(&htt->tx_lock);
+
+	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
-	ath10k_htt_tx_free_pending(htt);
-	kfree(htt->pending_tx);
-	kfree(htt->used_msdu_ids);
+	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
 }
 
@@ -383,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 	len += sizeof(cmd->mgmt_tx);
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -428,7 +398,6 @@ err_free_txdesc:
 	dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
@@ -460,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
-	res = ath10k_htt_tx_alloc_msdu_id(htt);
+	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0) {
 		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	htt->pending_tx[msdu_id] = msdu;
 	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -480,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
 					   &paddr);
-	if (!skb_cb->htt.txbuf)
+	if (!skb_cb->htt.txbuf) {
+		res = -ENOMEM;
 		goto err_free_msdu_id;
+	}
 	skb_cb->htt.txbuf_paddr = paddr;
 
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control))
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
@@ -539,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
 	 * nothing ath10k can do with frames requested for inspection so force
@@ -598,7 +576,6 @@ err_free_txbuf:
 			  skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
-	htt->pending_tx[msdu_id] = NULL;
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
```
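Note: the ath10k hunks above replace the driver's hand-rolled MSDU-id bookkeeping (a `used_msdu_ids` bitmap plus a fixed-size `pending_tx` array) with the kernel's IDR allocator, so the id returned by `idr_alloc()` doubles as the lookup key for the pending skb. The sketch below is a minimal, self-contained illustration of that IDR pattern only; it is not part of the merged patch, all `demo_*` names are invented for illustration, and the `idr_find()` helper stands in for the completion-path lookup that this file's diff does not show.

```c
/* Illustrative sketch only -- not part of the merged patch. It shows the
 * IDR usage pattern the diff switches to: idr_alloc() hands out an id and
 * stores the skb under it, idr_remove()/idr_find() release and look it up,
 * and idr_for_each() + idr_destroy() drain whatever is still pending at
 * teardown. All demo_* identifiers are hypothetical.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_tx {
	struct idr pending;	/* id -> struct sk_buff * */
	spinlock_t lock;
};

static void demo_tx_init(struct demo_tx *tx)
{
	spin_lock_init(&tx->lock);
	idr_init(&tx->pending);
}

/* Returns a free id in [0, 0x10000) on success (mirroring the range used
 * in the patch), or a negative errno such as -ENOMEM/-ENOSPC on failure.
 */
static int demo_tx_alloc_id(struct demo_tx *tx, struct sk_buff *skb)
{
	int id;

	spin_lock_bh(&tx->lock);
	id = idr_alloc(&tx->pending, skb, 0, 0x10000, GFP_ATOMIC);
	spin_unlock_bh(&tx->lock);

	return id;
}

/* Completion path: recover the skb that was stored under this id. */
static struct sk_buff *demo_tx_lookup(struct demo_tx *tx, int id)
{
	return idr_find(&tx->pending, id);
}

static void demo_tx_free_id(struct demo_tx *tx, int id)
{
	spin_lock_bh(&tx->lock);
	idr_remove(&tx->pending, id);
	spin_unlock_bh(&tx->lock);
}

/* idr_for_each() callback: invoked once per remaining entry. */
static int demo_tx_drop_one(int id, void *ptr, void *ctx)
{
	dev_kfree_skb_any(ptr);	/* ptr is the stored sk_buff */
	return 0;		/* 0 means keep iterating */
}

static void demo_tx_destroy(struct demo_tx *tx)
{
	/* Drop anything still outstanding, then release the idr itself. */
	idr_for_each(&tx->pending, demo_tx_drop_one, NULL);
	idr_destroy(&tx->pending);
}
```

Compared with the old scheme, this drops the fixed-size `pending_tx` array and the linear `find_first_zero_bit()` scan, and the id carried in the HTT descriptor can be used directly to retrieve the skb when the firmware reports completion.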