author	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-10 20:01:30 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-10 20:01:30 -0800
commit	c5ce28df0e7c01a1de23c36ebdefcd803f2b6cbb (patch)
tree	9830baf38832769e1cf621708889111bbe3c93df /drivers/net/wireless/ath/ath10k/wmi-ops.h
parent	29afc4e9a408f2304e09c6dd0dbcfbd2356d0faa (diff)
parent	9399f0c51489ae8c16d6559b82a452fdc1895e91 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) More iov_iter conversion work from Al Viro.
[ The "crypto: switch af_alg_make_sg() to iov_iter" commit was
wrong, and this pull actually adds an extra commit on top of the
branch I'm pulling to fix that up, so that the pre-merge state is
ok. - Linus ]
2) Various optimizations to the ipv4 forwarding information base trie
lookup implementation. From Alexander Duyck.
3) Remove sock_iocb altogether, from Christoph Hellwig.
4) Allow congestion control algorithm selection via routing metrics.
From Daniel Borkmann.
5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.
6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.
7) Add xmit_more support to r8169, e1000, and e1000e drivers. From
Florian Westphal.
8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.
9) Add BPF packet actions to packet scheduler, from Jiri Pirko.
10) Add support for unique flow IDs to openvswitch, from Joe Stringer.
11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
Kwok.
12) More sanely handle out-of-window dupacks, which can result in
serious ACK storms. From Neal Cardwell.
13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
Patrick McHardy, and Thomas Graf.
14) Support xmit_more in be2net, from Sathya Perla.
15) Group Policy extensions for vxlan, from Thomas Graf.
16) Remote checksum offload support for vxlan, from Tom Herbert.
17) Like ipv4, support lockless transmit over ipv6 UDP sockets. From
Vlad Yasevich.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
crypto: fix af_alg_make_sg() conversion to iov_iter
ipv4: Namespecify TCP PMTU mechanism
i40e: Fix for stats init function call in Rx setup
tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
ipv6: Make __ipv6_select_ident static
ipv6: Fix fragment id assignment on LE arches.
bridge: Fix inability to add non-vlan fdb entry
net: Mellanox: Delete unnecessary checks before the function call "vunmap"
cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
net: dsa: Remove redundant phy_attach()
IB/mlx4: Reset flow support for IB kernel ULPs
IB/mlx4: Always use the correct port for mirrored multicast attachments
net/bonding: Fix potential bad memory access during bonding events
tipc: remove tipc_snprintf
tipc: nl compat add noop and remove legacy nl framework
tipc: convert legacy nl stats show to nl compat
tipc: convert legacy nl net id get to nl compat
tipc: convert legacy nl net id set to nl compat
...
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/wmi-ops.h')
-rw-r--r--	drivers/net/wireless/ath/ath10k/wmi-ops.h	1064
1 file changed, 1064 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000000..04dc4b9db04e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1064 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
+struct wmi_ops {
+	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+
+	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+			 struct wmi_scan_ev_arg *arg);
+	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_mgmt_rx_ev_arg *arg);
+	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_ch_info_ev_arg *arg);
+	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+			       struct wmi_vdev_start_ev_arg *arg);
+	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+			      struct wmi_peer_kick_ev_arg *arg);
+	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+			 struct wmi_swba_ev_arg *arg);
+	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_phyerr_ev_arg *arg);
+	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+			    struct wmi_svc_rdy_ev_arg *arg);
+	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_rdy_ev_arg *arg);
+	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+			     struct ath10k_fw_stats *stats);
+
+	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+					   u16 rd5g, u16 ctl2g, u16 ctl5g,
+					   enum wmi_dfs_region dfs_reg);
+	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+					      u32 value);
+	struct sk_buff *(*gen_init)(struct ath10k *ar);
+	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+					  const struct wmi_start_scan_arg *arg);
+	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+					 const struct wmi_stop_scan_arg *arg);
+	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+					   enum wmi_vdev_type type,
+					   enum wmi_vdev_subtype subtype,
+					   const u8 macaddr[ETH_ALEN]);
+	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+					  const struct wmi_vdev_start_request_arg *arg,
+					  bool restart);
+	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+				       const u8 *bssid);
+	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+					      u32 param_id, u32 param_value);
+	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+						const struct wmi_vdev_install_key_arg *arg);
+	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+						  const struct wmi_vdev_spectral_conf_arg *arg);
+	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+						    u32 trigger, u32 enable);
+	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+					     const struct wmi_wmm_params_all_arg *arg);
+	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+					   const u8 peer_addr[ETH_ALEN]);
+	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+					   const u8 peer_addr[ETH_ALEN]);
+	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+					  const u8 peer_addr[ETH_ALEN],
+					  u32 tid_bitmap);
+	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+					      const u8 *peer_addr,
+					      enum wmi_peer_param param_id,
+					      u32 param_value);
+	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+					  const struct wmi_peer_assoc_complete_arg *arg);
+	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+					  enum wmi_sta_ps_mode psmode);
+	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+					  enum wmi_sta_powersave_param param_id,
+					  u32 value);
+	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+					 const u8 *mac,
+					 enum wmi_ap_ps_peer_param param_id,
+					 u32 value);
+	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+					      const struct wmi_scan_chan_list_arg *arg);
+	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+					  const void *bcn, size_t bcn_len,
+					  u32 bcn_paddr, bool dtim_zero,
+					  bool deliver_cab);
+	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+					    const struct wmi_wmm_params_all_arg *arg);
+	struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
+					     enum wmi_stats_id stats_id);
+	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+					     enum wmi_force_fw_hang_type type,
+					     u32 delay_ms);
+	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+					  u32 log_level);
+	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+						   u32 period, u32 duration,
+						   u32 next_offset,
+						   u32 enabled);
+	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+						const u8 *mac);
+	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+					  const u8 *mac, u32 tid, u32 buf_size);
+	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+					      const u8 *mac, u32 tid,
+					      u32 status);
+	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+					  const u8 *mac, u32 tid, u32 initiator,
+					  u32 reason);
+	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+					u32 tim_ie_offset, struct sk_buff *bcn,
+					u32 prb_caps, u32 prb_erp,
+					void *prb_ies, size_t prb_ies_len);
+	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+					struct sk_buff *bcn);
+	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+					     const u8 *p2p_ie);
+	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+					      const u8 peer_addr[ETH_ALEN],
+					      const struct wmi_sta_uapsd_auto_trig_arg *args,
+					      u32 num_ac);
+	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+					     const struct wmi_sta_keepalive_arg *arg);
+};
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->rx(ar, skb);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+		   size_t len)
+{
+	if (!ar->wmi.ops->map_svc)
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->map_svc(in, out, len);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+		     struct wmi_scan_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_scan)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_mgmt_rx_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_mgmt_rx)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_ch_info_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_ch_info)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_vdev_start_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_vdev_start)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+			  struct wmi_peer_kick_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_peer_kick)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+		     struct wmi_swba_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_swba)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
+		       struct wmi_phyerr_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_phyerr)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+			struct wmi_svc_rdy_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_svc_rdy)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+		    struct wmi_rdy_ev_arg *arg)
+{
+	if (!ar->wmi.ops->pull_rdy)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+			 struct ath10k_fw_stats *stats)
+{
+	if (!ar->wmi.ops->pull_fw_stats)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_mgmt_tx)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	if (ret)
+		return ret;
+
+	/* FIXME There's no ACK event for Management Tx. This probably
+	 * shouldn't be called here either.
+	 */
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+			      u16 ctl2g, u16 ctl5g,
+			      enum wmi_dfs_region dfs_reg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_rd)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+					   dfs_reg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_suspend)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_resume)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_resume(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_init)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_init(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+		      const struct wmi_start_scan_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_start_scan)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_start_scan(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_stop_scan)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+		       enum wmi_vdev_type type,
+		       enum wmi_vdev_subtype subtype,
+		       const u8 macaddr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_create)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_delete)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+		      const struct wmi_vdev_start_request_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_start)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+			const struct wmi_vdev_start_request_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_start)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_stop)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_up)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_down)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+			  u32 param_value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+					      param_value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+			    const struct wmi_vdev_install_key_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_vdev_install_key)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+			      const struct wmi_vdev_spectral_conf_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+				u32 enable)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+						    enable);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+			  const u8 peer_addr[ETH_ALEN],
+			  const struct wmi_sta_uapsd_auto_trig_arg *args,
+			  u32 num_ac)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+					      num_ac);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+			 const struct wmi_wmm_params_all_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+		       const u8 peer_addr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_create)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+		       const u8 peer_addr[ETH_ALEN])
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_delete)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_flush)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+			  enum wmi_peer_param param_id, u32 param_value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_set_param)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+					      param_value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+		      enum wmi_sta_ps_mode psmode)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_psmode)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+			    enum wmi_sta_powersave_param param_id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_sta_ps)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			   enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_set_ap_ps)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+			  const struct wmi_scan_chan_list_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_scan_chan_list)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+		      const struct wmi_peer_assoc_complete_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_peer_assoc)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+				  const void *bcn, size_t bcn_len,
+				  u32 bcn_paddr, bool dtim_zero,
+				  bool deliver_cab)
+{
+	struct sk_buff *skb;
+	int ret;
+
+	if (!ar->wmi.ops->gen_beacon_dma)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+					  dtim_zero, deliver_cab);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+					 ar->wmi.cmd->pdev_send_bcn_cmdid);
+	if (ret) {
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+			       const struct wmi_wmm_params_all_arg *arg)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_wmm)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_request_stats)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+			 enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_force_fw_hang)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_dbglog_cfg)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pktlog_enable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pktlog_disable)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pktlog_disable(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+			       u32 next_offset, u32 enabled)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+						   next_offset, enabled);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_get_temperature)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_clear_resp)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+		      u32 tid, u32 buf_size)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_send)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+			  u32 tid, u32 status)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_addba_set_resp)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+		      u32 tid, u32 initiator, u32 reason)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_delba_send)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+					  reason);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+		    void *prb_ies, size_t prb_ies_len)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_bcn_tmpl)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+					prb_caps, prb_erp, prb_ies,
+					prb_ies_len);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_prb_tmpl)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+			 const struct wmi_sta_keepalive_arg *arg)
+{
+	struct sk_buff *skb;
+	u32 cmd_id;
+
+	if (!ar->wmi.ops->gen_sta_keepalive)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+#endif
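
The header added above is a thin dispatch layer: struct wmi_ops is a per-firmware-ABI vtable, and each static inline wrapper probes the corresponding hook, returns -EOPNOTSUPP when a given ABI leaves it unimplemented, asks the gen_* hook to build a command buffer, and hands the result to ath10k_wmi_cmd_send(). A minimal, self-contained sketch of that ops-table pattern follows (hypothetical names and types throughout; this is not the ath10k code, which uses sk_buff and IS_ERR()/PTR_ERR() instead of the plain pointers below):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for struct ath10k / struct sk_buff. */
	struct buf { unsigned int len; };
	struct dev;

	/* Per-ABI vtable in the style of struct wmi_ops: a hook may be
	 * left NULL when a firmware revision lacks that command. */
	struct dev_ops {
		struct buf *(*gen_suspend)(struct dev *d, unsigned int opt);
		struct buf *(*gen_resume)(struct dev *d); /* NULL below */
	};

	struct dev {
		const struct dev_ops *ops;
	};

	/* Stand-in for ath10k_wmi_cmd_send(): consume the buffer. */
	static int dev_cmd_send(struct dev *d, struct buf *b,
				unsigned int cmd_id)
	{
		(void)d; (void)cmd_id;
		free(b);
		return 0;
	}

	/* Wrapper in the shape of the inline helpers above: probe the
	 * hook, build the command, then send it. */
	static int dev_suspend(struct dev *d, unsigned int opt)
	{
		struct buf *b;

		if (!d->ops->gen_suspend)
			return -EOPNOTSUPP;

		b = d->ops->gen_suspend(d, opt);
		if (!b)	/* the kernel code checks IS_ERR() here */
			return -ENOMEM;

		return dev_cmd_send(d, b, 1 /* hypothetical cmd id */);
	}

	static int dev_resume(struct dev *d)
	{
		if (!d->ops->gen_resume)
			return -EOPNOTSUPP;
		return dev_cmd_send(d, d->ops->gen_resume(d), 2);
	}

	static struct buf *v1_gen_suspend(struct dev *d, unsigned int opt)
	{
		(void)d; (void)opt;
		return calloc(1, sizeof(struct buf));
	}

	/* "ABI v1" implements suspend only; resume stays NULL, so
	 * callers get -EOPNOTSUPP instead of a NULL dereference. */
	static const struct dev_ops v1_ops = {
		.gen_suspend = v1_gen_suspend,
	};

	int main(void)
	{
		struct dev d = { .ops = &v1_ops };

		printf("suspend -> %d\n", dev_suspend(&d, 0)); /* 0 */
		printf("resume  -> %d\n", dev_resume(&d)); /* -EOPNOTSUPP */
		return 0;
	}

Splitting command construction (gen_*) from transport (ath10k_wmi_cmd_send) is what lets several WMI ABI backends register different op tables and command-id maps against one driver core, with callers coded once against the wrappers.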