author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-18 17:53:46 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-04-18 17:53:46 -0700
commit    | ebfc45ee7004088d610cd399d2dee6d95f87199b (patch)
tree      | 7e3edfaeb7660d20eb187e9d2b1d6c9a9b3ceece /drivers
parent    | 6e66d5dab5d530a368314eb631201a02aabb075d (diff)
parent    | b14878ccb7fac0242db82720b784ab62c467c0dc (diff)
download  | linux-ebfc45ee7004088d610cd399d2dee6d95f87199b.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull more networking fixes from David Miller:
1) Fix the mlx4_en_netpoll implementation; it needs to schedule a NAPI
context, not synchronize it (a minimal sketch follows this list). From
Chris Mason.
2) IPv4 flow input interface should never be zero; it should be
LOOPBACK_IFINDEX instead. From Cong Wang and Julian Anastasov.
3) Properly configure MAC to PHY connection in mvneta devices, from
Thomas Petazzoni.
4) sys_recv should use SYSCALL_DEFINE. From Jan Glauber.
5) Tunnel driver ioctls do not use the correct namespace, fix from
Nicolas Dichtel.
6) Fix memory leak on seccomp filter attach, from Kees Cook.
7) Fix lockdep warning for nested vlans, from Ding Tianhong.
8) Crashes can happen in SCTP due to how the auth_enable value is
managed, fix from Vlad Yasevich.
9) Wireless fixes from John W Linville and co.
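For fix (1), the relevant hunk in drivers/net/ethernet/mellanox/mlx4/en_netdev.c
(in the diff further down) drops the per-CQ spinlock and napi_synchronize() call
from the netpoll handler and simply schedules the RX NAPI contexts. Below is a
minimal sketch of the before/after pattern; sketch_priv, sketch_netpoll and the
rx_napi array are simplified stand-ins for illustration, not the driver's actual
types.

```c
/*
 * Sketch of fix (1) only -- sketch_priv and sketch_netpoll are simplified
 * stand-ins, not the real mlx4_en types; the actual change is the
 * en_netdev.c hunk in the diff below.
 */
#include <linux/netdevice.h>

struct sketch_priv {
	int rx_ring_num;
	struct napi_struct *rx_napi[16];	/* one NAPI context per RX ring */
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sketch_netpoll(struct net_device *dev)
{
	struct sketch_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		/*
		 * The old code took a per-CQ spinlock, called
		 * napi_synchronize() (which may sleep) and processed the RX
		 * ring directly.  netpoll can be invoked from atomic
		 * context, so instead just schedule the NAPI context and
		 * let the normal poll routine drain the completion queue.
		 */
		napi_schedule(priv->rx_napi[i]);
	}
}
#endif
```

With the synchronize/lock sequence gone, the per-CQ spinlock becomes unused,
which is why the en_cq.c and mlx4_en.h hunks below also remove the
spin_lock_init() call and the spinlock_t field.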
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (45 commits)
net: sctp: cache auth_enable per endpoint
tg3: update rx_jumbo_pending ring param only when jumbo frames are enabled
vlan: Fix lockdep warning when vlan dev handle notification
seccomp: fix memory leak on filter attach
isdn: icn: buffer overflow in icn_command()
ip6_tunnel: use the right netns in ioctl handler
sit: use the right netns in ioctl handler
ip_tunnel: use the right netns in ioctl handler
net: use SYSCALL_DEFINEx for sys_recv
net: mdio-gpio: Add support for separate MDI and MDO gpio pins
net: mdio-gpio: Add support for active low gpio pins
net: mdio-gpio: Use devm_ functions where possible
ipv4, route: pass 0 instead of LOOPBACK_IFINDEX to fib_validate_source()
ipv4, fib: pass LOOPBACK_IFINDEX instead of 0 to flowi4_iif
mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll
net: mvneta: properly configure the MAC <-> PHY connection in all situations
net: phy: add minimal support for QSGMII PHY
sfc: On MCDI timeout, issue an FLR (and mark MCDI to fail-fast)
mwifiex: fix hung task on command timeout
mwifiex: process event before command response
...
Diffstat (limited to 'drivers')
31 files changed, 467 insertions, 225 deletions
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 53d487f0c79d..6a7447c304ac 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card) ulong a; ulong flags; int i; - char cbuf[60]; + char cbuf[80]; isdn_ctrl cmd; icn_cdef cdef; char __user *arg; @@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card) break; if ((c->arg & 255) < ICN_BCH) { char *p; - char dial[50]; char dcode[4]; a = c->arg; @@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card) } else /* Normal Dial */ strcpy(dcode, "CAL"); - strcpy(dial, p); - sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), - dcode, dial, c->parm.setup.si1, - c->parm.setup.si2, c->parm.setup.eazmsn); + snprintf(cbuf, sizeof(cbuf), + "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), + dcode, p, c->parm.setup.si1, + c->parm.setup.si2, c->parm.setup.eazmsn); i = icn_writecmd(cbuf, strlen(cbuf), 0, card); } break; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index b9f7022f4e81..e5d95c5ce1ad 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e if (tg3_flag(tp, MAX_RXPEND_64) && tp->rx_pending > 63) tp->rx_pending = 63; - tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + tp->rx_jumbo_pending = ering->rx_jumbo_pending; for (i = 0; i < tp->irq_max; i++) tp->napi[i].tx_pending = ering->tx_pending; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b248bcbdae63..14786c8bf99e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -89,8 +89,9 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c -#define MVNETA_SGMII_SERDES_CFG 0x24A0 +#define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 +#define MVNETA_QSGMII_SERDES_PROTO 0x0667 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -711,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } - - -/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */ -static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable) -{ - u32 val; - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - - if (enable) - val |= MVNETA_GMAC2_PORT_RGMII; - else - val &= ~MVNETA_GMAC2_PORT_RGMII; - - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); -} - -/* Config SGMII port */ -static void mvneta_port_sgmii_config(struct mvneta_port *pp) -{ - u32 val; - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val |= MVNETA_GMAC2_PCS_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - - mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); -} - /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { @@ -2749,26 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, } /* Power up the port */ -static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) +static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) { - u32 val; + u32 ctrl; /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - if (phy_mode == PHY_INTERFACE_MODE_SGMII) - mvneta_port_sgmii_config(pp); + ctrl = mvreg_read(pp, 
MVNETA_GMAC_CTRL_2); - mvneta_gmac_rgmii_set(pp, 1); + /* Even though it might look weird, when we're configured in + * SGMII or QSGMII mode, the RGMII bit needs to be set. + */ + switch(phy_mode) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); + ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; + break; + case PHY_INTERFACE_MODE_SGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); + ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + ctrl |= MVNETA_GMAC2_PORT_RGMII; + break; + default: + return -EINVAL; + } /* Cancel Port Reset */ - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val &= ~MVNETA_GMAC2_PORT_RESET; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + ctrl &= ~MVNETA_GMAC2_PORT_RESET; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & MVNETA_GMAC2_PORT_RESET) != 0) continue; + + return 0; } /* Device initialization routine */ @@ -2879,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev) dev_err(&pdev->dev, "can't init eth hal\n"); goto err_free_stats; } - mvneta_port_power_up(pp, phy_mode); + + err = mvneta_port_power_up(pp, phy_mode); + if (err < 0) { + dev_err(&pdev->dev, "can't power up port\n"); + goto err_deinit; + } dram_target_info = mv_mbus_dram_info(); if (dram_target_info) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 70e95324a97d..c2cd8d31bcad 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->ring = ring; cq->is_tx = mode; - spin_lock_init(&cq->lock); /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f085c2df5e69..7e4b1720c3d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_cq *cq; - unsigned long flags; int i; for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; - spin_lock_irqsave(&cq->lock, flags); - napi_synchronize(&cq->napi); - mlx4_en_process_rx_cq(dev, cq, 0); - spin_unlock_irqrestore(&cq->lock, flags); + napi_schedule(&cq->napi); } } #endif diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 7a733c287744..04d9b6fe3e80 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -319,7 +319,6 @@ struct mlx4_en_cq { struct mlx4_cq mcq; struct mlx4_hwq_resources wqres; int ring; - spinlock_t lock; struct net_device *dev; struct napi_struct napi; int size; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 21c20ea0dad0..b5ed30a39144 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) /* If it was a port reset, trigger reallocation of MC resources. * Note that on an MC reset nothing needs to be done now because we'll * detect the MC reset later and handle it then. 
+ * For an FLR, we never get an MC reset event, but the MC has reset all + * resources assigned to us, so we have to trigger reallocation now. */ - if (reset_type == RESET_TYPE_ALL && !rc) + if ((reset_type == RESET_TYPE_ALL || + reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) efx_ef10_reset_mc_allocations(efx); return rc; } @@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx) return 0; } +static void efx_ef10_prepare_flr(struct efx_nic *efx) +{ + atomic_set(&efx->active_queues, 0); +} + static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, const struct efx_filter_spec *right) { @@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, .fini_dmaq = efx_ef10_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, .update_stats = efx_ef10_update_stats, .start_stats = efx_mcdi_mac_start_stats, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 57b971e5e6b2..63d595fd3cc5 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", [RESET_TYPE_WORLD] = "WORLD", [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_MC_BIST] = "MC_BIST", [RESET_TYPE_DISABLE] = "DISABLE", [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", [RESET_TYPE_INT_ERROR] = "INT_ERROR", @@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", [RESET_TYPE_TX_SKIP] = "TX_SKIP", [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", - [RESET_TYPE_MC_BIST] = "MC_BIST", + [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", }; /* Reset workqueue. If any NIC has a hardware failure then a reset will be @@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx) /* Check that it is appropriate to restart the interface. All * of these flags are safe to read under just the rtnl lock */ - if (efx->port_enabled || !netif_running(efx->net_dev)) + if (efx->port_enabled || !netif_running(efx->net_dev) || + efx->reset_pending) return; efx_start_port(efx); @@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) { EFX_ASSERT_RESET_SERIALISED(efx); + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + efx_stop_all(efx); efx_disable_interrupts(efx); @@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) EFX_ASSERT_RESET_SERIALISED(efx); + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + /* Ensure that SRAM is initialised even if we're disabling the device */ rc = efx->type->init(efx); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); @@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) /* Clear flags for the scopes we covered. We assume the NIC and * driver are now quiescent so that there is no race here. */ - efx->reset_pending &= -(1 << (method + 1)); + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); /* Reinitialise bus-mastering, which may have been turned off before * the reset was scheduled. 
This is still appropriate, even in the @@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) case RESET_TYPE_DISABLE: case RESET_TYPE_RECOVER_OR_DISABLE: case RESET_TYPE_MC_BIST: + case RESET_TYPE_MCDI_TIMEOUT: method = type; netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", RESET_TYPE(method)); diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h index 75ef7ef6450b..d1dbb5fb31bb 100644 --- a/drivers/net/ethernet/sfc/enum.h +++ b/drivers/net/ethernet/sfc/enum.h @@ -143,6 +143,7 @@ enum efx_loopback_mode { * @RESET_TYPE_WORLD: Reset as much as possible * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if * unsuccessful. + * @RESET_TYPE_MC_BIST: MC entering BIST mode. * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog * @RESET_TYPE_INT_ERROR: reset due to internal error @@ -150,14 +151,16 @@ enum efx_loopback_mode { * @RESET_TYPE_DMA_ERROR: DMA error * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors * @RESET_TYPE_MC_FAILURE: MC reboot/assertion + * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout. */ enum reset_type { - RESET_TYPE_INVISIBLE = 0, - RESET_TYPE_RECOVER_OR_ALL = 1, - RESET_TYPE_ALL = 2, - RESET_TYPE_WORLD = 3, - RESET_TYPE_RECOVER_OR_DISABLE = 4, - RESET_TYPE_DISABLE = 5, + RESET_TYPE_INVISIBLE, + RESET_TYPE_RECOVER_OR_ALL, + RESET_TYPE_ALL, + RESET_TYPE_WORLD, + RESET_TYPE_RECOVER_OR_DISABLE, + RESET_TYPE_MC_BIST, + RESET_TYPE_DISABLE, RESET_TYPE_MAX_METHOD, RESET_TYPE_TX_WATCHDOG, RESET_TYPE_INT_ERROR, @@ -165,7 +168,13 @@ enum reset_type { RESET_TYPE_DMA_ERROR, RESET_TYPE_TX_SKIP, RESET_TYPE_MC_FAILURE, - RESET_TYPE_MC_BIST, + /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but + * it doesn't fit the scope hierarchy (not well-ordered by inclusion). + * We encode this by having its enum value be greater than + * RESET_TYPE_MAX_METHOD. This also prevents issuing it with + * efx_ioctl_reset. + */ + RESET_TYPE_MCDI_TIMEOUT, RESET_TYPE_MAX, }; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 8ec20b713cc6..fae25a418647 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, @@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = falcon_prepare_flush, .finish_flush = efx_port_dummy_op_void, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, .describe_stats = falcon_describe_nic_stats, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index a08761360cdf..0537381cd2f6 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx) return rc; } +/* Reset queue and flush accounting after FLR + * + * One possible cause of FLR recovery is that DMA may be failing (eg. 
if bus + * mastering was disabled), in which case we don't receive (RXQ) flush + * completion events. This means that efx->rxq_flush_outstanding remained at 4 + * after the FLR; also, efx->active_queues was non-zero (as no flush completion + * events were received, and we didn't go through efx_check_tx_flush_complete()) + * If we don't fix this up, on the next call to efx_realloc_channels() we won't + * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 + * for batched flush requests; and the efx->active_queues gets messed up because + * we keep incrementing for the newly initialised queues, but it never went to + * zero previously. Then we get a timeout every time we try to restart the + * queues, as it doesn't go back to zero when we should be flushing the queues. + */ +void efx_farch_finish_flr(struct efx_nic *efx) +{ + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + atomic_set(&efx->active_queues, 0); +} + + /************************************************************************** * * Event queue processing diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 7bd4b14bf3b3..5239cf9bdc56 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); static bool efx_mcdi_poll_once(struct efx_nic *efx); - -static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) -{ - EFX_BUG_ON_PARANOID(!efx->mcdi); - return &efx->mcdi->iface; -} +static void efx_mcdi_abandon(struct efx_nic *efx); int efx_mcdi_init(struct efx_nic *efx) { @@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, rc = 0; } + efx_mcdi_abandon(efx); + /* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request we've just cancelled, by ensuring * that the seqno check therein fails. @@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, if (efx->mc_bist_for_other_fn) return -ENETDOWN; + if (mcdi->mode == MCDI_MODE_FAIL) + return -ENETDOWN; + efx_mcdi_acquire_sync(mcdi); efx_mcdi_send_request(efx, cmd, inbuf, inlen); return 0; @@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx) return; mcdi = efx_mcdi(efx); - if (mcdi->mode == MCDI_MODE_POLL) + /* If already in polling mode, nothing to do. + * If in fail-fast state, don't switch to polled completion. + * FLR recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL) return; /* We can switch from event completion to polled completion, because @@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx) mcdi = efx_mcdi(efx); - /* We must be in polling mode so no more requests can be queued */ - BUG_ON(mcdi->mode != MCDI_MODE_POLL); + /* We must be in poll or fail mode so no more requests can be queued */ + BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); del_timer_sync(&mcdi->async_timer); @@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx) return; mcdi = efx_mcdi(efx); - - if (mcdi->mode == MCDI_MODE_EVENTS) + /* If already in event completion mode, nothing to do. + * If in fail-fast state, don't switch to event completion. FLR + * recovery will do that later. 
+ */ + if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL) return; /* We can't switch from polled to event completion in the middle of a @@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx) spin_unlock(&mcdi->iface_lock); } +/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try + * to recover. + */ +static void efx_mcdi_abandon(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL) + return; /* it had already been done */ + netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n"); + efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); +} + /* Called from falcon_process_eventq for MCDI events */ void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event) @@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) { int rc; + /* If MCDI is down, we can't handle_assertion */ + if (method == RESET_TYPE_MCDI_TIMEOUT) { + rc = pci_reset_function(efx->pci_dev); + if (rc) + return rc; + /* Re-enable polled MCDI completion */ + if (efx->mcdi) { + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + mcdi->mode = MCDI_MODE_POLL; + } + return 0; + } + /* Recover from a failed assertion pre-reset */ rc = efx_mcdi_handle_assertion(efx); if (rc) diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 52931aebf3c3..56465f7465a2 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -28,9 +28,16 @@ enum efx_mcdi_state { MCDI_STATE_COMPLETED, }; +/** + * enum efx_mcdi_mode - MCDI transaction mode + * @MCDI_MODE_POLL: poll for MCDI completion, until timeout + * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once + * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls + */ enum efx_mcdi_mode { MCDI_MODE_POLL, MCDI_MODE_EVENTS, + MCDI_MODE_FAIL, }; /** @@ -104,6 +111,12 @@ struct efx_mcdi_data { u32 fn_flags; }; +static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) +{ + EFX_BUG_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->iface; +} + #ifdef CONFIG_SFC_MCDI_MON static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) { diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 8a400a0595eb..5bdae8ed7c57 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -972,6 +972,8 @@ struct efx_mtd_partition { * (for Falcon architecture) * @finish_flush: Clean up after flushing the DMA queues (for Falcon * architecture) + * @prepare_flr: Prepare for an FLR + * @finish_flr: Clean up after an FLR * @describe_stats: Describe statistics for ethtool * @update_stats: Update statistics not provided by event handling. * Either argument may be %NULL. 
@@ -1100,6 +1102,8 @@ struct efx_nic_type { int (*fini_dmaq)(struct efx_nic *efx); void (*prepare_flush)(struct efx_nic *efx); void (*finish_flush)(struct efx_nic *efx); + void (*prepare_flr)(struct efx_nic *efx); + void (*finish_flr)(struct efx_nic *efx); size_t (*describe_stats)(struct efx_nic *efx, u8 *names); size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, struct rtnl_link_stats64 *core_stats); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index a001fae1a8d7..d3ad8ed8d901 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) int efx_nic_flush_queues(struct efx_nic *efx); void siena_prepare_flush(struct efx_nic *efx); int efx_farch_fini_dmaq(struct efx_nic *efx); +void efx_farch_finish_flr(struct efx_nic *efx); void siena_finish_flush(struct efx_nic *efx); void falcon_start_nic_stats(struct efx_nic *efx); void falcon_stop_nic_stats(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 23f3a6f7737a..50ffefed492c 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = { .fini_dmaq = efx_farch_fini_dmaq, .prepare_flush = siena_prepare_flush, .finish_flush = siena_finish_flush, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, .describe_stats = siena_describe_nic_stats, .update_stats = siena_update_nic_stats, .start_stats = efx_mcdi_mac_start_stats, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index e701433bf52f..9c4defdec67b 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -32,29 +32,39 @@ struct mdio_gpio_info { struct mdiobb_ctrl ctrl; - int mdc, mdio; + int mdc, mdio, mdo; + int mdc_active_low, mdio_active_low, mdo_active_low; }; static void *mdio_gpio_of_get_data(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mdio_gpio_platform_data *pdata; + enum of_gpio_flags flags; int ret; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; - ret = of_get_gpio(np, 0); + ret = of_get_gpio_flags(np, 0, &flags); if (ret < 0) return NULL; pdata->mdc = ret; + pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW; - ret = of_get_gpio(np, 1); + ret = of_get_gpio_flags(np, 1, &flags); if (ret < 0) return NULL; pdata->mdio = ret; + pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW; + + ret = of_get_gpio_flags(np, 2, &flags); + if (ret > 0) { + pdata->mdo = ret; + pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW; + } return pdata; } @@ -64,8 +74,19 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); + if (bitbang->mdo) { + /* Separate output pin. Always set its value to high + * when changing direction. If direction is input, + * assume the pin serves as pull-up. If direction is + * output, the default value is high. 
+ */ + gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); + return; + } + if (dir) - gpio_direction_output(bitbang->mdio, 1); + gpio_direction_output(bitbang->mdio, + 1 ^ bitbang->mdio_active_low); else gpio_direction_input(bitbang->mdio); } @@ -75,7 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - return gpio_get_value(bitbang->mdio); + return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; } static void mdio_set(struct mdiobb_ctrl *ctrl, int what) @@ -83,7 +104,10 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value(bitbang->mdio, what); + if (bitbang->mdo) + gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); + else + gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); } static void mdc_set(struct mdiobb_ctrl *ctrl, int what) @@ -91,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) struct mdio_gpio_info *bitbang = container_of(ctrl, struct mdio_gpio_info, ctrl); - gpio_set_value(bitbang->mdc, what); + gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); } static struct mdiobb_ops mdio_gpio_ops = { @@ -110,18 +134,22 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mdio_gpio_info *bitbang; int i; - bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); + bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); if (!bitbang) goto out; bitbang->ctrl.ops = &mdio_gpio_ops; bitbang->ctrl.reset = pdata->reset; bitbang->mdc = pdata->mdc; + bitbang->mdc_active_low = pdata->mdc_active_low; bitbang->mdio = pdata->mdio; + bitbang->mdio_active_low = pdata->mdio_active_low; + bitbang->mdo = pdata->mdo; + bitbang->mdo_active_low = pdata->mdo_active_low; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) - goto out_free_bitbang; + goto out; new_bus->name = "GPIO Bitbanged MDIO", @@ -138,11 +166,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); - if (gpio_request(bitbang->mdc, "mdc")) + if (devm_gpio_request(dev, bitbang->mdc, "mdc")) + goto out_free_bus; + + if (devm_gpio_request(dev, bitbang->mdio, "mdio")) goto out_free_bus; - if (gpio_request(bitbang->mdio, "mdio")) - goto out_free_mdc; + if (bitbang->mdo) { + if (devm_gpio_request(dev, bitbang->mdo, "mdo")) + goto out_free_bus; + gpio_direction_output(bitbang->mdo, 1); + gpio_direction_input(bitbang->mdio); + } gpio_direction_output(bitbang->mdc, 0); @@ -150,12 +185,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, return new_bus; -out_free_mdc: - gpio_free(bitbang->mdc); out_free_bus: free_mdio_bitbang(new_bus); -out_free_bitbang: - kfree(bitbang); out: return NULL; } @@ -163,13 +194,8 @@ out: static void mdio_gpio_bus_deinit(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); - struct mdio_gpio_info *bitbang = bus->priv; - dev_set_drvdata(dev, NULL); - gpio_free(bitbang->mdio); - gpio_free(bitbang->mdc); free_mdio_bitbang(bus); - kfree(bitbang); } static void mdio_gpio_bus_destroy(struct device *dev) diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c index e323b4d54338..34f97c31eecf 100644 --- a/drivers/net/wireless/cw1200/debug.c +++ b/drivers/net/wireless/cw1200/debug.c @@ -41,6 +41,8 @@ static const char * const cw1200_debug_link_id[] = { "REQ", "SOFT", "HARD", + "RESET", + "RESET_REMAP", }; static const char 
*cw1200_debug_mode(int mode) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 003a546571d4..4c2d4ef28b22 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -67,8 +67,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 8 -#define IWL3160_UCODE_API_MAX 8 +#define IWL7260_UCODE_API_MAX 9 +#define IWL3160_UCODE_API_MAX 9 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 8 @@ -244,3 +244,4 @@ const struct iwl_cfg iwl7265_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 685f7e8e6943..fa858d548d13 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -190,7 +190,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { cpu_to_le32(0xcc00aaaa), cpu_to_le32(0x0000aaaa), cpu_to_le32(0xc0004000), - cpu_to_le32(0x00000000), + cpu_to_le32(0x00004000), cpu_to_le32(0xf0005000), cpu_to_le32(0xf0005000), }, @@ -213,16 +213,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { /* Tx Tx disabled */ cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), + cpu_to_le32(0xeeaaaaaa), cpu_to_le32(0xaaaaaaaa), cpu_to_le32(0xcc00ff28), cpu_to_le32(0x0000aaaa), cpu_to_le32(0xcc00aaaa), cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xC0004000), - cpu_to_le32(0xC0004000), - cpu_to_le32(0xF0005000), - cpu_to_le32(0xF0005000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xc0004000), + cpu_to_le32(0xf0005000), + cpu_to_le32(0xf0005000), }, }; @@ -1262,6 +1262,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 ant_isolation = le32_to_cpup((void *)pkt->data); u8 __maybe_unused lower_bound, upper_bound; + int ret; u8 lut; struct iwl_bt_coex_cmd *bt_cmd; @@ -1318,5 +1319,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, sizeof(bt_cmd->bt4_corun_lut40)); - return 0; + ret = iwl_mvm_send_cmd(mvm, &cmd); + + kfree(bt_cmd); + return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 4dd9ff43b8b6..f0cebf12c7b8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1332,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); + iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC)); } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 568abd61b14f..9f52c5b3f0ec 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -59,7 +59,7 @@ /* max allowed rate miss before sync LQ cmd */ #define IWL_MISSED_RATE_MAX 15 #define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ) - +#define RS_IDLE_TIMEOUT (5*HZ) static u8 rs_ht_to_legacy[] = { [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, @@ -142,7 +142,7 @@ enum rs_column_mode { RS_MIMO2, }; -#define MAX_NEXT_COLUMNS 5 +#define 
MAX_NEXT_COLUMNS 7 #define MAX_COLUMN_CHECKS 3 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, @@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_LEGACY_ANT_B, RS_COLUMN_SISO_ANT_A, RS_COLUMN_SISO_ANT_B, - RS_COLUMN_MIMO2, - RS_COLUMN_MIMO2_SGI, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, }, }, [RS_COLUMN_LEGACY_ANT_B] = { @@ -223,8 +225,10 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_LEGACY_ANT_A, RS_COLUMN_SISO_ANT_A, RS_COLUMN_SISO_ANT_B, - RS_COLUMN_MIMO2, - RS_COLUMN_MIMO2_SGI, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, + RS_COLUMN_INVALID, }, }, [RS_COLUMN_SISO_ANT_A] = { @@ -235,7 +239,9 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_MIMO2, RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_SISO_ANT_B_SGI, - RS_COLUMN_MIMO2_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, @@ -249,7 +255,9 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_MIMO2, RS_COLUMN_SISO_ANT_B_SGI, RS_COLUMN_SISO_ANT_A_SGI, - RS_COLUMN_MIMO2_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, + RS_COLUMN_INVALID, }, .checks = { rs_siso_allow, @@ -265,6 +273,8 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_SISO_ANT_A, RS_COLUMN_SISO_ANT_B, RS_COLUMN_MIMO2, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, }, .checks = { rs_siso_allow, @@ -281,6 +291,8 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_SISO_ANT_B, RS_COLUMN_SISO_ANT_A, RS_COLUMN_MIMO2, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, }, .checks = { rs_siso_allow, @@ -296,6 +308,8 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_SISO_ANT_A_SGI, RS_COLUMN_SISO_ANT_B_SGI, RS_COLUMN_MIMO2_SGI, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, }, .checks = { rs_mimo_allow, @@ -311,6 +325,8 @@ static const struct rs_tx_column rs_tx_columns[] = { RS_COLUMN_SISO_ANT_A, RS_COLUMN_SISO_ANT_B, RS_COLUMN_MIMO2, + RS_COLUMN_LEGACY_ANT_A, + RS_COLUMN_LEGACY_ANT_B, }, .checks = { rs_mimo_allow, @@ -503,10 +519,12 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) window->average_tpt = IWL_INVALID_VALUE; } -static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl) +static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm, + struct iwl_scale_tbl_info *tbl) { int i; + IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); for (i = 0; i < IWL_RATE_COUNT; i++) rs_rate_scale_clear_window(&tbl->win[i]); } @@ -992,6 +1010,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, return; } +#ifdef CPTCFG_MAC80211_DEBUGFS + /* Disable last tx check if we are debugging with fixed rate */ + if (lq_sta->dbg_fixed_rate) { + IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); + return; + } +#endif if (!ieee80211_is_data(hdr->frame_control) || info->flags & IEEE80211_TX_CTL_NO_ACK) return; @@ -1034,6 +1059,18 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, mac_index++; } + if (time_after(jiffies, + (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) { + int tid; + IWL_DEBUG_RATE(mvm, "Tx idle for too long. 
reinit rs\n"); + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + ieee80211_stop_tx_ba_session(sta, tid); + + iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); + return; + } + lq_sta->last_tx = jiffies; + /* Here we actually compare this rate to the latest LQ command */ if ((mac_index < 0) || (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || @@ -1186,9 +1223,26 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy, lq_sta->visited_columns = 0; } +static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta, + const struct rs_tx_column *column) +{ + switch (column->mode) { + case RS_LEGACY: + return lq_sta->max_legacy_rate_idx; + case RS_SISO: + return lq_sta->max_siso_rate_idx; + case RS_MIMO2: + return lq_sta->max_mimo2_rate_idx; + default: + WARN_ON_ONCE(1); + } + + return lq_sta->max_legacy_rate_idx; +} + static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, - const struct rs_tx_column *column, - u32 bw) + const struct rs_tx_column *column, + u32 bw) { /* Used to choose among HT tables */ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; @@ -1438,7 +1492,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) IWL_DEBUG_RATE(mvm, "LQ: stay in table clear win\n"); - rs_rate_scale_clear_tbl_windows(tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); } } @@ -1446,8 +1500,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) * bitmaps and stats in active table (this will become the new * "search" table). */ if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { - IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); - rs_rate_scale_clear_tbl_windows(tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); } } } @@ -1485,14 +1538,14 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl) { - int i, j, n; + int i, j, max_rate; enum rs_column next_col_id; const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; const struct rs_tx_column *next_col; allow_column_func_t allow_func; u8 valid_ants = mvm->fw->valid_tx_ant; const u16 *expected_tpt_tbl; - s32 tpt, max_expected_tpt; + u16 tpt, max_expected_tpt; for (i = 0; i < MAX_NEXT_COLUMNS; i++) { next_col_id = curr_col->next_columns[i]; @@ -1535,11 +1588,11 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, if (WARN_ON_ONCE(!expected_tpt_tbl)) continue; - max_expected_tpt = 0; - for (n = 0; n < IWL_RATE_COUNT; n++) - if (expected_tpt_tbl[n] > max_expected_tpt) - max_expected_tpt = expected_tpt_tbl[n]; + max_rate = rs_get_max_allowed_rate(lq_sta, next_col); + if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID)) + continue; + max_expected_tpt = expected_tpt_tbl[max_rate]; if (tpt >= max_expected_tpt) { IWL_DEBUG_RATE(mvm, "Skip column %d: can't beat current TPT. Max expected %d current %d\n", @@ -1547,14 +1600,15 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, continue; } + IWL_DEBUG_RATE(mvm, + "Found potential column %d. 
Max expected %d current %d\n", + next_col_id, max_expected_tpt, tpt); break; } if (i == MAX_NEXT_COLUMNS) return RS_COLUMN_INVALID; - IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id); - return next_col_id; } @@ -1640,85 +1694,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, { enum rs_action action = RS_ACTION_STAY; - /* Too many failures, decrease rate */ if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) { IWL_DEBUG_RATE(mvm, - "decrease rate because of low SR\n"); - action = RS_ACTION_DOWNSCALE; - /* No throughput measured yet for adjacent rates; try increase. */ - } else if ((low_tpt == IWL_INVALID_VALUE) && - (high_tpt == IWL_INVALID_VALUE)) { - if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) { - IWL_DEBUG_RATE(mvm, - "Good SR and no high rate measurement. " - "Increase rate\n"); - action = RS_ACTION_UPSCALE; - } else if (low != IWL_RATE_INVALID) { - IWL_DEBUG_RATE(mvm, - "Remain in current rate\n"); - action = RS_ACTION_STAY; - } + "Decrease rate because of low SR\n"); + return RS_ACTION_DOWNSCALE; } - /* Both adjacent throughputs are measured, but neither one has better - * throughput; we're using the best rate, don't change it! - */ - else if ((low_tpt != IWL_INVALID_VALUE) && - (high_tpt != IWL_INVALID_VALUE) && - (low_tpt < current_tpt) && - (high_tpt < current_tpt)) { + if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE) && + (high != IWL_RATE_INVALID)) { IWL_DEBUG_RATE(mvm, - "Both high and low are worse. " - "Maintain rate\n"); - action = RS_ACTION_STAY; + "No data about high/low rates. Increase rate\n"); + return RS_ACTION_UPSCALE; } - /* At least one adjacent rate's throughput is measured, - * and may have better performance. - */ - else { - /* Higher adjacent rate's throughput is measured */ - if (high_tpt != IWL_INVALID_VALUE) { - /* Higher rate has better throughput */ - if (high_tpt > current_tpt && - sr >= IWL_RATE_INCREASE_TH) { - IWL_DEBUG_RATE(mvm, - "Higher rate is better and good " - "SR. Increate rate\n"); - action = RS_ACTION_UPSCALE; - } else { - IWL_DEBUG_RATE(mvm, - "Higher rate isn't better OR " - "no good SR. Maintain rate\n"); - action = RS_ACTION_STAY; - } + if ((high_tpt == IWL_INVALID_VALUE) && + (high != IWL_RATE_INVALID) && + (low_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt)) { + IWL_DEBUG_RATE(mvm, + "No data about high rate and low rate is worse. Increase rate\n"); + return RS_ACTION_UPSCALE; + } - /* Lower adjacent rate's throughput is measured */ - } else if (low_tpt != IWL_INVALID_VALUE) { - /* Lower rate has better throughput */ - if (low_tpt > current_tpt) { - IWL_DEBUG_RATE(mvm, - "Lower rate is better. " - "Decrease rate\n"); - action = RS_ACTION_DOWNSCALE; - } else if (sr >= IWL_RATE_INCREASE_TH) { - IWL_DEBUG_RATE(mvm, - "Lower rate isn't better and " - "good SR. Increase rate\n"); - action = RS_ACTION_UPSCALE; - } - } + if ((high_tpt != IWL_INVALID_VALUE) && + (high_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Higher rate is better. Increate rate\n"); + return RS_ACTION_UPSCALE; } - /* Sanity check; asked for decrease, but success rate or throughput - * has been good at old rate. Don't change it. - */ - if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) && - ((sr > IWL_RATE_HIGH_TH) || - (current_tpt > (100 * tbl->expected_tpt[low])))) { + if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Both high and low are worse. 
Maintain rate\n"); + return RS_ACTION_STAY; + } + + if ((low_tpt != IWL_INVALID_VALUE) && + (low_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "Lower rate is better\n"); + action = RS_ACTION_DOWNSCALE; + goto out; + } + + if ((low_tpt == IWL_INVALID_VALUE) && + (low != IWL_RATE_INVALID)) { IWL_DEBUG_RATE(mvm, - "Sanity check failed. Maintain rate\n"); - action = RS_ACTION_STAY; + "No data about lower rate\n"); + action = RS_ACTION_DOWNSCALE; + goto out; + } + + IWL_DEBUG_RATE(mvm, "Maintain rate\n"); + +out: + if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) { + if (sr >= RS_SR_NO_DECREASE) { + IWL_DEBUG_RATE(mvm, + "SR is above NO DECREASE. Avoid downscale\n"); + action = RS_ACTION_STAY; + } else if (current_tpt > (100 * tbl->expected_tpt[low])) { + IWL_DEBUG_RATE(mvm, + "Current TPT is higher than max expected in low rate. Avoid downscale\n"); + action = RS_ACTION_STAY; + } else { + IWL_DEBUG_RATE(mvm, "Decrease rate\n"); + } } return action; @@ -1792,6 +1837,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, "Aggregation changed: prev %d current %d. Update expected TPT table\n", prev_agg, lq_sta->is_agg); rs_set_expected_tpt_table(lq_sta, tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); } /* current tx rate */ @@ -2021,7 +2067,7 @@ lq_update: if (lq_sta->search_better_tbl) { /* Access the "search" table, clear its history. */ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); - rs_rate_scale_clear_tbl_windows(tbl); + rs_rate_scale_clear_tbl_windows(mvm, tbl); /* Use new "search" start rate */ index = tbl->rate.index; @@ -2042,8 +2088,18 @@ lq_update: * stay with best antenna legacy modulation for a while * before next round of mode comparisons. */ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) { + if (is_legacy(&tbl1->rate)) { IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); + + if (tid != IWL_MAX_TID_COUNT) { + tid_data = &sta_priv->tid_data[tid]; + if (tid_data->state != IWL_AGG_OFF) { + IWL_DEBUG_RATE(mvm, + "Stop aggregation on tid %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } rs_set_stay_in_table(mvm, 1, lq_sta); } else { /* If we're in an HT mode, and all 3 mode switch actions @@ -2342,9 +2398,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->lq.sta_id = sta_priv->sta_id; for (j = 0; j < LQ_SIZE; j++) - rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]); + rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); lq_sta->flush_timer = 0; + lq_sta->last_tx = jiffies; IWL_DEBUG_RATE(mvm, "LQ: *** rate scale station global init for station %d ***\n", @@ -2388,11 +2445,22 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_sta->is_vht = true; } - IWL_DEBUG_RATE(mvm, - "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n", + lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate, + BITS_PER_LONG); + lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate, + BITS_PER_LONG); + lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate, + BITS_PER_LONG); + + IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n", + lq_sta->active_legacy_rate, lq_sta->active_siso_rate, lq_sta->active_mimo2_rate, lq_sta->is_vht); + IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n", + lq_sta->max_legacy_rate_idx, + lq_sta->max_siso_rate_idx, + lq_sta->max_mimo2_rate_idx); /* These values will be overridden later */ lq_sta->lq.single_stream_ant_msk = @@ -2547,6 +2615,7 @@ static void 
rs_build_rates_table(struct iwl_mvm *mvm, if (is_siso(&rate)) { num_rates = RS_SECONDARY_SISO_NUM_RATES; num_retries = RS_SECONDARY_SISO_RETRIES; + lq_cmd->mimo_delim = index; } else if (is_legacy(&rate)) { num_rates = RS_SECONDARY_LEGACY_NUM_RATES; num_retries = RS_LEGACY_RETRIES_PER_RATE; @@ -2749,7 +2818,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, return -ENOMEM; desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); - desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n", lq_sta->total_failed, lq_sta->total_success, lq_sta->active_legacy_rate); desc += sprintf(buff+desc, "fixed rate 0x%X\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 3332b396011e..0acfac96a56c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -156,6 +156,7 @@ enum { #define IWL_RATE_HIGH_TH 10880 /* 85% */ #define IWL_RATE_INCREASE_TH 6400 /* 50% */ #define RS_SR_FORCE_DECREASE 1920 /* 15% */ +#define RS_SR_NO_DECREASE 10880 /* 85% */ #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ #define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) @@ -310,13 +311,20 @@ struct iwl_lq_sta { u32 visited_columns; /* Bitmask marking which Tx columns were * explored during a search cycle */ + u64 last_tx; bool is_vht; enum ieee80211_band band; /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ - u16 active_legacy_rate; - u16 active_siso_rate; - u16 active_mimo2_rate; + unsigned long active_legacy_rate; + unsigned long active_siso_rate; + unsigned long active_mimo2_rate; + + /* Highest rate per Tx mode */ + u8 max_legacy_rate_idx; + u8 max_siso_rate_idx; + u8 max_mimo2_rate_idx; + s8 max_rate_idx; /* Max rate set by user */ u8 missed_rate_counter; diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 8401627c0030..88809b2d1654 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, return -EINVAL; if (changed_vif->type != NL80211_IFTYPE_STATION) { new_state = SF_UNINIT; - } else if (changed_vif->bss_conf.assoc) { + } else if (changed_vif->bss_conf.assoc && + changed_vif->bss_conf.dtim_period) { mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); sta_id = mvmvif->ap_sta_id; new_state = SF_FULL_ON; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index edb015c99049..3d1d57f9f5bc 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -373,12 +373,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/main.c 
b/drivers/net/wireless/mwifiex/main.c index 77db0886c6e2..9c771b3e9918 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -292,6 +292,12 @@ process_start: while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) mwifiex_handle_rx_packet(adapter, skb); + /* Check for event */ + if (adapter->event_received) { + adapter->event_received = false; + mwifiex_process_event(adapter); + } + /* Check for Cmd Resp */ if (adapter->cmd_resp_received) { adapter->cmd_resp_received = false; @@ -304,12 +310,6 @@ process_start: } } - /* Check for event */ - if (adapter->event_received) { - adapter->event_received = false; - mwifiex_process_event(adapter); - } - /* Check if we need to confirm Sleep Request received previously */ if (adapter->ps_state == PS_STATE_PRE_SLEEP) { diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 894270611f2c..536c14aa71f3 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -60,9 +60,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, int status; /* Wait for completion */ - status = wait_event_interruptible(adapter->cmd_wait_q.wait, - *(cmd_queued->condition)); - if (status) { + status = wait_event_interruptible_timeout(adapter->cmd_wait_q.wait, + *(cmd_queued->condition), + (12 * HZ)); + if (status <= 0) { dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); mwifiex_cancel_all_pending_cmd(adapter); return status; diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 1a8d32138593..cf61d6e3eaa7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -88,7 +88,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common) bool recontend_queue = false; u32 q_len = 0; u8 q_num = INVALID_QUEUE; - u8 ii, min = 0; + u8 ii = 0, min = 0; if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { if (!common->mgmt_q_block) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 73694295648f..1b28cda6ca88 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -841,16 +841,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel) rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); - if (!skb) { - rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", - __func__); - return -ENOMEM; - } - - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; - if (common->band == IEEE80211_BAND_5GHZ) { if ((channel >= 36) && (channel <= 64)) channel = ((channel - 32) / 4); @@ -868,6 +858,16 @@ int rsi_set_channel(struct rsi_common *common, u16 channel) } } + skb = dev_alloc_skb(FRAME_DESC_SZ); + if (!skb) { + rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", + __func__); + return -ENOMEM; + } + + memset(skb->data, 0, FRAME_DESC_SZ); + mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); mgmt_frame->desc_word[4] = cpu_to_le16(channel); @@ -966,6 +966,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", __func__); + dev_kfree_skb(skb); return -ENOMEM; } diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index 
398f3d2c0a6c..a76e98eb8372 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -68,6 +68,26 @@ struct wl18xx_event_mailbox { /* bitmap of inactive stations (by HLID) */ __le32 inactive_sta_bitmap; + + /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */ + u8 rx_ba_role_id; + u8 rx_ba_link_id; + u8 rx_ba_win_size; + u8 padding; + + /* smart config */ + u8 sc_ssid_len; + u8 sc_pwd_len; + u8 sc_token_len; + u8 padding1; + u8 sc_ssid[32]; + u8 sc_pwd[32]; + u8 sc_token[32]; + + /* smart config sync channel */ + u8 sc_sync_channel; + u8 sc_sync_band; + u8 padding2[2]; } __packed; int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 1f9a36031b06..16d10281798d 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -158,6 +158,11 @@ EXPORT_SYMBOL_GPL(wlcore_event_channel_switch); void wlcore_event_dummy_packet(struct wl1271 *wl) { + if (wl->plt) { + wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring."); + return; + } + wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); wl1271_tx_dummy_packet(wl); } |