From 8d5f4b07174976c55a5f5d6967777373c6826944 Mon Sep 17 00:00:00 2001
From: Bernd Edlinger
Date: Sat, 21 Oct 2017 06:51:30 +0000
Subject: stmmac: Don't access tx_q->dirty_tx before netif_tx_lock

This is a possible cause of several hard-to-reproduce problems on my
ARMv7-SMP test system.  The symptoms are imprecise external aborts on
recent kernels, and various kinds of network stalls and unexpected
page allocation failures on older kernels.

My testing indicates that the trouble started between v4.5 and v4.6
and persists up to v4.14.

Using dirty_tx before acquiring the spin lock is clearly wrong and was
first introduced in v4.6.

Fixes: e3ad57c96715 ("stmmac: review RX/TX ring management")
Signed-off-by: Bernd Edlinger
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 284c10720daf..16bd50929084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1799,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
-	unsigned int entry = tx_q->dirty_tx;
+	unsigned int entry;
 
 	netif_tx_lock(priv->dev);
 
 	priv->xstats.tx_clean++;
 
+	entry = tx_q->dirty_tx;
 	while (entry != tx_q->cur_tx) {
 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
 		struct dma_desc *p;
-- 
cgit v1.2.3
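
The pattern the patch enforces can be sketched outside the kernel.  Below is a
minimal, hypothetical user-space analogue using pthreads, not stmmac code; the
names ring_lock, dirty, cur and clean() are made up for illustration.  It shows
why the consumer index of a shared ring should only be read after the lock that
serializes the cleaners is held, which is what moving "entry = tx_q->dirty_tx;"
below netif_tx_lock() achieves in the driver.

/* Hypothetical user-space analogue of the locking pattern (not driver code).
 * Two "cleaners" drain a shared ring; the consumer index is snapshotted only
 * while holding the lock, so neither thread works from a stale value.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 256

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int dirty;		/* consumer index, shared          */
static unsigned int cur = 16;		/* producer index (fixed, example) */

static void *clean(void *arg)
{
	unsigned int entry;

	/* WRONG (old pattern): "entry = dirty;" here, before the lock,
	 * could capture a value another cleaner is about to advance. */

	pthread_mutex_lock(&ring_lock);
	entry = dirty;			/* RIGHT: snapshot under the lock */
	while (entry != cur) {
		/* release descriptor 'entry' here ... */
		entry = (entry + 1) % RING_SIZE;
	}
	dirty = entry;
	pthread_mutex_unlock(&ring_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, clean, NULL);
	pthread_create(&b, NULL, clean, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("dirty caught up to cur: %u == %u\n", dirty, cur);
	return 0;
}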