From 5969c42768ed6d19e8a61d1ac60e5c8bee8fe9d0 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 19 Jun 2017 15:37:03 +0200
Subject: net-next: mediatek: print phy status changes for non DSA GMACs

Currently PHY status changes are only printed for DSA ports. This patch
adds code to also print status changes for non-fixed links.

Signed-off-by: John Crispin
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 962975d192d1..24d5f1cad7f4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -221,6 +221,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
 		netif_carrier_on(dev);
 	else
 		netif_carrier_off(dev);
+
+	if (!of_phy_is_fixed_link(mac->of_node))
+		phy_print_status(dev->phydev);
 }
 
 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
--
cgit v1.2.3

From 671d41e60dbd8943ce069137b312f834a483c582 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 19 Jun 2017 15:37:04 +0200
Subject: net-next: mediatek: add RX IRQ delay support

The PDMA engine used for RX allows IRQ aggregation. The patch sets up the
corresponding registers to aggregate 4 IRQs into one. Using aggregation
reduces the load on the core handling to a quarter thus reducing IRQ
latency and increasing RX performance by around 10%.

Signed-off-by: John Crispin
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c |  4 +++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 13 ++++++++++---
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 24d5f1cad7f4..92be59a1e4e7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1861,9 +1861,11 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	/* Enable RX VLan Offloading */
 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 
+	/* enable interrupt delay for RX */
+	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
 	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
 	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3c46a3b613b9..e130c3b24c4c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -125,7 +125,14 @@
 #define MTK_PST_DRX_IDX_CFG(x)	(MTK_PST_DRX_IDX0 << (x))
 
 /* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT	0xa0c
+#define MTK_PDMA_DELAY_INT		0xa0c
+#define MTK_PDMA_DELAY_RX_EN		BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT		4
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT	8
+#define MTK_PDMA_DELAY_RX_PTIME		4
+#define MTK_PDMA_DELAY_RX_DELAY		\
+	(MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME | \
+	(MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT))
 
 /* PDMA Interrupt Status Register */
 #define MTK_PDMA_INT_STATUS	0xa20
@@ -206,6 +213,7 @@
 
 /* QDMA Interrupt Status Register */
 #define MTK_QMTK_INT_STATUS	0x1A18
+#define MTK_RX_DONE_DLY		BIT(30)
 #define MTK_RX_DONE_INT3	BIT(19)
 #define MTK_RX_DONE_INT2	BIT(18)
 #define MTK_RX_DONE_INT1	BIT(17)
@@ -214,8 +222,7 @@
 #define MTK_TX_DONE_INT2	BIT(2)
 #define MTK_TX_DONE_INT1	BIT(1)
 #define MTK_TX_DONE_INT0	BIT(0)
-#define MTK_RX_DONE_INT		(MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \
-				 MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3)
+#define MTK_RX_DONE_INT		MTK_RX_DONE_DLY
 #define MTK_TX_DONE_INT		(MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
 				 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
 
--
cgit v1.2.3
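
A quick aside on the patch above: the value it writes to MTK_PDMA_DELAY_INT follows
directly from the new MTK_PDMA_DELAY_* defines. The short standalone program below is
only an illustration for this write-up (ordinary userspace C, not driver code; the
local BIT() macro and the printf() demo are additions made here), expanding the same
expression to show the register value that mtk_hw_init() programs.

    #include <stdio.h>

    /* Constants copied from the MTK_PDMA_DELAY_* defines introduced above. */
    #define BIT(n)                        (1u << (n))
    #define MTK_PDMA_DELAY_RX_EN          BIT(15) /* enable delayed RX interrupt */
    #define MTK_PDMA_DELAY_RX_PINT        4       /* coalesce up to 4 pending IRQs */
    #define MTK_PDMA_DELAY_RX_PINT_SHIFT  8
    #define MTK_PDMA_DELAY_RX_PTIME       4       /* maximum delay time value */

    int main(void)
    {
            unsigned int val = MTK_PDMA_DELAY_RX_EN | MTK_PDMA_DELAY_RX_PTIME |
                               (MTK_PDMA_DELAY_RX_PINT << MTK_PDMA_DELAY_RX_PINT_SHIFT);

            /* BIT(15) | 4 | (4 << 8) = 0x8404 */
            printf("MTK_PDMA_DELAY_RX_DELAY = 0x%04x\n", val);
            return 0;
    }

So the single register write arms the delayed RX interrupt with a pending-IRQ threshold
of 4, which is what lets one PDMA interrupt signal several received frames, matching the
"aggregate 4 IRQs into one" description in the commit message.
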
From 5cce0322cf8cd2c8073b2f8dac08c56e3f5f4acb Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 19 Jun 2017 15:37:05 +0200
Subject: net-next: mediatek: split IRQ register locking into TX and RX

Originally the driver only utilised the new QDMA engine. The current code
still assumes this is the case when locking the IRQ mask register. Since
RX now runs on the old style PDMA engine we can add a second lock. This
patch reduces the IRQ latency as the TX and RX path no longer need to wait
on each other under heavy load.

Signed-off-by: John Crispin
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 79 ++++++++++++++++++-----------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  5 +-
 2 files changed, 54 insertions(+), 30 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 92be59a1e4e7..462d1e83e254 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -372,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 	mdiobus_unregister(eth->mii_bus);
 }
 
-static inline void mtk_irq_disable(struct mtk_eth *eth,
-				   unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val & ~mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
-static inline void mtk_irq_enable(struct mtk_eth *eth,
-				  unsigned reg, u32 mask)
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val | mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }
 
 static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -1101,7 +1121,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
 		return budget;
 
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
 
 	return tx_done;
 }
@@ -1135,7 +1155,7 @@ poll_again:
 		goto poll_again;
 	}
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 
 	return rx_done + budget - remain_budget;
 }
@@ -1670,7 +1690,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 
 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
 		__napi_schedule(&eth->rx_napi);
-		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1682,7 +1702,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
 
 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
 		__napi_schedule(&eth->tx_napi);
-		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1694,11 +1714,11 @@ static void mtk_poll_controller(struct net_device *dev)
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	mtk_handle_irq_rx(eth->irq[2], dev);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 }
 #endif
 
@@ -1739,8 +1759,8 @@ static int mtk_open(struct net_device *dev)
 
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
-		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 	}
 	atomic_inc(&eth->dma_refcnt);
 
@@ -1785,8 +1805,8 @@ static int mtk_stop(struct net_device *dev)
 	if (!atomic_dec_and_test(&eth->dma_refcnt))
 		return 0;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 
@@ -1866,8 +1886,8 @@ static int mtk_hw_init(struct mtk_eth *eth)
 
 	/* disable delay and normal interrupt */
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1938,8 +1958,8 @@ static void mtk_uninit(struct net_device *dev)
 	phy_disconnect(dev->phydev);
 	if (of_phy_is_fixed_link(mac->of_node))
 		of_phy_deregister_fixed_link(mac->of_node);
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2399,7 +2419,8 @@ static int mtk_probe(struct platform_device *pdev)
 		return PTR_ERR(eth->base);
 
 	spin_lock_init(&eth->page_lock);
-	spin_lock_init(&eth->irq_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
 
 	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 						      "mediatek,ethsys");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index e130c3b24c4c..5868a09f623a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -519,6 +519,8 @@ struct mtk_rx_ring {
  * @dev:		The device pointer
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
+ * @tx_irq__lock:	Make sure that IRQ register operations are atomic
+ * @rx_irq__lock:	Make sure that IRQ register operations are atomic
  * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
  *			dummy for NAPI to work
  * @netdev:		The netdev instances
@@ -547,7 +549,8 @@ struct mtk_eth {
 	struct device			*dev;
 	void __iomem			*base;
 	spinlock_t			page_lock;
-	spinlock_t			irq_lock;
+	spinlock_t			tx_irq_lock;
+	spinlock_t			rx_irq_lock;
 	struct net_device		dummy_dev;
 	struct net_device		*netdev[MTK_MAX_DEVS];
 	struct mtk_mac			*mac[MTK_MAX_DEVS];
--
cgit v1.2.3
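
The locking change in the patch above comes down to giving each IRQ mask register its
own lock so the TX and RX paths never serialize on a shared lock. The fragment below is
a hedged userspace model of that idea only (the variable and lock names are invented for
this sketch, plain integers stand in for the QDMA/PDMA mask registers, and pthread
mutexes stand in for the driver's spinlocks); it is not code from the driver.

    #include <pthread.h>
    #include <stdint.h>

    /* Stand-ins for the two hardware IRQ mask registers. */
    static uint32_t qdma_int_mask;  /* TX mask, guarded by tx_irq_lock only */
    static uint32_t pdma_int_mask;  /* RX mask, guarded by rx_irq_lock only */

    static pthread_mutex_t tx_irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t rx_irq_lock = PTHREAD_MUTEX_INITIALIZER;

    /* TX path: read-modify-write of the TX mask never waits on the RX lock. */
    static void tx_irq_enable(uint32_t mask)
    {
            pthread_mutex_lock(&tx_irq_lock);
            qdma_int_mask |= mask;
            pthread_mutex_unlock(&tx_irq_lock);
    }

    /* RX path: likewise only takes the RX lock, so it is never delayed by TX. */
    static void rx_irq_disable(uint32_t mask)
    {
            pthread_mutex_lock(&rx_irq_lock);
            pdma_int_mask &= ~mask;
            pthread_mutex_unlock(&rx_irq_lock);
    }
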
From a2d5e7b4102deac784373464a8fd9f3eaa53afc0 Mon Sep 17 00:00:00 2001
From: John Crispin
Date: Mon, 19 Jun 2017 15:37:06 +0200
Subject: net-next: mediatek: set the rx_queue to 0

The get_rps_cpu() function will not do any RPS on the data flow when no
queue is setup and always use the current cpu where the IRQ was handled
to also handle the backlog. As we only have one physical queue we always
set this to 0 unconditionally.

Signed-off-by: John Crispin
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 462d1e83e254..adaaafc20532 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -992,6 +992,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		    RX_DMA_VID(trxd.rxd3))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       RX_DMA_VID(trxd.rxd3));
+		skb_record_rx_queue(skb, 0);
 		napi_gro_receive(napi, skb);
 
 		ring->data[idx] = new_data;
--
cgit v1.2.3