author     Florian Fainelli <f.fainelli@gmail.com>   2018-03-28 15:15:36 -0700
committer  David S. Miller <davem@davemloft.net>     2018-03-30 10:03:35 -0400
commit     fd41f2bfb71ba161309797de1ae1966810c19703 (patch)
tree       749801cd321c7800e96ffd88f9f92eb10fac3402 /drivers/net/ethernet/broadcom/bcmsysport.c
parent     9daae9bd47cff82a2a06aca23c458d6c79d09d52 (diff)
download   linux-fd41f2bfb71ba161309797de1ae1966810c19703.tar.bz2
net: systemport: Remove adaptive TX coalescing
Adaptive TX coalescing is not currently giving us any advantages and ends
up making the CPU spin more frequently until TX completion. Deny and
disable adaptive TX coalescing for now and rely on static configuration;
we can always add it back later.

Reviewed-by: Tal Gilboa <talgi@mellanox.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bcmsysport.c')
-rw-r--r--   drivers/net/ethernet/broadcom/bcmsysport.c   61
1 file changed, 8 insertions(+), 53 deletions(-)
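
For reference, the static TX coalescing that remains after this patch converts the requested microseconds into ring-timeout ticks of 8192 ns each, i.e. 1024 cycles of the 125 MHz (8 ns) reference clock mentioned in the code below; that is what the DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) expression kept in bcm_sysport_set_tx_coalesce() computes. The stand-alone sketch below mirrors that arithmetic; the helper name and the 50 us example are illustrative, not taken from the driver.

/* Illustrative sketch, not part of the driver: mirrors the usecs-to-ticks
 * conversion kept in bcm_sysport_set_tx_coalesce(). One ring-timeout tick
 * is 8192 ns, i.e. 1024 cycles of the 125 MHz (8 ns) reference clock.
 */
#include <stdio.h>

#define TICK_NS 8192U

/* Same rounding as the kernel's DIV_ROUND_UP() macro. */
static unsigned int usecs_to_ring_timeout(unsigned int usecs)
{
	return (usecs * 1000U + TICK_NS - 1U) / TICK_NS;
}

int main(void)
{
	/* Example: asking for 50 us of TX coalescing programs 7 ticks,
	 * which the hardware rounds up to roughly 57.3 us.
	 */
	unsigned int ticks = usecs_to_ring_timeout(50);

	printf("50 us -> %u ticks (~%.1f us)\n", ticks, ticks * TICK_NS / 1000.0);
	return 0;
}

The get path in the diff performs the reverse conversion, (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000, so what ethtool reads back is the tick-rounded value rather than the exact number of microseconds requested.
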
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4e26f606a7f2..1e52bb7d822e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
-#include <linux/net_dim.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -588,7 +587,8 @@ static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv)
rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}
-static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring)
+static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
+ struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = ring->priv;
u32 reg;
@@ -596,8 +596,8 @@ static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring)
reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
reg &= ~(RING_INTR_THRESH_MASK |
RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
- reg |= ring->dim.coal_pkts;
- reg |= DIV_ROUND_UP(ring->dim.coal_usecs * 1000, 8192) <<
+ reg |= ec->tx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
RING_TIMEOUT_SHIFT;
tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}
@@ -606,18 +606,12 @@ static int bcm_sysport_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct bcm_sysport_tx_ring *ring;
- unsigned int i;
u32 reg;
reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
- for (i = 0; i < dev->num_tx_queues; i++) {
- ring = &priv->tx_rings[i];
- ec->use_adaptive_tx_coalesce |= ring->dim.use_dim;
- }
reg = rdma_readl(priv, RDMA_MBDONE_INTR);
@@ -632,7 +626,6 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct bcm_sysport_tx_ring *ring;
unsigned int i;
/* Base system clock is 125Mhz, DMA timeout is this reference clock
@@ -646,20 +639,12 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
return -EINVAL;
if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
+ ec->use_adaptive_tx_coalesce)
return -EINVAL;
- for (i = 0; i < dev->num_tx_queues; i++) {
- ring = &priv->tx_rings[i];
- ring->dim.coal_pkts = ec->tx_max_coalesced_frames;
- ring->dim.coal_usecs = ec->tx_coalesce_usecs;
- if (!ec->use_adaptive_tx_coalesce && ring->dim.use_dim) {
- ring->dim.coal_pkts = 1;
- ring->dim.coal_usecs = 0;
- }
- ring->dim.use_dim = ec->use_adaptive_tx_coalesce;
- bcm_sysport_set_tx_coalesce(ring);
- }
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
priv->dim.coal_usecs = ec->rx_coalesce_usecs;
priv->dim.coal_pkts = ec->rx_max_coalesced_frames;
@@ -940,8 +925,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->packets += pkts_compl;
ring->bytes += bytes_compl;
u64_stats_update_end(&priv->syncp);
- ring->dim.packets = pkts_compl;
- ring->dim.bytes = bytes_compl;
ring->c_index = c_index;
@@ -987,7 +970,6 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
container_of(napi, struct bcm_sysport_tx_ring, napi);
- struct net_dim_sample dim_sample;
unsigned int work_done = 0;
work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
@@ -1004,12 +986,6 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
return 0;
}
- if (ring->dim.use_dim) {
- net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
- ring->dim.bytes, &dim_sample);
- net_dim(&ring->dim.dim, dim_sample);
- }
-
return budget;
}
@@ -1089,23 +1065,6 @@ static void bcm_sysport_dim_work(struct work_struct *work)
dim->state = NET_DIM_START_MEASURE;
}
-static void bcm_sysport_dim_tx_work(struct work_struct *work)
-{
- struct net_dim *dim = container_of(work, struct net_dim, work);
- struct bcm_sysport_net_dim *ndim =
- container_of(dim, struct bcm_sysport_net_dim, dim);
- struct bcm_sysport_tx_ring *ring =
- container_of(ndim, struct bcm_sysport_tx_ring, dim);
- struct net_dim_cq_moder cur_profile =
- net_dim_get_profile(dim->mode, dim->profile_ix);
-
- ring->dim.coal_usecs = cur_profile.usec;
- ring->dim.coal_pkts = cur_profile.pkts;
-
- bcm_sysport_set_tx_coalesce(ring);
- dim->state = NET_DIM_START_MEASURE;
-}
-
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
@@ -1152,7 +1111,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
continue;
txr = &priv->tx_rings[ring];
- txr->dim.event_ctr++;
if (likely(napi_schedule_prep(&txr->napi))) {
intrl2_0_mask_set(priv, ring_bit);
@@ -1185,7 +1143,6 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
continue;
txr = &priv->tx_rings[ring];
- txr->dim.event_ctr++;
if (likely(napi_schedule_prep(&txr->napi))) {
intrl2_1_mask_set(priv, BIT(ring));
@@ -1551,7 +1508,6 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
reg |= (1 << index);
tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
- bcm_sysport_init_dim(&ring->dim, bcm_sysport_dim_tx_work);
napi_enable(&ring->napi);
netif_dbg(priv, hw, priv->netdev,
@@ -1582,7 +1538,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
return;
napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
netif_napi_del(&ring->napi);
bcm_sysport_tx_clean(priv, ring);
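
With the DIM plumbing gone, the only way to request adaptive TX coalescing is through the ethtool coalesce op, and the new check in bcm_sysport_set_coalesce() rejects it. The following minimal user-space sketch shows what that rejection is expected to look like through the SIOCETHTOOL ioctl; it is not from the patch, and the interface name "eth0" is an assumption for illustration.

/* User-space sketch (assumption: the SYSTEMPORT interface is "eth0").
 * Reads the current coalesce settings, then asks for adaptive-tx, which
 * this driver is now expected to refuse with EINVAL.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	/* Fetch the current settings first so the other fields stay valid. */
	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		close(fd);
		return 1;
	}

	/* Requesting adaptive TX coalescing should now be rejected. */
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.use_adaptive_tx_coalesce = 1;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret < 0 && errno == EINVAL)
		printf("adaptive-tx refused as expected (EINVAL)\n");
	else if (ret == 0)
		printf("adaptive-tx accepted (driver without this patch?)\n");
	else
		perror("ETHTOOL_SCOALESCE");

	close(fd);
	return 0;
}

The same behaviour is reachable via the ethtool utility's -C option; only static tx-usecs/tx-frames values are honoured after this change.
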