author | Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se> | 2018-02-16 17:10:08 +0100
committer | David S. Miller <davem@davemloft.net> | 2018-02-16 16:34:50 -0500
commit | 75efa06f457bbed3931bf693b7137cf4da3b5c80 (patch)
tree | fd58bf23816832d12ad64d4927a2fb2d7fae6253
parent | a73d65b5cd1c766fc64db59d18255923d01e92db (diff)
download | linux-75efa06f457bbed3931bf693b7137cf4da3b5c80.tar.bz2
ravb: add support for changing MTU
Allow for changing the MTU within the limit of the maximum size of a
descriptor (2048 bytes). Add the callback to change MTU from user-space
and take the configurable MTU into account when configuring the
hardware.
Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
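For reference, the 2048-byte descriptor limit maps onto the MTU range the patch registers via ndev->min_mtu/max_mtu. The short sketch below works the numbers out in user-space C; it is not part of the patch, and the RAVB_RX_DESC_MAX name plus the hard-coded header sizes (the usual <linux/if_ether.h> values) are assumptions made here purely for illustration.

```c
/* Sketch only: works out the MTU range this patch advertises through
 * ndev->min_mtu/max_mtu.  RAVB_RX_DESC_MAX is a hypothetical name for
 * the 2048-byte descriptor limit; the other constants mirror the usual
 * <linux/if_ether.h> values.
 */
#include <stdio.h>

#define RAVB_RX_DESC_MAX 2048	/* max payload one RX descriptor can hold */
#define ETH_HLEN         14	/* Ethernet header */
#define VLAN_HLEN        4	/* optional 802.1Q tag */
#define ETH_FCS_LEN      4	/* frame check sequence */
#define ETH_MIN_MTU      68	/* minimum IPv4 MTU */

int main(void)
{
	int max_mtu = RAVB_RX_DESC_MAX - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);

	/* 2048 - (14 + 4 + 4) = 2026 bytes */
	printf("valid MTU range: %d..%d\n", ETH_MIN_MTU, max_mtu);
	return 0;
}
```

With these values the advertised range is 68..2026 bytes, so something like `ip link set dev <iface> mtu 2026` is the largest setting the networking core will accept for the interface.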
-rw-r--r-- | drivers/net/ethernet/renesas/ravb.h | 1
-rw-r--r-- | drivers/net/ethernet/renesas/ravb_main.c | 34
2 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 96a27b00c90e..b81f4faf7b10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1018,6 +1018,7 @@ struct ravb_private {
 	u32 dirty_rx[NUM_RX_QUEUE];	/* Producer ring indices */
 	u32 cur_tx[NUM_TX_QUEUE];
 	u32 dirty_tx[NUM_TX_QUEUE];
+	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	struct napi_struct napi[NUM_RX_QUEUE];
 	struct work_struct work;
 	/* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca4437..34e841306e04 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -238,7 +238,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 					       le32_to_cpu(desc->dptr)))
 				dma_unmap_single(ndev->dev.parent,
 						 le32_to_cpu(desc->dptr),
-						 PKT_BUF_SZ,
+						 priv->rx_buf_sz,
 						 DMA_FROM_DEVICE);
 		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -300,9 +300,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
-		rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+		rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
 		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-					  PKT_BUF_SZ,
+					  priv->rx_buf_sz,
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
@@ -346,6 +346,10 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	int ring_size;
 	int i;
 
+	/* +16 gets room from the status from the card. */
+	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
+		ETH_HLEN + VLAN_HLEN;
+
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
 				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -355,7 +359,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		goto error;
 
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
 		if (!skb)
 			goto error;
 		ravb_set_buffer_align(skb);
@@ -586,7 +590,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 PKT_BUF_SZ,
+					 priv->rx_buf_sz,
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
 					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -619,11 +623,12 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
 		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
 		desc = &priv->rx_ring[q][entry];
-		desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+		desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
 
 		if (!priv->rx_skb[q][entry]) {
 			skb = netdev_alloc_skb(ndev,
-					       PKT_BUF_SZ + RAVB_ALIGN - 1);
+					       priv->rx_buf_sz +
+					       RAVB_ALIGN - 1);
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
@@ -1854,6 +1859,17 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 	return phy_mii_ioctl(phydev, req, cmd);
 }
 
+static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	ndev->mtu = new_mtu;
+	netdev_update_features(ndev);
+
+	return 0;
+}
+
 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -1895,6 +1911,7 @@ static const struct net_device_ops ravb_netdev_ops = {
 	.ndo_set_rx_mode	= ravb_set_rx_mode,
 	.ndo_tx_timeout		= ravb_tx_timeout,
 	.ndo_do_ioctl		= ravb_do_ioctl,
+	.ndo_change_mtu		= ravb_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_set_features	= ravb_set_features,
@@ -2117,6 +2134,9 @@ static int ravb_probe(struct platform_device *pdev)
 		goto out_release;
 	}
 
+	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+	ndev->min_mtu = ETH_MIN_MTU;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
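Beyond the new callback, the key behavioural change is that the RX buffer size now tracks the configured MTU instead of the fixed PKT_BUF_SZ. The stand-alone sketch below mirrors the sizing rule ravb_ring_init() applies after this patch; it is illustrative only, and the PKT_BUF_SZ value of 1538 is assumed from ravb.h rather than shown in the diff above.

```c
/* Sketch only: mirrors the rx_buf_sz rule ravb_ring_init() applies after
 * this patch.  PKT_BUF_SZ = 1538 is assumed from ravb.h; it is not part
 * of the diff above.
 */
#include <stdio.h>

#define PKT_BUF_SZ 1538	/* assumed legacy fixed RX buffer size from ravb.h */
#define ETH_HLEN   14
#define VLAN_HLEN  4

static int ravb_rx_buf_sz(int mtu)
{
	/* MTUs up to 1492 keep the legacy PKT_BUF_SZ base; larger MTUs use
	 * the MTU itself as the base, plus header and VLAN-tag room.
	 */
	return (mtu <= 1492 ? PKT_BUF_SZ : mtu) + ETH_HLEN + VLAN_HLEN;
}

int main(void)
{
	int mtus[] = { 1492, 1500, 2026 };

	for (size_t i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("mtu %4d -> rx_buf_sz %d\n",
		       mtus[i], ravb_rx_buf_sz(mtus[i]));
	return 0;
}
```

Because ravb_change_mtu() returns -EBUSY while the interface is running, the new size only takes effect once the rings are rebuilt on the next open: bring the interface down, change the MTU, then bring it back up.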