author     David S. Miller <davem@davemloft.net>    2017-12-09 22:09:55 -0500
committer  David S. Miller <davem@davemloft.net>    2017-12-09 22:09:55 -0500
commit     51e18a453f5f59a40c721d4aeab082b4e2e9fac6 (patch)
tree       540d37e6f7a83466677ab6095fcdd2d6f72583c4 /drivers/net/ethernet/renesas/sh_eth.c
parent     5e54b3c1202765ae62de24a160f1eaf6b0ebf9d4 (diff)
parent     f335195adf043168ee69d78ea72ac3e30f0c57ce (diff)
download   linux-51e18a453f5f59a40c721d4aeab082b4e2e9fac6.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflict was two parallel additions of include files to sch_generic.c, no biggie.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/renesas/sh_eth.c')
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7e060aa9fbed..db72d13cebb9 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1149,7 +1149,8 @@ static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
entry, le32_to_cpu(txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+ dma_unmap_single(&mdp->pdev->dev,
+ le32_to_cpu(txdesc->addr),
le32_to_cpu(txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
@@ -1179,14 +1180,14 @@ static void sh_eth_ring_free(struct net_device *ndev)
if (mdp->rx_skbuff[i]) {
struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(&mdp->pdev->dev,
le32_to_cpu(rxdesc->addr),
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
}
}
ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
mdp->rx_desc_dma);
mdp->rx_ring = NULL;
}
@@ -1203,7 +1204,7 @@ static void sh_eth_ring_free(struct net_device *ndev)
sh_eth_tx_free(ndev, false);
ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
mdp->tx_desc_dma);
mdp->tx_ring = NULL;
}
@@ -1245,9 +1246,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
@@ -1323,8 +1324,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
- GFP_KERNEL);
+ mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
+ &mdp->rx_desc_dma, GFP_KERNEL);
if (!mdp->rx_ring)
goto ring_free;
@@ -1332,8 +1333,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
- mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
- GFP_KERNEL);
+ mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
+ &mdp->tx_desc_dma, GFP_KERNEL);
if (!mdp->tx_ring)
goto ring_free;
return 0;
@@ -1527,7 +1528,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, dma_addr,
+ dma_unmap_single(&mdp->pdev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
@@ -1555,9 +1556,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
buf_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
@@ -2441,9 +2442,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* soft swap. */
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
- dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
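
Note on the pattern the hunks above apply: every DMA API call (dma_alloc_coherent(), dma_free_coherent(), dma_map_single(), dma_unmap_single(), dma_mapping_error()) is switched from &ndev->dev, or NULL, to &mdp->pdev->dev, i.e. the platform device that actually carries the driver's DMA configuration. The sketch below illustrates that pattern in isolation; it is a minimal, hypothetical example (my_priv, my_ring_init() and my_map_tx_skb() are invented names), not code taken from sh_eth.c.

/*
 * Minimal sketch: hand the DMA API the struct device that owns the DMA
 * configuration (the platform device behind the net_device), never
 * &ndev->dev and never NULL.  All names below are illustrative only.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

struct my_priv {
        struct platform_device *pdev;   /* saved at probe time */
        void *rx_ring;
        dma_addr_t rx_desc_dma;
};

static int my_ring_init(struct net_device *ndev, size_t ringsize)
{
        struct my_priv *mp = netdev_priv(ndev);
        struct device *dma_dev = &mp->pdev->dev;        /* not &ndev->dev */

        /* Coherent descriptor ring allocated against the DMA-capable device. */
        mp->rx_ring = dma_alloc_coherent(dma_dev, ringsize,
                                         &mp->rx_desc_dma, GFP_KERNEL);
        if (!mp->rx_ring)
                return -ENOMEM;
        return 0;
}

static int my_map_tx_skb(struct net_device *ndev, struct sk_buff *skb,
                         dma_addr_t *addr)
{
        struct my_priv *mp = netdev_priv(ndev);
        struct device *dma_dev = &mp->pdev->dev;

        /* Streaming mapping and its error check must use the same device. */
        *addr = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, *addr)) {
                kfree_skb(skb);
                return -ENOMEM;
        }
        return 0;
}

The matching teardown calls (dma_unmap_single(), dma_free_coherent()) must then be issued against the same &pdev->dev, which is exactly what the Rx/Tx ring-free and unmap hunks in this diff change.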