author     Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 14:43:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-10-15 14:43:29 -0700
commit     5a32c3413d3340f90c82c84b375ad4b335a59f28 (patch)
tree       4166f2ff0fca170bdd9bab6e56cac99baac37ace /drivers/net/ethernet
parent     f065199d4df0b1512f935621d2de128ddb3fcc3a (diff)
parent     2a410d09417b5344ab1f3cf001ac73a1daf8dcce (diff)
download   linux-5a32c3413d3340f90c82c84b375ad4b335a59f28.tar.bz2
Merge tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - rework the non-coherent DMA allocator
 - move private definitions out of <linux/dma-mapping.h>
 - lower CMA_ALIGNMENT (Paul Cercueil)
 - remove the omap1 dma address translation in favor of the common code
 - make dma-direct aware of multiple dma offset ranges (Jim Quinlan)
 - support per-node DMA CMA areas (Barry Song)
 - increase the default seg boundary limit (Nicolin Chen)
 - misc fixes (Robin Murphy, Thomas Tai, Xu Wang)
 - various cleanups

* tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping: (63 commits)
  ARM/ixp4xx: add a missing include of dma-map-ops.h
  dma-direct: simplify the DMA_ATTR_NO_KERNEL_MAPPING handling
  dma-direct: factor out a dma_direct_alloc_from_pool helper
  dma-direct check for highmem pages in dma_direct_alloc_pages
  dma-mapping: merge <linux/dma-noncoherent.h> into <linux/dma-map-ops.h>
  dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma
  dma-mapping: move dma-debug.h to kernel/dma/
  dma-mapping: remove <asm/dma-contiguous.h>
  dma-mapping: merge <linux/dma-contiguous.h> into <linux/dma-map-ops.h>
  dma-contiguous: remove dma_contiguous_set_default
  dma-contiguous: remove dev_set_cma_area
  dma-contiguous: remove dma_declare_contiguous
  dma-mapping: split <linux/dma-mapping.h>
  cma: decrease CMA_ALIGNMENT lower limit to 2
  firewire-ohci: use dma_alloc_pages
  dma-iommu: implement ->alloc_noncoherent
  dma-mapping: add new {alloc,free}_noncoherent dma_map_ops methods
  dma-mapping: add a new dma_alloc_pages API
  dma-mapping: remove dma_cache_sync
  53c700: convert to dma_alloc_noncoherent
  ...
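Editor's note: for the ethernet drivers in this diff, the net effect of the rework is that dma_alloc_attrs(..., DMA_ATTR_NON_CONSISTENT) becomes either plain dma_alloc_coherent() (au1000_eth, sni_82596, where the hardware snoops) or dma_alloc_noncoherent() with an explicit DMA direction (lasi_82596/lib82596, sgiseeq). A minimal sketch of the new allocate/free pattern follows; "my_ring"/"my_priv" are placeholder names for illustration, not code from any of the patched files.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Placeholder descriptor ring shared between CPU and device. */
struct my_ring {
        u32 desc[256];
};

struct my_priv {
        struct my_ring *ring;   /* kernel virtual address */
        dma_addr_t ring_dma;    /* device (bus) address */
};

static int my_alloc_ring(struct device *dev, struct my_priv *priv)
{
        /*
         * Previously:
         *   dma_alloc_attrs(dev, sizeof(*priv->ring), &priv->ring_dma,
         *                   GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
         * The attribute is gone; the DMA direction is now part of the call.
         */
        priv->ring = dma_alloc_noncoherent(dev, sizeof(*priv->ring),
                                           &priv->ring_dma,
                                           DMA_BIDIRECTIONAL, GFP_KERNEL);
        if (!priv->ring)
                return -ENOMEM;
        return 0;
}

static void my_free_ring(struct device *dev, struct my_priv *priv)
{
        dma_free_noncoherent(dev, sizeof(*priv->ring), priv->ring,
                             priv->ring_dma, DMA_BIDIRECTIONAL);
}

On coherent-capable platforms the patches below instead switch to dma_alloc_coherent()/dma_free_coherent() and drop the cache maintenance entirely.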
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c    |  15
-rw-r--r--  drivers/net/ethernet/i825xx/lasi_82596.c |  37
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c   | 148
-rw-r--r--  drivers/net/ethernet/i825xx/sni_82596.c  |  23
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c      |  28
5 files changed, 137 insertions(+), 114 deletions(-)
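Editor's note: the explicit ownership transfers that replace dma_cache_sync() in lib82596.c and sgiseeq.c follow the shape sketched here; "demo_*" names are hypothetical, and the real drivers compute the bus address with their own virt_to_dma()/VIRT_TO_DMA helpers.

#include <linux/types.h>
#include <linux/dma-mapping.h>

struct demo_desc {
        __le32 status;
        __le32 addr;
};

struct demo_ring {
        struct demo_desc desc[16];
};

/* Bus address of one descriptor inside the noncoherent allocation. */
static dma_addr_t demo_desc_dma(struct demo_ring *ring, dma_addr_t ring_dma,
                                struct demo_desc *desc)
{
        return ring_dma + ((unsigned long)desc - (unsigned long)ring);
}

/* Hand a descriptor to the device after the CPU has written it. */
static void demo_desc_to_device(struct device *dev, struct demo_ring *ring,
                                dma_addr_t ring_dma, struct demo_desc *desc)
{
        dma_sync_single_for_device(dev, demo_desc_dma(ring, ring_dma, desc),
                                   sizeof(*desc), DMA_BIDIRECTIONAL);
}

/* Reclaim a descriptor for the CPU before reading device updates. */
static void demo_desc_to_cpu(struct device *dev, struct demo_ring *ring,
                             dma_addr_t ring_dma, struct demo_desc *desc)
{
        dma_sync_single_for_cpu(dev, demo_desc_dma(ring, ring_dma, desc),
                                sizeof(*desc), DMA_BIDIRECTIONAL);
}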
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 75dbd221dc59..19e195420e24 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,10 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
+ aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0,
- DMA_ATTR_NON_CONSISTENT);
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1310,9 +1309,8 @@ err_remap3:
err_remap2:
iounmap(aup->mac);
err_remap1:
- dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1344,9 +1342,8 @@ static int au1000_remove(struct platform_device *pdev)
if (aup->tx_db_inuse[i])
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
- dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr,
- DMA_ATTR_NON_CONSISTENT);
+ dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index aec7e98bcc85..96c6f4f36904 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -96,23 +96,14 @@
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
-#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
-
-#define DMA_WBACK(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
-
-#define DMA_INV(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
-
-#define DMA_WBACK_INV(ndev, addr, len) \
- do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
-
#define SYSBUS 0x0000006c
/* big endian CPU, 82596 "big" endian mode */
#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define SWAP16(x) (x)
+#define NONCOHERENT_DMA 1
+
#include "lib82596.c"
MODULE_AUTHOR("Richard Hirst");
@@ -155,7 +146,7 @@ lan_init_chip(struct parisc_device *dev)
{
struct net_device *netdevice;
struct i596_private *lp;
- int retval;
+ int retval = -ENOMEM;
int i;
if (!dev->irq) {
@@ -186,12 +177,22 @@ lan_init_chip(struct parisc_device *dev)
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
+ lp->dma = dma_alloc_noncoherent(&dev->dev,
+ sizeof(struct i596_dma), &lp->dma_addr,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!lp->dma)
+ goto out_free_netdev;
retval = i82596_probe(netdevice);
- if (retval) {
- free_netdev(netdevice);
- return -ENODEV;
- }
+ if (retval)
+ goto out_free_dma;
+ return 0;
+
+out_free_dma:
+ dma_free_noncoherent(&dev->dev, sizeof(struct i596_dma),
+ lp->dma, lp->dma_addr, DMA_BIDIRECTIONAL);
+out_free_netdev:
+ free_netdev(netdevice);
return retval;
}
@@ -201,8 +202,8 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
struct i596_private *lp = netdev_priv(dev);
unregister_netdev (dev);
- dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, LIB82596_DMA_ATTR);
+ dma_free_noncoherent(&pdev->dev, sizeof(struct i596_private), lp->dma,
+ lp->dma_addr, DMA_BIDIRECTIONAL);
free_netdev (dev);
return 0;
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index b03757e169e4..ca2fb303fcc6 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -365,13 +365,44 @@ static int max_cmd_backlog = TX_RING_SIZE-1;
static void i596_poll_controller(struct net_device *dev);
#endif
+static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
+{
+ return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
+}
+
+#ifdef NONCOHERENT_DMA
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_device(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+#else
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+}
+#endif /* NONCOHERENT_DMA */
static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
- DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
while (--delcnt && dma->iscp.stat) {
udelay(10);
- DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
}
if (!delcnt) {
printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
@@ -384,10 +415,10 @@ static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int d
static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
- DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
while (--delcnt && dma->scb.command) {
udelay(10);
- DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
}
if (!delcnt) {
printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -451,12 +482,9 @@ static void i596_display_data(struct net_device *dev)
SWAP32(rbd->b_data), SWAP16(rbd->size));
rbd = rbd->v_next;
} while (rbd != lp->rbd_head);
- DMA_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
}
-
-#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
-
static inline int init_rx_bufs(struct net_device *dev)
{
struct i596_private *lp = netdev_priv(dev);
@@ -508,7 +536,7 @@ static inline int init_rx_bufs(struct net_device *dev)
rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
return 0;
}
@@ -547,7 +575,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
lp->rbd_head = dma->rbds;
dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, dma, sizeof(struct i596_dma));
}
@@ -575,9 +603,9 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
- DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
- DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
+ dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
ca(dev);
@@ -596,24 +624,24 @@ static int init_i596_mem(struct net_device *dev)
rebuild_rx_bufs(dev);
dma->scb.command = 0;
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
DEB(DEB_INIT, printk(KERN_DEBUG
"%s: queuing CmdConfigure\n", dev->name));
memcpy(dma->cf_cmd.i596_config, init_setup, 14);
dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
- DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
+ dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
i596_add_cmd(dev, &dma->cf_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
- DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
+ dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
i596_add_cmd(dev, &dma->sa_cmd.cmd);
DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
- DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
+ dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
i596_add_cmd(dev, &dma->tdr_cmd.cmd);
spin_lock_irqsave (&lp->lock, flags);
@@ -625,7 +653,7 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
dma->scb.command = SWAP16(RX_START);
dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
@@ -659,13 +687,13 @@ static inline int i596_rx(struct net_device *dev)
rfd = lp->rfd_head; /* Ref next frame to check */
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
if (rfd->rbd == I596_NULL)
rbd = NULL;
else if (rfd->rbd == lp->rbd_head->b_addr) {
rbd = lp->rbd_head;
- DMA_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
} else {
printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
/* XXX Now what? */
@@ -713,7 +741,7 @@ static inline int i596_rx(struct net_device *dev)
DMA_FROM_DEVICE);
rbd->v_data = newskb->data;
rbd->b_data = SWAP32(dma_addr);
- DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
} else {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
}
@@ -765,7 +793,7 @@ memory_squeeze:
if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
rbd->count = 0;
lp->rbd_head = rbd->v_next;
- DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
+ dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
}
/* Tidy the frame descriptor, marking it as end of list */
@@ -779,14 +807,14 @@ memory_squeeze:
lp->dma->scb.rfd = rfd->b_next;
lp->rfd_head = rfd->v_next;
- DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));
/* Remove end-of-list from old end descriptor */
rfd->v_prev->cmd = SWAP16(CMD_FLEX);
- DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
+ dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
rfd = lp->rfd_head;
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
}
DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
@@ -827,12 +855,12 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
ptr->v_next = NULL;
ptr->b_next = I596_NULL;
}
- DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
}
wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
lp->dma->scb.cmd = I596_NULL;
- DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}
@@ -850,7 +878,7 @@ static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
/* FIXME: this command might cause an lpmc */
lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
- DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
ca(dev);
/* wait for shutdown */
@@ -878,20 +906,20 @@ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
cmd->v_next = NULL;
cmd->b_next = I596_NULL;
- DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
spin_lock_irqsave (&lp->lock, flags);
if (lp->cmd_head != NULL) {
lp->cmd_tail->v_next = cmd;
lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
- DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
} else {
lp->cmd_head = cmd;
wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
dma->scb.command = SWAP16(CUC_START);
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
}
lp->cmd_tail = cmd;
@@ -956,7 +984,7 @@ static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
/* Issue a channel attention signal */
DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
lp->dma->scb.command = SWAP16(CUC_START | RX_START);
- DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
ca (dev);
lp->last_restart = dev->stats.tx_packets;
}
@@ -1014,8 +1042,8 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
tbd->data = SWAP32(tx_cmd->dma_addr);
DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
- DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
- DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
+ dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
+ dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
i596_add_cmd(dev, &tx_cmd->cmd);
dev->stats.tx_packets++;
@@ -1047,9 +1075,8 @@ static const struct net_device_ops i596_netdev_ops = {
static int i82596_probe(struct net_device *dev)
{
- int i;
struct i596_private *lp = netdev_priv(dev);
- struct i596_dma *dma;
+ int ret;
/* This lot is ensure things have been cache line aligned. */
BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
@@ -1063,41 +1090,28 @@ static int i82596_probe(struct net_device *dev)
if (!dev->base_addr || !dev->irq)
return -ENODEV;
- dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
- &lp->dma_addr, GFP_KERNEL,
- LIB82596_DMA_ATTR);
- if (!dma) {
- printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
- return -ENOMEM;
- }
-
dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- memset(dma, 0, sizeof(struct i596_dma));
- lp->dma = dma;
-
- dma->scb.command = 0;
- dma->scb.cmd = I596_NULL;
- dma->scb.rfd = I596_NULL;
+ memset(lp->dma, 0, sizeof(struct i596_dma));
+ lp->dma->scb.command = 0;
+ lp->dma->scb.cmd = I596_NULL;
+ lp->dma->scb.rfd = I596_NULL;
spin_lock_init(&lp->lock);
- DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
+ dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
- i = register_netdev(dev);
- if (i) {
- dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
- dma, lp->dma_addr, LIB82596_DMA_ATTR);
- return i;
- }
+ ret = register_netdev(dev);
+ if (ret)
+ return ret;
DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
dev->name, dev->base_addr, dev->dev_addr,
dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
- dev->name, dma, (int)sizeof(struct i596_dma),
- &dma->scb));
+ dev->name, lp->dma, (int)sizeof(struct i596_dma),
+ &lp->dma->scb));
return 0;
}
@@ -1155,7 +1169,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
dev->name, status & 0x0700));
while (lp->cmd_head != NULL) {
- DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
+ dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
if (!(lp->cmd_head->status & SWAP16(STAT_C)))
break;
@@ -1237,7 +1251,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
}
ptr->v_next = NULL;
ptr->b_next = I596_NULL;
- DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
lp->last_cmd = jiffies;
}
@@ -1251,13 +1265,13 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
ptr->command &= SWAP16(0x1fff);
ptr = ptr->v_next;
- DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
+ dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
}
if (lp->cmd_head != NULL)
ack_cmd |= CUC_START;
dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
- DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
}
if ((status & 0x1000) || (status & 0x4000)) {
if ((status & 0x4000))
@@ -1282,7 +1296,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
}
wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
dma->scb.command = SWAP16(ack_cmd);
- DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
/* DANGER: I suspect that some kind of interrupt
acknowledgement aside from acking the 82596 might be needed
@@ -1313,7 +1327,7 @@ static int i596_close(struct net_device *dev)
wait_cmd(dev, lp->dma, 100, "close1 timed out");
lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
- DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
+ dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
ca(dev);
@@ -1372,7 +1386,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name);
else {
dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
- DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
+ dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
i596_add_cmd(dev, &dma->cf_cmd.cmd);
}
}
@@ -1404,7 +1418,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cp));
cp += ETH_ALEN;
}
- DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
+ dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
}
}
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 22f5887578b2..27937c5d7956 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -24,12 +24,6 @@
static const char sni_82596_string[] = "snirm_82596";
-#define LIB82596_DMA_ATTR 0
-
-#define DMA_WBACK(priv, addr, len) do { } while (0)
-#define DMA_INV(priv, addr, len) do { } while (0)
-#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
-
#define SYSBUS 0x00004400
/* big endian CPU, 82596 little endian */
@@ -134,10 +128,19 @@ static int sni_82596_probe(struct platform_device *dev)
lp->ca = ca_addr;
lp->mpu_port = mpu_addr;
+ lp->dma = dma_alloc_coherent(&dev->dev, sizeof(struct i596_dma),
+ &lp->dma_addr, GFP_KERNEL);
+ if (!lp->dma)
+ goto probe_failed;
+
retval = i82596_probe(netdevice);
- if (retval == 0)
- return 0;
+ if (retval)
+ goto probe_failed_free_dma;
+ return 0;
+probe_failed_free_dma:
+ dma_free_coherent(&dev->dev, sizeof(struct i596_dma), lp->dma,
+ lp->dma_addr);
probe_failed:
free_netdev(netdevice);
probe_failed_free_ca:
@@ -153,8 +156,8 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
struct i596_private *lp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
- lp->dma_addr, LIB82596_DMA_ATTR);
+ dma_free_coherent(&pdev->dev, sizeof(struct i596_private), lp->dma,
+ lp->dma_addr);
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 8507ff242014..37ff25a84030 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_TO_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
@@ -403,6 +407,8 @@ memory_squeeze:
rd = &sp->rx_desc[sp->rx_new];
dma_sync_desc_cpu(dev, rd);
}
+ dma_sync_desc_dev(dev, rd);
+
dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
@@ -443,6 +449,7 @@ static inline void kick_tx(struct net_device *dev,
dma_sync_desc_cpu(dev, td);
}
if (td->tdma.cntinfo & HPCDMA_XIU) {
+ dma_sync_desc_dev(dev, td);
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
}
@@ -476,6 +483,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
break;
if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
+ dma_sync_desc_dev(dev, td);
if (!(status & HPC3_ETXCTRL_ACTIVE)) {
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
@@ -740,8 +748,8 @@ static int sgiseeq_probe(struct platform_device *pdev)
sp = netdev_priv(dev);
/* Make private data page aligned */
- sr = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma,
- GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
+ sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
+ &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
if (!sr) {
printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
err = -ENOMEM;
@@ -802,8 +810,8 @@ static int sgiseeq_probe(struct platform_device *pdev)
return 0;
err_out_free_attrs:
- dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
- sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
free_netdev(dev);
@@ -817,8 +825,8 @@ static int sgiseeq_remove(struct platform_device *pdev)
struct sgiseeq_private *sp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
- sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTIONAL);
free_netdev(dev);
return 0;