Diffstat (limited to 'drivers/dma')
 -rw-r--r--  drivers/dma/dmaengine.c     | 17
 -rw-r--r--  drivers/dma/dmatest.c       |  4
 -rw-r--r--  drivers/dma/fsldma.c        | 71
 -rw-r--r--  drivers/dma/ioat_dma.c      | 47
 -rw-r--r--  drivers/dma/ipu/ipu_idmac.c |  7
 5 files changed, 92 insertions(+), 54 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 92438e9dacc3..5a87384ea4ff 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -804,11 +804,14 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK |
+		DMA_COMPL_SRC_UNMAP_SINGLE |
+		DMA_COMPL_DEST_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -850,11 +853,12 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -898,12 +902,13 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_addr_t dma_dest, dma_src;
 	dma_cookie_t cookie;
 	int cpu;
+	unsigned long flags;
 
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
-					 DMA_CTRL_ACK);
+	flags = DMA_CTRL_ACK;
+	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a27c0fb1bc11..fb7da5141e96 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -531,9 +531,7 @@ static int __init dmatest_init(void)
 		chan = dma_request_channel(mask, filter, NULL);
 		if (chan) {
 			err = dmatest_add_channel(chan);
-			if (err == 0)
-				continue;
-			else {
+			if (err) {
 				dma_release_channel(chan);
 				break; /* add_channel failed, punt */
 			}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index da8a8ed9e411..f18d1bde0439 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -179,9 +179,14 @@ static void dma_halt(struct fsl_dma_chan *fsl_chan)
 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
 			struct fsl_desc_sw *desc)
 {
+	u64 snoop_bits;
+
+	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+		? FSL_DMA_SNEN : 0;
+
 	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
-		64);
+		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+			| snoop_bits, 64);
 }
 
 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
@@ -313,8 +318,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+	struct fsl_desc_sw *desc;
 	unsigned long flags;
 	dma_cookie_t cookie;
 
@@ -322,14 +327,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	cookie = fsl_chan->common.cookie;
-	cookie++;
-	if (cookie < 0)
-		cookie = 1;
-	desc->async_tx.cookie = cookie;
-	fsl_chan->common.cookie = desc->async_tx.cookie;
+	list_for_each_entry(desc, &tx->tx_list, node) {
+		cookie++;
+		if (cookie < 0)
+			cookie = 1;
 
-	append_ld_queue(fsl_chan, desc);
-	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+		desc->async_tx.cookie = cookie;
+	}
+
+	fsl_chan->common.cookie = cookie;
+	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
+	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
 
 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 
@@ -454,8 +462,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 {
 	struct fsl_dma_chan *fsl_chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+	struct list_head *list;
 	size_t copy;
-	LIST_HEAD(link_chain);
 
 	if (!chan)
 		return NULL;
@@ -472,7 +480,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		if (!new) {
 			dev_err(fsl_chan->dev,
 					"No free memory for link descriptor\n");
-			return NULL;
+			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
@@ -507,7 +515,19 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
-	return first ? &first->async_tx : NULL;
+	return &first->async_tx;
+
+fail:
+	if (!first)
+		return NULL;
+
+	list = &first->async_tx.tx_list;
+	list_for_each_entry_safe_reverse(new, prev, list, node) {
+		list_del(&new->node);
+		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+	}
+
+	return NULL;
 }
 
 /**
@@ -598,15 +618,16 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 	dma_addr_t next_dest_addr;
 	unsigned long flags;
 
+	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
 	if (!dma_is_idle(fsl_chan))
-		return;
+		goto out_unlock;
 
 	dma_halt(fsl_chan);
 
 	/* If there are some link descriptors
 	 * not transfered in queue. We need to start it.
 	 */
-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
 
 	/* Find the first un-transfer desciptor */
 	for (ld_node = fsl_chan->ld_queue.next;
@@ -617,19 +638,20 @@ static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
 				fsl_chan->common.cookie) == DMA_SUCCESS);
 		ld_node = ld_node->next);
 
-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-
 	if (ld_node != &fsl_chan->ld_queue) {
 		/* Get the ld start address from ld_queue */
 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
-		dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n",
-			(void *)next_dest_addr);
+		dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+			(unsigned long long)next_dest_addr);
 		set_cdar(fsl_chan, next_dest_addr);
 		dma_start(fsl_chan);
 	} else {
 		set_cdar(fsl_chan, 0);
 		set_ndar(fsl_chan, 0);
 	}
+
+out_unlock:
+	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
 }
 
 /**
@@ -734,8 +756,9 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	 */
 	if (stat & FSL_DMA_SR_EOSI) {
 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
-			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
+		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+			(unsigned long long)get_cdar(fsl_chan),
+			(unsigned long long)get_ndar(fsl_chan));
 		stat &= ~FSL_DMA_SR_EOSI;
 		update_cookie = 1;
 	}
@@ -830,7 +853,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
-	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
 		dev_err(fdev->dev, "There is no %d channel!\n",
 				new_fsl_chan->id);
 		err = -EINVAL;
@@ -925,8 +948,8 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
 	}
 
 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
-			"controller at %p...\n",
-			match->compatible, (void *)fdev->reg.start);
+			"controller at 0x%llx...\n",
+			match->compatible, (unsigned long long)fdev->reg.start);
 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
 						- fdev->reg.start + 1);
 
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index e4fc33c1c32f..a600fc0f7962 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -173,7 +173,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
 	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
 
 #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL) == 0) {
+	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
 		device->common.chancnt--;
 	}
 #endif
@@ -1063,22 +1063,31 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
 static void ioat_dma_unmap(struct ioat_dma_chan *ioat_chan,
			   struct ioat_desc_sw *desc)
 {
-	/*
-	 * yes we are unmapping both _page and _single
-	 * alloc'd regions with unmap_page. Is this
-	 * *really* that bad?
-	 */
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, dst),
-				pci_unmap_len(desc, len),
-				PCI_DMA_FROMDEVICE);
-
-	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
-		pci_unmap_page(ioat_chan->device->pdev,
-				pci_unmap_addr(desc, src),
-				pci_unmap_len(desc, len),
-				PCI_DMA_TODEVICE);
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, dst),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_FROMDEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, dst),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_FROMDEVICE);
+	}
+
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			pci_unmap_single(ioat_chan->device->pdev,
+					 pci_unmap_addr(desc, src),
+					 pci_unmap_len(desc, len),
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(ioat_chan->device->pdev,
+				       pci_unmap_addr(desc, src),
+				       pci_unmap_len(desc, len),
+				       PCI_DMA_TODEVICE);
+	}
 }
 
 /**
@@ -1363,6 +1372,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 	int err = 0;
 	struct completion cmp;
 	unsigned long tmo;
+	unsigned long flags;
 
 	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
 	if (!src)
@@ -1392,8 +1402,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 			      DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
 			      DMA_FROM_DEVICE);
+	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, 0);
+						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
 		dev_err(&device->pdev->dev,
 			"Self-test prep failed, disabling\n");
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e202a6ce5573..9a5bc1a7389e 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1272,7 +1272,8 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
 	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
+		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
+		     !list_is_last(ichan->queue.next, &ichan->queue))) {
 		int i = 100;
 
 		/* This doesn't help. See comment in ipu_disable_channel() */
@@ -1547,7 +1548,7 @@ static irqreturn_t ic_sof_irq(int irq, void *dev_id)
 	struct idmac_channel *ichan = dev_id;
 	printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
 	       irq, ichan->dma_chan.chan_id);
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	return IRQ_HANDLED;
 }
 
@@ -1556,7 +1557,7 @@ static irqreturn_t ic_eof_irq(int irq, void *dev_id)
 	struct idmac_channel *ichan = dev_id;
 	printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
 	       irq, ichan->dma_chan.chan_id);
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	return IRQ_HANDLED;
 }
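
Usage note (not part of the patch): the dmaengine.c and ioat_dma.c hunks above add the DMA_COMPL_SRC_UNMAP_SINGLE and DMA_COMPL_DEST_UNMAP_SINGLE flags so that completion-time unmapping in the driver (see ioat_dma_unmap()) matches how the client mapped the buffers. The sketch below shows how a client that maps with dma_map_single() would pass these flags; it mirrors dma_async_memcpy_buf_to_buf() as changed in this diff, and the function name example_memcpy_single is made up for illustration.

/*
 * Minimal sketch, assuming a dmaengine channel with memcpy capability.
 * Both buffers are mapped with dma_map_single(), so both *_UNMAP_SINGLE
 * flags are set; the driver's cleanup path then uses the matching
 * unmap_single call instead of unmap_page.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static dma_cookie_t example_memcpy_single(struct dma_chan *chan,
					  void *dest, void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_src, dma_dest;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);

	/* both sides were mapped with dma_map_single(), say so */
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;

	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
	if (!tx) {
		/* prep failed: undo our own mappings, as dmaengine.c does */
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	return tx->tx_submit(tx);
}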