author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 09:24:48 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-26 09:24:48 -0800
commit     5115f3c19d17851aaff5a857f55b4a019c908775 (patch)
tree       0d02cf01e12e86365f4f5e3b234f986daef181a7 /drivers/dma/ioat
parent     c41b3810c09e60664433548c5218cc6ece6a8903 (diff)
parent     17166a3b6e88b93189e6be5f7e1335a3cc4fa965 (diff)
download   linux-5115f3c19d17851aaff5a857f55b4a019c908775.tar.bz2
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
"This is fairly big pull by my standards as I had missed last merge
window. So we have the support for device tree for slave-dmaengine,
large updates to dw_dmac driver from Andy for reusing on different
architectures. Along with this we have fixes on bunch of the drivers"
Fix up trivial conflicts, usually due to #include line movement next to
each other.
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (111 commits)
Revert "ARM: SPEAr13xx: Pass DW DMAC platform data from DT"
ARM: dts: pl330: Add #dma-cells for generic dma binding support
DMA: PL330: Register the DMA controller with the generic DMA helpers
DMA: PL330: Add xlate function
DMA: PL330: Add new pl330 filter for DT case.
dma: tegra20-apb-dma: remove unnecessary assignment
edma: do not waste memory for dma_mask
dma: coh901318: set residue only if dma is in progress
dma: coh901318: avoid unbalanced locking
dmaengine.h: remove redundant else keyword
dma: of-dma: protect list write operation by spin_lock
dmaengine: ste_dma40: do not remove descriptors for cyclic transfers
dma: of-dma.c: fix memory leakage
dw_dmac: apply default dma_mask if needed
dmaengine: ioat - fix spare sparse complain
dmaengine: move drivers/of/dma.c -> drivers/dma/of-dma.c
ioatdma: fix race between updating ioat->head and IOAT_COMPLETION_PENDING
dw_dmac: add support for Lynxpoint DMA controllers
dw_dmac: return proper residue value
dw_dmac: fill individual length of descriptor
...
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--  drivers/dma/ioat/dma.c    |  11
-rw-r--r--  drivers/dma/ioat/dma.h    |   1
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 113
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 216
-rw-r--r--  drivers/dma/ioat/hw.h     |  11
-rw-r--r--  drivers/dma/ioat/pci.c    |  11
6 files changed, 227 insertions, 136 deletions
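
The dma.c and dma_v3.c hunks below rework the ioat self-tests so the async_tx layer no longer unmaps the test buffers (DMA_COMPL_SKIP_SRC_UNMAP / DMA_COMPL_SKIP_DEST_UNMAP) and every failure path funnels through an explicit unmap label instead of leaking the mappings. A minimal sketch of that error-handling pattern, assuming a hypothetical run_copy_test() helper in place of the real prep/submit/wait sequence:

/* Sketch only -- not the driver code.  Map, run the test, and release the
 * mappings on every exit path via a single unmap label.  run_copy_test()
 * is a made-up stand-in for the prep/submit/poll steps shown in the diff.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/string.h>

static int run_copy_test(struct device *dev, dma_addr_t dst,
			 dma_addr_t src, size_t len);	/* hypothetical */

static int example_self_test(struct device *dev, void *src, void *dest, size_t len)
{
	dma_addr_t dma_src, dma_dest;
	int err = 0;

	dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, len, DMA_FROM_DEVICE);

	if (run_copy_test(dev, dma_dest, dma_src, len) < 0) {
		err = -ENODEV;
		goto unmap_dma;		/* mappings still released below */
	}

	if (memcmp(src, dest, len))
		err = -ENODEV;

unmap_dma:
	dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_dest, len, DMA_FROM_DEVICE);
	return err;
}
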
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a68a8ba87e6..1879a5942bfc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -833,14 +833,14 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
 		DMA_PREP_INTERRUPT;
 	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
 						   IOAT_TEST_SIZE, flags);
 	if (!tx) {
 		dev_err(dev, "Self-test prep failed, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 
 	async_tx_ack(tx);
@@ -851,7 +851,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test setup failed, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -862,7 +862,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 	    != DMA_SUCCESS) {
 		dev_err(dev, "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
 		dev_err(dev, "Self-test copy failed compare, disabling\n");
@@ -870,6 +870,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 		goto free_resources;
 	}
 
+unmap_dma:
+	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 087935f1565f..53a4cbb78f47 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
 	#define IOAT_KOBJ_INIT_FAIL 3
 	#define IOAT_RESHAPE_PENDING 4
 	#define IOAT_RUN 5
+	#define IOAT_CHAN_ACTIVE 6
 	struct timer_list timer;
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 82d4e306c32e..b925e1b1d139 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete)) {
-			__cleanup(ioat, phys_complete);
-		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat2_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat2_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 
 	/* make descriptor updates visible before advancing ioat->head,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 3e9d66920eb3..e8336cce360b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat3_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+static void ioat3_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat3_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static enum dma_status
@@ -863,6 +878,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	unsigned long tmo;
 	struct device *dev = &device->pdev->dev;
 	struct dma_device *dma = &device->common;
+	u8 op = 0;
 
 	dev_dbg(dev, "%s\n", __func__);
 
@@ -908,18 +924,22 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 
 	/* test xor */
+	op = IOAT_OP_XOR;
+
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT);
+				      DMA_PREP_INTERRUPT |
+				      DMA_COMPL_SKIP_SRC_UNMAP |
+				      DMA_COMPL_SKIP_DEST_UNMAP);
 
 	if (!tx) {
 		dev_err(dev, "Self-test xor prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -930,7 +950,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test xor setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -939,9 +959,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test xor timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
 	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
 		u32 *ptr = page_address(dest);
@@ -957,6 +981,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
 
+	op = IOAT_OP_XOR_VAL;
+
 	/* validate the sources with the destintation page */
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		xor_val_srcs[i] = xor_srcs[i];
@@ -969,11 +995,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 					   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
+					  &xor_val_result, DMA_PREP_INTERRUPT |
+					  DMA_COMPL_SKIP_SRC_UNMAP |
+					  DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test zero prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -984,7 +1012,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test zero setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -993,9 +1021,12 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test validate timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
 	if (xor_val_result != 0) {
 		dev_err(dev, "Self-test validate failed compare\n");
 		err = -ENODEV;
@@ -1007,14 +1038,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto free_resources;
 
 	/* test memset */
+	op = IOAT_OP_FILL;
+
 	dma_addr = dma_map_page(dev, dest, 0,
 			PAGE_SIZE, DMA_FROM_DEVICE);
 	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
-					 DMA_PREP_INTERRUPT);
+					 DMA_PREP_INTERRUPT |
+					 DMA_COMPL_SKIP_SRC_UNMAP |
+					 DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test memset prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -1025,7 +1060,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test memset setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -1034,9 +1069,11 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test memset timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
 	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
 		u32 *ptr = page_address(dest);
 		if (ptr[i]) {
@@ -1047,17 +1084,21 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 
 	/* test for non-zero parity sum */
+	op = IOAT_OP_XOR_VAL;
+
 	xor_val_result = 0;
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
+					  &xor_val_result, DMA_PREP_INTERRUPT |
+					  DMA_COMPL_SKIP_SRC_UNMAP |
+					  DMA_COMPL_SKIP_DEST_UNMAP);
 	if (!tx) {
 		dev_err(dev, "Self-test 2nd zero prep failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	async_tx_ack(tx);
@@ -1068,7 +1109,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (cookie < 0) {
 		dev_err(dev, "Self-test 2nd zero setup failed\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 	dma->device_issue_pending(dma_chan);
 
@@ -1077,15 +1118,31 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_err(dev, "Self-test 2nd validate timed out\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
 	if (xor_val_result != SUM_CHECK_P_RESULT) {
 		dev_err(dev, "Self-test validate failed compare\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto dma_unmap;
 	}
 
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	goto free_resources;
+dma_unmap:
+	if (op == IOAT_OP_XOR) {
+		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+				       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_XOR_VAL) {
+		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+				       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_FILL)
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
 out:
@@ -1126,12 +1183,7 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	/* -= IOAT ver.3 workarounds =- */
-	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
-	 * that can cause stability issues for IOAT ver.3, and clear any
-	 * pending errors
-	 */
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+	/* clear any pending errors */
 	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
 	if (err) {
 		dev_err(&pdev->dev, "channel error register unreachable\n");
@@ -1187,6 +1239,26 @@ static bool is_snb_ioat(struct pci_dev *pdev)
 	}
 }
 
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1207,7 +1279,7 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
 		dma->copy_align = 6;
 
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index d2ff3fda0b18..7cb74c62c719 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -35,6 +35,17 @@
 #define IOAT_VER_3_0	0x30	/* Version 3.0 */
 #define IOAT_VER_3_2	0x32	/* Version 3.2 */
 
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB3	0x0e23
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB4	0x0e24
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB5	0x0e25
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB6	0x0e26
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB7	0x0e27
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB8	0x0e2e
+#define PCI_DEVICE_ID_INTEL_IOAT_IVB9	0x0e2f
+
 int system_has_dca_enabled(struct pci_dev *pdev);
 
 struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 4f686c527ab6..71c7ecd80fac 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -40,17 +40,6 @@ MODULE_VERSION(IOAT_DMA_VERSION);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel Corporation");
 
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB3	0x0e23
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB4	0x0e24
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB5	0x0e25
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB6	0x0e26
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB7	0x0e27
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB8	0x0e2e
-#define PCI_DEVICE_ID_INTEL_IOAT_IVB9	0x0e2f
-
 static struct pci_device_id ioat_pci_tbl[] = {
 	/* I/OAT v1 platforms */
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
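
For context on the DMA_COMPL_SKIP_SRC_UNMAP / DMA_COMPL_SKIP_DEST_UNMAP flags used throughout the hunks above: in the dmaengine API of this era, a client that manages its own mappings passes those flags at prep time and unmaps once the transfer completes. A condensed, illustrative sketch (channel acquisition and completion handling elided; not taken from this patch set):

/* Illustrative only: issue one memcpy on an already-acquired channel while
 * keeping responsibility for the DMA mappings in the caller, as the ioat
 * self-tests above now do.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	unsigned long flags = DMA_PREP_INTERRUPT |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* or dmaengine_submit(tx) */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}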