author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 15:52:23 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 15:52:23 -0800
commit     b5cab0da75c292ffa0fbd68dd2c820066b2842de
tree       3b8d4483b8ce6d1ea997cde2c89445564cba7b0f /drivers/xen
parent     93173b5bf2841da7e3a9b0cb1312ef5c87251524
parent     d29fa0cb7602fa3e96c9eee05e14d14d3e823c89
download   linux-b5cab0da75c292ffa0fbd68dd2c820066b2842de.tar.bz2
Merge branch 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
Pull swiotlb updates from Konrad Rzeszutek Wilk:
- minor fixes (rate limiting) and removal of the unused swiotlb_map_sg()/swiotlb_unmap_sg() helpers
- support for DMA_ATTR_SKIP_CPU_SYNC, an optimization in the DMA API (see the usage sketch after the commit list below)
* 'stable/for-linus-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
swiotlb: Minor fix-ups for DMA_ATTR_SKIP_CPU_SYNC support
swiotlb: Add support for DMA_ATTR_SKIP_CPU_SYNC
swiotlb-xen: Enforce return of DMA_ERROR_CODE in mapping function
swiotlb: Drop unused functions swiotlb_map_sg and swiotlb_unmap_sg
swiotlb: Rate-limit printing when running out of SW-IOMMU space
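
For background on the attribute itself: DMA_ATTR_SKIP_CPU_SYNC tells the DMA API not to perform the usual CPU cache synchronization at map/unmap time, so a driver that knows only part of a buffer (or none of it) needs syncing can issue the sync explicitly and avoid redundant cache maintenance. A minimal, hypothetical driver sketch of that pattern; the example_map() helper and its dev/buf/len/used parameters are illustrative and not part of this pull:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver helper: map a streaming buffer without the implicit
 * CPU cache sync, then sync only the region the device will actually read.
 */
static int example_map(struct device *dev, void *buf, size_t len, size_t used)
{
	dma_addr_t handle;

	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* Sync just the "used" prefix instead of the whole buffer. */
	dma_sync_single_for_device(dev, handle, used, DMA_TO_DEVICE);

	/* ... program the hardware with "handle" ... */

	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

This is the caller-visible side of the optimization; the swiotlb-xen changes below simply pass the attribute through and honour it when bounce buffers are involved.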
Diffstat (limited to 'drivers/xen')
drivers/xen/swiotlb-xen.c | 27 ++++++++++++---------------
1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035c9e81..478fb91e3df2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = 0;
-	}
-	return dev_addr;
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
 	}
 
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 								 start_dma_addr,
 								 sg_phys(sg),
 								 sg->length,
-								 dir);
+								 dir, attrs);
 			if (map == SWIOTLB_MAP_ERROR) {
 				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
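
The functional change in the hunks above is the error path: when no bounce slot is available, or the bounced address is still not DMA-capable, xen_swiotlb_map_page() now tears the mapping down with DMA_ATTR_SKIP_CPU_SYNC forced (there is no point copying back from a bounce buffer the device never touched) and returns DMA_ERROR_CODE instead of 0, while the xen_swiotlb_dma_mapping_error() helper is dropped. A short, hypothetical caller-side sketch, assuming a driver that goes through the generic DMA API; example_map_page(), dev, page, len and handle are illustrative names, not from this patch:

#include <linux/dma-mapping.h>

/*
 * Hypothetical caller: with the map routine guaranteed to return
 * DMA_ERROR_CODE on failure, a driver checks the result through the
 * generic dma_mapping_error() helper.
 */
static int example_map_page(struct device *dev, struct page *page,
			    size_t len, dma_addr_t *handle)
{
	*handle = dma_map_page(dev, page, 0, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* nothing was mapped, nothing to undo */

	return 0;
}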