author		Christoph Hellwig <hch@lst.de>	2019-04-11 09:20:00 +0200
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2019-05-02 19:52:06 -0400
commit		063b8271ec8f706d833e61dfca40c512504a62c1 (patch)
tree		b1ae9fd85840c7c1a5bf4514e984ea6ebb2574dd /drivers/xen
parent		2e12dceef3d3fb8d796e384aa242062d1643cad4 (diff)
swiotlb-xen: ensure we have a single callsite for xen_dma_map_page
Refactor the code a bit to make further changes easier.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
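The pattern is worth seeing outside the diff: after the refactor, both the fast path and the bounce-buffer path fall through a single `done:` label, so xen_dma_map_page() is called from exactly one place. Below is a minimal standalone C sketch of that control-flow shape; the helpers `fast_path_ok`, `bounce_remap`, and `dma_cache_maintenance` are hypothetical stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel helpers; not real kernel API. */
static bool fast_path_ok(unsigned long addr)
{
	return addr % 2 == 0;
}

static bool bounce_remap(unsigned long *addr)
{
	*addr += 0x1000;	/* pretend we moved into a bounce buffer */
	return true;
}

static void dma_cache_maintenance(unsigned long addr)
{
	printf("cache maintenance for 0x%lx\n", addr);
}

static long map_page(unsigned long addr)
{
	if (fast_path_ok(addr))
		goto done;		/* fast path: use the address as-is */

	if (!bounce_remap(&addr))	/* slow path: allocate a bounce buffer */
		return -1;
done:
	/*
	 * Single callsite: both paths fall through here, so the cache
	 * maintenance logic exists exactly once.
	 */
	dma_cache_maintenance(addr);
	return (long)addr;
}

int main(void)
{
	printf("fast:   %ld\n", map_page(4));
	printf("bounce: %ld\n", map_page(5));
	return 0;
}

Collapsing the two callsites means a later change to the xen_dma_map_page() arguments or the surrounding logic only has to be made once, which is presumably the "further changes" the commit message anticipates.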
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/swiotlb-xen.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ba558094d0e6..64cb94dfedd4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    (swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,19 +402,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
 
-	return DMA_MAPPING_ERROR;
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*