author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-08-25 16:13:54 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-08-26 12:10:42 -0400
commit    6810df88dcfc22de267caf23eb072ffb97b3c411 (patch)
tree      5481262624654fc570be5aa7e1a5513687df263d /drivers/xen
parent    12e13ac84ca70e6641a4750e9317aa2d2c1f6f50 (diff)
download  linux-6810df88dcfc22de267caf23eb072ffb97b3c411.tar.bz2
xen-swiotlb: When doing coherent alloc/dealloc check before swizzling the MFNs.
The process to swizzle a Machine Frame Number (MFN) is not always necessary, especially when we already know we do not have to do it. In this patch we check the MFN against the device's coherent DMA mask and whether the requested page(s) are contiguous. If both checks pass, we return the bus address without doing the memory swizzle.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
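In other words, the patch adds a fast path: if the freshly allocated pages already sit below the device's coherent DMA mask and do not straddle a page boundary, their existing machine frames are usable as-is and no exchange with the hypervisor is needed. The stand-alone C sketch below mirrors just that predicate; the identity phys-to-bus mapping and the hard-coded 4 KiB page size are assumptions for illustration only, and the kernel's real range_straddles_page_boundary() helper additionally accepts multi-page ranges whose MFNs happen to be machine-contiguous.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_ASSUMED 4096ULL	/* assumption: 4 KiB pages, as on Xen/x86 */

    /* Simplified stand-in for range_straddles_page_boundary(): flags any
     * range that crosses a page boundary. The kernel helper is smarter
     * and also clears multi-page ranges backed by contiguous MFNs. */
    static int straddles_page_boundary(uint64_t phys, uint64_t size)
    {
        return (phys & (PAGE_SIZE_ASSUMED - 1)) + size > PAGE_SIZE_ASSUMED;
    }

    int main(void)
    {
        uint64_t dma_mask = (1ULL << 32) - 1;	/* DMA_BIT_MASK(32) */
        uint64_t phys     = 0x1000;		/* sample physical address */
        uint64_t dev_addr = phys;		/* identity xen_phys_to_bus() for the sketch */
        uint64_t size     = 2048;

        if (dev_addr + size - 1 <= dma_mask &&
            !straddles_page_boundary(phys, size))
            printf("fast path: keep the existing MFNs\n");
        else
            printf("slow path: exchange for contiguous memory under Xen\n");
        return 0;
    }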
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/swiotlb-xen.c | 28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 0408f3225722..c984768d98ca 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -209,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
 	unsigned long vstart;
+	phys_addr_t phys;
+	dma_addr_t dev_addr;
 
 	/*
 	* Ignore region specifiers - the kernel's ideas of
@@ -224,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	vstart = __get_free_pages(flags, order);
 	ret = (void *)vstart;
 
+	if (!ret)
+		return ret;
+
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;
 
-	if (ret) {
+	phys = virt_to_phys(ret);
+	dev_addr = xen_phys_to_bus(phys);
+	if (((dev_addr + size - 1 <= dma_mask)) &&
+	    !range_straddles_page_boundary(phys, size))
+		*dma_handle = dev_addr;
+	else {
 		if (xen_create_contiguous_region(vstart, order,
 						 fls64(dma_mask)) != 0) {
 			free_pages(vstart, order);
 			return NULL;
 		}
-		memset(ret, 0, size);
 		*dma_handle = virt_to_machine(ret).maddr;
 	}
+	memset(ret, 0, size);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -245,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 			  dma_addr_t dev_addr)
 {
 	int order = get_order(size);
+	phys_addr_t phys;
+	u64 dma_mask = DMA_BIT_MASK(32);
 
 	if (dma_release_from_coherent(hwdev, order, vaddr))
 		return;
 
-	xen_destroy_contiguous_region((unsigned long)vaddr, order);
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
+
+	phys = virt_to_phys(vaddr);
+
+	if (((dev_addr + size - 1 > dma_mask)) ||
+	    range_straddles_page_boundary(phys, size))
+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
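For context, drivers reach these hooks through the generic DMA API rather than calling them directly. A hedged sketch of a caller, assuming a hypothetical 'dev' already bound to the xen-swiotlb dma_ops (this is a driver fragment, not standalone code):

    /* dma_alloc_coherent() dispatches to xen_swiotlb_alloc_coherent(),
     * which after this patch skips the MFN exchange whenever the buffer
     * fits the coherent mask and does not straddle a page boundary. */
    dma_addr_t handle;
    void *buf = dma_alloc_coherent(dev, 2048, &handle, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    /* ... hand 'handle' to the device, touch 'buf' from the CPU ... */
    dma_free_coherent(dev, 2048, buf, handle);

Note that the free path recomputes the same predicate, so xen_destroy_contiguous_region() is only invoked for buffers that actually went through the exchange at allocation time.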