-rw-r--r--   include/linux/swiotlb.h |  3
-rw-r--r--   lib/swiotlb.c           | 52
2 files changed, 39 insertions(+), 16 deletions(-)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 58b996a642f9..694f1839cbc0 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -27,6 +27,9 @@ swiotlb_init(void);
 extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
 extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
 
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			dma_addr_t *dma_handle, gfp_t flags);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1272b23e4769..3494263cdd9a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -125,6 +125,26 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+	return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+	return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+	return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+	return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
@@ -168,7 +188,7 @@ swiotlb_init_with_default_size(size_t default_size)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 
 	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 }
 
 void __init
@@ -250,7 +270,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
 
 	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
 	       "0x%lx\n", bytes >> 20,
-	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+	       swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 
 	return 0;
 
@@ -298,7 +318,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -475,7 +495,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -496,7 +516,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	}
 	memset(ret, 0, size);
-	dev_addr = virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -556,7 +576,7 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
 	void *map;
 
 	BUG_ON(dir == DMA_NONE);
@@ -577,7 +597,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -607,7 +627,7 @@ void
 swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
 			   size_t size, int dir, struct dma_attrs *attrs)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -637,7 +657,7 @@ static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 		    size_t size, int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr);
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -668,7 +688,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 			  unsigned long offset, size_t size,
 			  int dir, int target)
 {
-	char *dma_addr = bus_to_virt(dev_addr) + offset;
+	char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
 
 	BUG_ON(dir == DMA_NONE);
 	if (is_swiotlb_buffer(dma_addr))
@@ -724,7 +744,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
-		dev_addr = virt_to_bus(addr);
+		dev_addr = swiotlb_virt_to_bus(addr);
 		if (swiotlb_force ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
@@ -737,7 +757,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -768,7 +788,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			unmap_single(hwdev, bus_to_virt(sg->dma_address),
+			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -801,7 +821,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 
 	for_each_sg(sgl, sg, nelems, i) {
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-			sync_single(hwdev, bus_to_virt(sg->dma_address),
+			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
 			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
@@ -825,7 +845,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*
@@ -837,7 +857,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);
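
The two __weak hooks added above default to an identity mapping, so existing architectures keep the old virt_to_bus()/bus_to_virt() behaviour; the point of the patch is that an architecture whose bus addresses differ from CPU physical addresses can supply strong definitions that override them, and the swiotlb core then routes every former virt_to_bus()/bus_to_virt() call through the new static swiotlb_virt_to_bus()/swiotlb_bus_to_virt() helpers. A minimal sketch of such an override follows; it is not part of this patch, and BUS_DMA_OFFSET and the file placement are made up for illustration, assuming a platform with a fixed physical-to-bus offset.

/* arch/<arch>/kernel/dma.c -- hypothetical example, not in the patch */
#include <linux/types.h>
#include <linux/swiotlb.h>

#define BUS_DMA_OFFSET	0x40000000UL	/* assumed platform-specific offset */

/* Strong definition overrides the __weak identity version in lib/swiotlb.c. */
dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
{
	return (dma_addr_t)paddr + BUS_DMA_OFFSET;
}

/* Must be the exact inverse, since swiotlb_bus_to_virt() relies on it. */
phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return (phys_addr_t)(baddr - BUS_DMA_OFFSET);
}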