author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-05-04 10:58:49 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-05-04 10:58:49 -0700 |
commit | 74d6790cdaaf3825afe53e668b32e662ad5e2e12 (patch) | |
tree | 1838c987f3f8e3e794ecb97a12da2c779beeaf47 | /drivers/xen |
parent | 954b7207059cc4004f2e18f49c335304b1c6d64a (diff) | |
parent | dfc06b389a4f54e78c03abecd5b42ab6ea8d492a (diff) | |
download | linux-74d6790cdaaf3825afe53e668b32e662ad5e2e12.tar.bz2 | |
Merge branch 'stable/for-linus-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
Pull swiotlb updates from Konrad Rzeszutek Wilk:
"Christoph Hellwig has taken a cleaver and trimmed off the not-needed
code and nicely folded duplicate code in the generic framework.
This lays the groundwork for more work to add extra DMA-backend-ish in
the future. Along with that some bug-fixes to make this a nice working
package"
* 'stable/for-linus-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
swiotlb: don't override user specified size in swiotlb_adjust_size
swiotlb: Fix the type of index
swiotlb: Make SWIOTLB_NO_FORCE perform no allocation
ARM: Qualify enabling of swiotlb_init()
swiotlb: remove swiotlb_nr_tbl
swiotlb: dynamically allocate io_tlb_default_mem
swiotlb: move global variables into a new io_tlb_mem structure
xen-swiotlb: remove the unused size argument from xen_swiotlb_fixup
xen-swiotlb: split xen_swiotlb_init
swiotlb: lift the double initialization protection from xen-swiotlb
xen-swiotlb: remove xen_io_tlb_start and xen_io_tlb_nslabs
xen-swiotlb: remove xen_set_nslabs
xen-swiotlb: use io_tlb_end in xen_swiotlb_dma_supported
xen-swiotlb: use is_swiotlb_buffer in is_xen_swiotlb_buffer
swiotlb: split swiotlb_tbl_sync_single
swiotlb: move orig addr and size validation into swiotlb_bounce
swiotlb: remove the alloc_size parameter to swiotlb_tbl_unmap_single
powerpc/svm: stop using io_tlb_start
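
Editorial note: the structural centerpiece of the list above is the "move global variables into a new io_tlb_mem structure" change — swiotlb's loose globals become fields of one dynamically allocated object, which is what lets xen-swiotlb delete its private xen_io_tlb_start/end/nslabs bookkeeping in the diff below. An abridged sketch of the resulting layout and membership test, assuming the 5.13 definitions in kernel/dma/swiotlb.c and include/linux/swiotlb.h (the real structure carries additional fields: lock, debugfs entry, per-slot metadata):

```c
#include <linux/types.h>

/* Abridged sketch: the consolidated swiotlb state (full version has more fields). */
struct io_tlb_mem {
	phys_addr_t start;	/* physical base of the bounce pool */
	phys_addr_t end;	/* one past the last byte of the pool */
	unsigned long nslabs;	/* pool size in IO_TLB_SHIFT-sized slabs */
	bool late_alloc;	/* allocated after boot via the page allocator? */
};

extern struct io_tlb_mem *io_tlb_default_mem;

/* The generic range check that xen-swiotlb now reuses instead of its own. */
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}
```

This is also why xen_swiotlb_dma_supported below can test against io_tlb_default_mem->end directly rather than a Xen-local end pointer.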
Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/swiotlb-xen.c | 182 |
1 file changed, 82 insertions(+), 100 deletions(-)
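
One note on the shape of the diff below: the old xen_swiotlb_init(int verbose, bool early) handled both boot-time (memblock) and late (page allocator) allocation behind an `early` flag; the series splits these into xen_swiotlb_init_early() (x86-only, panics on failure) and xen_swiotlb_init() (returns an error). A hedged caller sketch — the example_* wrappers are hypothetical, the real call sites live in arch setup code:

```c
#include <linux/init.h>
#include <xen/swiotlb-xen.h>

/* Boot path: memblock is still usable, so failure is fatal (panics). */
void __init example_setup_boot_swiotlb(void)
{
	xen_swiotlb_init_early();
}

/* Late path (e.g. when a PCI device shows up): failure is recoverable. */
int example_setup_late_swiotlb(void)
{
	return xen_swiotlb_init();
}
```

Both variants share the retry policy visible in the diff: on fixup failure, nslabs is halved down to a floor of 1024 slabs, i.e. 2 MB at the default slab size of 1 << IO_TLB_SHIFT = 2 KB.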
```diff
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2b385c1b4a99..4c89afc0df62 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -40,14 +40,7 @@
 #include <trace/events/swiotlb.h>
 
 #define MAX_DMA_BITS 32
 
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static char *xen_io_tlb_start, *xen_io_tlb_end;
-static unsigned long xen_io_tlb_nslabs;
 /*
  * Quick lookup value of the bus address of the IOTLB.
  */
@@ -82,11 +75,6 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
 	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
 }
 
-static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
-{
-	return xen_phys_to_dma(dev, virt_to_phys(address));
-}
-
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
 	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
@@ -111,15 +99,12 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	 * have the same virtual address as another address
 	 * in our domain. Therefore _only_ check address within our domain.
 	 */
-	if (pfn_valid(PFN_DOWN(paddr))) {
-		return paddr >= virt_to_phys(xen_io_tlb_start) &&
-		       paddr < virt_to_phys(xen_io_tlb_end);
-	}
+	if (pfn_valid(PFN_DOWN(paddr)))
+		return is_swiotlb_buffer(paddr);
 	return 0;
 }
 
-static int
-xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
+static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int i, rc;
 	int dma_bits;
@@ -145,16 +130,6 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
 	} while (i < nslabs);
 	return 0;
 }
-static unsigned long xen_set_nslabs(unsigned long nr_tbl)
-{
-	if (!nr_tbl) {
-		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
-		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
-	} else
-		xen_io_tlb_nslabs = nr_tbl;
-
-	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
-}
 
 enum xen_swiotlb_err {
 	XEN_SWIOTLB_UNKNOWN = 0,
@@ -177,102 +152,109 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 	}
 	return "";
 }
-int __ref xen_swiotlb_init(int verbose, bool early)
+
+#define DEFAULT_NSLABS		ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
+
+int __ref xen_swiotlb_init(void)
 {
-	unsigned long bytes, order;
-	int rc = -ENOMEM;
 	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-	unsigned int repeat = 3;
+	unsigned long bytes = swiotlb_size_or_default();
+	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+	unsigned int order, repeat = 3;
+	int rc = -ENOMEM;
+	char *start;
 
-	xen_io_tlb_nslabs = swiotlb_nr_tbl();
 retry:
-	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
-	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
-
-	/*
-	 * IO TLB memory already allocated. Just use it.
-	 */
-	if (io_tlb_start != 0) {
-		xen_io_tlb_start = phys_to_virt(io_tlb_start);
-		goto end;
-	}
+	m_ret = XEN_SWIOTLB_ENOMEM;
+	order = get_order(bytes);
 
 	/*
 	 * Get IO TLB memory from any location.
 	 */
-	if (early) {
-		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
-						  PAGE_SIZE);
-		if (!xen_io_tlb_start)
-			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
-	} else {
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
-			if (xen_io_tlb_start)
-				break;
-			order--;
-		}
-		if (order != get_order(bytes)) {
-			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
-				(PAGE_SIZE << order) >> 20);
-			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
-			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
-		}
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		start = (void *)xen_get_swiotlb_free_pages(order);
+		if (start)
+			break;
+		order--;
 	}
-	if (!xen_io_tlb_start) {
-		m_ret = XEN_SWIOTLB_ENOMEM;
+	if (!start)
 		goto error;
+	if (order != get_order(bytes)) {
+		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
+			(PAGE_SIZE << order) >> 20);
+		nslabs = SLABS_PER_PAGE << order;
+		bytes = nslabs << IO_TLB_SHIFT;
 	}
+
 	/*
 	 * And replace that memory with pages under 4GB.
 	 */
-	rc = xen_swiotlb_fixup(xen_io_tlb_start,
-			       bytes,
-			       xen_io_tlb_nslabs);
+	rc = xen_swiotlb_fixup(start, nslabs);
 	if (rc) {
-		if (early)
-			memblock_free(__pa(xen_io_tlb_start),
-				      PAGE_ALIGN(bytes));
-		else {
-			free_pages((unsigned long)xen_io_tlb_start, order);
-			xen_io_tlb_start = NULL;
-		}
+		free_pages((unsigned long)start, order);
 		m_ret = XEN_SWIOTLB_EFIXUP;
 		goto error;
 	}
-	if (early) {
-		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
-			 verbose))
-			panic("Cannot allocate SWIOTLB buffer");
-		rc = 0;
-	} else
-		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
-
-end:
-	xen_io_tlb_end = xen_io_tlb_start + bytes;
-	if (!rc)
-		swiotlb_set_max_segment(PAGE_SIZE);
-
-	return rc;
+	rc = swiotlb_late_init_with_tbl(start, nslabs);
+	if (rc)
+		return rc;
+	swiotlb_set_max_segment(PAGE_SIZE);
+	return 0;
 error:
 	if (repeat--) {
-		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
-					(xen_io_tlb_nslabs >> 1));
+		/* Min is 2MB */
+		nslabs = max(1024UL, (nslabs >> 1));
 		pr_info("Lowering to %luMB\n",
-			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
+			(nslabs << IO_TLB_SHIFT) >> 20);
 		goto retry;
 	}
 	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-	if (early)
-		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
-	else
-		free_pages((unsigned long)xen_io_tlb_start, order);
+	free_pages((unsigned long)start, order);
 	return rc;
 }
+
+#ifdef CONFIG_X86
+void __init xen_swiotlb_init_early(void)
+{
+	unsigned long bytes = swiotlb_size_or_default();
+	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
+	unsigned int repeat = 3;
+	char *start;
+	int rc;
+
+retry:
+	/*
+	 * Get IO TLB memory from any location.
+	 */
+	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+	if (!start)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+
+	/*
+	 * And replace that memory with pages under 4GB.
+	 */
+	rc = xen_swiotlb_fixup(start, nslabs);
+	if (rc) {
+		memblock_free(__pa(start), PAGE_ALIGN(bytes));
+		if (repeat--) {
+			/* Min is 2MB */
+			nslabs = max(1024UL, (nslabs >> 1));
+			bytes = nslabs << IO_TLB_SHIFT;
+			pr_info("Lowering to %luMB\n", bytes >> 20);
+			goto retry;
+		}
+		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
+	}
+
+	if (swiotlb_init_with_tbl(start, nslabs, false))
+		panic("Cannot allocate SWIOTLB buffer");
+	swiotlb_set_max_segment(PAGE_SIZE);
+}
+#endif /* CONFIG_X86 */
 
 static void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			   dma_addr_t *dma_handle, gfp_t flags,
@@ -406,7 +388,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
 	}
@@ -445,7 +427,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 }
 
 static void
@@ -462,7 +444,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	}
 
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 }
 
 static void
@@ -472,7 +454,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+		swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
 	if (!dev_is_dma_coherent(dev)) {
 		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
@@ -560,7 +542,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 static int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
+	return xen_phys_to_dma(hwdev, io_tlb_default_mem->end - 1) <= mask;
 }
 
 const struct dma_map_ops xen_swiotlb_dma_ops = {
```
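
For out-of-tree or backported drivers touched by the API changes in the hunks above, a hedged migration sketch — the example_* helpers are hypothetical, but the swiotlb calls mirror the new signatures shown in this diff:

```c
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

/*
 * Sketch: the direction-flag variant swiotlb_tbl_sync_single(..., SYNC_FOR_CPU)
 * is split into one function per direction by this series.
 */
static void example_bounce_sync_for_cpu(struct device *dev, phys_addr_t paddr,
					size_t size, enum dma_data_direction dir)
{
	/* Was: swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); */
	swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

/*
 * Sketch: unmap no longer takes a separate alloc_size argument; swiotlb
 * recovers the allocation size it recorded at map time.
 */
static void example_bounce_unmap(struct device *dev, phys_addr_t paddr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	/* Was: swiotlb_tbl_unmap_single(dev, paddr, size, size, dir, attrs); */
	swiotlb_tbl_unmap_single(dev, paddr, size, dir, attrs);
}
```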