author     Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 14:12:21 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 14:12:21 -0800
commit     af7ddd8a627c62a835524b3f5b471edbbbcce025 (patch)
tree       af9777ddef6d394c7cc01fca599328b584ca2bc1 /drivers
parent     fe2b0cdabcd9e6aeca66a104bc03576946e5fee2 (diff)
parent     8b1cce9f5832a8eda17d37a3c49fb7dd2d650f46 (diff)
download   linux-af7ddd8a627c62a835524b3f5b471edbbbcce025.tar.bz2
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
"A huge update this time, but a lot of that is just consolidating or
removing code:
- provide a common DMA_MAPPING_ERROR definition and avoid indirect
calls for dma_map_* error checking (see the first sketch after this
message)
- use direct calls for the DMA direct mapping case, avoiding huge
retpoline overhead for high performance workloads (see the second
sketch after this message)
- merge the swiotlb dma_map_ops into dma-direct
- provide a generic remapping DMA consistent allocator for
architectures whose devices perform DMA that is not cache coherent.
It is based on the existing arm64 implementation and is now also
used by csky.
- improve the dma-debug infrastructure, including dynamic allocation
of entries (Robin Murphy)
- default to providing chained scatterlists everywhere, with opt-outs
for the few architectures (alpha, parisc, most arm32 variants) that
can't cope with them
- misc sparc32 dma-related cleanups
- remove the dma_mark_clean arch hook used by swiotlb on ia64 and
replace it with the generic noncoherent infrastructure
- fix the return type of dma_set_max_seg_size (Niklas Söderlund)
- move the dummy dma ops for devices that are not DMA capable from
arm64 to common code (Robin Murphy)
- ensure dma_alloc_coherent returns zeroed memory to avoid kernel
data leaks through userspace (see the third sketch after this
message). We already did this for most common architectures, but
this ensures we do it everywhere.
dma_zalloc_coherent has been deprecated and can hopefully be
removed after -rc1 with a coccinelle script"
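
For readers who want the shape of the first change above: before this series every dma_map_ops instance supplied its own ->mapping_error method and private error cookie; afterwards a single shared DMA_MAPPING_ERROR value (all-ones dma_addr_t) is used everywhere, so checking a mapping costs a plain compare instead of an indirect call. A minimal caller-side sketch, not code from this merge (example_map_buffer() is a made-up name; the real APIs used are dma_map_single()/dma_mapping_error()):

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map a buffer for device reads and
 * check the result the way the consolidated API expects. */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /*
         * dma_mapping_error() now boils down to comparing the handle
         * against the shared DMA_MAPPING_ERROR sentinel; no per-driver
         * ->mapping_error indirect call is made.
         */
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand 'handle' to the device, then undo the mapping ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}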
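The retpoline avoidance in the second bullet comes from treating a NULL dma_map_ops pointer as "use dma-direct" and branching on that instead of always jumping through the ops table. A condensed sketch of the dispatch shape, modelled on what dma_map_page_attrs() looks like after this series (simplified; the dma-debug hooks are omitted and details may differ):

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

/* Simplified dispatch: NULL ops means the direct mapping is in use,
 * so the common case becomes a direct call the CPU can predict
 * cheaply instead of a retpoline-bound indirect call. */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (likely(!ops))       /* dma-direct fast path */
                return dma_direct_map_page(dev, page, offset, size, dir, attrs);
        return ops->map_page(dev, page, offset, size, dir, attrs);
}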
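And the zeroing bullet means dma_zalloc_coherent() no longer buys anything over dma_alloc_coherent(); a before/after sketch (example_alloc() is an illustrative name, not code from this merge):

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size,
                           dma_addr_t *handle)
{
        /* Before: dma_zalloc_coherent(dev, size, handle, GFP_KERNEL)
         * was needed to get reliably zeroed memory. */

        /* After: dma_alloc_coherent() itself returns zeroed memory on
         * every architecture, so the plain call is enough. */
        return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}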
* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
dma-mapping: fix inverted logic in dma_supported
dma-mapping: deprecate dma_zalloc_coherent
dma-mapping: zero memory returned from dma_alloc_*
sparc/iommu: fix ->map_sg return value
sparc/io-unit: fix ->map_sg return value
arm64: default to the direct mapping in get_arch_dma_ops
PCI: Remove unused attr variable in pci_dma_configure
ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
dma-mapping: bypass indirect calls for dma-direct
vmd: use the proper dma_* APIs instead of direct methods calls
dma-direct: merge swiotlb_dma_ops into the dma_direct code
dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
dma-direct: improve addressability error reporting
swiotlb: remove dma_mark_clean
swiotlb: remove SWIOTLB_MAP_ERROR
ACPI / scan: Refactor _CCA enforcement
dma-mapping: factor out dummy DMA ops
dma-mapping: always build the direct mapping code
dma-mapping: move dma_cache_sync out of line
dma-mapping: move various slow path functions out of line
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/scan.c                  |   5
-rw-r--r--  drivers/base/platform.c              |  34
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  |   2
-rw-r--r--  drivers/iommu/amd_iommu.c            |  31
-rw-r--r--  drivers/iommu/dma-iommu.c            |  23
-rw-r--r--  drivers/iommu/intel-iommu.c          |  26
-rw-r--r--  drivers/misc/mic/host/mic_boot.c     |   2
-rw-r--r--  drivers/parisc/ccio-dma.c            |  10
-rw-r--r--  drivers/parisc/sba_iommu.c           |  10
-rw-r--r--  drivers/pci/controller/vmd.c         |  48
-rw-r--r--  drivers/pci/pci-driver.c             |   4
-rw-r--r--  drivers/xen/swiotlb-xen.c            |  36
12 files changed, 57 insertions(+), 174 deletions(-)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e9eda5558c1f..5efd4219f112 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1456,6 +1456,11 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 	const struct iommu_ops *iommu;
 	u64 dma_addr = 0, size = 0;
 
+	if (attr == DEV_DMA_NOT_SUPPORTED) {
+		set_dma_ops(dev, &dma_dummy_ops);
+		return 0;
+	}
+
 	iort_dma_setup(dev, &dma_addr, &size);
 
 	iommu = iort_iommu_configure(dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 0fb5f140f1b0..445cbd8266ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1137,8 +1137,7 @@ int platform_dma_configure(struct device *dev)
 		ret = of_dma_configure(dev, dev->of_node, true);
 	} else if (has_acpi_companion(dev)) {
 		attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
-		if (attr != DEV_DMA_NOT_SUPPORTED)
-			ret = acpi_dma_configure(dev, attr);
+		ret = acpi_dma_configure(dev, attr);
 	}
 
 	return ret;
@@ -1178,37 +1177,6 @@ int __init platform_bus_init(void)
 	return error;
 }
 
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-static u64 dma_default_get_required_mask(struct device *dev)
-{
-	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
-	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
-	u64 mask;
-
-	if (!high_totalram) {
-		/* convert to mask just covering totalram */
-		low_totalram = (1 << (fls(low_totalram) - 1));
-		low_totalram += low_totalram - 1;
-		mask = low_totalram;
-	} else {
-		high_totalram = (1 << (fls(high_totalram) - 1));
-		high_totalram += high_totalram - 1;
-		mask = (((u64)high_totalram) << 32) + 0xffffffff;
-	}
-	return mask;
-}
-
-u64 dma_get_required_mask(struct device *dev)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops->get_required_mask)
-		return ops->get_required_mask(dev);
-	return dma_default_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
-
 static __initdata LIST_HEAD(early_platform_driver_list);
 static __initdata LIST_HEAD(early_platform_device_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f05a29ff586e..9fd5fbe8bebf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -583,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 
 	dev_priv->map_mode = vmw_dma_map_populate;
 
-	if (dma_ops->sync_single_for_cpu)
+	if (dma_ops && dma_ops->sync_single_for_cpu)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl() == 0)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1167ff0416cf..567221cca13c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -55,8 +55,6 @@
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
 
-#define AMD_IOMMU_MAPPING_ERROR	0
-
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
 #define LOOP_TIMEOUT	100000
@@ -2186,7 +2184,7 @@ static int amd_iommu_add_device(struct device *dev)
 			dev_name(dev));
 
 		iommu_ignore_device(dev);
-		dev->dma_ops = &dma_direct_ops;
+		dev->dma_ops = NULL;
 		goto out;
 	}
 	init_iommu_group(dev);
@@ -2339,7 +2337,7 @@ static dma_addr_t __map_single(struct device *dev,
 	paddr &= PAGE_MASK;
 
 	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
-	if (address == AMD_IOMMU_MAPPING_ERROR)
+	if (!address)
 		goto out;
 
 	prot = dir2prot(direction);
@@ -2376,7 +2374,7 @@ out_unmap:
 
 	dma_ops_free_iova(dma_dom, address, pages);
 
-	return AMD_IOMMU_MAPPING_ERROR;
+	return DMA_MAPPING_ERROR;
 }
 
 /*
@@ -2427,7 +2425,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
 	else if (IS_ERR(domain))
-		return AMD_IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	dma_mask = *dev->dma_mask;
 	dma_dom = to_dma_ops_domain(domain);
@@ -2504,7 +2502,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	npages = sg_num_pages(dev, sglist, nelems);
 
 	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
-	if (address == AMD_IOMMU_MAPPING_ERROR)
+	if (address == DMA_MAPPING_ERROR)
 		goto out_err;
 
 	prot = dir2prot(direction);
@@ -2627,7 +2625,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
 				 size, DMA_BIDIRECTIONAL, dma_mask);
-	if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
+	if (*dma_addr == DMA_MAPPING_ERROR)
 		goto out_free;
 
 	return page_address(page);
@@ -2678,11 +2676,6 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
 	return check_device(dev);
 }
 
-static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == AMD_IOMMU_MAPPING_ERROR;
-}
-
 static const struct dma_map_ops amd_iommu_dma_ops = {
 	.alloc		= alloc_coherent,
 	.free		= free_coherent,
@@ -2691,7 +2684,6 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
 	.map_sg		= map_sg,
 	.unmap_sg	= unmap_sg,
 	.dma_supported	= amd_iommu_dma_supported,
-	.mapping_error	= amd_iommu_mapping_error,
 };
 
 static int init_reserved_iova_ranges(void)
@@ -2778,17 +2770,6 @@ int __init amd_iommu_init_dma_ops(void)
 	swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
 	iommu_detected = 1;
 
-	/*
-	 * In case we don't initialize SWIOTLB (actually the common case
-	 * when AMD IOMMU is enabled and SME is not active), make sure there
-	 * are global dma_ops set as a fall-back for devices not handled by
-	 * this driver (for example non-PCI devices). When SME is active,
-	 * make sure that swiotlb variable remains set so the global dma_ops
-	 * continue to be SWIOTLB.
-	 */
-	if (!swiotlb)
-		dma_ops = &dma_direct_ops;
-
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d1b04753b204..60c7e9e9901e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -32,8 +32,6 @@
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
-#define IOMMU_MAPPING_ERROR	0
-
 struct iommu_dma_msi_page {
 	struct list_head	list;
 	dma_addr_t		iova;
@@ -523,7 +521,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 {
 	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
-	*handle = IOMMU_MAPPING_ERROR;
+	*handle = DMA_MAPPING_ERROR;
 }
 
 /**
@@ -556,7 +554,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 	dma_addr_t iova;
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 
-	*handle = IOMMU_MAPPING_ERROR;
+	*handle = DMA_MAPPING_ERROR;
 
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
@@ -649,11 +647,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
 		iommu_dma_free_iova(cookie, iova, size);
-		return IOMMU_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
 }
@@ -694,7 +692,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		s->offset += s_iova_off;
 		s->length = s_length;
-		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
+		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 
 		/*
@@ -737,11 +735,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
+		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
 			s->offset += sg_dma_address(s);
 		if (sg_dma_len(s))
 			s->length = sg_dma_len(s);
-		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
+		sg_dma_address(s) = DMA_MAPPING_ERROR;
 		sg_dma_len(s) = 0;
 	}
 }
@@ -858,11 +856,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }
 
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		phys_addr_t msi_addr, struct iommu_domain *domain)
 {
@@ -882,7 +875,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		return NULL;
 
 	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
-	if (iommu_dma_mapping_error(dev, iova))
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
 	INIT_LIST_HEAD(&msi_page->list);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 41a4b8808802..0ad67d65bbce 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
 	return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-				     size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size, int dir,
+				   u64 dma_mask)
 {
+	phys_addr_t paddr = page_to_phys(page) + offset;
 	struct dmar_domain *domain;
 	phys_addr_t start_paddr;
 	unsigned long iova_pfn;
@@ -3615,7 +3617,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
-		return 0;
+		return DMA_MAPPING_ERROR;
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
@@ -3653,7 +3655,7 @@ error:
 		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
 	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
 		dev_name(dev), size, (unsigned long long)paddr, dir);
-	return 0;
+	return DMA_MAPPING_ERROR;
 }
 
 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 				 enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	return __intel_map_single(dev, page_to_phys(page) + offset, size,
-				  dir, *dev->dma_mask);
+	return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,10 +3754,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	memset(page_address(page), 0, size);
 
-	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-					 DMA_BIDIRECTIONAL,
-					 dev->coherent_dma_mask);
-	if (*dma_handle)
+	*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+				       dev->coherent_dma_mask);
+	if (*dma_handle != DMA_MAPPING_ERROR)
 		return page_address(page);
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, order);
@@ -3865,11 +3865,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 	return nelems;
 }
 
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-
 static const struct dma_map_ops intel_dma_ops = {
 	.alloc = intel_alloc_coherent,
 	.free = intel_free_coherent,
@@ -3877,7 +3872,6 @@ static const struct dma_map_ops intel_dma_ops = {
 	.unmap_sg = intel_unmap_sg,
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
-	.mapping_error = intel_mapping_error,
 	.dma_supported = dma_direct_supported,
 };
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index c327985c9523..6479435ac96b 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -149,7 +149,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
 	struct scif_hw_dev *scdev = dev_get_drvdata(dev);
 	struct mic_device *mdev = scdev_to_mdev(scdev);
 	dma_addr_t tmp;
-	void *va = kmalloc(size, gfp);
+	void *va = kmalloc(size, gfp | __GFP_ZERO);
 
 	if (va) {
 		tmp = mic_map_single(mdev, va, size);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 701a7d6a74d5..714aac72df0e 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -110,8 +110,6 @@
 #define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes */
 #define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */
 
-#define CCIO_MAPPING_ERROR    (~(dma_addr_t)0)
-
 struct ioa_registers {
         /* Runway Supervisory Set */
         int32_t    unused1[12];
@@ -740,7 +738,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return CCIO_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	BUG_ON(size <= 0);
 
@@ -1021,11 +1019,6 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 }
 
-static int ccio_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == CCIO_MAPPING_ERROR;
-}
-
 static const struct dma_map_ops ccio_ops = {
 	.dma_supported =	ccio_dma_supported,
 	.alloc =		ccio_alloc,
@@ -1034,7 +1027,6 @@ static const struct dma_map_ops ccio_ops = {
 	.unmap_page =		ccio_unmap_page,
 	.map_sg =		ccio_map_sg,
 	.unmap_sg =		ccio_unmap_sg,
-	.mapping_error =	ccio_mapping_error,
 };
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index c1e599a429af..452d306ce5cb 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -93,8 +93,6 @@
 
 #define DEFAULT_DMA_HINT_REG	0
 
-#define SBA_MAPPING_ERROR    (~(dma_addr_t)0)
-
 struct sba_device *sba_list;
 EXPORT_SYMBOL_GPL(sba_list);
 
@@ -725,7 +723,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return SBA_MAPPING_ERROR;
+		return DMA_MAPPING_ERROR;
 
 	/* save offset bits */
 	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -1080,11 +1078,6 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 }
 
-static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == SBA_MAPPING_ERROR;
-}
-
 static const struct dma_map_ops sba_ops = {
 	.dma_supported =	sba_dma_supported,
 	.alloc =		sba_alloc,
@@ -1093,7 +1086,6 @@ static const struct dma_map_ops sba_ops = {
 	.unmap_page =		sba_unmap_page,
 	.map_sg =		sba_map_sg,
 	.unmap_sg =		sba_unmap_sg,
-	.mapping_error =	sba_mapping_error,
 };
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e50b0b5815ff..3890812cdf87 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -307,39 +307,32 @@ static struct device *to_vmd_dev(struct device *dev)
 	return &vmd->dev->dev;
 }
 
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
-{
-	return get_dma_ops(to_vmd_dev(dev));
-}
-
 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
 		       gfp_t flag, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
-				       attrs);
+	return dma_alloc_attrs(to_vmd_dev(dev), size, addr, flag, attrs);
 }
 
 static void vmd_free(struct device *dev, size_t size, void *vaddr,
 		     dma_addr_t addr, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
-				      attrs);
+	return dma_free_attrs(to_vmd_dev(dev), size, vaddr, addr, attrs);
 }
 
 static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t addr, size_t size,
 		    unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
-				      size, attrs);
+	return dma_mmap_attrs(to_vmd_dev(dev), vma, cpu_addr, addr, size,
+			attrs);
 }
 
 static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
 			   void *cpu_addr, dma_addr_t addr, size_t size,
 			   unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
-					     addr, size, attrs);
+	return dma_get_sgtable_attrs(to_vmd_dev(dev), sgt, cpu_addr, addr, size,
+			attrs);
 }
 
 static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
@@ -347,66 +340,60 @@ static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
 			       enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
-					  dir, attrs);
+	return dma_map_page_attrs(to_vmd_dev(dev), page, offset, size, dir,
+			attrs);
 }
 
 static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
 			   enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+	dma_unmap_page_attrs(to_vmd_dev(dev), addr, size, dir, attrs);
 }
 
 static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		      enum dma_data_direction dir, unsigned long attrs)
 {
-	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	return dma_map_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 			 enum dma_data_direction dir, unsigned long attrs)
 {
-	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+	dma_unmap_sg_attrs(to_vmd_dev(dev), sg, nents, dir, attrs);
 }
 
 static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 				    size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+	dma_sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
 				       size_t size, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
-						 dir);
+	dma_sync_single_for_device(to_vmd_dev(dev), addr, size, dir);
 }
 
 static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+	dma_sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 				   int nents, enum dma_data_direction dir)
 {
-	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
-	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
+	dma_sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
 }
 
 static int vmd_dma_supported(struct device *dev, u64 mask)
 {
-	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+	return dma_supported(to_vmd_dev(dev), mask);
 }
 
 static u64 vmd_get_required_mask(struct device *dev)
 {
-	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+	return dma_get_required_mask(to_vmd_dev(dev));
 }
 
 static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
@@ -446,7 +433,6 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
 	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
 	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
 	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
 	add_dma_domain(domain);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bef17c3fca67..ea55444e6ead 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1600,10 +1600,8 @@ static int pci_dma_configure(struct device *dev)
 		ret = of_dma_configure(dev, bridge->parent->of_node, true);
 	} else if (has_acpi_companion(bridge)) {
 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
-		enum dev_dma_attr attr = acpi_get_dma_attr(adev);
 
-		if (attr != DEV_DMA_NOT_SUPPORTED)
-			ret = acpi_dma_configure(dev, attr);
+		ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev));
 	}
 
 	pci_put_host_bridge_device(bridge);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 2a7f545bd0b5..989cf872b98c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -53,8 +53,6 @@
  * API.
  */
 
-#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)
-
 static char *xen_io_tlb_start, *xen_io_tlb_end;
 static unsigned long xen_io_tlb_nslabs;
 /*
@@ -405,8 +403,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
-	if (map == SWIOTLB_MAP_ERROR)
-		return XEN_SWIOTLB_ERROR_CODE;
+	if (map == DMA_MAPPING_ERROR)
+		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -421,7 +419,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return XEN_SWIOTLB_ERROR_CODE;
+	return DMA_MAPPING_ERROR;
 }
 
 /*
@@ -443,21 +441,8 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 
 	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr)) {
+	if (is_xen_swiotlb_buffer(dev_addr))
 		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
-		return;
-	}
-
-	if (dir != DMA_FROM_DEVICE)
-		return;
-
-	/*
-	 * phys_to_virt doesn't work with hihgmem page but we could
-	 * call dma_mark_clean() with hihgmem page here. However, we
-	 * are fine since dma_mark_clean() is null on POWERPC. We can
-	 * make dma_mark_clean() take a physical address if necessary.
-	 */
-	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -495,11 +480,6 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	if (target == SYNC_FOR_DEVICE)
 		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
-
-	if (dir != DMA_FROM_DEVICE)
-		return;
-
-	dma_mark_clean(phys_to_virt(paddr), size);
 }
 
 void
@@ -574,7 +554,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 						 sg_phys(sg),
 						 sg->length,
 						 dir, attrs);
-			if (map == SWIOTLB_MAP_ERROR) {
+			if (map == DMA_MAPPING_ERROR) {
 				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -700,11 +680,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
 }
 
-static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
-}
-
 const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
@@ -719,5 +694,4 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.dma_supported = xen_swiotlb_dma_supported,
 	.mmap = xen_swiotlb_dma_mmap,
 	.get_sgtable = xen_swiotlb_get_sgtable,
-	.mapping_error = xen_swiotlb_mapping_error,
 };