author     Linus Torvalds <torvalds@linux-foundation.org>	2019-08-14 10:31:11 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-08-14 10:31:11 -0700
commit     e83b009c5c366b678c7986fa6c1d38fed06c954c
tree       a7cc0449b32c7086ba44441dfac547a3940528be /kernel
parent     b5e33e44d994bb03c75f1901d47b1cf971f752a0
parent     33dcb37cef741294b481f4d889a465b8091f11bf
download   linux-e83b009c5c366b678c7986fa6c1d38fed06c954c.tar.bz2
Merge tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - fix the handling of the bus_dma_mask in dma_get_required_mask, which
   caused a regression in this merge window (Lucas Stach)

 - fix a regression in the handling of DMA_ATTR_NO_KERNEL_MAPPING (me)

 - fix dma_mmap_coherent to not cause page attribute mismatches on
   coherent architectures like x86 (me)

* tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: fix page attributes for dma_mmap_*
  dma-direct: don't truncate dma_required_mask to bus addressing capabilities
  dma-direct: fix DMA_ATTR_NO_KERNEL_MAPPING
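[Editor's note: the DMA_ATTR_NO_KERNEL_MAPPING fix below preserves the attribute's contract: the returned "CPU address" is an opaque struct page cookie, not a kernel virtual address. A minimal sketch of how a driver consumes that contract; dma_alloc_attrs()/dma_free_attrs() are the real APIs, while my_alloc_nomap/my_free_nomap and the cookie variable are illustrative names, not part of this merge.]

	#include <linux/dma-mapping.h>

	/*
	 * Sketch only: allocate device memory without a kernel mapping.
	 * The returned cookie must never be dereferenced; it is just a
	 * token to hand back to dma_free_attrs() with the same attrs.
	 */
	static int my_alloc_nomap(struct device *dev, size_t size,
				  dma_addr_t *dma, void **cookie)
	{
		*cookie = dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
					  DMA_ATTR_NO_KERNEL_MAPPING);
		return *cookie ? 0 : -ENOMEM;
	}

	static void my_free_nomap(struct device *dev, size_t size,
				  void *cookie, dma_addr_t dma)
	{
		dma_free_attrs(dev, size, cookie, dma,
			       DMA_ATTR_NO_KERNEL_MAPPING);
	}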
Diffstat (limited to 'kernel')
 kernel/dma/direct.c  | 10 +++++-----
 kernel/dma/mapping.c | 19 ++++++++++++++++++-
 kernel/dma/remap.c   |  2 +-
 3 files changed, 24 insertions(+), 7 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 59bdceea3737..795c9b095d75 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
-	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-		max_dma = dev->bus_dma_mask;
-
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
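[Editor's note: with the bus_dma_mask truncation gone, the function reduces to the rounding step that was already present: the required mask keeps every bit up to and including the highest set bit of the top RAM address, i.e. 2^fls64(max_dma) - 1. A minimal userspace sketch of that arithmetic, with fls64 re-implemented via __builtin_clzll and an illustrative RAM size:]

	#include <stdint.h>
	#include <stdio.h>

	/* 1-based index of the highest set bit, 0 for 0 (like fls64) */
	static int fls64_demo(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t max_dma = 0x23fffffffULL;	/* top of ~9 GiB of RAM */
		uint64_t mask = (1ULL << (fls64_demo(max_dma) - 1)) * 2 - 1;

		/* prints 0x3ffffffff: a 34-bit required mask */
		printf("required mask: %#llx\n", (unsigned long long)mask);
		return 0;
	}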
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
 		return page;
 	}
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		__dma_direct_free_pages(dev, size, cpu_addr);
 		return;
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev) ||
+	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+		return prot;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+		return arch_dma_mmap_pgprot(dev, prot, attrs);
+	return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
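[Editor's note: dma_common_mmap() is what backs dma_mmap_coherent() for dma-direct devices, so the dma_pgprot() switch above is what keeps a driver's mmap handler from creating a cached/uncached alias mismatch on x86. A hedged sketch of such a handler; struct my_priv and its fields are assumed driver state, not part of this merge.]

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Assumed driver state, illustrative only. */
	struct my_priv {
		struct device *dev;
		void *buf;		/* from dma_alloc_coherent() */
		dma_addr_t dma;
		size_t size;
	};

	/*
	 * Sketch of a file_operations .mmap handler: dma_mmap_coherent()
	 * picks the VMA page protection via dma_pgprot(), i.e. cached on
	 * coherent architectures such as x86, uncached otherwise.
	 */
	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct my_priv *p = file->private_data;

		return dma_mmap_coherent(p->dev, vma, p->buf, p->dma, p->size);
	}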
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
 		__dma_direct_free_pages(dev, size, page);