author    Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:32:27 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:32:27 -0800
commit    2382dc9a3eca644147be83dd2cd0dd64dc9e3e8c (patch)
tree      71a152721a9b9b11875bf5ea053edb72c9b3a94e /arch/microblaze
parent    28bc6fb9596fe1e577d09fc17ee6e1bb051c6ba3 (diff)
parent    04f56534786c885f578c24461bcd782fe9a787cf (diff)
Merge tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping
Pull dma mapping updates from Christoph Hellwig:
 "Except for a runtime warning fix from Christian this is all about
  consolidation of the generic no-IOMMU code, as well as the glue code
  for swiotlb. All the code is based on the x86 implementation with
  hooks to allow all architectures that aren't cache coherent to use
  it. The x86 conversion itself has been deferred because the x86
  maintainers were a little busy in the last months"

* tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping: (57 commits)
  MAINTAINERS: add the iommu list for swiotlb and xen-swiotlb
  arm64: use swiotlb_alloc and swiotlb_free
  arm64: replace ZONE_DMA with ZONE_DMA32
  mips: use swiotlb_{alloc,free}
  mips/netlogic: remove swiotlb support
  tile: use generic swiotlb_ops
  tile: replace ZONE_DMA with ZONE_DMA32
  unicore32: use generic swiotlb_ops
  ia64: remove an ifdef around the content of pci-dma.c
  ia64: clean up swiotlb support
  ia64: use generic swiotlb_ops
  ia64: replace ZONE_DMA with ZONE_DMA32
  swiotlb: remove various exports
  swiotlb: refactor coherent buffer allocation
  swiotlb: refactor coherent buffer freeing
  swiotlb: wire up ->dma_supported in swiotlb_dma_ops
  swiotlb: add common swiotlb_map_ops
  swiotlb: rename swiotlb_free to swiotlb_exit
  x86: rename swiotlb_dma_ops
  powerpc: rename swiotlb_dma_ops
  ...
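For orientation (editorial note, not part of this commit): drivers reach the ops renamed below through the generic DMA API, which dispatches via the architecture's get_arch_dma_ops() hook. In the sketch, dma_alloc_coherent()/dma_free_coherent() are the standard kernel API; only the example_* names are hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver helpers: allocate and release a coherent DMA
 * buffer.  On microblaze these calls dispatch through dma_nommu_ops
 * (formerly dma_direct_ops), i.e. into consistent_alloc() and
 * consistent_free() in arch/microblaze/kernel/dma.c below. */
static void *example_buf_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void example_buf_free(struct device *dev, size_t size,
			     void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, vaddr, dma_handle);
}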
Diffstat (limited to 'arch/microblaze')
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h  |  4
-rw-r--r--  arch/microblaze/kernel/dma.c               | 78
2 files changed, 24 insertions(+), 58 deletions(-)
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 6b9ea39405b8..add50c1373bf 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -18,11 +18,11 @@
/*
* Available generic sets of operations
*/
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &dma_direct_ops;
+ return &dma_nommu_ops;
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
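For reference (not part of this diff), the generic lookup that consumes this hook was roughly the following in the 4.16-era include/linux/dma-mapping.h; a per-device dma_ops pointer takes precedence, otherwise the architecture hook above supplies the bus default, which is why renaming the returned ops is the only change the header needs:

/* Roughly the 4.16-era generic ops lookup: device-specific ops win,
 * otherwise fall back to the architecture's default for the bus. */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}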
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 990bf9ea0ec6..c91e8cef98dd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,42 +15,18 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
-#define NOT_COHERENT_CACHE
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
-#else
- void *ret;
- struct page *page;
- int node = dev_to_node(dev);
-
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
- page = alloc_pages_node(node, flag, get_order(size));
- if (page == NULL)
- return NULL;
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
-
- return ret;
-#endif
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
-#else
- free_pages((unsigned long)vaddr, get_order(size));
-#endif
}
static inline void __dma_sync(unsigned long paddr,
@@ -69,7 +45,7 @@ static inline void __dma_sync(unsigned long paddr,
}
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -89,12 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -106,7 +77,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset;
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -122,7 +93,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -136,7 +107,7 @@ dma_direct_sync_single_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -150,7 +121,7 @@ dma_direct_sync_single_for_device(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -164,7 +135,7 @@ dma_direct_sync_sg_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -178,7 +149,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
}
static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -191,12 +162,8 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
if (off >= count || user_count > (count - off))
return -ENXIO;
-#ifdef NOT_COHERENT_CACHE
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = consistent_virt_to_pfn(cpu_addr);
-#else
- pfn = virt_to_pfn(cpu_addr);
-#endif
return remap_pfn_range(vma, vma->vm_start, pfn + off,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
@@ -204,20 +171,19 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
#endif
}
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
- .sync_single_for_device = dma_direct_sync_single_for_device,
- .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
- .sync_sg_for_device = dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
+ .sync_single_for_device = dma_nommu_sync_single_for_device,
+ .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
+ .sync_sg_for_device = dma_nommu_sync_sg_for_device,
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
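Not shown in the hunks above: a preallocation constant like this is conventionally passed to dma_debug_init() from an early initcall. A minimal sketch, assuming the 4.16-era dma_debug_init(num_entries) signature:

/* Sketch of the conventional consumer of the constant above:
 * register the preallocated entries with the DMA-API debug core
 * early during boot. */
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);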