From abdaf11ac18925ce8cc229e62e35b342d548ece2 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Aug 2020 16:41:50 +0200
Subject: dma-mapping: add (back) arch_dma_mark_clean for ia64

Add back a hook to optimize dcache flushing after reading executable
code using DMA.  This gets ia64 out of the business of pretending to
be dma incoherent just for this optimization.

Signed-off-by: Christoph Hellwig
---
 include/linux/dma-direct.h      | 3 +++
 include/linux/dma-noncoherent.h | 8 ++++++++
 2 files changed, 11 insertions(+)
(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 6e87225600ae..95e3e28bd93f 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -150,6 +150,9 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 
 	if (unlikely(is_swiotlb_buffer(paddr)))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+
+	if (dir == DMA_FROM_DEVICE)
+		arch_dma_mark_clean(paddr, size);
 }
 
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index ca09a4e07d2d..b9bc6c557ea4 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -108,6 +108,14 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
+#else
+static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
+{
+}
+#endif /* ARCH_HAS_DMA_MARK_CLEAN */
+
 void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
-- 
cgit v1.2.3
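
For context, a minimal sketch of the architecture-side counterpart this hook
enables.  It is not part of this patch: it assumes an ia64-style scheme in
which a DMA_FROM_DEVICE sync marks the affected pages with PG_arch_1 ("dcache
is clean"), so a later icache flush for an executable mapping can be skipped.
The actual ia64 implementation lives in a separate arch patch; the code below
is only illustrative.

/*
 * Illustrative sketch only -- not part of this patch.  An architecture
 * that selects CONFIG_ARCH_HAS_DMA_MARK_CLEAN provides its own
 * arch_dma_mark_clean(), called by dma_direct_sync_single_for_cpu() for
 * DMA_FROM_DEVICE transfers.  This models the ia64-style approach of
 * recording "dcache is clean" in the PG_arch_1 page flag.
 */
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pfn.h>
#include <linux/types.h>

void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);
	unsigned long end_pfn = PHYS_PFN(paddr + size - 1);

	/* Mark every page touched by the DMA transfer as cache-clean. */
	for (; pfn <= end_pfn; pfn++)
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
}

With such a hook in place, ia64 no longer needs to declare itself DMA
incoherent merely to get a callback at sync-for-cpu time; the generic
dma-direct code invokes arch_dma_mark_clean() directly, and architectures
without CONFIG_ARCH_HAS_DMA_MARK_CLEAN get the empty inline stub.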