author	Christoph Hellwig <hch@lst.de>	2019-08-13 11:24:04 +0200
committer	Christoph Hellwig <hch@lst.de>	2019-11-11 21:18:20 +0100
commit	80b0ca98f91ddbc09828aff5a00af1c73837713e (patch)
tree	9d88b3bd5945b7408861a6297b23164323bac713
parent	98c90e5ea34e98bdd4bcb67c48a0bdfd59bcd6cd (diff)
lib: provide a simple generic ioremap implementation
A lot of architectures reuse the same simple ioremap implementation, so start lifting the most simple variant to lib/ioremap.c. It provides ioremap_prot and iounmap, plus a default ioremap that uses prot_noncached, although that can be overridden by asm/io.h.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Palmer Dabbelt <palmer@dabbelt.com>
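Taken together, an architecture opts in by selecting GENERIC_IOREMAP and supplying the page protection value used for MMIO mappings. A minimal sketch of the arch-side glue, with made-up flag names (the real bits are architecture specific and not part of this patch):

	/* hypothetical arch/foo/include/asm/pgtable.h fragment */
	#define _PAGE_IOREMAP	(_PAGE_KERNEL | _PAGE_NOCACHE)

With that definition in place, the generic ioremap() in the diff below simply expands to ioremap_prot(addr, size, _PAGE_IOREMAP).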
-rw-r--r--	include/asm-generic/io.h	20
-rw-r--r--	lib/Kconfig	3
-rw-r--r--	lib/ioremap.c	39
3 files changed, 58 insertions(+), 4 deletions(-)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index bd676f21b5bd..325fc98cc9ff 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -923,9 +923,10 @@ static inline void *phys_to_virt(unsigned long address)
* DOC: ioremap() and ioremap_*() variants
*
* Architectures with an MMU are expected to provide ioremap() and iounmap()
- * themselves. For NOMMU architectures we provide a default nop-op
- * implementation that expect that the physical address used for MMIO are
- * already marked as uncached, and can be used as kernel virtual addresses.
+ * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
+ * a default no-op implementation that expects that the physical addresses
+ * used for MMIO are already marked as uncached and can be used as kernel
+ * virtual addresses.
*
* ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
* for specific drivers if the architecture chooses to implement them. If they
@@ -946,7 +947,18 @@ static inline void iounmap(void __iomem *addr)
{
}
#endif
-#endif /* CONFIG_MMU */
+#elif defined(CONFIG_GENERIC_IOREMAP)
+#include <asm/pgtable.h>
+
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void iounmap(volatile void __iomem *addr);
+
+static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
+{
+ /* _PAGE_IOREMAP needs to be supplied by the architecture */
+ return ioremap_prot(addr, size, _PAGE_IOREMAP);
+}
+#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_nocache
#define ioremap_nocache ioremap
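Because ioremap_prot() takes a raw protection value, an architecture could also derive the more relaxed mapping variants from it. A sketch, assuming the architecture provides pgprot_writecombine() (illustrative, not part of this patch):

	/* hypothetical arch/foo/include/asm/io.h fragment */
	#define ioremap_wc(addr, size)					\
		ioremap_prot((addr), (size),				\
			pgprot_val(pgprot_writecombine(__pgprot(_PAGE_IOREMAP))))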
diff --git a/lib/Kconfig b/lib/Kconfig
index 3321d04dfa5a..cb571767d080 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -637,6 +637,9 @@ config STRING_SELFTEST
endmenu
+config GENERIC_IOREMAP
+ bool
+
config GENERIC_LIB_ASHLDI3
bool
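Since GENERIC_IOREMAP is a non-user-visible bool, an architecture enables the generic code with a select from its own Kconfig; the arch symbol below is hypothetical:

	# hypothetical arch/foo/Kconfig fragment
	config FOO
		def_bool y
		select GENERIC_IOREMAP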
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 0a2ffadc6d71..3f0e18543de8 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -231,3 +231,42 @@ int ioremap_page_range(unsigned long addr,
return err;
}
+
+#ifdef CONFIG_GENERIC_IOREMAP
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+{
+ unsigned long offset, vaddr;
+ phys_addr_t last_addr;
+ struct vm_struct *area;
+
+ /* Disallow wrap-around or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /* Page-align mappings */
+ offset = addr & (~PAGE_MASK);
+ addr -= offset;
+ size = PAGE_ALIGN(size + offset);
+
+ area = get_vm_area_caller(size, VM_IOREMAP,
+ __builtin_return_address(0));
+ if (!area)
+ return NULL;
+ vaddr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+#endif /* CONFIG_GENERIC_IOREMAP */
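For completeness, a short driver-side sketch of the helpers exported above; the bus address and register offset are made up for illustration:

	#include <linux/errno.h>
	#include <linux/io.h>

	static int foo_init(void)
	{
		void __iomem *regs;

		/* map 4 KiB of MMIO registers at a hypothetical bus address */
		regs = ioremap(0xfe000000, 0x1000);
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x04);	/* hypothetical enable register */
		iounmap(regs);
		return 0;
	}

Note that ioremap() returns NULL on failure, so callers must check the result before touching the mapping and pair every successful mapping with an iounmap().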