author    Thomas Gleixner <tglx@linutronix.de>  2020-11-03 10:27:23 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2020-11-06 23:14:56 +0100
commit    5af627a043e39d3226eecd75753dcd2c920c16ec (patch)
tree      e6db78c3d4a3154f6aaae6d62ccf3b850de1b51a /arch/csky
parent    2a15ba82fa6ca3f35502b3060f22118a938d2889 (diff)
csky/mm/highmem: Switch to generic kmap atomic
No reason to have the same code in every architecture.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Guo Ren <guoren@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20201103095857.681196473@linutronix.de
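For context, a minimal caller-side sketch (not part of this patch): with KMAP_LOCAL selected, csky callers keep using the common highmem API from <linux/highmem.h>, assuming the generic kmap_local_page()/kunmap_local() interface introduced by this series; the csky-specific TLB flush now runs through the arch_kmap_local_post_map()/arch_kmap_local_post_unmap() hooks added to asm/highmem.h below. The helper name zero_highmem_page() is made up for illustration.

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_highmem_page(struct page *page)
{
	/* Maps into a per-CPU fixmap slot; migration is disabled while the mapping is held. */
	void *vaddr = kmap_local_page(page);

	memset(vaddr, 0, PAGE_SIZE);

	/* Tears down the mapping; on csky the post-unmap hook flushes the TLB entry. */
	kunmap_local(vaddr);
}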
Diffstat (limited to 'arch/csky')
-rw-r--r--  arch/csky/Kconfig                |  1
-rw-r--r--  arch/csky/include/asm/fixmap.h   |  4
-rw-r--r--  arch/csky/include/asm/highmem.h  |  6
-rw-r--r--  arch/csky/mm/highmem.c           | 75
4 files changed, 8 insertions(+), 78 deletions(-)
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 268fad5f51cf..7a86481a22ff 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -286,6 +286,7 @@ config NR_CPUS
config HIGHMEM
bool "High Memory Support"
depends on !CPU_CK610
+ select KMAP_LOCAL
default y
config FORCE_MAX_ZONEORDER
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
index 81f9477d5330..4b589cc20900 100644
--- a/arch/csky/include/asm/fixmap.h
+++ b/arch/csky/include/asm/fixmap.h
@@ -8,7 +8,7 @@
#include <asm/memory.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
enum fixed_addresses {
@@ -17,7 +17,7 @@ enum fixed_addresses {
#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN,
- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
__end_of_fixed_addresses
};
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
index 14645e3d5cd5..1f4ed3f4c0d9 100644
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#include <asm/cache.h>
/* undef for production */
@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
#define ARCH_HAS_KMAP_FLUSH_TLB
extern void kmap_flush_tlb(unsigned long addr);
-extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() do {} while (0)
+#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
+#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
+
extern void kmap_init(void);
#endif /* __KERNEL__ */
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 89c10800a002..4161df3c6c15 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -9,8 +9,6 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-static pte_t *kmap_pte;
-
unsigned long highstart_pfn, highend_pfn;
void kmap_flush_tlb(unsigned long addr)
@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
}
EXPORT_SYMBOL(kmap_flush_tlb);
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- flush_tlb_one((unsigned long)vaddr);
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int idx;
-
- if (vaddr < FIXADDR_START)
- return;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- pte_clear(&init_mm, vaddr, kmap_pte - idx);
- flush_tlb_one(vaddr);
-#else
- (void) idx; /* to kill a warning */
-#endif
- kmap_atomic_idx_pop();
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
-
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void *) vaddr;
-}
-
-static void __init kmap_pages_init(void)
+void __init kmap_init(void)
{
unsigned long vaddr;
pgd_t *pgd;
@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
}
-
-void __init kmap_init(void)
-{
- unsigned long vaddr;
-
- kmap_pages_init();
-
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
-
- kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
-}