author    Linus Torvalds <torvalds@linux-foundation.org>    2020-12-14 18:35:53 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-12-14 18:35:53 -0800
commit    edd7ab76847442e299af64a761febd180d71f98d (patch)
tree      91c3ee6ca27074c655bc1aaf04358f4e41df54d4 /arch/x86/mm
parent    adb35e8dc98ba9bda99ff79ac6a05b8fcde2a762 (diff)
parent    68061c02bb295da4955f0d309b9459f0a7ba83dd (diff)
Merge tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull kmap updates from Thomas Gleixner:
 "The new preemptible kmap_local() implementation:

   - Consolidate all kmap_atomic() internals into a generic
     implementation which builds the base for the kmap_local() API and
     make the kmap_atomic() interface wrappers which handle the
     disabling/enabling of preemption and pagefaults.

   - Switch the storage from per-CPU to per task and provide scheduler
     support for clearing mapping when scheduling out and restoring
     them when scheduling back in.

   - Merge the migrate_disable/enable() code, which is also part of the
     scheduler pull request. This was required to make the kmap_local()
     interface available which does not disable preemption when a
     mapping is established. It has to disable migration instead to
     guarantee that the virtual address of the mapped slot is the same
     across preemption.

   - Provide better debug facilities: guard pages and enforced
     utilization of the mapping mechanics on 64bit systems when the
     architecture allows it.

   - Provide the new kmap_local() API which can now be used to cleanup
     the kmap_atomic() usage sites all over the place. Most of the
     usage sites do not require the implicit disabling of preemption
     and pagefaults so the penalty on 64bit and 32bit non-highmem
     systems is removed and quite some of the code can be simplified.
     A wholesale conversion is not possible because some usage depends
     on the implicit side effects and some need to be cleaned up
     because they work around these side effects.

     The migrate disable side effect is only effective on highmem
     systems and when enforced debugging is enabled. On 64bit and 32bit
     non-highmem systems the overhead is completely avoided"

* tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  ARM: highmem: Fix cache_is_vivt() reference
  x86/crashdump/32: Simplify copy_oldmem_page()
  io-mapping: Provide iomap_local variant
  mm/highmem: Provide kmap_local*
  sched: highmem: Store local kmaps in task struct
  x86: Support kmap_local() forced debugging
  mm/highmem: Provide CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm/highmem: Provide and use CONFIG_DEBUG_KMAP_LOCAL
  microblaze/mm/highmem: Add dropped #ifdef back
  xtensa/mm/highmem: Make generic kmap_atomic() work correctly
  mm/highmem: Take kmap_high_get() properly into account
  highmem: High implementation details and document API
  Documentation/io-mapping: Remove outdated blurb
  io-mapping: Cleanup atomic iomap
  mm/highmem: Remove the old kmap_atomic cruft
  highmem: Get rid of kmap_types.h
  xtensa/mm/highmem: Switch to generic kmap atomic
  sparc/mm/highmem: Switch to generic kmap atomic
  powerpc/mm/highmem: Switch to generic kmap atomic
  nds32/mm/highmem: Switch to generic kmap atomic
  ...
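For context, the cleanup the message refers to is usually a one-line change per
mapping site. The sketch below illustrates it for a caller that does not depend
on the implicit preemption/pagefault disabling; the function and variable names
are illustrative only, not taken from this merge.

#include <linux/highmem.h>
#include <linux/string.h>

/* Before: kmap_atomic() implicitly disables preemption and pagefaults. */
static void copy_from_page_atomic(void *dst, struct page *page, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}

/*
 * After: kmap_local_page() keeps the section preemptible. On highmem it
 * disables migration so the mapped address stays valid across preemption;
 * on 64bit it boils down to page_address() with no extra overhead.
 */
static void copy_from_page_local(void *dst, struct page *page, size_t len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src, len);
	kunmap_local(src);
}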
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/highmem_32.c   59
-rw-r--r--  arch/x86/mm/init_32.c      15
-rw-r--r--  arch/x86/mm/iomap_32.c     57
3 files changed, 3 insertions, 128 deletions
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 075fe51317b0..2c54b76d8f84 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -4,65 +4,6 @@
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
- set_pte(kmap_pte-idx, mk_pte(page, prot));
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void kunmap_atomic_high(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
- int idx, type;
-
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
- }
-#ifdef CONFIG_DEBUG_HIGHMEM
- else {
- BUG_ON(vaddr < PAGE_OFFSET);
- BUG_ON(vaddr >= (unsigned long)high_memory);
- }
-#endif
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
void __init set_highmem_pages_init(void)
{
struct zone *zone;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 7c055259de3a..da31c2635ee4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -394,19 +394,6 @@ repeat:
return last_map_addr;
}
-pte_t *kmap_pte;
-
-static void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /*
- * Cache the first kmap pte:
- */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = virt_to_kpte(kmap_vstart);
-}
-
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
@@ -712,8 +699,6 @@ void __init paging_init(void)
__flush_tlb_all();
- kmap_init();
-
/*
* NOTE: at this point the bootmem allocator is fully available.
*/
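The kmap_pte cache and kmap_init() removed above are no longer needed once the
generic code owns the mapping: it can resolve the fixmap pte lazily with the
same __fix_to_virt()/virt_to_kpte() pair the deleted code used. A rough sketch
of that idea follows; the actual helper in mm/highmem.c may differ in detail.

#include <linux/pgtable.h>
#include <asm/fixmap.h>

static pte_t *kmap_pte;

/*
 * Resolve the pte backing the first kmap fixmap slot on first use; further
 * slots are then addressed relative to it, just as the deleted x86 code did
 * with 'kmap_pte - idx'.
 */
static pte_t *kmap_get_pte(void)
{
	if (!kmap_pte)
		kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return kmap_pte;
}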
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index f60398aeb644..9aaa756ddf21 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
}
EXPORT_SYMBOL_GPL(iomap_free);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
/*
* For non-PAT systems, translate non-WB request to UC- just in
@@ -81,36 +60,6 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(prot) &= __default_kernel_pte_mask;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
- int idx, type;
-
- type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
- /*
- * Force other mappings to Oops if they'll try to access this
- * pte without first remap it. Keeping stale mappings around
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
+ return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
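On the io-mapping side, the removed iomap_atomic_prot_pfn()/iounmap_atomic()
pair is replaced by __iomap_local_pfn_prot(), which backs the
io_mapping_map_local_wc()/io_mapping_unmap_local() helpers introduced by the
"io-mapping: Provide iomap_local variant" commit in the shortlog above. A
hedged usage sketch, assuming a struct io_mapping that has already been set up
elsewhere (the function name and register offset are illustrative):

#include <linux/io-mapping.h>
#include <linux/io.h>

/*
 * Write a single 32bit value through a write-combined io_mapping. Unlike
 * the removed atomic variant, the local mapping leaves preemption and
 * pagefaults enabled.
 */
static void poke_doorbell(struct io_mapping *iomap, unsigned long offset, u32 val)
{
	void __iomem *vaddr = io_mapping_map_local_wc(iomap, offset);

	writel(val, vaddr);
	io_mapping_unmap_local(vaddr);
}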