author		Linus Torvalds <torvalds@linux-foundation.org>	2022-12-17 14:06:53 -0600
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-17 14:06:53 -0600
commit		4f292c4de4f6fb83776c0ff22674121eb6ddfa2f (patch)
tree		7625005ed153dbc8341867bfc0076aae5adf93f9 /mm
parent		03d84bd6d43269df2dc63b2945dfed6610fac526 (diff)
parent		3e844d842d49cdbe61a4b338bdd512654179488a (diff)
Merge tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Dave Hansen:
 "New Feature:

   - Randomize the per-cpu entry areas

  Cleanups:

   - Have CR3_ADDR_MASK use PHYSICAL_PAGE_MASK instead of open coding it

   - Move to "native" set_memory_rox() helper

   - Clean up pmd_get_atomic() and i386-PAE

   - Remove some unused page table size macros"

* tag 'x86_mm_for_6.2_v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  x86/mm: Ensure forced page table splitting
  x86/kasan: Populate shadow for shared chunk of the CPU entry area
  x86/kasan: Add helpers to align shadow addresses up and down
  x86/kasan: Rename local CPU_ENTRY_AREA variables to shorten names
  x86/mm: Populate KASAN shadow for entire per-CPU range of CPU entry area
  x86/mm: Recompute physical address for every page of per-CPU CEA mapping
  x86/mm: Rename __change_page_attr_set_clr(.checkalias)
  x86/mm: Inhibit _PAGE_NX changes from cpa_process_alias()
  x86/mm: Untangle __change_page_attr_set_clr(.checkalias)
  x86/mm: Add a few comments
  x86/mm: Fix CR3_ADDR_MASK
  x86/mm: Remove P*D_PAGE_MASK and P*D_PAGE_SIZE macros
  mm: Convert __HAVE_ARCH_P..P_GET to the new style
  mm: Remove pointless barrier() after pmdp_get_lockless()
  x86/mm/pae: Get rid of set_64bit()
  x86_64: Remove pointless set_64bit() usage
  x86/mm/pae: Be consistent with pXXp_get_and_clear()
  x86/mm/pae: Use WRITE_ONCE()
  x86/mm/pae: Don't (ab)use atomic64
  mm/gup: Fix the lockless PMD access
  ...
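
Most of the mm/ changes below replace open-coded pmd_read_atomic()/READ_ONCE() reads with pmdp_get_lockless(). The helper exists because on configurations where a page-table entry is wider than the native word (classically i386 PAE, where a pmd_t is 64 bits), a single load can tear and observe half-old, half-new bits. A minimal sketch of the split-read scheme, assuming hypothetical pmd_low/pmd_high field names rather than the kernel's exact definitions:

/*
 * Sketch only -- field names and layout are assumptions, not the
 * kernel's exact code. Under CONFIG_GUP_GET_PXX_LOW_HIGH the entry
 * is read as two 32-bit halves, and the low half is re-checked so
 * that a concurrent update forces a retry instead of returning a
 * torn value.
 */
static inline pmd_t pmdp_get_lockless_sketch(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = READ_ONCE(pmdp->pmd_low);		/* assumed field */
		smp_rmb();					/* order low before high */
		pmd.pmd_high = READ_ONCE(pmdp->pmd_high);	/* assumed field */
		smp_rmb();					/* order high before re-check */
	} while (pmd.pmd_low != READ_ONCE(pmdp->pmd_low));	/* retry on change */

	return pmd;
}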
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig			|  2
-rw-r--r--	mm/gup.c			|  2
-rw-r--r--	mm/hmm.c			|  3
-rw-r--r--	mm/khugepaged.c			|  2
-rw-r--r--	mm/mapping_dirty_helpers.c	|  2
-rw-r--r--	mm/mprotect.c			|  2
-rw-r--r--	mm/userfaultfd.c		|  2
-rw-r--r--	mm/vmscan.c			|  5
8 files changed, 8 insertions, 12 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 3425708f274c..ff7b209dec05 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1078,7 +1078,7 @@ config GUP_TEST
 comment "GUP_TEST needs to have DEBUG_FS enabled"
 	depends on !GUP_TEST && !DEBUG_FS
 
-config GUP_GET_PTE_LOW_HIGH
+config GUP_GET_PXX_LOW_HIGH
 	bool
 
 config ARCH_HAS_PTE_SPECIAL
diff --git a/mm/gup.c b/mm/gup.c
index 2b45d7817a90..f45a3a5be53a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2721,7 +2721,7 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 
 	pmdp = pmd_offset_lockless(pudp, pud, addr);
 	do {
-		pmd_t pmd = READ_ONCE(*pmdp);
+		pmd_t pmd = pmdp_get_lockless(pmdp);
 
 		next = pmd_addr_end(addr, end);
 		if (!pmd_present(pmd))
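
The gup.c hunk above also shows the calling convention the lockless fast path depends on: take one consistent snapshot of the entry into a local variable, then make every decision against that snapshot and never dereference the live pointer again. A simplified illustration of the pattern (walk_huge_pmd() and walk_pte_level() are hypothetical helpers, not kernel functions):

/*
 * Illustrative pattern, not kernel code: every check runs against
 * the local snapshot 'pmd', because *pmdp can be modified by another
 * CPU at any moment after the read.
 */
static int walk_one_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end)
{
	pmd_t pmd = pmdp_get_lockless(pmdp);	/* single consistent snapshot */

	if (!pmd_present(pmd))
		return 0;			/* nothing mapped here */
	if (pmd_trans_huge(pmd))
		return walk_huge_pmd(pmd, addr, end);	/* hypothetical helper */

	return walk_pte_level(pmd, addr, end);		/* hypothetical helper */
}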
diff --git a/mm/hmm.c b/mm/hmm.c
index 3850fb625dda..601a99ce3c84 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -361,8 +361,7 @@ again:
 	 * huge or device mapping one and compute corresponding pfn
 	 * values.
 	 */
-	pmd = pmd_read_atomic(pmdp);
-	barrier();
+	pmd = pmdp_get_lockless(pmdp);
 
 	if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
 		goto again;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5a7d2d5093f9..5cb401aa2b9d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -857,7 +857,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 	if (!*pmd)
 		return SCAN_PMD_NULL;
 
-	pmde = pmd_read_atomic(*pmd);
+	pmde = pmdp_get_lockless(*pmd);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index 1b0ab8fcfd8b..175e424b9ab1 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -126,7 +126,7 @@ static int clean_record_pte(pte_t *pte, unsigned long addr,
 static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
 			      struct mm_walk *walk)
 {
-	pmd_t pmdval = pmd_read_atomic(pmd);
+	pmd_t pmdval = pmdp_get_lockless(pmd);
 
 	if (!pmd_trans_unstable(&pmdval))
 		return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 093cb50f2fc4..908df12caa26 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -297,7 +297,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
  */
 static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
 {
-	pmd_t pmdval = pmd_read_atomic(pmd);
+	pmd_t pmdval = pmdp_get_lockless(pmd);
 
 	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index b7a9479bece2..0499907b6f1a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -632,7 +632,7 @@ retry:
 			break;
 		}
 
-		dst_pmdval = pmd_read_atomic(dst_pmd);
+		dst_pmdval = pmdp_get_lockless(dst_pmd);
 		/*
 		 * If the dst_pmd is mapped as THP don't
 		 * override it and just be strict.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index aba991c505f1..bd6637fcd8f9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4084,10 +4084,7 @@ restart:
 	/* walk_pte_range() may call get_next_vma() */
 	vma = args->vma;
 	for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
-		pmd_t val = pmd_read_atomic(pmd + i);
-
-		/* for pmd_read_atomic() */
-		barrier();
+		pmd_t val = pmdp_get_lockless(pmd + i);
 
 		next = pmd_addr_end(addr, end);
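
The vmscan.c hunk also drops an open-coded barrier() that callers previously had to pair with pmd_read_atomic(). With pmdp_get_lockless() any required ordering lives inside the helper, and on architectures where a pmd_t fits in one native word the whole read collapses to a single READ_ONCE(). A sketch of that common-case fallback, assuming a native-word pmd_t:

/*
 * Sketch of the non-PAE fallback, under the assumption that pmd_t is
 * a single native word: one READ_ONCE() is already atomic and needs
 * no extra compiler barrier, which is why call sites like the loop
 * above could delete their explicit barrier().
 */
static inline pmd_t pmdp_get_lockless_fallback(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}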