author		Baolin Wang <baolin.wang@linux.alibaba.com>	2022-05-09 18:20:52 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2022-05-13 07:20:07 -0700
commit		3d0b95cd87b26b0b10e0cda8ee6105c2194a5800 (patch)
tree		86532f8f27b7791c9157a1359a8707b2da7f320d /mm/hugetlb.c
parent		6366238b8dfc383723c211c1ffe8c8d7914107e5 (diff)
download	linux-3d0b95cd87b26b0b10e0cda8ee6105c2194a5800.tar.bz2
mm: hugetlb: considering PMD sharing when flushing cache/TLBs
This patchset fixes some cache flushing issues if PMD sharing is possible for hugetlb pages, which were found by code inspection. Meanwhile, Mike found that flush_cache_page() cannot cover the whole size of a hugetlb page on some architectures [1], so I added a new patch 3 to fix this issue, since after some investigation I found that only try_to_unmap_one() and try_to_migrate_one() need to be fixed.

[1] https://lore.kernel.org/linux-mm/064da3bb-5b4b-7332-a722-c5a541128705@oracle.com/

This patch (of 3):

When moving hugetlb page tables, the cache flushing is done in move_page_tables() without considering shared PMDs, which may cause cache issues on some architectures. Thus we should move the hugetlb cache flushing into move_hugetlb_page_tables(), covering the shared-PMD ranges calculated by adjust_range_if_pmd_sharing_possible(). Meanwhile, also expand the TLB flushing range in case of shared PMDs.

Note this was discovered via code inspection and has not been observed as a real problem in practice so far.

Link: https://lkml.kernel.org/r/cover.1651056365.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/0443c8cf20db554d3ff4b439b30e0ff26c0181dd.1651056365.git.baolin.wang@linux.alibaba.com
Fixes: 550a7d60bd5e ("mm, hugepages: add mremap() support for hugepage backed vma")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
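For reference, the range widening relied on above rounds a flush range out to PUD_SIZE boundaries whenever the VMA could be sharing hugetlb PMD pages, so that flush_cache_range()/flush_tlb_range() cover every mapping that might reference a shared PMD. Below is a minimal sketch of that behavior under the usual sharing preconditions (a VM_MAYSHARE hugetlb VMA spanning at least one PUD-aligned region); sketch_adjust_range is an illustrative name and an approximation, not the exact upstream body of adjust_range_if_pmd_sharing_possible():

#include <linux/mm.h>

/*
 * Illustrative sketch, not the upstream implementation: widen
 * [*start, *end) to PUD_SIZE boundaries when @vma may share hugetlb
 * PMD pages, so a subsequent cache/TLB flush covers every mapping
 * that could reference the shared PMD.
 */
static void sketch_adjust_range(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE);
	unsigned long v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/* Sharing requires a shared VMA covering a full PUD-aligned area. */
	if (!(vma->vm_flags & VM_MAYSHARE) || v_end <= v_start ||
	    *end <= v_start || *start >= v_end)
		return;

	/* Round both ends outward for the worst-case shared PMD. */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);
	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}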
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4454c42a82f0..fc31a8f49067 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4922,10 +4922,17 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 	unsigned long old_addr_copy;
 	pte_t *src_pte, *dst_pte;
 	struct mmu_notifier_range range;
+	bool shared_pmd = false;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
 				old_end);
 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
+	/*
+	 * In case of shared PMDs, we should cover the maximum possible
+	 * range.
+	 */
+	flush_cache_range(vma, range.start, range.end);
+
 	mmu_notifier_invalidate_range_start(&range);
 	/* Prevent race with file truncation */
 	i_mmap_lock_write(mapping);
@@ -4942,8 +4949,10 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		 */
 		old_addr_copy = old_addr;
 
-		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte))
+		if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+			shared_pmd = true;
 			continue;
+		}
 
 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
 		if (!dst_pte)
@@ -4951,7 +4960,11 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
 	}
-	flush_tlb_range(vma, old_end - len, old_end);
+
+	if (shared_pmd)
+		flush_tlb_range(vma, range.start, range.end);
+	else
+		flush_tlb_range(vma, old_end - len, old_end);
 	mmu_notifier_invalidate_range_end(&range);
 	i_mmap_unlock_write(mapping);
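For context, move_hugetlb_page_tables() is the mremap() path for hugetlb-backed VMAs (added by the Fixes: commit above). The following is a minimal user-space sketch of a call that exercises it; it assumes 2 MB hugepages and enough free hugepages configured via /proc/sys/vm/nr_hugepages:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL << 20)	/* assumed 2 MB hugepage size */
#define LEN		(4 * HPAGE_SIZE)

int main(void)
{
	/* MAP_SHARED hugetlb mappings are the case where PMD sharing can apply. */
	void *old = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (old == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(old, 0xaa, LEN);		/* fault the huge pages in */

	/* Reserve a hugepage-aligned destination; MREMAP_FIXED replaces it. */
	void *dst = mmap(NULL, LEN, PROT_NONE,
			 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (dst == MAP_FAILED) {
		perror("mmap dst");
		return EXIT_FAILURE;
	}

	/* The forced move goes through move_hugetlb_page_tables(). */
	void *new = mremap(old, LEN, LEN, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (new == MAP_FAILED) {
		perror("mremap");
		return EXIT_FAILURE;
	}
	printf("moved %p -> %p\n", old, new);
	return munmap(new, LEN) ? EXIT_FAILURE : EXIT_SUCCESS;
}

With the patch applied, a move that unshared a PMD flushes the TLB over the widened PUD-aligned range rather than only [old_end - len, old_end).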