author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-21 12:19:56 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-21 12:19:56 -0800 |
commit | 7b15c27e2f7b6d114770c2922b2c49d2e8f3867c (patch) | |
tree | 96be1dfb322e23fca3de86b1e27244c74a6132fc /mm/hugetlb.c | |
parent | 9eef02334505411667a7b51a8f349f8c6c4f3b66 (diff) | |
parent | 8cf55f24ce6cf90eb8828421e15e9efcd508bd2c (diff) | |
download | linux-7b15c27e2f7b6d114770c2922b2c49d2e8f3867c.tar.bz2 | |
Merge tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull tlb gather updates from Ingo Molnar:
"Theses fix MM (soft-)dirty bit management in the procfs code & clean
up the TLB gather API"
* tag 'core-mm-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ldt: Use tlb_gather_mmu_fullmm() when freeing LDT page-tables
tlb: arch: Remove empty __tlb_remove_tlb_entry() stubs
tlb: mmu_gather: Remove start/end arguments from tlb_gather_mmu()
tlb: mmu_gather: Introduce tlb_gather_mmu_fullmm()
tlb: mmu_gather: Remove unused start/end arguments from tlb_finish_mmu()
mm: proc: Invalidate TLB after clearing soft-dirty page state
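Taken together, the API change is that the gather/finish pair no longer carries a start/end range: callers hand tlb_gather_mmu() only the mm, and the full-address-space case gets a dedicated tlb_gather_mmu_fullmm() helper. Below is a minimal sketch of the new ranged calling convention, mirroring the mm/hugetlb.c hunk in the diff further down; the wrapper name sketch_unmap_hugepages is illustrative, not a real kernel function.

```c
#include <linux/hugetlb.h>	/* __unmap_hugepage_range(), struct page */
#include <asm/tlb.h>		/* struct mmu_gather, tlb_gather_mmu(), tlb_finish_mmu() */

/* Illustrative wrapper showing the post-series calling convention. */
static void sketch_unmap_hugepages(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   struct page *ref_page)
{
	struct mmu_gather tlb;

	/* was: tlb_gather_mmu(&tlb, vma->vm_mm, start, end); */
	tlb_gather_mmu(&tlb, vma->vm_mm);
	/* the range still goes to the actual unmap helper */
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	/* was: tlb_finish_mmu(&tlb, start, end); */
	tlb_finish_mmu(&tlb);
}
```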
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 18 |
1 file changed, 2 insertions, 16 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4bdb58ab14cb..905a7d549b00 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4008,25 +4008,11 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	struct mm_struct *mm;
 	struct mmu_gather tlb;
-	unsigned long tlb_start = start;
-	unsigned long tlb_end = end;
-
-	/*
-	 * If shared PMDs were possibly used within this vma range, adjust
-	 * start/end for worst case tlb flushing.
-	 * Note that we can not be sure if PMDs are shared until we try to
-	 * unmap pages. However, we want to make sure TLB flushing covers
-	 * the largest possible range.
-	 */
-	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
-
-	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
+	tlb_finish_mmu(&tlb);
 }
 
 /*
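The hugetlb hunk only shows the ranged side of the API; the x86/ldt entry in the shortlog is the user of the new full-mm variant. That change is not part of this diffstat, so the following is only a rough sketch of the tlb_gather_mmu_fullmm() pattern for tearing down a fixed range of page-tables; the wrapper name and range arguments are illustrative.

```c
#include <linux/mm.h>	/* free_pgd_range() */
#include <asm/tlb.h>	/* struct mmu_gather, tlb_gather_mmu_fullmm(), tlb_finish_mmu() */

/*
 * Illustrative wrapper: the fullmm gather takes no start/end, so the
 * eventual TLB flush covers the whole mm rather than a tracked range.
 */
static void sketch_free_pgtables(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu_fullmm(&tlb, mm);
	free_pgd_range(&tlb, start, end, start, end);
	tlb_finish_mmu(&tlb);
}
```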