| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2021-12-27 16:35:44 +0100 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2021-12-27 16:35:44 +0100 |
| commit | 3f0bb496ee41d0eb99d308768c9f8593cbd3eb9f | |
| tree | c56b697a7e818e91974d951872781bfd9b3af5b5 /mm/hugetlb.c | |
| parent | d7fbdc575b33c374ce88ccfe3ab364f7eb240f8a | |
| parent | 9c33eef84e3110963d6f41d87d63bc40d716eb1f | |
Merge branches 'thermal-tools' and 'thermal-int340x'
Merge tmon fix and int340x driver improvement for 5.17-rc1.
* thermal-tools:
thermal: tools: tmon: remove unneeded local variable
* thermal-int340x:
thermal: int340x: Use struct_group() for memcpy() region
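For background on the int340x change above: struct_group() (declared in include/linux/stddef.h) wraps a run of consecutive struct members in an anonymous union, so the fields stay individually addressable while a named mirror struct covers exactly the same bytes, giving memcpy() a destination with an exact size that FORTIFY_SOURCE can bounds-check. A minimal userspace sketch of the idea; the macro below is a simplified stand-in for the kernel's, and the sensor_msg layout is invented for illustration, not the driver's actual structure:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Simplified rendition of the kernel's struct_group(): an anonymous
 * struct exposes the members directly, while a named struct with the
 * same members overlays the same storage via the anonymous union.
 */
#define struct_group(NAME, ...)			\
	union {					\
		struct { __VA_ARGS__ };		\
		struct { __VA_ARGS__ } NAME;	\
	}

/* Hypothetical message layout, for illustration only. */
struct sensor_msg {
	uint8_t type;
	struct_group(payload,
		uint32_t low_limit;
		uint32_t high_limit;
	);
	uint8_t csum;
};

int main(void)
{
	struct sensor_msg m = { .type = 1 };
	const uint32_t src[2] = { 10, 90 };

	/* One memcpy() over the whole group, with an exact region size. */
	memcpy(&m.payload, src, sizeof(m.payload));
	printf("low=%u high=%u\n",
	       (unsigned)m.low_limit, (unsigned)m.high_limit);
	return 0;
}
```

The payoff is that a fortified memcpy() no longer appears to write past the end of the first member when it legitimately spans several adjacent fields.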
Diffstat (limited to 'mm/hugetlb.c')

 mm/hugetlb.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f025d234522f..abcd1785c629 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4919,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		move_huge_pte(vma, old_addr, new_addr, src_pte);
 	}
 
-	i_mmap_unlock_write(mapping);
 	flush_tlb_range(vma, old_end - len, old_end);
 	mmu_notifier_invalidate_range_end(&range);
+	i_mmap_unlock_write(mapping);
 
 	return len + old_addr - old_end;
 }
@@ -4939,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
+	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -4967,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 			spin_unlock(ptl);
-			/*
-			 * We just unmapped a page of PMDs by clearing a PUD.
-			 * The caller's TLB flush range should cover this area.
-			 */
+			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+			force_flush = true;
 			continue;
 		}
 
@@ -5027,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	}
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
+
+	/*
+	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+	 * could defer the flush until now, since by holding i_mmap_rwsem we
+	 * guaranteed that the last reference would not be dropped. But we must
+	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
+	 * dropped and the last reference to the shared PMDs page might be
+	 * dropped as well.
+	 *
+	 * In theory we could defer the freeing of the PMD pages as well, but
+	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+	 * detect sharing, so we cannot defer the release of the page either.
+	 * Instead, do flush now.
+	 */
+	if (force_flush)
+		tlb_flush_mmu_tlbonly(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
```
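The comment added at the end of __unmap_hugepage_range() carries the key constraint: the unshare path frees a PMD page whose TLB entries are only recorded in the mmu_gather by the new tlb_flush_pmd_range() call, so the flush must be forced before the function returns and the caller drops i_mmap_rwsem, rather than left batched for later. A minimal userspace sketch of that record-then-force-flush pattern; every name below (struct gather, gather_record(), gather_flush()) is illustrative and none of it is kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Batches invalidations so they can be issued once, like mmu_gather. */
struct gather {
	unsigned long start, end;
	bool pending;
};

/* Role of tlb_flush_pmd_range(): widen the pending flush window. */
static void gather_record(struct gather *g, unsigned long s, unsigned long e)
{
	if (!g->pending) {
		g->start = s;
		g->end = e;
		g->pending = true;
		return;
	}
	if (s < g->start)
		g->start = s;
	if (e > g->end)
		g->end = e;
}

/* Role of tlb_flush_mmu_tlbonly(): issue the batched flush immediately. */
static void gather_flush(struct gather *g)
{
	if (!g->pending)
		return;
	printf("flush [%#lx, %#lx)\n", g->start, g->end);
	g->pending = false;
}

int main(void)
{
	struct gather g = { .pending = false };
	bool force_flush = false;
	unsigned long addr;

	/* Walk three PMD-sized steps; pretend the middle one was shared. */
	for (addr = 0x200000; addr < 0x800000; addr += 0x200000) {
		if (addr == 0x400000) {
			gather_record(&g, addr, addr + 0x200000);
			force_flush = true;
		}
	}

	/*
	 * Must run before the lock pinning the shared page is dropped;
	 * deferring to the caller could free the page while stale TLB
	 * entries still point at it.
	 */
	if (force_flush)
		gather_flush(&g);

	return 0;
}
```

The same reasoning explains the first hunk of the patch: in move_hugetlb_page_tables(), i_mmap_unlock_write() is moved after flush_tlb_range(), so the mapping lock is still held while stale translations are being torn down.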