Diffstat:
-rw-r--r--  mm/huge_memory.c | 22 +++++-----------------
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ce883459e246..08f6c1993832 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1410,7 +1410,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid, last_cpupid = -1;
-	bool need_flush = false;
 	bool page_locked;
 	bool migrated = false;
 	bool was_writable;
@@ -1497,22 +1496,18 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	}
 
 	/*
-	 * The page_table_lock above provides a memory barrier
-	 * with change_protection_range.
-	 */
-	if (mm_tlb_flush_pending(vma->vm_mm))
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
-	/*
 	 * Since we took the NUMA fault, we must have observed the !accessible
 	 * bit. Make sure all other CPUs agree with that, to avoid them
 	 * modifying the page we're about to migrate.
 	 *
 	 * Must be done under PTL such that we'll observe the relevant
-	 * set_tlb_flush_pending().
+	 * inc_tlb_flush_pending().
+	 *
+	 * We are not sure a pending tlb flush here is for a huge page
+	 * mapping or not. Hence use the tlb range variant
 	 */
 	if (mm_tlb_flush_pending(vma->vm_mm))
-		need_flush = true;
+		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
 
 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
@@ -1520,13 +1515,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	spin_unlock(vmf->ptl);
 
-	/*
-	 * We are not sure a pending tlb flush here is for a huge page
-	 * mapping or not. Hence use the tlb range variant
-	 */
-	if (need_flush)
-		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
 				vmf->pmd, pmd, vmf->address, page, target_nid);
 	if (migrated) {
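In short: the patch drops the now-unused need_flush local, folds the two mm_tlb_flush_pending() checks into a single flush_tlb_range() issued while the PTL is still held, and updates the comment for the set_tlb_flush_pending() -> inc_tlb_flush_pending() rename. The ordering the comment relies on is that the protection-changing thread bumps the pending counter before taking the page-table lock to make the mapping !accessible, so a faulting thread that later takes the same lock is guaranteed to observe the counter and can flush on the writer's behalf. Below is a minimal userspace sketch of that pattern, not kernel code: the mutex stands in for the PTL, the atomic counter for inc/dec_tlb_flush_pending(), the pte_accessible flag for the protection change, and printf() for flush_tlb_range(). All names are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stand-in for vmf->ptl */
static atomic_int tlb_flush_pending;    /* stand-in for mm->tlb_flush_pending */
static int pte_accessible = 1;          /* stand-in for the PMD protection bits */

static void *protection_changer(void *arg)
{
	(void)arg;
	/* inc_tlb_flush_pending(): raised before the page table is touched */
	atomic_fetch_add(&tlb_flush_pending, 1);

	pthread_mutex_lock(&ptl);
	pte_accessible = 0;             /* change_protection_range() */
	pthread_mutex_unlock(&ptl);

	/* ... the deferred flush_tlb_range() would happen here ... */
	atomic_fetch_sub(&tlb_flush_pending, 1); /* dec_tlb_flush_pending() */
	return NULL;
}

static void *numa_fault_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&ptl);
	/*
	 * Taking the same lock the writer used orders us after its
	 * atomic_fetch_add(): if we see !pte_accessible while a flush
	 * is still pending, we also see the counter and flush ourselves.
	 */
	if (!pte_accessible && atomic_load(&tlb_flush_pending))
		printf("flush range under PTL before migrating\n");
	pthread_mutex_unlock(&ptl);
	/* migrate_misplaced_transhuge_page() would run after unlock */
	return NULL;
}

int main(void)
{
	pthread_t writer, faulter;

	pthread_create(&writer, NULL, protection_changer, NULL);
	pthread_create(&faulter, NULL, numa_fault_handler, NULL);
	pthread_join(writer, NULL);
	pthread_join(faulter, NULL);
	return 0;
}

Build with cc -pthread. The design point mirrored from the patch: checking the pending counter and flushing now happen in one step under the PTL, rather than latching need_flush and flushing after spin_unlock(vmf->ptl), and the range variant is used because a pending flush may or may not cover a huge-page mapping.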