author     Matthew Wilcox (Oracle) <willy@infradead.org>   2022-09-02 20:46:53 +0100
committer  Andrew Morton <akpm@linux-foundation.org>       2022-10-03 14:02:55 -0700
commit     19672a9e4a75252871cba319f4e3b859b8fdf671 (patch)
tree       c3af45abf1628a433cb15f8340ace6c79bfeae4f /mm
parent     82e66bf76173a1525db9866455a7fdbc07b57297 (diff)
download   linux-19672a9e4a75252871cba319f4e3b859b8fdf671.tar.bz2
mm: convert lock_page_or_retry() to folio_lock_or_retry()
Remove a call to compound_head() in each of the two callers.
Link: https://lkml.kernel.org/r/20220902194653.1739778-58-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
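For orientation, here is a minimal before/after sketch of the helpers this patch switches between. The definitions are an assumption reconstructed from the call sites in the diff below (the include/linux/pagemap.h side is not shown in this view, which is limited to 'mm'); they lean on the existing page_folio(), folio_trylock() and __folio_lock_or_retry() helpers.

/*
 * Illustrative sketch only -- not a hunk from this patch.
 * "Before": the page-based wrapper had to derive the folio itself,
 * i.e. a compound_head() lookup on every call.
 */
static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
				      unsigned int flags)
{
	struct folio *folio = page_folio(page);	/* compound_head() happens here */

	might_sleep();
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * "After": the caller passes the folio it already has, so the lookup
 * moves out of the helper entirely.
 */
static inline bool folio_lock_or_retry(struct folio *folio,
		struct mm_struct *mm, unsigned int flags)
{
	might_sleep();
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

Both mm/memory.c callers already have the folio in hand: remove_device_exclusive_entry() derives it once with page_folio(vmf->page), and do_swap_page() is already operating on a folio. That is how each call site drops one compound_head() call.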
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6e568f190e7a..d671ad367d67 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3618,11 +3618,11 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct vm_area_struct *vma = vmf->vma;
 	struct mmu_notifier_range range;
 
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
 		return VM_FAULT_RETRY;
 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
 				vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3632,10 +3632,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 				&vmf->ptl);
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
-		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
 
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(page);
+	folio_unlock(folio);
 	mmu_notifier_invalidate_range_end(&range);
 
 	return 0;
@@ -3835,7 +3835,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			goto out_release;
 		}
 
-	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
 
 	if (!locked) {
 		ret |= VM_FAULT_RETRY;