author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2014-04-03 14:48:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 16:21:04 -0700
commit		fb09a46425823604bf337d2c9999756f9b753cf1
tree		ec939244bce01290a294e95fc97ed5e6d7e661df /mm/memory.c
parent		f0c6d4d295e4ea9a47375304420baa38ca279542
download	linux-fb09a46425823604bf337d2c9999756f9b753cf1.tar.bz2
mm: consolidate code to call vm_ops->page_mkwrite()
There are two functions that need to call vm_ops->page_mkwrite():
do_shared_fault() and do_wp_page(). We can consolidate the preparation
code into a single helper, do_page_mkwrite().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
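For readers unfamiliar with this hook: ->page_mkwrite() is supplied by a
file's vm_operations_struct and is invoked before a read-only shared page is
made writable. The sketch below shows the filesystem side of the contract
that the new do_page_mkwrite() helper relies on; it is illustrative only —
example_fs_page_mkwrite and example_fs_vm_ops are hypothetical names, not
code from this commit. The handler signature matches the one in use at this
point in kernel history.

/*
 * Hypothetical ->page_mkwrite handler, for context only.
 * On success it returns VM_FAULT_LOCKED with the page locked, so
 * do_page_mkwrite() can skip its own lock_page()/->mapping recheck.
 */
static int example_fs_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* The page may have been truncated while the handler slept. */
	if (page->mapping != vma->vm_file->f_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct example_fs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_fs_page_mkwrite,
};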
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	105
1 file changed, 45 insertions(+), 60 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index d4320e42989d..ac3990d77ec9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2587,6 +2587,38 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 }
 
 /*
+ * Notify the address space that the page is about to become writable so that
+ * it can prohibit this or wait for the page to get into an appropriate state.
+ *
+ * We do this without the lock held, so that it can sleep if it needs to.
+ */
+static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+	       unsigned long address)
+{
+	struct vm_fault vmf;
+	int ret;
+
+	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+	vmf.pgoff = page->index;
+	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+	vmf.page = page;
+
+	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+		return ret;
+	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
+		lock_page(page);
+		if (!page->mapping) {
+			unlock_page(page);
+			return 0; /* retry */
+		}
+		ret |= VM_FAULT_LOCKED;
+	} else
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+	return ret;
+}
+
+/*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
  * and decrementing the shared-page counter for the old page.
@@ -2668,42 +2700,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * get_user_pages(.write=1, .force=1).
 		 */
 		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
-			struct vm_fault vmf;
 			int tmp;
-
-			vmf.virtual_address = (void __user *)(address &
-								PAGE_MASK);
-			vmf.pgoff = old_page->index;
-			vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
-			vmf.page = old_page;
-
-			/*
-			 * Notify the address space that the page is about to
-			 * become writable so that it can prohibit this or wait
-			 * for the page to get into an appropriate state.
-			 *
-			 * We do this without the lock held, so that it can
-			 * sleep if it needs to.
-			 */
 			page_cache_get(old_page);
 			pte_unmap_unlock(page_table, ptl);
-
-			tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
-			if (unlikely(tmp &
-					(VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
-				ret = tmp;
-				goto unwritable_page;
+			tmp = do_page_mkwrite(vma, old_page, address);
+			if (unlikely(!tmp || (tmp &
+					(VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
+				page_cache_release(old_page);
+				return tmp;
 			}
-			if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
-				lock_page(old_page);
-				if (!old_page->mapping) {
-					ret = 0; /* retry the fault */
-					unlock_page(old_page);
-					goto unwritable_page;
-				}
-			} else
-				VM_BUG_ON_PAGE(!PageLocked(old_page), old_page);
-
 			/*
 			 * Since we dropped the lock we need to revalidate
 			 * the PTE as someone else may have changed it.  If
@@ -2892,10 +2897,6 @@ oom:
 	if (old_page)
 		page_cache_release(old_page);
 	return VM_FAULT_OOM;
-
-unwritable_page:
-	page_cache_release(old_page);
-	return ret;
 }
 
 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
@@ -3419,7 +3420,6 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry, *pte;
 	int dirtied = 0;
-	struct vm_fault vmf;
 	int ret, tmp;
 
 	ret = __do_fault(vma, address, pgoff, flags, &fault_page);
@@ -3430,31 +3430,16 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Check if the backing address space wants to know that the page is
 	 * about to become writable
 	 */
-	if (!vma->vm_ops->page_mkwrite)
-		goto set_pte;
-
-	unlock_page(fault_page);
-	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
-	vmf.pgoff = pgoff;
-	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
-	vmf.page = fault_page;
-
-	tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
-	if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
-		page_cache_release(fault_page);
-		return tmp;
-	}
-
-	if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
-		lock_page(fault_page);
-		if (!fault_page->mapping) {
-			unlock_page(fault_page);
+	if (vma->vm_ops->page_mkwrite) {
+		unlock_page(fault_page);
+		tmp = do_page_mkwrite(vma, fault_page, address);
+		if (unlikely(!tmp ||
+				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
 			page_cache_release(fault_page);
-			return 0; /* retry */
+			return tmp;
 		}
-	} else
-		VM_BUG_ON_PAGE(!PageLocked(fault_page), fault_page);
-set_pte:
+	}
+
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (unlikely(!pte_same(*pte, orig_pte))) {
 		pte_unmap_unlock(pte, ptl);
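A note on the helper's return convention, which both converted call sites
above depend on: 0 means the page lost its ->mapping (it was truncated while
the page table lock was dropped) and the whole fault must be retried;
VM_FAULT_ERROR and VM_FAULT_NOPAGE bits are propagated to the caller; any
other return value guarantees VM_FAULT_LOCKED is set, i.e. the page comes
back locked. A minimal caller-side sketch, mirroring the pattern in the diff
(the wrapper function name is hypothetical, not part of the patch):

/* Hypothetical wrapper; the body mirrors both call sites in the diff. */
static int example_call_mkwrite(struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	int tmp;

	page_cache_get(page);	/* hold a reference across the call */
	tmp = do_page_mkwrite(vma, page, address);
	if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
		/* 0 asks us to retry the fault; error bits pass through. */
		page_cache_release(page);
		return tmp;
	}
	/* Success: the page is locked and VM_FAULT_LOCKED is set. */
	return tmp;
}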