author		Jan Kara <jack@suse.cz>	2016-12-14 15:07:39 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 16:04:09 -0800
commit		66a6197c118540d454913eef24d68d7491ab5d5f (patch)
tree		085d09ca04b45caa37228d5561a8fb1729be16cb /mm
parent		997dd98dd68beb2aea74cac53e7fd440cc8dba68 (diff)
download	linux-66a6197c118540d454913eef24d68d7491ab5d5f.tar.bz2
mm: provide helper for finishing mkwrite faults
Provide a helper function for finishing write faults due to the PTE being read-only. The helper will be used by DAX to avoid the need to complicate generic MM code with DAX locking specifics.

Link: http://lkml.kernel.org/r/1479460644-25076-16-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
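For illustration only (not part of this patch): a write-fault handler for a shared mapping that already holds its own protection against concurrent faults and writeback, such as the DAX radix tree entry lock, could hand the PTE work to the new helper roughly as sketched below. The handler name and the locking steps are hypothetical; only finish_mkwrite_fault() and its return convention come from the patch.

/*
 * Hypothetical caller sketch -- not from this series.  The entry
 * locking shown as comments stands in for whatever protection the
 * filesystem already holds (for DAX, the radix tree entry lock).
 */
static int example_mkwrite_handler(struct vm_fault *vmf)
{
	int ret;

	/* ... look up and lock the entry backing vmf->address ... */

	/*
	 * finish_mkwrite_fault() retakes the PTE lock, revalidates the
	 * PTE against vmf->orig_pte and makes it writable.  It returns
	 * VM_FAULT_WRITE on success, or 0 if the PTE changed meanwhile,
	 * in which case the fault is simply retried.
	 */
	ret = finish_mkwrite_fault(vmf);

	/* ... unlock the entry ... */
	return ret;
}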
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	67
1 file changed, 40 insertions, 27 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 82e7689e3059..bbc25da48a18 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2269,6 +2269,38 @@ oom:
return VM_FAULT_OOM;
}
+/**
+ * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
+ * writeable once the page is prepared
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a write page fault in a
+ * shared mapping due to PTE being read-only once the mapped page is prepared.
+ * It handles locking of PTE and modifying it. The function returns
+ * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
+ * lock.
+ *
+ * The function expects the page to be locked or other protection against
+ * concurrent faults / writeback (such as DAX radix tree locks).
+ */
+int finish_mkwrite_fault(struct vm_fault *vmf)
+{
+ WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
+ vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
+ &vmf->ptl);
+ /*
+ * We might have raced with another page fault while we released the
+ * pte_offset_map_lock.
+ */
+ if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ return 0;
+ }
+ wp_page_reuse(vmf);
+ return VM_FAULT_WRITE;
+}
+
/*
* Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
* mapping
@@ -2285,16 +2317,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
if (ret & VM_FAULT_ERROR)
return ret;
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
- /*
- * We might have raced with another page fault while we
- * released the pte_offset_map_lock.
- */
- if (!pte_same(*vmf->pte, vmf->orig_pte)) {
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- return 0;
- }
+ return finish_mkwrite_fault(vmf);
}
wp_page_reuse(vmf);
return VM_FAULT_WRITE;
@@ -2304,7 +2327,6 @@ static int wp_page_shared(struct vm_fault *vmf)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
- int page_mkwrite = 0;
get_page(vmf->page);
@@ -2318,26 +2340,17 @@ static int wp_page_shared(struct vm_fault *vmf)
put_page(vmf->page);
return tmp;
}
- /*
- * Since we dropped the lock we need to revalidate
- * the PTE as someone else may have changed it. If
- * they did, we just return, as we can count on the
- * MMU to tell us if they didn't also make it writable.
- */
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
- if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+ tmp = finish_mkwrite_fault(vmf);
+ if (unlikely(!tmp || (tmp &
+ (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
unlock_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
put_page(vmf->page);
- return 0;
+ return tmp;
}
- page_mkwrite = 1;
- }
-
- wp_page_reuse(vmf);
- if (!page_mkwrite)
+ } else {
+ wp_page_reuse(vmf);
lock_page(vmf->page);
+ }
fault_dirty_shared_page(vma, vmf->page);
put_page(vmf->page);