author    | Thomas Hellstrom <thellstrom@vmware.com> | 2019-10-01 11:17:34 +0200
committer | Thomas Hellstrom <thellstrom@vmware.com> | 2019-11-06 13:02:43 +0100
commit    | ecaad8aca20432fa60821282d8ff479629c9f7b9 (patch)
tree      | f654d60d4a7853d2f4f74d510906831c7eb099fb /mm/pagewalk.c
parent    | ace88f1018b88167a78bafd545d30816d6e207bf (diff)
download  | linux-ecaad8aca20432fa60821282d8ff479629c9f7b9.tar.bz2
mm: Add a walk_page_mapping() function to the pagewalk code
For users that want to traverse all page table entries pointing into a
region of a struct address_space mapping, introduce a walk_page_mapping()
function.

The walk_page_mapping() function will initially be used for dirty-tracking
in virtual graphics drivers.
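
For context, a minimal sketch of the intended call pattern, assuming the
declarations this series adds to <linux/pagewalk.h>: the caller supplies a
struct mm_walk_ops and holds the mapping's i_mmap_rwsem across the walk. The
names count_dirty_pte() and count_dirty_ptes() below are hypothetical and not
part of this patch.

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Hypothetical pte_entry callback: count dirty PTEs (illustration only). */
static int count_dirty_pte(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_dirty(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_dirty_ops = {
	.pte_entry = count_dirty_pte,
};

/* walk_page_mapping() asserts that i_mmap_rwsem is held. */
static unsigned long count_dirty_ptes(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	unsigned long count = 0;

	i_mmap_lock_read(mapping);
	walk_page_mapping(mapping, first_index, nr, &count_dirty_ops, &count);
	i_mmap_unlock_read(mapping);

	return count;
}
```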
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r-- | mm/pagewalk.c | 94
1 file changed, 93 insertions(+), 1 deletion(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index c5fa42cab14f..ea0b9e606ad1 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -254,13 +254,23 @@ static int __walk_page_range(unsigned long start, unsigned long end,
 {
 	int err = 0;
 	struct vm_area_struct *vma = walk->vma;
+	const struct mm_walk_ops *ops = walk->ops;
+
+	if (vma && ops->pre_vma) {
+		err = ops->pre_vma(start, end, walk);
+		if (err)
+			return err;
+	}
 
 	if (vma && is_vm_hugetlb_page(vma)) {
-		if (walk->ops->hugetlb_entry)
+		if (ops->hugetlb_entry)
 			err = walk_hugetlb_range(start, end, walk);
 	} else
 		err = walk_pgd_range(start, end, walk);
 
+	if (vma && ops->post_vma)
+		ops->post_vma(walk);
+
 	return err;
 }
 
@@ -291,6 +301,11 @@ static int __walk_page_range(unsigned long start, unsigned long end,
  * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
  * purpose.
  *
+ * If operations need to be staged before and committed after a vma is walked,
+ * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
+ * since it is intended to handle commit-type operations, can't return any
+ * errors.
+ *
  * struct mm_walk keeps current values of some common data like vma and pmd,
  * which are useful for the access from callbacks. If you want to pass some
  * caller-specific data to callbacks, @private should be helpful.
@@ -377,3 +392,80 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
 		return err;
 	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
 }
+
+/**
+ * walk_page_mapping - walk all memory areas mapped into a struct address_space.
+ * @mapping: Pointer to the struct address_space
+ * @first_index: First page offset in the address_space
+ * @nr: Number of incremental page offsets to cover
+ * @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
+ *
+ * This function walks all memory areas mapped into a struct address_space.
+ * The walk is limited to only the given page-size index range, but if
+ * the index boundaries cross a huge page-table entry, that entry will be
+ * included.
+ *
+ * Also see walk_page_range() for additional information.
+ *
+ * Locking:
+ *   This function can't require that the struct mm_struct::mmap_sem is held,
+ *   since @mapping may be mapped by multiple processes. Instead
+ *   @mapping->i_mmap_rwsem must be held. This might have implications in the
+ *   callbacks, and it's up to the caller to ensure that the
+ *   struct mm_struct::mmap_sem is not needed.
+ *
+ *   Also this means that a caller can't rely on the struct
+ *   vm_area_struct::vm_flags to be constant across a call,
+ *   except for immutable flags. Callers requiring this shouldn't use
+ *   this function.
+ *
+ * Return: 0 on success, negative error code on failure, positive number on
+ * caller defined premature termination.
+ */
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+		      pgoff_t nr, const struct mm_walk_ops *ops,
+		      void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.private	= private,
+	};
+	struct vm_area_struct *vma;
+	pgoff_t vba, vea, cba, cea;
+	unsigned long start_addr, end_addr;
+	int err = 0;
+
+	lockdep_assert_held(&mapping->i_mmap_rwsem);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
+				  first_index + nr - 1) {
+		/* Clip to the vma */
+		vba = vma->vm_pgoff;
+		vea = vba + vma_pages(vma);
+		cba = first_index;
+		cba = max(cba, vba);
+		cea = first_index + nr;
+		cea = min(cea, vea);
+
+		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
+		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
+		if (start_addr >= end_addr)
+			continue;
+
+		walk.vma = vma;
+		walk.mm = vma->vm_mm;
+
+		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
+		if (err > 0) {
+			err = 0;
+			break;
+		} else if (err < 0)
+			break;
+
+		err = __walk_page_range(start_addr, end_addr, &walk);
+		if (err)
+			break;
+	}
+
+	return err;
+}
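
To see the index-clipping arithmetic in walk_page_mapping() concretely, here
is a standalone userspace program that mirrors the vba/vea/cba/cea computation
above. All values in it (4 KiB pages, the vma layout, the index range) are
made-up assumptions for illustration only.

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL	/* assume 4 KiB pages */

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical vma: maps file pages 10..29 starting at vm_start. */
	unsigned long vm_pgoff = 10, nr_vma_pages = 20;
	unsigned long vm_start = 0x700000000000UL;
	/* Requested walk: file pages 5..14 (first_index = 5, nr = 10). */
	unsigned long first_index = 5, nr = 10;

	unsigned long vba = vm_pgoff;			/* vma begin, in pages */
	unsigned long vea = vba + nr_vma_pages;		/* vma end, in pages   */
	unsigned long cba = max_ul(first_index, vba);	/* clipped begin: 10   */
	unsigned long cea = min_ul(first_index + nr, vea); /* clipped end: 15  */

	unsigned long start_addr = ((cba - vba) << PAGE_SHIFT) + vm_start;
	unsigned long end_addr = ((cea - vba) << PAGE_SHIFT) + vm_start;

	/* Prints: walk [0x700000000000, 0x700000005000): file pages 10..14 */
	printf("walk [%#lx, %#lx)\n", start_addr, end_addr);
	return 0;
}
```

The interval-tree iteration only returns vmas that overlap the requested
range, so only the overlap is walked; the start_addr >= end_addr test in the
kernel loop guards the degenerate empty-clip case.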
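
The pre_vma()/post_vma() pair added to __walk_page_range() above supports
stage-then-commit patterns. A minimal sketch of such an ops structure follows,
using the callback signatures this patch introduces; the stage_* names are
hypothetical, and the comments restate the contract from the kernel-doc above
(post_vma() cannot fail).

```c
#include <linux/pagewalk.h>

static int stage_pre_vma(unsigned long start, unsigned long end,
			 struct mm_walk *walk)
{
	/*
	 * Stage work for [start, end): e.g. prepare resources recorded
	 * in walk->private. A nonzero return aborts the walk of this
	 * range before any entry callbacks run.
	 */
	return 0;
}

static int stage_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	/* Per-PTE work; record what needs committing in walk->private. */
	return 0;
}

static void stage_post_vma(struct mm_walk *walk)
{
	/*
	 * Commit step, e.g. flush TLBs for whatever the entry callbacks
	 * staged. Returns void: commit-type operations can't fail.
	 */
}

static const struct mm_walk_ops stage_ops = {
	.pte_entry = stage_pte_entry,
	.pre_vma   = stage_pre_vma,
	.post_vma  = stage_post_vma,
};
```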