Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	29
1 file changed, 8 insertions, 21 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 6ec2cea9926b..2d7c9a2e0118 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -274,11 +274,10 @@ isolate_freepages_range(struct compact_control *cc,
 int
 isolate_migratepages_range(struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn);
+#endif
 int find_suitable_fallback(struct free_area *area, unsigned int order,
 			int migratetype, bool only_stealable, bool *can_steal);
 
-#endif
-
 /*
  * This function returns the order of a free page in the buddy system. In
  * general, page_zone(page)->lock must be held by the caller to prevent the
@@ -344,7 +343,10 @@ void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
 extern long populate_vma_page_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *nonblocking);
+		unsigned long start, unsigned long end, int *locked);
+extern long faultin_vma_page_range(struct vm_area_struct *vma,
+				    unsigned long start, unsigned long end,
+				    bool write, int *locked);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end);
 static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
@@ -369,23 +371,6 @@ extern unsigned int munlock_vma_page(struct page *page);
  */
 extern void clear_page_mlock(struct page *page);
 
-/*
- * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
- * (because that does not go through the full procedure of migration ptes):
- * to migrate the Mlocked page flag; update statistics.
- */
-static inline void mlock_migrate_page(struct page *newpage, struct page *page)
-{
-	if (TestClearPageMlocked(page)) {
-		int nr_pages = thp_nr_pages(page);
-
-		/* Holding pmd lock, no change in irq context: __mod is safe */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-		SetPageMlocked(newpage);
-		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
-	}
-}
-
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /*
@@ -461,7 +446,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
-static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
@@ -672,4 +656,7 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 void vunmap_range_noflush(unsigned long start, unsigned long end);
 
+int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+		      unsigned long addr, int page_nid, int *flags);
+
 #endif	/* __MM_INTERNAL_H */
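The hunk at @@ -344,7 +343,10 @@ declares the new faultin_vma_page_range() helper next to populate_vma_page_range(), whose last parameter is renamed to *locked. The sketch below is illustrative only and not part of this change: it shows how a caller that already holds mmap_lock for read might invoke the new prototype. The wrapper name faultin_range_sketch() is hypothetical, and the assumption that *locked is cleared when the callee drops mmap_lock mirrors the populate_vma_page_range() convention rather than anything stated in this diff.

/*
 * Illustrative sketch (not from this diff): drive faultin_vma_page_range()
 * over [start, end) of one VMA. Assumes the caller holds mmap_lock for read
 * and that the callee clears *locked if it had to drop that lock.
 */
static long faultin_range_sketch(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 bool write)
{
	int locked = 1;
	long faulted;

	faulted = faultin_vma_page_range(vma, start, end, write, &locked);
	if (!locked) {
		/* Callee dropped mmap_lock; re-take it before returning. */
		mmap_read_lock(vma->vm_mm);
	}
	return faulted;
}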