From 9a46b04f16a032c26bbf0ece61d6cd1e7ba9f627 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Tue, 26 Jul 2016 15:21:53 -0700 Subject: fs/fs-writeback.c: inode writeback list tracking tracepoints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The per-sb inode writeback list tracks inodes currently under writeback to facilitate efficient sync processing. In particular, it ensures that sync only needs to walk through a list of inodes that were cleaned by the sync. Add a couple tracepoints to help identify when inodes are added/removed to and from the writeback lists. Piggyback off of the writeback lazytime tracepoint template as it already tracks the relevant inode information. Link: http://lkml.kernel.org/r/1466594593-6757-3-git-send-email-bfoster@redhat.com Signed-off-by: Brian Foster Reviewed-by: Jan Kara Cc: Dave Chinner cc: Josef Bacik Cc: Holger Hoffstätte Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/writeback.h | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'include/trace/events') diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 73614ce1d204..531f5811ff6b 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -696,7 +696,7 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, TP_ARGS(inode, wbc, nr_to_write) ); -DECLARE_EVENT_CLASS(writeback_lazytime_template, +DECLARE_EVENT_CLASS(writeback_inode_template, TP_PROTO(struct inode *inode), TP_ARGS(inode), @@ -723,25 +723,39 @@ DECLARE_EVENT_CLASS(writeback_lazytime_template, show_inode_state(__entry->state), __entry->mode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, +DEFINE_EVENT(writeback_inode_template, writeback_lazytime, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, +DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, +DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); +/* + * Inode writeback list tracking. + */ + +DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback, + TP_PROTO(struct inode *inode), + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback, + TP_PROTO(struct inode *inode), + TP_ARGS(inode) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ -- cgit v1.2.3 From 70652f6ec0566ae6b4147d88c6d043c68484227f Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:24:59 -0700 Subject: mm: make optimistic check for swapin readahead Introduce a new sysfs integer knob /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap which makes optimistic check for swapin readahead to increase thp collapse rate. Before getting swapped out pages to memory, checks them and allows up to a certain number. It also prints out using tracepoints amount of unmapped ptes. [vdavydov@parallels.com: fix scan not aborted on SCAN_EXCEED_SWAP_PTE] [sfr@canb.auug.org.au: build fix] Link: http://lkml.kernel.org/r/20160616154503.65806e12@canb.auug.org.au Signed-off-by: Ebru Akagunduz Acked-by: Rik van Riel Cc: Kirill A. 
Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Xie XiuQi Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 14 +++++++----- mm/huge_memory.c | 45 +++++++++++++++++++++++++++++++++++--- 2 files changed, 51 insertions(+), 8 deletions(-) (limited to 'include/trace/events') diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 551ba4acde4d..fad6539c9d68 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -28,7 +28,8 @@ EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ - EMe( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") + EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ + EMe( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") #undef EM #undef EMe @@ -45,9 +46,9 @@ SCAN_STATUS TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, - bool referenced, int none_or_zero, int status), + bool referenced, int none_or_zero, int status, int unmapped), - TP_ARGS(mm, page, writable, referenced, none_or_zero, status), + TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -56,6 +57,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __field(bool, referenced) __field(int, none_or_zero) __field(int, status) + __field(int, unmapped) ), TP_fast_assign( @@ -65,15 +67,17 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __entry->referenced = referenced; __entry->none_or_zero = none_or_zero; __entry->status = status; + __entry->unmapped = unmapped; ), - TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s", + TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d", __entry->mm, __entry->pfn, __entry->writable, __entry->referenced, __entry->none_or_zero, - __print_symbolic(__entry->status, SCAN_STATUS)) + __print_symbolic(__entry->status, SCAN_STATUS), + __entry->unmapped) ); TRACE_EVENT(mm_collapse_huge_page, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 23d1bf42fef1..ed474483a620 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -57,7 +57,8 @@ enum scan_result { SCAN_SWAP_CACHE_PAGE, SCAN_DEL_PAGE_LRU, SCAN_ALLOC_HUGE_PAGE_FAIL, - SCAN_CGROUP_CHARGE_FAIL + SCAN_CGROUP_CHARGE_FAIL, + SCAN_EXCEED_SWAP_PTE }; #define CREATE_TRACE_POINTS @@ -100,6 +101,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); * fault. 
*/ static unsigned int khugepaged_max_ptes_none __read_mostly; +static unsigned int khugepaged_max_ptes_swap __read_mostly; static int khugepaged(void *none); static int khugepaged_slab_init(void); @@ -598,6 +600,33 @@ static struct kobj_attribute khugepaged_max_ptes_none_attr = __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, khugepaged_max_ptes_none_store); +static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); +} + +static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long max_ptes_swap; + + err = kstrtoul(buf, 10, &max_ptes_swap); + if (err || max_ptes_swap > HPAGE_PMD_NR-1) + return -EINVAL; + + khugepaged_max_ptes_swap = max_ptes_swap; + + return count; +} + +static struct kobj_attribute khugepaged_max_ptes_swap_attr = + __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, + khugepaged_max_ptes_swap_store); + static struct attribute *khugepaged_attr[] = { &khugepaged_defrag_attr.attr, &khugepaged_max_ptes_none_attr.attr, @@ -606,6 +635,7 @@ static struct attribute *khugepaged_attr[] = { &full_scans_attr.attr, &scan_sleep_millisecs_attr.attr, &alloc_sleep_millisecs_attr.attr, + &khugepaged_max_ptes_swap_attr.attr, NULL, }; @@ -674,6 +704,7 @@ static int __init hugepage_init(void) khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; + khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; /* * hugepages can't be allocated by the buddy allocator */ @@ -2507,7 +2538,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct page *page = NULL; unsigned long _address; spinlock_t *ptl; - int node = NUMA_NO_NODE; + int node = NUMA_NO_NODE, unmapped = 0; bool writable = false, referenced = false; VM_BUG_ON(address & ~HPAGE_PMD_MASK); @@ -2523,6 +2554,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; + if (is_swap_pte(pteval)) { + if (++unmapped <= khugepaged_max_ptes_swap) { + continue; + } else { + result = SCAN_EXCEED_SWAP_PTE; + goto out_unmap; + } + } if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { if (!userfaultfd_armed(vma) && ++none_or_zero <= khugepaged_max_ptes_none) { @@ -2609,7 +2648,7 @@ out_unmap: } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, - none_or_zero, result); + none_or_zero, result, unmapped); return ret; } -- cgit v1.2.3 From 8a966ed746d63c8103d496da85973eeeec01d77f Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:25:03 -0700 Subject: mm: make swapin readahead to improve thp collapse rate This patch makes swapin readahead to improve thp collapse rate. When khugepaged scanned pages, there can be a few of the pages in swap area. With the patch THP can collapse 4kB pages into a THP when there are up to max_ptes_swap swap ptes in a 2MB range. The patch was tested with a test program that allocates 400B of memory, writes to it, and then sleeps. I force the system to swap out all. Afterwards, the test program touches the area by writing, it skips a page in each 20 pages of the area. Without the patch, system did not swap in readahead. THP rate was %65 of the program of the memory, it did not change over time. With this patch, after 10 minutes of waiting khugepaged had collapsed %99 of the program's memory. 
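For illustration, here is a minimal sketch of a test program along the lines described above. The mapping size, the sleep window used to let the area be swapped out, and the MADV_HUGEPAGE hint are assumptions of this sketch, not details of the original benchmark; the touch pattern (skip one page in every 20) follows the description in the message.

/*
 * Illustrative sketch only: dirty a large anonymous area so it can be
 * swapped out, then touch it again while skipping one page in each 20,
 * leaving a few swap ptes in every 2MB range for khugepaged to swap in
 * before collapsing.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t size = 400UL << 20;	/* assumed size; pick one that forces swap-out */
	const long page = sysconf(_SC_PAGESIZE);
	char *area;
	size_t off;

	area = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(area, size, MADV_HUGEPAGE);	/* make the area eligible for khugepaged */

	memset(area, 0x5a, size);		/* write to the whole area */
	sleep(600);				/* swap it all out during this window */

	/* touch the area again, skipping one page in each 20 pages */
	for (off = 0; off < size; off += page)
		if ((off / page) % 20 != 19)
			area[off] = 0x5a;

	pause();	/* leave time for khugepaged to collapse the area */
	return 0;
}
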
[kirill.shutemov@linux.intel.com: trivial cleanup of exit path of the function] [kirill.shutemov@linux.intel.com: __collapse_huge_page_swapin(): drop unused 'pte' parameter] [kirill.shutemov@linux.intel.com: do not hold anon_vma lock during swap in] Signed-off-by: Ebru Akagunduz Acked-by: Rik van Riel Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Xie XiuQi Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 24 +++++++++++++++++++++ mm/huge_memory.c | 43 +++++++++++++++++++++++++++++++++++--- mm/internal.h | 4 ++++ mm/memory.c | 2 +- 4 files changed, 69 insertions(+), 4 deletions(-) (limited to 'include/trace/events') diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index fad6539c9d68..bda21183eb05 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -135,5 +135,29 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, __print_symbolic(__entry->status, SCAN_STATUS)) ); +TRACE_EVENT(mm_collapse_huge_page_swapin, + + TP_PROTO(struct mm_struct *mm, int swapped_in, int ret), + + TP_ARGS(mm, swapped_in, ret), + + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(int, swapped_in) + __field(int, ret) + ), + + TP_fast_assign( + __entry->mm = mm; + __entry->swapped_in = swapped_in; + __entry->ret = ret; + ), + + TP_printk("mm=%p, swapped_in=%d, ret=%d", + __entry->mm, + __entry->swapped_in, + __entry->ret) +); + #endif /* __HUGE_MEMORY_H */ #include diff --git a/mm/huge_memory.c b/mm/huge_memory.c index ed474483a620..b11351579e7a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2373,6 +2373,44 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return !(vma->vm_flags & VM_NO_THP); } +/* + * Bring missing pages in from swap, to complete THP collapse. + * Only done if khugepaged_scan_pmd believes it is worthwhile. + * + * Called and returns without pte mapped or spinlocks held, + * but with mmap_sem held to protect against vma changes. 
+ */ + +static void __collapse_huge_page_swapin(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd) +{ + unsigned long _address; + pte_t *pte, pteval; + int swapped_in = 0, ret = 0; + + pte = pte_offset_map(pmd, address); + for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE; + pte++, _address += PAGE_SIZE) { + pteval = *pte; + if (!is_swap_pte(pteval)) + continue; + swapped_in++; + ret = do_swap_page(mm, vma, _address, pte, pmd, + FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, + pteval); + if (ret & VM_FAULT_ERROR) { + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); + return; + } + /* pte is unmapped now, we need to map it */ + pte = pte_offset_map(pmd, _address); + } + pte--; + pte_unmap(pte); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); +} + static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, @@ -2440,6 +2478,8 @@ static void collapse_huge_page(struct mm_struct *mm, goto out; } + __collapse_huge_page_swapin(mm, vma, address, pmd); + anon_vma_lock_write(vma->anon_vma); pte = pte_offset_map(pmd, address); @@ -2516,9 +2556,6 @@ static void collapse_huge_page(struct mm_struct *mm, result = SCAN_SUCCEED; out_up_write: up_write(&mm->mmap_sem); - trace_mm_collapse_huge_page(mm, isolated, result); - return; - out_nolock: trace_mm_collapse_huge_page(mm, isolated, result); return; diff --git a/mm/internal.h b/mm/internal.h index fbfba0cc2c35..e1531758122b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,6 +36,10 @@ /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) +extern int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags, pte_t orig_pte); + void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/memory.c b/mm/memory.c index a329149e1c54..5e6eadd127e7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2522,7 +2522,7 @@ EXPORT_SYMBOL(unmap_mapping_range); * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). */ -static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) { -- cgit v1.2.3 From f3f0e1d2150b2b99da2cbdfaad000089efe9bf30 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:32 -0700 Subject: khugepaged: add support of collapse for tmpfs/shmem pages This patch extends khugepaged to support collapse of tmpfs/shmem pages. We share fair amount of infrastructure with anon-THP collapse. Few design points: - First we are looking for VMA which can be suitable for mapping huge page; - If the VMA maps shmem file, the rest scan/collapse operations operates on page cache, not on page tables as in anon VMA case. - khugepaged_scan_shmem() finds a range which is suitable for huge page. The scan is lockless and shouldn't disturb system too much. - once the candidate for collapse is found, collapse_shmem() attempts to create a huge page: + scan over radix tree, making the range point to new huge page; + new huge page is not-uptodate, locked and freezed (refcount is 0), so nobody can touch them until we say so. + we swap in pages during the scan. 
khugepaged_scan_shmem() filters out ranges with more than khugepaged_max_ptes_swap swapped out pages. It's HPAGE_PMD_NR/8 by default. + old pages are isolated, unmapped and put to local list in case to be restored back if collapse failed. - if collapse succeed, we retract pte page tables from VMAs where huge pages mapping is possible. The huge page will be mapped as PMD on next minor fault into the range. Link: http://lkml.kernel.org/r/1466021202-61880-35-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 23 ++ include/trace/events/huge_memory.h | 3 +- mm/khugepaged.c | 435 ++++++++++++++++++++++++++++++++++++- mm/shmem.c | 56 ++++- 4 files changed, 500 insertions(+), 17 deletions(-) (limited to 'include/trace/events') diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 94eaaa2c6ad9..0890f700a546 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -54,6 +54,7 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); +extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -64,6 +65,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); +/* Flag allocation requirements to shmem_getpage */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ + SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ + SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ +}; + +extern int shmem_getpage(struct inode *inode, pgoff_t index, + struct page **pagep, enum sgp_type sgp); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { @@ -71,6 +85,15 @@ static inline struct page *shmem_read_mapping_page( mapping_gfp_mask(mapping)); } +static inline bool shmem_file(struct file *file) +{ + if (!IS_ENABLED(CONFIG_SHMEM)) + return false; + if (!file || !file->f_mapping) + return false; + return shmem_mapping(file->f_mapping); +} + extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index bda21183eb05..830d47d5ca41 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -29,7 +29,8 @@ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ - EMe( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") + EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ + EMe(SCAN_TRUNCATED, "truncated") \ #undef EM #undef EMe diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 639047cc6ea7..573e4366d3b9 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -14,6 +14,7 @@ #include #include #include +#include #include 
#include @@ -42,7 +43,8 @@ enum scan_result { SCAN_DEL_PAGE_LRU, SCAN_ALLOC_HUGE_PAGE_FAIL, SCAN_CGROUP_CHARGE_FAIL, - SCAN_EXCEED_SWAP_PTE + SCAN_EXCEED_SWAP_PTE, + SCAN_TRUNCATED, }; #define CREATE_TRACE_POINTS @@ -294,7 +296,7 @@ struct attribute_group khugepaged_attr_group = { .name = "khugepaged", }; -#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) +#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) @@ -816,6 +818,10 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) return false; + if (shmem_file(vma->vm_file)) { + return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, + HPAGE_PMD_NR); + } if (!vma->anon_vma || vma->vm_ops) return false; if (is_vma_temporary_stack(vma)) @@ -1216,6 +1222,412 @@ static void collect_mm_slot(struct mm_slot *mm_slot) } } +#ifdef CONFIG_SHMEM +static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) +{ + struct vm_area_struct *vma; + unsigned long addr; + pmd_t *pmd, _pmd; + + i_mmap_lock_write(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { + /* probably overkill */ + if (vma->anon_vma) + continue; + addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + if (addr & ~HPAGE_PMD_MASK) + continue; + if (vma->vm_end < addr + HPAGE_PMD_SIZE) + continue; + pmd = mm_find_pmd(vma->vm_mm, addr); + if (!pmd) + continue; + /* + * We need exclusive mmap_sem to retract page table. + * If trylock fails we would end up with pte-mapped THP after + * re-fault. Not ideal, but it's more important to not disturb + * the system too much. + */ + if (down_write_trylock(&vma->vm_mm->mmap_sem)) { + spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + up_write(&vma->vm_mm->mmap_sem); + atomic_long_dec(&vma->vm_mm->nr_ptes); + pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + } + } + i_mmap_unlock_write(mapping); +} + +/** + * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 
+ * + * Basic scheme is simple, details are more complex: + * - allocate and freeze a new huge page; + * - scan over radix tree replacing old pages the new one + * + swap in pages if necessary; + * + fill in gaps; + * + keep old pages around in case if rollback is required; + * - if replacing succeed: + * + copy data over; + * + free old pages; + * + unfreeze huge page; + * - if replacing failed; + * + put all pages back and unfreeze them; + * + restore gaps in the radix-tree; + * + free huge page; + */ +static void collapse_shmem(struct mm_struct *mm, + struct address_space *mapping, pgoff_t start, + struct page **hpage, int node) +{ + gfp_t gfp; + struct page *page, *new_page, *tmp; + struct mem_cgroup *memcg; + pgoff_t index, end = start + HPAGE_PMD_NR; + LIST_HEAD(pagelist); + struct radix_tree_iter iter; + void **slot; + int nr_none = 0, result = SCAN_SUCCEED; + + VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); + + /* Only allocate from the target node */ + gfp = alloc_hugepage_khugepaged_gfpmask() | + __GFP_OTHER_NODE | __GFP_THISNODE; + + new_page = khugepaged_alloc_page(hpage, gfp, node); + if (!new_page) { + result = SCAN_ALLOC_HUGE_PAGE_FAIL; + goto out; + } + + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { + result = SCAN_CGROUP_CHARGE_FAIL; + goto out; + } + + new_page->index = start; + new_page->mapping = mapping; + __SetPageSwapBacked(new_page); + __SetPageLocked(new_page); + BUG_ON(!page_ref_freeze(new_page, 1)); + + + /* + * At this point the new_page is 'frozen' (page_count() is zero), locked + * and not up-to-date. It's safe to insert it into radix tree, because + * nobody would be able to map it or use it in other way until we + * unfreeze it. + */ + + index = start; + spin_lock_irq(&mapping->tree_lock); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { + int n = min(iter.index, end) - index; + + /* + * Handle holes in the radix tree: charge it from shmem and + * insert relevant subpage of new_page into the radix-tree. + */ + if (n && !shmem_charge(mapping->host, n)) { + result = SCAN_FAIL; + break; + } + nr_none += n; + for (; index < min(iter.index, end); index++) { + radix_tree_insert(&mapping->page_tree, index, + new_page + (index % HPAGE_PMD_NR)); + } + + /* We are done. */ + if (index >= end) + break; + + page = radix_tree_deref_slot_protected(slot, + &mapping->tree_lock); + if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) { + spin_unlock_irq(&mapping->tree_lock); + /* swap in or instantiate fallocated page */ + if (shmem_getpage(mapping->host, index, &page, + SGP_NOHUGE)) { + result = SCAN_FAIL; + goto tree_unlocked; + } + spin_lock_irq(&mapping->tree_lock); + } else if (trylock_page(page)) { + get_page(page); + } else { + result = SCAN_PAGE_LOCK; + break; + } + + /* + * The page must be locked, so we can drop the tree_lock + * without racing with truncate. 
+ */ + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageUptodate(page), page); + VM_BUG_ON_PAGE(PageTransCompound(page), page); + + if (page_mapping(page) != mapping) { + result = SCAN_TRUNCATED; + goto out_unlock; + } + spin_unlock_irq(&mapping->tree_lock); + + if (isolate_lru_page(page)) { + result = SCAN_DEL_PAGE_LRU; + goto out_isolate_failed; + } + + if (page_mapped(page)) + unmap_mapping_range(mapping, index << PAGE_SHIFT, + PAGE_SIZE, 0); + + spin_lock_irq(&mapping->tree_lock); + + VM_BUG_ON_PAGE(page_mapped(page), page); + + /* + * The page is expected to have page_count() == 3: + * - we hold a pin on it; + * - one reference from radix tree; + * - one from isolate_lru_page; + */ + if (!page_ref_freeze(page, 3)) { + result = SCAN_PAGE_COUNT; + goto out_lru; + } + + /* + * Add the page to the list to be able to undo the collapse if + * something go wrong. + */ + list_add_tail(&page->lru, &pagelist); + + /* Finally, replace with the new page. */ + radix_tree_replace_slot(slot, + new_page + (index % HPAGE_PMD_NR)); + + index++; + continue; +out_lru: + spin_unlock_irq(&mapping->tree_lock); + putback_lru_page(page); +out_isolate_failed: + unlock_page(page); + put_page(page); + goto tree_unlocked; +out_unlock: + unlock_page(page); + put_page(page); + break; + } + + /* + * Handle hole in radix tree at the end of the range. + * This code only triggers if there's nothing in radix tree + * beyond 'end'. + */ + if (result == SCAN_SUCCEED && index < end) { + int n = end - index; + + if (!shmem_charge(mapping->host, n)) { + result = SCAN_FAIL; + goto tree_locked; + } + + for (; index < end; index++) { + radix_tree_insert(&mapping->page_tree, index, + new_page + (index % HPAGE_PMD_NR)); + } + nr_none += n; + } + +tree_locked: + spin_unlock_irq(&mapping->tree_lock); +tree_unlocked: + + if (result == SCAN_SUCCEED) { + unsigned long flags; + struct zone *zone = page_zone(new_page); + + /* + * Replacing old pages with new one has succeed, now we need to + * copy the content and free old pages. + */ + list_for_each_entry_safe(page, tmp, &pagelist, lru) { + copy_highpage(new_page + (page->index % HPAGE_PMD_NR), + page); + list_del(&page->lru); + unlock_page(page); + page_ref_unfreeze(page, 1); + page->mapping = NULL; + ClearPageActive(page); + ClearPageUnevictable(page); + put_page(page); + } + + local_irq_save(flags); + __inc_zone_page_state(new_page, NR_SHMEM_THPS); + if (nr_none) { + __mod_zone_page_state(zone, NR_FILE_PAGES, nr_none); + __mod_zone_page_state(zone, NR_SHMEM, nr_none); + } + local_irq_restore(flags); + + /* + * Remove pte page tables, so we can re-faulti + * the page as huge. 
+ */ + retract_page_tables(mapping, start); + + /* Everything is ready, let's unfreeze the new_page */ + set_page_dirty(new_page); + SetPageUptodate(new_page); + page_ref_unfreeze(new_page, HPAGE_PMD_NR); + mem_cgroup_commit_charge(new_page, memcg, false, true); + lru_cache_add_anon(new_page); + unlock_page(new_page); + + *hpage = NULL; + } else { + /* Something went wrong: rollback changes to the radix-tree */ + shmem_uncharge(mapping->host, nr_none); + spin_lock_irq(&mapping->tree_lock); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, + start) { + if (iter.index >= end) + break; + page = list_first_entry_or_null(&pagelist, + struct page, lru); + if (!page || iter.index < page->index) { + if (!nr_none) + break; + /* Put holes back where they were */ + radix_tree_replace_slot(slot, NULL); + nr_none--; + continue; + } + + VM_BUG_ON_PAGE(page->index != iter.index, page); + + /* Unfreeze the page. */ + list_del(&page->lru); + page_ref_unfreeze(page, 2); + radix_tree_replace_slot(slot, page); + spin_unlock_irq(&mapping->tree_lock); + putback_lru_page(page); + unlock_page(page); + spin_lock_irq(&mapping->tree_lock); + } + VM_BUG_ON(nr_none); + spin_unlock_irq(&mapping->tree_lock); + + /* Unfreeze new_page, caller would take care about freeing it */ + page_ref_unfreeze(new_page, 1); + mem_cgroup_cancel_charge(new_page, memcg, true); + unlock_page(new_page); + new_page->mapping = NULL; + } +out: + VM_BUG_ON(!list_empty(&pagelist)); + /* TODO: tracepoints */ +} + +static void khugepaged_scan_shmem(struct mm_struct *mm, + struct address_space *mapping, + pgoff_t start, struct page **hpage) +{ + struct page *page = NULL; + struct radix_tree_iter iter; + void **slot; + int present, swap; + int node = NUMA_NO_NODE; + int result = SCAN_SUCCEED; + + present = 0; + swap = 0; + memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); + rcu_read_lock(); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { + if (iter.index >= start + HPAGE_PMD_NR) + break; + + page = radix_tree_deref_slot(slot); + if (radix_tree_deref_retry(page)) { + slot = radix_tree_iter_retry(&iter); + continue; + } + + if (radix_tree_exception(page)) { + if (++swap > khugepaged_max_ptes_swap) { + result = SCAN_EXCEED_SWAP_PTE; + break; + } + continue; + } + + if (PageTransCompound(page)) { + result = SCAN_PAGE_COMPOUND; + break; + } + + node = page_to_nid(page); + if (khugepaged_scan_abort(node)) { + result = SCAN_SCAN_ABORT; + break; + } + khugepaged_node_load[node]++; + + if (!PageLRU(page)) { + result = SCAN_PAGE_LRU; + break; + } + + if (page_count(page) != 1 + page_mapcount(page)) { + result = SCAN_PAGE_COUNT; + break; + } + + /* + * We probably should check if the page is referenced here, but + * nobody would transfer pte_young() to PageReferenced() for us. + * And rmap walk here is just too costly... 
+ */ + + present++; + + if (need_resched()) { + cond_resched_rcu(); + slot = radix_tree_iter_next(&iter); + } + } + rcu_read_unlock(); + + if (result == SCAN_SUCCEED) { + if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { + result = SCAN_EXCEED_NONE_PTE; + } else { + node = khugepaged_find_target_node(); + collapse_shmem(mm, mapping, start, hpage, node); + } + } + + /* TODO: tracepoints */ +} +#else +static void khugepaged_scan_shmem(struct mm_struct *mm, + struct address_space *mapping, + pgoff_t start, struct page **hpage) +{ + BUILD_BUG(); +} +#endif + static unsigned int khugepaged_scan_mm_slot(unsigned int pages, struct page **hpage) __releases(&khugepaged_mm_lock) @@ -1269,6 +1681,8 @@ skip: if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) + goto skip; while (khugepaged_scan.address < hend) { int ret; @@ -1279,9 +1693,20 @@ skip: VM_BUG_ON(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); - ret = khugepaged_scan_pmd(mm, vma, - khugepaged_scan.address, - hpage); + if (shmem_file(vma->vm_file)) { + struct file *file = get_file(vma->vm_file); + pgoff_t pgoff = linear_page_index(vma, + khugepaged_scan.address); + up_read(&mm->mmap_sem); + ret = 1; + khugepaged_scan_shmem(mm, file->f_mapping, + pgoff, hpage); + fput(file); + } else { + ret = khugepaged_scan_pmd(mm, vma, + khugepaged_scan.address, + hpage); + } /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; diff --git a/mm/shmem.c b/mm/shmem.c index 03eb915c82eb..905c0e1cf5af 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -32,6 +32,7 @@ #include #include #include +#include static struct vfsmount *shm_mnt; @@ -97,16 +98,6 @@ struct shmem_falloc { pgoff_t nr_unswapped; /* how often writepage refused to swap out */ }; -/* Flag allocation requirements to shmem_getpage */ -enum sgp_type { - SGP_READ, /* don't exceed i_size, don't allocate page */ - SGP_CACHE, /* don't exceed i_size, may allocate page */ - SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ - SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ - SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ - SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ -}; - #ifdef CONFIG_TMPFS static unsigned long shmem_default_max_blocks(void) { @@ -126,7 +117,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp, gfp_t gfp, struct mm_struct *fault_mm, int *fault_type); -static inline int shmem_getpage(struct inode *inode, pgoff_t index, +int shmem_getpage(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp) { return shmem_getpage_gfp(inode, index, pagep, sgp, @@ -1899,6 +1890,11 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &shmem_vm_ops; + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < + (vma->vm_end & HPAGE_PMD_MASK)) { + khugepaged_enter(vma, vma->vm_flags); + } return 0; } @@ -3803,6 +3799,37 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute shmem_enabled_attr = __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); + +bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + struct inode *inode = file_inode(vma->vm_file); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + loff_t i_size; + pgoff_t 
off; + + if (shmem_huge == SHMEM_HUGE_FORCE) + return true; + if (shmem_huge == SHMEM_HUGE_DENY) + return false; + switch (sbinfo->huge) { + case SHMEM_HUGE_NEVER: + return false; + case SHMEM_HUGE_ALWAYS: + return true; + case SHMEM_HUGE_WITHIN_SIZE: + off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); + i_size = round_up(i_size_read(inode), PAGE_SIZE); + if (i_size >= HPAGE_PMD_SIZE && + i_size >> PAGE_SHIFT >= off) + return true; + case SHMEM_HUGE_ADVISE: + /* TODO: implement fadvise() hints */ + return (vma->vm_flags & VM_HUGEPAGE); + default: + VM_BUG_ON(1); + return false; + } +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ #else /* !CONFIG_SHMEM */ @@ -3982,6 +4009,13 @@ int shmem_zero_setup(struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < + (vma->vm_end & HPAGE_PMD_MASK)) { + khugepaged_enter(vma, vma->vm_flags); + } + return 0; } -- cgit v1.2.3 From 0db501f7a34c11d3b964205e5b6d00692a648035 Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:26:46 -0700 Subject: mm, thp: convert from optimistic swapin collapsing to conservative To detect whether khugepaged swapin is worthwhile, this patch checks the amount of young pages. There should be at least half of HPAGE_PMD_NR to swapin. Link: http://lkml.kernel.org/r/1468109451-1615-1-git-send-email-ebru.akagunduz@gmail.com Signed-off-by: Ebru Akagunduz Suggested-by: Minchan Kim Acked-by: Rik van Riel Cc: Hugh Dickins Cc: Kirill A. Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Johannes Weiner Cc: Michal Hocko Cc: Boaz Harrosh Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 19 +++++++++++-------- mm/khugepaged.c | 38 +++++++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 23 deletions(-) (limited to 'include/trace/events') diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 830d47d5ca41..04f58acda8e8 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -13,7 +13,7 @@ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ EM( SCAN_PAGE_RO, "no_writable_page") \ - EM( SCAN_NO_REFERENCED_PAGE, "no_referenced_page") \ + EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ EM( SCAN_PAGE_NULL, "page_null") \ EM( SCAN_SCAN_ABORT, "scan_aborted") \ EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ @@ -47,7 +47,7 @@ SCAN_STATUS TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, - bool referenced, int none_or_zero, int status, int unmapped), + int referenced, int none_or_zero, int status, int unmapped), TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), @@ -55,7 +55,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __field(struct mm_struct *, mm) __field(unsigned long, pfn) __field(bool, writable) - __field(bool, referenced) + __field(int, referenced) __field(int, none_or_zero) __field(int, status) __field(int, unmapped) @@ -108,14 +108,14 @@ TRACE_EVENT(mm_collapse_huge_page, TRACE_EVENT(mm_collapse_huge_page_isolate, TP_PROTO(struct page *page, int none_or_zero, - bool referenced, bool writable, int status), + int referenced, bool writable, int status), TP_ARGS(page, none_or_zero, 
referenced, writable, status), TP_STRUCT__entry( __field(unsigned long, pfn) __field(int, none_or_zero) - __field(bool, referenced) + __field(int, referenced) __field(bool, writable) __field(int, status) ), @@ -138,25 +138,28 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, TRACE_EVENT(mm_collapse_huge_page_swapin, - TP_PROTO(struct mm_struct *mm, int swapped_in, int ret), + TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret), - TP_ARGS(mm, swapped_in, ret), + TP_ARGS(mm, swapped_in, referenced, ret), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, swapped_in) + __field(int, referenced) __field(int, ret) ), TP_fast_assign( __entry->mm = mm; __entry->swapped_in = swapped_in; + __entry->referenced = referenced; __entry->ret = ret; ), - TP_printk("mm=%p, swapped_in=%d, ret=%d", + TP_printk("mm=%p, swapped_in=%d, referenced=%d, ret=%d", __entry->mm, __entry->swapped_in, + __entry->referenced, __entry->ret) ); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 566148489e33..7dbee698d6aa 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -27,7 +27,7 @@ enum scan_result { SCAN_EXCEED_NONE_PTE, SCAN_PTE_NON_PRESENT, SCAN_PAGE_RO, - SCAN_NO_REFERENCED_PAGE, + SCAN_LACK_REFERENCED_PAGE, SCAN_PAGE_NULL, SCAN_SCAN_ABORT, SCAN_PAGE_COUNT, @@ -500,8 +500,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, { struct page *page = NULL; pte_t *_pte; - int none_or_zero = 0, result = 0; - bool referenced = false, writable = false; + int none_or_zero = 0, result = 0, referenced = 0; + bool writable = false; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { @@ -580,11 +580,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageLRU(page), page); - /* If there is no mapped pte young don't collapse the page */ + /* There should be enough young pte to collapse the page */ if (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; + referenced++; } if (likely(writable)) { if (likely(referenced)) { @@ -869,7 +869,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) static bool __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd) + unsigned long address, pmd_t *pmd, + int referenced) { pte_t pteval; int swapped_in = 0, ret = 0; @@ -887,12 +888,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, if (!is_swap_pte(pteval)) continue; swapped_in++; + /* we only decide to swapin, if there is enough young ptes */ + if (referenced < HPAGE_PMD_NR/2) { + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); + return false; + } ret = do_swap_page(&fe, pteval); + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (hugepage_vma_revalidate(mm, address)) { /* vma is no longer available, don't continue to swapin */ + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; } /* check if the pmd is still valid */ @@ -900,7 +908,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, return false; } if (ret & VM_FAULT_ERROR) { - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; } /* pte is unmapped now, we need to map it */ @@ -908,7 +916,7 @@ static bool 
__collapse_huge_page_swapin(struct mm_struct *mm, } fe.pte--; pte_unmap(fe.pte); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); return true; } @@ -916,7 +924,7 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, - int node) + int node, int referenced) { pmd_t *pmd, _pmd; pte_t *pte; @@ -973,7 +981,7 @@ static void collapse_huge_page(struct mm_struct *mm, * If it fails, we release mmap_sem and jump out_nolock. * Continuing to collapse causes inconsistency. */ - if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { mem_cgroup_cancel_charge(new_page, memcg, true); up_read(&mm->mmap_sem); goto out_nolock; @@ -1084,12 +1092,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, { pmd_t *pmd; pte_t *pte, *_pte; - int ret = 0, none_or_zero = 0, result = 0; + int ret = 0, none_or_zero = 0, result = 0, referenced = 0; struct page *page = NULL; unsigned long _address; spinlock_t *ptl; int node = NUMA_NO_NODE, unmapped = 0; - bool writable = false, referenced = false; + bool writable = false; VM_BUG_ON(address & ~HPAGE_PMD_MASK); @@ -1177,14 +1185,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, if (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; + referenced++; } if (writable) { if (referenced) { result = SCAN_SUCCEED; ret = 1; } else { - result = SCAN_NO_REFERENCED_PAGE; + result = SCAN_LACK_REFERENCED_PAGE; } } else { result = SCAN_PAGE_RO; @@ -1194,7 +1202,7 @@ out_unmap: if (ret) { node = khugepaged_find_target_node(); /* collapse_huge_page will return with the mmap_sem released */ - collapse_huge_page(mm, address, hpage, vma, node); + collapse_huge_page(mm, address, hpage, vma, node, referenced); } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, -- cgit v1.2.3
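
As a usage note for the knob introduced and reused throughout the series above: max_ptes_swap lives at /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap, defaults to HPAGE_PMD_NR/8 (typically 64 with 4kB pages and 2MB huge pages), and the store handler rejects values larger than HPAGE_PMD_NR - 1. Below is a minimal userspace sketch for inspecting and adjusting it; it assumes the sysfs file exists on the running kernel and keeps error handling to a minimum.

/*
 * Sketch: read and optionally set khugepaged's max_ptes_swap.
 * Usage: max_ptes_swap [new_value]   (writing requires root)
 */
#include <stdio.h>

#define KNOB "/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap"

int main(int argc, char **argv)
{
	FILE *f;
	unsigned int val;

	if (argc > 1) {
		f = fopen(KNOB, "w");
		if (!f) {
			perror(KNOB);
			return 1;
		}
		fprintf(f, "%s\n", argv[1]);
		/* values > HPAGE_PMD_NR - 1 are rejected; the error surfaces on close */
		if (fclose(f) != 0) {
			perror("write " KNOB);
			return 1;
		}
	}

	f = fopen(KNOB, "r");
	if (!f || fscanf(f, "%u", &val) != 1) {
		perror(KNOB);
		return 1;
	}
	fclose(f);
	printf("max_ptes_swap = %u\n", val);
	return 0;
}

Raising the value makes khugepaged more willing to collapse (and swap back in) ranges that contain swapped-out ptes; setting it to 0 makes any swap pte abort the scan with SCAN_EXCEED_SWAP_PTE, effectively disabling the optimistic swapin path added in this series.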