author	Dave Hansen <dave@linux.vnet.ibm.com>	2011-09-20 15:19:41 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-09-21 13:15:44 -0700
commit	32ef43848f283e0ef945d3c67e851c143fea3970 (patch)
tree	dc220a36bd188772896ba27db36b243ae1718385 /fs
parent	3200a8aaab0c9ccdc0f59b0dac2d4a47029137fa (diff)
download	linux-32ef43848f283e0ef945d3c67e851c143fea3970.tar.bz2
teach /proc/$pid/numa_maps about transparent hugepages
This is modeled after the smaps code.  It detects transparent hugepages
and then does a single gather_stats() for the page as a whole.  This has
two benefits:

1. It is more efficient since it does many pages in a single shot.
2. It does not have to break down the huge page.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
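
As a usage illustration (not part of this commit), the minimal user-space sketch below maps a few huge pages' worth of anonymous memory, hints the kernel with madvise(MADV_HUGEPAGE), and then dumps /proc/self/numa_maps so the whole-huge-page accounting added here can be observed. The 2MB huge page size and an enabled transparent-hugepage configuration are assumptions, not something the patch guarantees.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	/* Map a few huge pages' worth of anonymous memory. */
	size_t len = 4 * HPAGE_SIZE;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(buf, len, MADV_HUGEPAGE);	/* ask for THP backing */
	memset(buf, 0x5a, len);			/* fault the pages in */

	/* Dump the per-VMA NUMA accounting; with this patch a THP-backed
	 * range is gathered once per huge page rather than per PTE. */
	FILE *f = fopen("/proc/self/numa_maps", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	char line[1024];
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
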
Diffstat (limited to 'fs')
-rw-r--r--	fs/proc/task_mmu.c	20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9dca07e0758d..5afaa58a8630 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -936,6 +936,26 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *pte;
 
 	md = walk->private;
+	spin_lock(&walk->mm->page_table_lock);
+	if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_splitting(*pmd)) {
+			spin_unlock(&walk->mm->page_table_lock);
+			wait_split_huge_page(md->vma->anon_vma, pmd);
+		} else {
+			pte_t huge_pte = *(pte_t *)pmd;
+			struct page *page;
+
+			page = can_gather_numa_stats(huge_pte, md->vma, addr);
+			if (page)
+				gather_stats(page, md, pte_dirty(huge_pte),
+						HPAGE_PMD_SIZE/PAGE_SIZE);
+			spin_unlock(&walk->mm->page_table_lock);
+			return 0;
+		}
+	} else {
+		spin_unlock(&walk->mm->page_table_lock);
+	}
+
 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	do {
 		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
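
The hunk above relies on gather_stats() taking a page count, so all of a huge page's statistics can be accumulated in one call scaled by HPAGE_PMD_SIZE/PAGE_SIZE. The self-contained model below only illustrates that idea; the struct fields and the figure 512 (2MB/4KB) are hypothetical and are not the kernel's actual gather_stats() implementation.

#include <stdio.h>

#define MAX_NODES 8

struct numa_stats {
	unsigned long pages;
	unsigned long dirty;
	unsigned long node[MAX_NODES];
};

/* Accumulate a run of nr_pages pages that all live on the same node. */
static void gather_stats_model(struct numa_stats *md, int nid, int dirty,
			       unsigned long nr_pages)
{
	md->pages += nr_pages;
	if (dirty)
		md->dirty += nr_pages;
	md->node[nid] += nr_pages;
}

int main(void)
{
	struct numa_stats md = { 0 };

	/* One 2MB huge page on node 0: a single call covers 512 pages. */
	gather_stats_model(&md, 0, 1, 512);

	/* A few ordinary 4K pages on node 1: one call per page. */
	for (int i = 0; i < 3; i++)
		gather_stats_model(&md, 1, 0, 1);

	printf("pages=%lu dirty=%lu node0=%lu node1=%lu\n",
	       md.pages, md.dirty, md.node[0], md.node[1]);
	return 0;
}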