author	Andrea Arcangeli <aarcange@redhat.com>	2011-01-13 15:47:16 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 17:32:47 -0800
commit	37c2ac7872a9387542616f658d20ac25f5bdb32e (patch)
tree	d115915db4a61e261012bf0f9c4cf14630243d71
parent	91600e9e592e48736e630851c83da2ad6bf0e91f (diff)
thp: compound_trans_order
Add compound_trans_order() to read a compound page's order safely, under the compound lock, so a concurrent THP split cannot change the order underneath the reader. The locking is a noop for CONFIG_TRANSPARENT_HUGEPAGE=n.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mm.h	14
-rw-r--r--	mm/memcontrol.c	12
-rw-r--r--	mm/memory-failure.c	12
3 files changed, 26 insertions, 12 deletions
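Background for the helper added below (not part of this patch; a sketch of the existing mm.h locking helpers, assuming they look roughly like this): the compound lock only does real work when CONFIG_TRANSPARENT_HUGEPAGE is enabled, which is why taking it around the compound_order() read is effectively a noop on THP=n kernels, as the commit message notes.

	/* Sketch only: the real helpers live in include/linux/mm.h. */
	static inline unsigned long compound_lock_irqsave(struct page *page)
	{
		unsigned long flags = 0;
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		local_irq_save(flags);		/* the compound lock is taken with IRQs off */
		compound_lock(page);
	#endif
		return flags;			/* THP=n: nothing to do */
	}

	static inline void compound_unlock_irqrestore(struct page *page,
						      unsigned long flags)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		compound_unlock(page);
		local_irq_restore(flags);
	#endif
	}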
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9c2695beab86..ce97a2bb0b19 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -450,6 +450,20 @@ static inline int compound_order(struct page *page)
return (unsigned long)page[1].lru.prev;
}
+static inline int compound_trans_order(struct page *page)
+{
+ int order;
+ unsigned long flags;
+
+ if (!PageHead(page))
+ return 0;
+
+ flags = compound_lock_irqsave(page);
+ order = compound_order(page);
+ compound_unlock_irqrestore(page, flags);
+ return order;
+}
+
static inline void set_compound_order(struct page *page, unsigned long order)
{
page[1].lru.prev = (void *)order;
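The new helper returns 0 for non-head pages and otherwise samples the order with the compound lock held, so a racing THP split cannot change the value mid-read. A minimal, hypothetical caller sketch (page_span_pages is an invented name, not from this patch) of the pattern the mm/memory-failure.c hunks below switch to:

	/* Hypothetical wrapper, for illustration only. */
	static unsigned long page_span_pages(struct page *page)
	{
		/* compound_trans_order() is 0 for base and tail pages, so this yields 1 */
		return 1UL << compound_trans_order(page);
	}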
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ea3410fb4d..741206ffdace 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1027,10 +1027,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
struct page_cgroup *pc;
struct mem_cgroup_per_zone *mz;
- int page_size = PAGE_SIZE;
-
- if (PageTransHuge(page))
- page_size <<= compound_order(page);
if (mem_cgroup_disabled())
return NULL;
@@ -2286,8 +2282,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
int ret;
int page_size = PAGE_SIZE;
- if (PageTransHuge(page))
+ if (PageTransHuge(page)) {
page_size <<= compound_order(page);
+ VM_BUG_ON(!PageTransHuge(page));
+ }
pc = lookup_page_cgroup(page);
/* can happen at boot */
@@ -2558,8 +2556,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
if (PageSwapCache(page))
return NULL;
- if (PageTransHuge(page))
+ if (PageTransHuge(page)) {
page_size <<= compound_order(page);
+ VM_BUG_ON(!PageTransHuge(page));
+ }
count = page_size >> PAGE_SHIFT;
/*
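The mm/memcontrol.c hunks, by contrast, keep the unlocked compound_order() read and add VM_BUG_ON(!PageTransHuge(page)) after it: on these charge/uncharge paths the page is expected to be stable against splitting, and the assertion re-checks that expectation (with CONFIG_DEBUG_VM) right after the order has been read. A minimal sketch of that shape, using a hypothetical wrapper name:

	/* Hypothetical wrapper illustrating the charge-path pattern above. */
	static int charge_nr_pages(struct page *page)
	{
		int page_size = PAGE_SIZE;

		if (PageTransHuge(page)) {
			page_size <<= compound_order(page);	/* unlocked read... */
			VM_BUG_ON(!PageTransHuge(page));	/* ...page must not have been split meanwhile */
		}
		return page_size >> PAGE_SHIFT;
	}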
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1b43d0ffff65..548fbd70f026 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -203,7 +203,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
#ifdef __ARCH_SI_TRAPNO
si.si_trapno = trapno;
#endif
- si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+ si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
/*
* Don't use force here, it's convenient if the signal
* can be temporarily blocked.
@@ -930,7 +930,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
static void set_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_order(hpage);
+ int nr_pages = 1 << compound_trans_order(hpage);
for (i = 0; i < nr_pages; i++)
SetPageHWPoison(hpage + i);
}
@@ -938,7 +938,7 @@ static void set_page_hwpoison_huge_page(struct page *hpage)
static void clear_page_hwpoison_huge_page(struct page *hpage)
{
int i;
- int nr_pages = 1 << compound_order(hpage);
+ int nr_pages = 1 << compound_trans_order(hpage);
for (i = 0; i < nr_pages; i++)
ClearPageHWPoison(hpage + i);
}
@@ -968,7 +968,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
return 0;
}
- nr_pages = 1 << compound_order(hpage);
+ nr_pages = 1 << compound_trans_order(hpage);
atomic_long_add(nr_pages, &mce_bad_pages);
/*
@@ -1166,7 +1166,7 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
- nr_pages = 1 << compound_order(page);
+ nr_pages = 1 << compound_trans_order(page);
if (!get_page_unless_zero(page)) {
/*
@@ -1304,7 +1304,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
}
done:
if (!PageHWPoison(hpage))
- atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+ atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
/* keep elevated page count for bad page */