Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  9
-rw-r--r--  mm/memory.c   2
-rw-r--r--  mm/rmap.c     3
3 files changed, 9 insertions, 5 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 254ce2b90158..28a2980ee435 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -17,7 +17,7 @@
 #include <linux/mutex.h>
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
-
+#include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
@@ -1283,7 +1283,12 @@ module_exit(hugetlb_exit);
 
 static int __init hugetlb_init(void)
 {
-	BUILD_BUG_ON(HPAGE_SHIFT == 0);
+	/* Some platforms decide whether they support huge pages at boot
+	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+	 * there is no such support.
+	 */
+	if (HPAGE_SHIFT == 0)
+		return 0;
 
 	if (!size_to_hstate(default_hstate_size)) {
 		default_hstate_size = HPAGE_SIZE;
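
For context: on most architectures HPAGE_SHIFT is a compile-time constant, so the BUILD_BUG_ON removed above was a valid sanity check; on powerpc the huge page size is only known once the MMU has been probed at boot, and HPAGE_SHIFT is left at 0 when there is no support, so the check has to become a runtime bail-out. A minimal standalone sketch of that pattern (the names below are illustrative, not the powerpc code):

/* Standalone sketch: the huge-page shift is a plain variable, filled in
 * (or left at 0) by platform setup, and the init path returns early
 * instead of asserting at compile time.
 */
#include <stdio.h>

static unsigned int hpage_shift;	/* 0 == no huge-page support */

static void platform_setup(int hw_supports_hugepages)
{
	if (hw_supports_hugepages)
		hpage_shift = 24;	/* e.g. 16 MB huge pages */
	/* otherwise leave hpage_shift at 0 */
}

static int hugetlb_init_sketch(void)
{
	if (hpage_shift == 0)
		return 0;	/* nothing to do on this machine */

	printf("huge page size: %lu bytes\n", 1UL << hpage_shift);
	return 0;
}

int main(void)
{
	platform_setup(0);	/* pretend the hardware has no huge pages */
	hugetlb_init_sketch();

	platform_setup(1);	/* pretend it does */
	hugetlb_init_sketch();
	return 0;
}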
diff --git a/mm/memory.c b/mm/memory.c
index 0e4eea10c7b0..6793b9c68107 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -993,7 +993,6 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(tlb, address, end);
 	return end;
 }
-EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -1111,7 +1110,6 @@ no_page_table:
 	}
 	return page;
 }
-EXPORT_SYMBOL_GPL(follow_page);
 
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
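
The two EXPORT_SYMBOL_GPL removals above take zap_page_range() and follow_page() away from modules; the interface left exported for drivers that need to tear down PTEs inside a VMA they manage is zap_vma_ptes(), whose kerneldoc appears in the context lines. A hedged sketch of a caller, with hypothetical my_drv_* names that are not part of this commit:

/*
 * Illustrative only: a driver that mapped its own pages into a user VMA
 * and wants to remove those PTEs again.  With zap_page_range() no longer
 * exported, the module-visible helper is zap_vma_ptes().
 */
#include <linux/kernel.h>
#include <linux/mm.h>

static void my_drv_unmap_user_range(struct vm_area_struct *vma,
				    unsigned long uaddr, unsigned long size)
{
	/* the range must lie inside vma; zap_vma_ptes() rejects it otherwise */
	if (zap_vma_ptes(vma, uaddr, size))
		printk(KERN_WARNING "my_drv: failed to zap user mapping\n");
}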
diff --git a/mm/rmap.c b/mm/rmap.c
index 99bc3f9cd796..94a5246a3f98 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -667,7 +667,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 		 * Leaving it set also helps swapoff to reinstate ptes
 		 * faster for those pages still in swapcache.
 		 */
-		if (page_test_dirty(page)) {
+		if ((!PageAnon(page) || PageSwapCache(page)) &&
+		    page_test_dirty(page)) {
 			page_clear_dirty(page);
 			set_page_dirty(page);
 		}
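
The rmap.c hunk narrows when the hardware dirty bit is folded back into the struct page: page_test_dirty()/page_clear_dirty() are only non-trivial on s390, where dirtiness lives in the storage key, and the new guard skips the transfer for anonymous pages that are not in swapcache, since such a page is about to be freed and has no backing store left to write to. A standalone sketch of the predicate (the fake_page type and its fields are stand-ins, not kernel code):

/* Sketch of the new guard: only pages with somewhere to be written back
 * (file-backed pages, or anonymous pages still in swapcache) have the
 * hardware dirty bit carried over to the struct page.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool anon;		/* stands in for PageAnon()      */
	bool swapcache;		/* stands in for PageSwapCache() */
	bool hw_dirty;		/* stands in for page_test_dirty() */
};

static bool should_transfer_dirty(const struct fake_page *page)
{
	/* mirrors: (!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page) */
	return (!page->anon || page->swapcache) && page->hw_dirty;
}

int main(void)
{
	const struct fake_page cases[] = {
		{ .anon = false, .swapcache = false, .hw_dirty = true },  /* file-backed page   */
		{ .anon = true,  .swapcache = true,  .hw_dirty = true },  /* anon, in swapcache */
		{ .anon = true,  .swapcache = false, .hw_dirty = true },  /* anon, being freed  */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: transfer dirty bit = %d\n",
		       i, should_transfer_dirty(&cases[i]));
	return 0;
}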