author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-01-06 14:39:16 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 15:59:00 -0800
commit	64cdd548ffe26849d4cd113ac640f60606063b14 (patch)
tree	9a7a8af9022621d2da3648757b724fcd32b20168
parent	1b0bd118862cd9fe9ac2872137a1b8107e83ff9d (diff)
download	linux-64cdd548ffe26849d4cd113ac640f60606063b14.tar.bz2
mm: cleanup: remove #ifdef CONFIG_MIGRATION
#ifdef blocks in *.c files decrease source readability a bit; removing them is better. This patch doesn't have any functional change.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
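The pattern the patch applies, shown below as a minimal standalone sketch (CONFIG_EXAMPLE_FEATURE, FEATURE_ENABLED and handle_feature() are hypothetical names for illustration, not part of the kernel): the header defines a 0/1 constant per configuration, and the .c code tests it in an ordinary if, so the dead branch is discarded at build time while still being parsed and type-checked.

#include <stdio.h>

/*
 * Hypothetical config switch standing in for a Kconfig option such as
 * CONFIG_MIGRATION; uncomment to enable the feature path.
 */
/* #define CONFIG_EXAMPLE_FEATURE */

#ifdef CONFIG_EXAMPLE_FEATURE
#define FEATURE_ENABLED 1
#else
#define FEATURE_ENABLED 0
#endif

static void handle_feature(int value)
{
	printf("feature path taken for %d\n", value);
}

int main(void)
{
	int value = 42;

	/*
	 * Instead of wrapping the call in #ifdef CONFIG_EXAMPLE_FEATURE ...
	 * #endif, test the 0/1 constant in plain C.  When the feature is
	 * disabled the condition is constant-false and compilers typically
	 * drop the branch, yet the code in it is still parsed and
	 * type-checked, which is what keeps the cleanup free of
	 * functional change.
	 */
	if (FEATURE_ENABLED && value > 0)
		handle_feature(value);
	else
		printf("feature disabled or value not positive\n");

	return 0;
}

In the disabled configuration the generated code should match the old #ifdef version; the readability gain is that the .c file reads as straight-line C, which is the same trade-off PAGE_MIGRATION makes in mprotect.c and rmap.c below.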
-rw-r--r--	include/linux/migrate.h	4
-rw-r--r--	mm/mprotect.c	6
-rw-r--r--	mm/rmap.c	10
3 files changed, 9 insertions(+), 11 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3f34005068d4..527602cdea1c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,6 +7,8 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
#ifdef CONFIG_MIGRATION
+#define PAGE_MIGRATION 1
+
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
@@ -20,6 +22,8 @@ extern int migrate_vmas(struct mm_struct *mm,
const nodemask_t *from, const nodemask_t *to,
unsigned long flags);
#else
+#define PAGE_MIGRATION 0
+
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private) { return -ENOSYS; }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index cfb4c4852062..d0f6e7ce09f1 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -22,6 +22,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -59,8 +60,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
ptent = pte_mkwrite(ptent);
ptep_modify_prot_commit(mm, addr, pte, ptent);
-#ifdef CONFIG_MIGRATION
- } else if (!pte_file(oldpte)) {
+ } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
if (is_write_migration_entry(entry)) {
@@ -72,9 +72,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
set_pte_at(mm, addr, pte,
swp_entry_to_pte(entry));
}
-#endif
}
-
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
diff --git a/mm/rmap.c b/mm/rmap.c
index 10993942d6c9..53c56dacd725 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -50,6 +50,7 @@
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
#include <asm/tlbflush.h>
@@ -818,8 +819,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
spin_unlock(&mmlist_lock);
}
dec_mm_counter(mm, anon_rss);
-#ifdef CONFIG_MIGRATION
- } else {
+ } else if (PAGE_MIGRATION) {
/*
* Store the pfn of the page in a special migration
* pte. do_swap_page() will wait until the migration
@@ -827,19 +827,15 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
BUG_ON(!migration);
entry = make_migration_entry(page, pte_write(pteval));
-#endif
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- } else
-#ifdef CONFIG_MIGRATION
- if (migration) {
+ } else if (PAGE_MIGRATION && migration) {
/* Establish migration entry for a file page */
swp_entry_t entry;
entry = make_migration_entry(page, pte_write(pteval));
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
} else
-#endif
dec_mm_counter(mm, file_rss);