author     James Morris <jmorris@namei.org>    2009-05-22 18:40:59 +1000
committer  James Morris <jmorris@namei.org>    2009-05-22 18:40:59 +1000
commit     2c9e703c618106f5383226fbb1f526cb11034f8a (patch)
tree       87d7548001ea82f655fede0640466fc16aabcdf7 /mm
parent     6470c077cae12227318f40f3e6d756caadcce4b0 (diff)
parent     5805977e63a36ad56594a623f3bd2bebcb7db233 (diff)
download   linux-2c9e703c618106f5383226fbb1f526cb11034f8a.tar.bz2
Merge branch 'master' into next
Conflicts:
	fs/exec.c

Removed IMA changes (the IMA checks are now performed via may_open()).

Signed-off-by: James Morris <jmorris@namei.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/madvise.c         |  8
-rw-r--r--  mm/mmzone.c          | 15
-rw-r--r--  mm/page-writeback.c  |  6
-rw-r--r--  mm/pdflush.c         | 31
-rw-r--r--  mm/slob.c            |  5
-rw-r--r--  mm/slub.c            |  6
-rw-r--r--  mm/vmstat.c          | 19
7 files changed, 43 insertions(+), 47 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 36d6ea2b6340..b9ce574827c8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -112,14 +112,6 @@ static long madvise_willneed(struct vm_area_struct * vma,
if (!file)
return -EBADF;
- /*
- * Page cache readahead assumes page cache pages are order-0 which
- * is not the case for hugetlbfs. Do not give a bad return value
- * but ignore the advice.
- */
- if (vma->vm_flags & VM_HUGETLB)
- return 0;
-
if (file->f_mapping->a_ops->get_xip_mem) {
/* no bad return value, but ignore advice */
return 0;
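
madvise_willneed() backs the MADV_WILLNEED hint; with the hunk above, hugetlbfs mappings no longer short-circuit it with an early return. For context, a minimal userspace sketch of issuing that hint (the file path and sizes are illustrative assumptions, not anything from this patch):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* illustrative file */
	if (fd < 0)
		return 1;

	size_t len = 1 << 20;
	void *addr = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/* Ask the kernel to start readahead; madvise_willneed() services this. */
	madvise(addr, len, MADV_WILLNEED);

	munmap(addr, len);
	close(fd);
	return 0;
}
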
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 16ce8b955dcf..f5b7d1760213 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -6,6 +6,7 @@
#include <linux/stddef.h>
+#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -72,3 +73,17 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
*zone = zonelist_zone(z);
return z;
}
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone)
+{
+ if (page_to_pfn(page) != pfn)
+ return 0;
+
+ if (page_zone(page) != zone)
+ return 0;
+
+ return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
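The new memmap_valid_within() catches PFNs whose struct page backing was freed by architectures that punch holes in the flatmem memmap (CONFIG_ARCH_HAS_HOLES_MEMORYMODEL). A hedged sketch of how a PFN walker might use it; the walker itself is hypothetical, while the real caller in this series is pagetypeinfo_showblockcount_print() in mm/vmstat.c further down:

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Hypothetical walker: count the pages of a zone with a sane memmap entry. */
static unsigned long count_backed_pages(struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end = pfn + zone->spanned_pages;
	unsigned long backed = 0;

	for (; pfn < end; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		/* Skip PFNs whose memmap backing was freed as a hole. */
		if (!memmap_valid_within(pfn, page, zone))
			continue;
		backed++;
	}
	return backed;
}
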
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 30351f0063ac..bb553c3e955d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -94,12 +94,12 @@ unsigned long vm_dirty_bytes;
/*
* The interval between `kupdate'-style writebacks
*/
-unsigned int dirty_writeback_interval = 5 * 100; /* sentiseconds */
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
/*
* The longest time for which data is allowed to remain dirty
*/
-unsigned int dirty_expire_interval = 30 * 100; /* sentiseconds */
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
/*
* Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,7 +770,7 @@ static void wb_kupdate(unsigned long arg)
sync_supers();
- oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
+ oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
start_jif = jiffies;
next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
nr_to_write = global_page_state(NR_FILE_DIRTY) +
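
Both writeback intervals are stored in centiseconds (the /proc/sys/vm unit), so they must be scaled by 10 before msecs_to_jiffies(); the hunk above gives dirty_expire_interval the same *10 factor that dirty_writeback_interval already had. A quick stand-alone check of the arithmetic (the HZ value is assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int dirty_expire_interval = 30 * 100;	/* centiseconds = 30 s */
	unsigned int hz = 250;				/* assumed CONFIG_HZ */

	unsigned int msecs = dirty_expire_interval * 10;	/* 30000 ms */
	unsigned int ticks = msecs * hz / 1000;			/* 7500 jiffies */

	printf("%u cs -> %u ms -> %u jiffies at HZ=%u\n",
	       dirty_expire_interval, msecs, ticks, hz);
	return 0;
}
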
diff --git a/mm/pdflush.c b/mm/pdflush.c
index f2caf96993f8..235ac440c44e 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -58,14 +58,6 @@ static DEFINE_SPINLOCK(pdflush_lock);
int nr_pdflush_threads = 0;
/*
- * The max/min number of pdflush threads. R/W by sysctl at
- * /proc/sys/vm/nr_pdflush_threads_max/min
- */
-int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
-int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
-
-
-/*
* The time at which the pdflush thread pool last went empty
*/
static unsigned long last_empty_jifs;
@@ -76,7 +68,7 @@ static unsigned long last_empty_jifs;
* Thread pool management algorithm:
*
* - The minimum and maximum number of pdflush instances are bound
- * by nr_pdflush_threads_min and nr_pdflush_threads_max.
+ * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
*
* - If there have been no idle pdflush instances for 1 second, create
* a new one.
@@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work)
* To throttle creation, we reset last_empty_jifs.
*/
if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
- if (list_empty(&pdflush_list) &&
- nr_pdflush_threads < nr_pdflush_threads_max) {
- last_empty_jifs = jiffies;
- nr_pdflush_threads++;
- spin_unlock_irq(&pdflush_lock);
- start_one_pdflush_thread();
- spin_lock_irq(&pdflush_lock);
+ if (list_empty(&pdflush_list)) {
+ if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+ last_empty_jifs = jiffies;
+ nr_pdflush_threads++;
+ spin_unlock_irq(&pdflush_lock);
+ start_one_pdflush_thread();
+ spin_lock_irq(&pdflush_lock);
+ }
}
}
@@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work)
*/
if (list_empty(&pdflush_list))
continue;
- if (nr_pdflush_threads <= nr_pdflush_threads_min)
+ if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
continue;
pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -266,9 +259,9 @@ static int __init pdflush_init(void)
* Pre-set nr_pdflush_threads... If we fail to create,
* the count will be decremented.
*/
- nr_pdflush_threads = nr_pdflush_threads_min;
+ nr_pdflush_threads = MIN_PDFLUSH_THREADS;
- for (i = 0; i < nr_pdflush_threads_min; i++)
+ for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
start_one_pdflush_thread();
return 0;
}
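
With the sysctl tunables removed, the pool size is bounded by the compile-time MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS constants: grow while nothing has been idle for a while and the pool is below the maximum, shrink while a worker has idled too long and the pool is above the minimum. A stand-alone sketch of that bounded-pool policy; all names, the clock, and the simulation loop are illustrative stand-ins, not the kernel implementation:

#include <stdio.h>

enum { POOL_MIN = 2, POOL_MAX = 8 };	/* MIN/MAX_PDFLUSH_THREADS analogues */

struct pool {
	int nr_threads;		/* current pool size */
	int idle_threads;	/* workers currently waiting for work */
	long last_empty;	/* "time" the idle list last went empty */
};

void pool_adjust(struct pool *p, long now)
{
	/* Grow: no idle worker for a while and still below the hard maximum. */
	if (p->idle_threads == 0 && now - p->last_empty > 1 &&
	    p->nr_threads < POOL_MAX) {
		p->last_empty = now;
		p->nr_threads++;	/* start_one_pdflush_thread() analogue */
	}

	/* Shrink: surplus idle workers, but never drop below the minimum. */
	if (p->idle_threads > 0 && p->nr_threads > POOL_MIN)
		p->nr_threads--;	/* oldest idle worker exits */
}

int main(void)
{
	struct pool p = { .nr_threads = POOL_MIN, .idle_threads = 0, .last_empty = 0 };

	for (long t = 0; t < 10; t++)
		pool_adjust(&p, t);	/* sustained load: nothing ever idle */
	printf("pool size after sustained load: %d (max %d)\n",
	       p.nr_threads, POOL_MAX);
	return 0;
}
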
diff --git a/mm/slob.c b/mm/slob.c
index a2d4ab32198d..f92e66d558bd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -60,6 +60,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -255,6 +256,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
static void slob_free_pages(void *b, int order)
{
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += 1 << order;
free_pages((unsigned long)b, order);
}
@@ -407,7 +410,7 @@ static void slob_free(void *block, int size)
spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
- free_page((unsigned long)b);
+ slob_free_pages(b, 0);
return;
}
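
This SLOB hunk and the SLUB hunk below apply the same pattern: when a slab page is freed while direct reclaim is running, credit it to current->reclaim_state so vmscan can see how much memory the free actually recovered. A minimal kernel-style sketch of the pattern; the surrounding free helper is schematic, not a real mm/ function:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>		/* struct reclaim_state */

/* Schematic helper showing the accounting added above; not a real mm/ API. */
static void free_slab_pages_accounted(struct page *page, int order)
{
	/*
	 * A task in the direct-reclaim path carries a reclaim_state; credit
	 * the pages being freed so the reclaim loop counts this as progress.
	 */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	__free_pages(page, order);
}
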
diff --git a/mm/slub.c b/mm/slub.c
index 7ab54ecbd3f3..65ffda5934b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -9,6 +9,7 @@
*/
#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
@@ -1170,6 +1171,8 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__ClearPageSlab(page);
reset_page_mapcount(page);
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += pages;
__free_pages(page, order);
}
@@ -1909,7 +1912,7 @@ static inline int calculate_order(int size)
* Doh this slab cannot be placed using slub_max_order.
*/
order = slab_order(size, 1, MAX_ORDER, 1);
- if (order <= MAX_ORDER)
+ if (order < MAX_ORDER)
return order;
return -ENOSYS;
}
@@ -2522,6 +2525,7 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
get_option(&str, &slub_max_order);
+ slub_max_order = min(slub_max_order, MAX_ORDER - 1);
return 1;
}
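
The two SLUB hunks above fix the MAX_ORDER boundary: valid buddy-allocator orders run from 0 to MAX_ORDER - 1, so an order equal to MAX_ORDER must be rejected and a user-supplied slub_max_order clamped. A small stand-alone check of that boundary (the MAX_ORDER value is assumed for illustration; the real one is arch/config dependent):

#include <stdio.h>

#define MAX_ORDER 11	/* typical default, assumed for this example */

int main(void)
{
	int largest_valid = MAX_ORDER - 1;	/* 10: biggest order the buddy allocator accepts */

	/* The old test (order <= MAX_ORDER) wrongly accepted order == MAX_ORDER. */
	int order = MAX_ORDER;
	printf("order %d accepted? old test: %s, new test: %s\n",
	       order,
	       order <= MAX_ORDER ? "yes (wrong)" : "no",
	       order <  MAX_ORDER ? "yes" : "no (correct)");

	/* Clamp a user-supplied slub_max_order the same way the patch does. */
	int slub_max_order = 12;
	if (slub_max_order > MAX_ORDER - 1)
		slub_max_order = MAX_ORDER - 1;
	printf("slub_max_order clamped to %d (largest valid order %d)\n",
	       slub_max_order, largest_valid);
	return 0;
}
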
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 66f6130976cb..74d66dba0cbe 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -509,22 +509,11 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
continue;
page = pfn_to_page(pfn);
-#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
- /*
- * Ordinarily, memory holes in flatmem still have a valid
- * memmap for the PFN range. However, an architecture for
- * embedded systems (e.g. ARM) can free up the memmap backing
- * holes to save memory on the assumption the memmap is
- * never used. The page_zone linkages are then broken even
- * though pfn_valid() returns true. Skip the page if the
- * linkages are broken. Even if this test passed, the impact
- * is that the counters for the movable type are off but
- * fragmentation monitoring is likely meaningless on small
- * systems.
- */
- if (page_zone(page) != zone)
+
+ /* Watch for unexpected holes punched in the memmap */
+ if (!memmap_valid_within(pfn, page, zone))
continue;
-#endif
+
mtype = get_pageblock_migratetype(page);
if (mtype < MIGRATE_TYPES)