author     Christoph Lameter <clameter@sgi.com>                    2007-02-10 01:43:02 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-11 10:51:17 -0800
commit     d23ad42324cc4378132e51f2fc5c9ba6cbe75182
tree       6844416befb3988e432e8f422f3a369e2f760d39 /mm/page_alloc.c
parent     c878538598d1e7ab41ecc0de8894e34e2fdef630
download   linux-d23ad42324cc4378132e51f2fc5c9ba6cbe75182.tar.bz2
[PATCH] Use ZVC for free_pages
This again simplifies some of the VM counter calculations through the
use of the ZVC consolidated counters.

[michal.k.k.piotrowski@gmail.com: build fix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Michal Piotrowski <michal.k.k.piotrowski@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
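For readers unfamiliar with ZVC (zone vm counters): the idea is that every
per-zone statistic lives in a common vm_stat[] array indexed by an enum,
updated and read through one set of helpers, instead of each statistic being
a hand-maintained field like zone->free_pages. A minimal sketch of that
pattern with deliberately simplified types (the real helpers in
include/linux/vmstat.h and mm/vmstat.c use atomic_long_t counters plus
per-cpu differentials, omitted here):

	enum zone_stat_item { NR_FREE_PAGES, NR_VM_ZONE_STAT_ITEMS };

	static long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* global rollup */

	struct zone {
		long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* replaces ad hoc fields */
	};

	/* Simplified stand-in for __mod_zone_page_state(): one update
	 * path keeps the per-zone and global counters consistent. */
	static void mod_zone_page_state(struct zone *zone,
					enum zone_stat_item item, long delta)
	{
		zone->vm_stat[item] += delta;
		vm_stat[item] += delta;
	}

	/* Simplified zone_page_state()/global_page_state(): reads become
	 * array lookups instead of for_each_zone() summing loops. */
	static long zone_page_state(struct zone *zone, enum zone_stat_item item)
	{
		return zone->vm_stat[item];
	}

	static long global_page_state(enum zone_stat_item item)
	{
		return vm_stat[item];
	}

Every hunk below is one of three mechanical substitutions into these helpers:
writes to zone->free_pages become __mod_zone_page_state(), per-zone reads
become zone_page_state(), and system-wide summing loops become
global_page_state().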
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 37
1 file changed, 13 insertions(+), 24 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07c954e53270..ba62d8789f73 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -395,7 +395,7 @@ static inline void __free_one_page(struct page *page,
VM_BUG_ON(page_idx & (order_size - 1));
VM_BUG_ON(bad_range(zone, page));
- zone->free_pages += order_size;
+ __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
while (order < MAX_ORDER-1) {
unsigned long combined_idx;
struct free_area *area;
@@ -631,7 +631,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
- zone->free_pages -= 1UL << order;
+ __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
expand(zone, page, order, current_order, area);
return page;
}
@@ -989,7 +989,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
/* free_pages may go negative - that's OK */
- long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+ long min = mark;
+ long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
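Aside: the hunk above depends on free_pages being a signed long. ZVC reads
can run slightly low (per-cpu deltas may not have been folded in yet), so
after subtracting (1 << order) - 1 the value can dip below zero; signed
arithmetic keeps the later "free_pages <= min" comparisons meaningful. A
standalone illustration with hypothetical numbers (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int order = 3;				/* 1 << 3 = 8 pages requested */
		unsigned long free_unsigned = 3;	/* counter read low */
		long free_signed = 3;

		/* unsigned arithmetic wraps to a huge positive value... */
		printf("%lu\n", free_unsigned - (1UL << order) + 1);
		/* ...while signed arithmetic yields a usable -4 */
		printf("%ld\n", free_signed - (1 << order) + 1);
		return 0;
	}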
@@ -1444,13 +1445,7 @@ EXPORT_SYMBOL(free_pages);
*/
unsigned int nr_free_pages(void)
{
- unsigned int sum = 0;
- struct zone *zone;
-
- for_each_zone(zone)
- sum += zone->free_pages;
-
- return sum;
+ return global_page_state(NR_FREE_PAGES);
}
EXPORT_SYMBOL(nr_free_pages);
@@ -1458,13 +1453,7 @@ EXPORT_SYMBOL(nr_free_pages);
#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
- unsigned int sum = 0;
- enum zone_type i;
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- sum += pgdat->node_zones[i].free_pages;
-
- return sum;
+ return node_page_state(pgdat->node_id, NR_FREE_PAGES);
}
#endif
@@ -1514,7 +1503,7 @@ void si_meminfo(struct sysinfo *val)
{
val->totalram = totalram_pages;
val->sharedram = 0;
- val->freeram = nr_free_pages();
+ val->freeram = global_page_state(NR_FREE_PAGES);
val->bufferram = nr_blockdev_pages();
val->totalhigh = totalhigh_pages;
val->freehigh = nr_free_highpages();
@@ -1529,10 +1518,11 @@ void si_meminfo_node(struct sysinfo *val, int nid)
pg_data_t *pgdat = NODE_DATA(nid);
val->totalram = pgdat->node_present_pages;
- val->freeram = nr_free_pages_pgdat(pgdat);
+ val->freeram = node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
- val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+ val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+ NR_FREE_PAGES);
#else
val->totalhigh = 0;
val->freehigh = 0;
@@ -1580,13 +1570,13 @@ void show_free_areas(void)
get_zone_counts(&active, &inactive, &free);
printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
- " free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+ " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
active,
inactive,
global_page_state(NR_FILE_DIRTY),
global_page_state(NR_WRITEBACK),
global_page_state(NR_UNSTABLE_NFS),
- nr_free_pages(),
+ global_page_state(NR_FREE_PAGES),
global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
@@ -1612,7 +1602,7 @@ void show_free_areas(void)
" all_unreclaimable? %s"
"\n",
zone->name,
- K(zone->free_pages),
+ K(zone_page_state(zone, NR_FREE_PAGES)),
K(zone->pages_min),
K(zone->pages_low),
K(zone->pages_high),
@@ -2675,7 +2665,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
spin_lock_init(&zone->lru_lock);
zone_seqlock_init(zone);
zone->zone_pgdat = pgdat;
- zone->free_pages = 0;
zone->prev_priority = DEF_PRIORITY;
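After this conversion, NR_FREE_PAGES is an ordinary ZVC counter, so it is
also exported by mm/vmstat.c through /proc/vmstat; a quick way to observe it
on a kernel carrying this patch (value illustrative):

	$ grep nr_free_pages /proc/vmstat
	nr_free_pages 845021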