author	Christoph Lameter <clameter@sgi.com>	2006-09-27 01:50:08 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:26:13 -0700
commit	d5f541ed6e31518508c688912e7464facf253c87 (patch)
tree	028d296306e247ca32ec5db398d365dcd70d26b7
parent	765c4507af71c39aba21006bbd3ec809fe9714ff (diff)
download	linux-d5f541ed6e31518508c688912e7464facf253c87.tar.bz2
[PATCH] Add node to zone for the NUMA case
Add the node in order to optimize zone_to_nid().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
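For readers outside the kernel tree: the win here is removing a pointer chase. Before this patch, zone_to_nid() had to follow zone->zone_pgdat into the node's pglist_data just to read node_id; afterwards the id is cached in struct zone itself, which is already in hand on allocator paths. A minimal userspace sketch of the before/after shape (struct layouts simplified and assumed, not the real definitions from include/linux/mmzone.h):

/*
 * Userspace sketch, not kernel code: models the before/after shape
 * of zone_to_nid().  Field sets are illustrative assumptions.
 */
#include <stdio.h>

struct pglist_data {
	int node_id;			/* the node this pgdat describes */
	/* ... many other per-node fields ... */
};

struct zone {
	struct pglist_data *zone_pgdat;	/* back-pointer to the owning node */
	int node;			/* cached copy of node_id (this patch) */
};

/* Before: two dependent loads -- the zone, then the node's pglist_data. */
static unsigned long zone_to_nid_old(const struct zone *zone)
{
	return zone->zone_pgdat->node_id;
}

/* After: a single load from the zone itself. */
static unsigned long zone_to_nid_new(const struct zone *zone)
{
	return zone->node;
}

int main(void)
{
	struct pglist_data pgdat = { .node_id = 1 };
	struct zone z = { .zone_pgdat = &pgdat, .node = pgdat.node_id };

	printf("old: %lu new: %lu\n", zone_to_nid_old(&z), zone_to_nid_new(&z));
	return 0;
}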
-rw-r--r--	include/linux/mm.h	6
-rw-r--r--	include/linux/mmzone.h	1
-rw-r--r--	mm/page_alloc.c	1
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7477fb59c4f2..8e433bbc6e7e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -446,7 +446,11 @@ static inline struct zone *page_zone(struct page *page)
 
 static inline unsigned long zone_to_nid(struct zone *zone)
 {
-	return zone->zone_pgdat->node_id;
+#ifdef CONFIG_NUMA
+	return zone->node;
+#else
+	return 0;
+#endif
 }
 
 static inline unsigned long page_to_nid(struct page *page)
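Note also the !CONFIG_NUMA branch: a flat kernel has exactly one node, so returning the constant 0 is exact and lets the compiler fold node-id checks away entirely. One place the cached field plausibly pays off on NUMA builds is zonelist scanning, where the allocator compares each candidate zone's node against a preferred node. A hedged sketch of that loop shape (names and structure assumed, not the actual get_page_from_freelist code):

/*
 * Sketch: pick the first zone on the preferred node, else fall back.
 * With zone->node cached, the comparison is one load per candidate
 * instead of a dereference into each node's pglist_data.
 */
#include <stdio.h>

struct zone {
	int node;	/* cached node id, as added by this patch */
};

static struct zone *pick_zone(struct zone **zonelist, int n, int preferred_nid)
{
	for (int i = 0; i < n; i++)
		if (zonelist[i]->node == preferred_nid)	/* zone_to_nid() */
			return zonelist[i];
	return n > 0 ? zonelist[0] : NULL;
}

int main(void)
{
	struct zone z0 = { .node = 0 }, z1 = { .node = 1 };
	struct zone *zl[] = { &z0, &z1 };

	printf("picked node %d\n", pick_zone(zl, 2, 1)->node);
	return 0;
}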
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 562cf7a8f3ee..59855b8718a0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -168,6 +168,7 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
 #ifdef CONFIG_NUMA
+	int node;
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4c76188b1681..d0432e44f77d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2405,6 +2405,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
+		zone->node = nid;
 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
 						/ 100;
 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
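The page_alloc.c hunk does the one-time caching as each zone is initialized, so every later zone_to_nid() is a single load. A minimal userspace sketch of that pattern (heavily simplified; the real free_area_init_core loop does far more per zone):

/*
 * Sketch, not kernel code: each zone of a node caches the owning
 * node's id once, at init time.
 */
#include <stdio.h>

#define MAX_NR_ZONES 4

struct zone {
	int node;	/* cached node id */
};

struct pglist_data {
	int node_id;
	struct zone node_zones[MAX_NR_ZONES];
};

static void init_zones(struct pglist_data *pgdat, int nid)
{
	for (int j = 0; j < MAX_NR_ZONES; j++)
		pgdat->node_zones[j].node = nid;	/* zone->node = nid */
}

int main(void)
{
	struct pglist_data pgdat = { .node_id = 2 };

	init_zones(&pgdat, pgdat.node_id);
	printf("zone 0 is on node %d\n", pgdat.node_zones[0].node);
	return 0;
}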