author	Jan Beulich <JBeulich@novell.com>	2009-09-21 17:03:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 07:17:38 -0700
commit	4481374ce88ba8f460c8b89f2572027bd27057d0 (patch)
tree	6896601b6a1da0e3e932ffa75fcff966c834c02c /net
parent	4738e1b9cf8f9e28d7de080a5e6ce5d0095ea18f (diff)
mm: replace various uses of num_physpages by totalram_pages
Sizing of memory allocations shouldn't depend on the number of physical pages found in a system, as that generally includes (perhaps a huge amount of) non-RAM pages. The amount of what actually is usable as storage should instead be used as a basis here. Some of the calculations (i.e. those not intending to use high memory) should likely even use (totalram_pages - totalhigh_pages).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
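To make the sizing difference concrete, the following is a minimal userspace C sketch (not kernel code) of the bucket-count formula the nf_conntrack hunk below uses: one hash bucket per 16 KiB of memory, capped at 16384 buckets on >= 1 GiB machines and floored at 32. The buckets_for() helper, the example page counts, and the zero totalhigh_pages value are illustrative assumptions; only the arithmetic mirrors the patched code. On a box whose physical address range is much larger than its RAM, sizing from the raw page count overshoots, which is the behaviour the commit removes.

```c
/* Hypothetical userspace sketch of the hash-table sizing pattern touched by
 * this patch.  The variables below stand in for the kernel's num_physpages,
 * totalram_pages and totalhigh_pages globals; values are made up. */
#include <stdio.h>

#define PAGE_SHIFT 12UL            /* 4 KiB pages, as on x86 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct hlist_head { void *first; };

/* One bucket list per 16 KiB of memory, mirroring the nf_conntrack hunk:
 * cap at 16384 buckets on >= 1 GiB machines, floor at 32. */
static unsigned long buckets_for(unsigned long pages)
{
	unsigned long n = ((pages << PAGE_SHIFT) / 16384) / sizeof(struct hlist_head);

	if (pages > (1024UL * 1024 * 1024 / PAGE_SIZE))
		n = 16384;
	if (n < 32)
		n = 32;
	return n;
}

int main(void)
{
	/* Illustrative machine: 512 MiB of RAM, but a physical address range
	 * spanning 3 GiB (e.g. due to a large non-RAM hole or aperture). */
	unsigned long num_physpages   = (3UL << 30) >> PAGE_SHIFT;
	unsigned long totalram_pages  = (512UL << 20) >> PAGE_SHIFT;
	unsigned long totalhigh_pages = 0;   /* no highmem in this example */

	printf("buckets from num_physpages:  %lu\n", buckets_for(num_physpages));
	printf("buckets from totalram_pages: %lu\n", buckets_for(totalram_pages));
	printf("buckets from lowmem only:    %lu\n",
	       buckets_for(totalram_pages - totalhigh_pages));
	return 0;
}
```

With these made-up numbers the raw physical-page count hits the 1 GiB cap (16384 buckets) even though only 512 MiB of RAM is actually usable, whereas totalram_pages yields a proportionate 4096 buckets; the last line shows the (totalram_pages - totalhigh_pages) variant the commit message suggests for allocations that cannot use high memory.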
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c	4
-rw-r--r--	net/dccp/proto.c	6
-rw-r--r--	net/decnet/dn_route.c	2
-rw-r--r--	net/ipv4/route.c	2
-rw-r--r--	net/ipv4/tcp.c	4
-rw-r--r--	net/netfilter/nf_conntrack_core.c	4
-rw-r--r--	net/netfilter/x_tables.c	2
-rw-r--r--	net/netfilter/xt_hashlimit.c	8
-rw-r--r--	net/netlink/af_netlink.c	6
-rw-r--r--	net/sctp/protocol.c	6
10 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 30d5446512f9..524712a7b154 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1206,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
void __init sk_init(void)
{
- if (num_physpages <= 4096) {
+ if (totalram_pages <= 4096) {
sysctl_wmem_max = 32767;
sysctl_rmem_max = 32767;
sysctl_wmem_default = 32767;
sysctl_rmem_default = 32767;
- } else if (num_physpages >= 131072) {
+ } else if (totalram_pages >= 131072) {
sysctl_wmem_max = 131071;
sysctl_rmem_max = 131071;
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 923db06c7e55..bc4467082a00 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (21 - PAGE_SHIFT);
else
- goal = num_physpages >> (23 - PAGE_SHIFT);
+ goal = totalram_pages >> (23 - PAGE_SHIFT);
if (thash_entries)
goal = (thash_entries *
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9383d3e5a1ab..57662cabaf9b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
- goal = num_physpages >> (26 - PAGE_SHIFT);
+ goal = totalram_pages >> (26 - PAGE_SHIFT);
for(order = 0; (1UL << order) < goal; order++)
/* NOTHING */;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 91867d3e6328..df9347314538 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3414,7 +3414,7 @@ int __init ip_rt_init(void)
alloc_large_system_hash("IP route cache",
sizeof(struct rt_hash_bucket),
rhash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
15 : 17,
0,
&rt_hash_log,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19a0612b8a20..21387ebabf00 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_hashinfo.ehash_size,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.bhash_size,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b37109817a98..7c9ec3dee96e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
* machine has 512 buckets. >= 1GB machines have 16384 buckets. */
if (!nf_conntrack_htable_size) {
nf_conntrack_htable_size
- = (((num_physpages << PAGE_SHIFT) / 16384)
+ = (((totalram_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 16384;
if (nf_conntrack_htable_size < 32)
nf_conntrack_htable_size = 32;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a6ac83a93348..f01955cce314 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
int cpu;
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+ if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 219dcdbe388c..dd16e404424f 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
if (minfo->cfg.size)
size = minfo->cfg.size;
else {
- size = ((num_physpages << PAGE_SHIFT) / 16384) /
+ size = ((totalram_pages << PAGE_SHIFT) / 16384) /
sizeof(struct list_head);
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
size = 8192;
if (size < 16)
size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
if (minfo->cfg.size) {
size = minfo->cfg.size;
} else {
- size = (num_physpages << PAGE_SHIFT) / 16384 /
+ size = (totalram_pages << PAGE_SHIFT) / 16384 /
sizeof(struct list_head);
- if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+ if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
size = 8192;
if (size < 16)
size = 16;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c5aab6a368ce..55180b99562a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2091,10 +2091,10 @@ static int __init netlink_proto_init(void)
if (!nl_table)
goto panic;
- if (num_physpages >= (128 * 1024))
- limit = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ limit = totalram_pages >> (21 - PAGE_SHIFT);
else
- limit = num_physpages >> (23 - PAGE_SHIFT);
+ limit = totalram_pages >> (23 - PAGE_SHIFT);
order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
limit = (1UL << order) / sizeof(struct hlist_head);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c557f1fb1c66..612dc878e05c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1184,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (22 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (22 - PAGE_SHIFT);
else
- goal = num_physpages >> (24 - PAGE_SHIFT);
+ goal = totalram_pages >> (24 - PAGE_SHIFT);
for (order = 0; (1UL << order) < goal; order++)
;