author	Stephen Hemminger <shemminger@vyatta.com>	2008-04-10 02:56:38 -0700
committer	David S. Miller <davem@davemloft.net>	2008-04-10 02:56:38 -0700
commit	15be75cdb5db442d0e33d37b20832b88f3ccd383 (patch)
tree	83fc9261da859ecf3d6dcad29dadc78f481f7d7f /net/ipv4
parent	5c06f510a25153ff79e8c2dca312b732a367c5bb (diff)
IPV4: fib_trie use vmalloc for large tnodes
Use vmalloc rather than alloc_pages to avoid wasting memory. The problem is that the tnode structure has a power-of-2 sized array plus a small header, so the current code wastes almost half of the memory allocated: it always has to round up to the next bigger size just to hold that header.

This is similar to an earlier patch by Eric, but instead of a list and lock, I used a workqueue to handle the fact that vfree cannot be called from interrupt context.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
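To see why alloc_pages() wastes close to half of each large tnode, the userspace sketch below (not part of the patch; the 40-byte header and 8-byte pointer sizes are assumed values for a 64-bit build) models a header plus a power-of-2 child array being rounded up to the next page order, the way get_order() does:

/* Illustrative arithmetic only: header + power-of-2 array vs. page-order rounding. */
#include <stdio.h>

#define HEADER_SIZE	40	/* assumed tnode header size */
#define PTR_SIZE	8	/* sizeof(struct node *) on a 64-bit build */
#define PAGE_SIZE	4096

/* Smallest power-of-2 multiple of PAGE_SIZE that holds x (what alloc_pages returns). */
static unsigned long next_page_order_size(unsigned long x)
{
	unsigned long p = PAGE_SIZE;
	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	for (int bits = 10; bits <= 18; bits += 4) {
		unsigned long need = HEADER_SIZE + (1UL << bits) * PTR_SIZE;
		unsigned long got  = next_page_order_size(need);
		printf("bits=%2d need=%8lu KiB alloc_pages=%8lu KiB wasted=%.0f%%\n",
		       bits, need >> 10, got >> 10,
		       100.0 * (got - need) / got);
	}
	return 0;
}

Because the small header pushes the size just past a power of two, the page allocator has to double the allocation; __vmalloc() only rounds up to the next whole page, so the overhead drops to less than PAGE_SIZE per large tnode.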
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/fib_trie.c | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 9e491e70e855..64a274282042 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -122,7 +122,10 @@ struct tnode {
 	unsigned char bits;		/* 2log(KEYLENGTH) bits needed */
 	unsigned int full_children;	/* KEYLENGTH bits needed */
 	unsigned int empty_children;	/* KEYLENGTH bits needed */
-	struct rcu_head rcu;
+	union {
+		struct rcu_head rcu;
+		struct work_struct work;
+	};
 	struct node *child[0];
 };
 
@@ -346,16 +349,16 @@ static inline void free_leaf_info(struct leaf_info *leaf)
 
 static struct tnode *tnode_alloc(size_t size)
 {
-	struct page *pages;
-
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
+	else
+		return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+}
 
-	pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
-	if (!pages)
-		return NULL;
-
-	return page_address(pages);
+static void __tnode_vfree(struct work_struct *arg)
+{
+	struct tnode *tn = container_of(arg, struct tnode, work);
+	vfree(tn);
 }
 
 static void __tnode_free_rcu(struct rcu_head *head)
@@ -366,8 +369,10 @@ static void __tnode_free_rcu(struct rcu_head *head)
 
 	if (size <= PAGE_SIZE)
 		kfree(tn);
-	else
-		free_pages((unsigned long)tn, get_order(size));
+	else {
+		INIT_WORK(&tn->work, __tnode_vfree);
+		schedule_work(&tn->work);
+	}
 }
 
 static inline void tnode_free(struct tnode *tn)
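
Reduced to a minimal standalone sketch (the big_obj names below are illustrative, not from fib_trie.c), the pattern the patch introduces looks like this: the rcu_head and the work_struct can share storage because the object sits on the RCU queue first and on the workqueue only afterwards, and vfree() is deferred to process context where sleeping is allowed:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

struct big_obj {
	size_t size;			/* remembers which allocator was used */
	union {
		struct rcu_head rcu;	/* used while waiting for the grace period */
		struct work_struct work;	/* reused afterwards to defer vfree() */
	};
	char data[];
};

static struct big_obj *big_obj_alloc(size_t payload)
{
	size_t size = sizeof(struct big_obj) + payload;
	struct big_obj *obj;

	if (size <= PAGE_SIZE)
		obj = kzalloc(size, GFP_KERNEL);
	else
		obj = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (obj)
		obj->size = size;
	return obj;
}

static void big_obj_vfree(struct work_struct *w)
{
	/* Runs in process context, where vfree() may sleep. */
	vfree(container_of(w, struct big_obj, work));
}

static void big_obj_free_rcu(struct rcu_head *head)
{
	struct big_obj *obj = container_of(head, struct big_obj, rcu);

	if (obj->size <= PAGE_SIZE)
		kfree(obj);		/* safe from softirq context */
	else {
		/* vfree() cannot run here, so punt it to a workqueue. */
		INIT_WORK(&obj->work, big_obj_vfree);
		schedule_work(&obj->work);
	}
}

static void big_obj_free(struct big_obj *obj)
{
	/* Readers may still hold RCU references; free only after a grace period. */
	call_rcu(&obj->rcu, big_obj_free_rcu);
}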