author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-20 21:04:44 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-20 21:04:44 -0700
commit     f8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree       3706a9cd779859271ca61b85c63a1bc3f82d626e /net/core/flow.c
parent     a26272e5200765691e67d6780e52b32498fdb659 (diff)
parent     2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
download   linux-f8965467f366fd18f01feafb5db10512d7b4422c.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits)
qlcnic: adding co maintainer
ixgbe: add support for active DA cables
ixgbe: dcb, do not tag tc_prio_control frames
ixgbe: fix ixgbe_tx_is_paused logic
ixgbe: always enable vlan strip/insert when DCB is enabled
ixgbe: remove some redundant code in setting FCoE FIP filter
ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp
ixgbe: fix header len when unsplit packet overflows to data buffer
ipv6: Never schedule DAD timer on dead address
ipv6: Use POSTDAD state
ipv6: Use state_lock to protect ifa state
ipv6: Replace inet6_ifaddr->dead with state
cxgb4: notify upper drivers if the device is already up when they load
cxgb4: keep interrupts available when the ports are brought down
cxgb4: fix initial addition of MAC address
cnic: Return SPQ credit to bnx2x after ring setup and shutdown.
cnic: Convert cnic_local_flags to atomic ops.
can: Fix SJA1000 command register writes on SMP systems
bridge: fix build for CONFIG_SYSFS disabled
ARCNET: Limit com20020 PCI ID matches for SOHARD cards
...
Fix up various conflicts with pcmcia tree drivers/net/
{pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and
wireless/orinoco/spectrum_cs.c} and feature removal
(Documentation/feature-removal-schedule.txt).
Also fix a non-content conflict due to pm_qos_requirement getting
renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'net/core/flow.c')
-rw-r--r--  net/core/flow.c  405
1 file changed, 233 insertions, 172 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 96015871ecea..161900674009 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,113 +26,158 @@
 #include <linux/security.h>

 struct flow_cache_entry {
-        struct flow_cache_entry *next;
-        u16                     family;
-        u8                      dir;
-        u32                     genid;
-        struct flowi            key;
-        void                    *object;
-        atomic_t                *object_ref;
+        union {
+                struct hlist_node       hlist;
+                struct list_head        gc_list;
+        } u;
+        u16                             family;
+        u8                              dir;
+        u32                             genid;
+        struct flowi                    key;
+        struct flow_cache_object        *object;
 };

-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-
-static u32 flow_hash_shift;
-#define flow_hash_size  (1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
-
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-
-static struct kmem_cache *flow_cachep __read_mostly;
+struct flow_cache_percpu {
+        struct hlist_head               *hash_table;
+        int                             hash_count;
+        u32                             hash_rnd;
+        int                             hash_rnd_recalc;
+        struct tasklet_struct           flush_tasklet;
+};

-static int flow_lwm, flow_hwm;
+struct flow_flush_info {
+        struct flow_cache               *cache;
+        atomic_t                        cpuleft;
+        struct completion               completion;
+};

-struct flow_percpu_info {
-        int hash_rnd_recalc;
-        u32 hash_rnd;
-        int count;
+struct flow_cache {
+        u32                             hash_shift;
+        unsigned long                   order;
+        struct flow_cache_percpu        *percpu;
+        struct notifier_block           hotcpu_notifier;
+        int                             low_watermark;
+        int                             high_watermark;
+        struct timer_list               rnd_timer;
 };
-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

-#define flow_hash_rnd_recalc(cpu) \
-        (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-#define flow_hash_rnd(cpu) \
-        (per_cpu(flow_hash_info, cpu).hash_rnd)
-#define flow_count(cpu) \
-        (per_cpu(flow_hash_info, cpu).count)
+atomic_t flow_cache_genid = ATOMIC_INIT(0);
+static struct flow_cache flow_cache_global;
+static struct kmem_cache *flow_cachep;

-static struct timer_list flow_hash_rnd_timer;
+static DEFINE_SPINLOCK(flow_cache_gc_lock);
+static LIST_HEAD(flow_cache_gc_list);

-#define FLOW_HASH_RND_PERIOD    (10 * 60 * HZ)
-
-struct flow_flush_info {
-        atomic_t cpuleft;
-        struct completion completion;
-};
-
-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
-
-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+#define flow_cache_hash_size(cache)     (1 << (cache)->hash_shift)
+#define FLOW_HASH_RND_PERIOD            (10 * 60 * HZ)

 static void flow_cache_new_hashrnd(unsigned long arg)
 {
+        struct flow_cache *fc = (void *) arg;
         int i;

         for_each_possible_cpu(i)
-                flow_hash_rnd_recalc(i) = 1;
+                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

-        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
-        add_timer(&flow_hash_rnd_timer);
+        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+        add_timer(&fc->rnd_timer);
+}
+
+static int flow_entry_valid(struct flow_cache_entry *fle)
+{
+        if (atomic_read(&flow_cache_genid) != fle->genid)
+                return 0;
+        if (fle->object && !fle->object->ops->check(fle->object))
+                return 0;
+        return 1;
 }

-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle)
 {
         if (fle->object)
-                atomic_dec(fle->object_ref);
+                fle->object->ops->delete(fle->object);
         kmem_cache_free(flow_cachep, fle);
-        flow_count(cpu)--;
 }

-static void __flow_cache_shrink(int cpu, int shrink_to)
+static void flow_cache_gc_task(struct work_struct *work)
 {
-        struct flow_cache_entry *fle, **flp;
-        int i;
+        struct list_head gc_list;
+        struct flow_cache_entry *fce, *n;

-        for (i = 0; i < flow_hash_size; i++) {
-                int k = 0;
+        INIT_LIST_HEAD(&gc_list);
+        spin_lock_bh(&flow_cache_gc_lock);
+        list_splice_tail_init(&flow_cache_gc_list, &gc_list);
+        spin_unlock_bh(&flow_cache_gc_lock);

-                flp = &flow_table(cpu)[i];
-                while ((fle = *flp) != NULL && k < shrink_to) {
-                        k++;
-                        flp = &fle->next;
-                }
-                while ((fle = *flp) != NULL) {
-                        *flp = fle->next;
-                        flow_entry_kill(cpu, fle);
-                }
+        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+                flow_entry_kill(fce);
+}
+static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
+
+static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
+                                     int deleted, struct list_head *gc_list)
+{
+        if (deleted) {
+                fcp->hash_count -= deleted;
+                spin_lock_bh(&flow_cache_gc_lock);
+                list_splice_tail(gc_list, &flow_cache_gc_list);
+                spin_unlock_bh(&flow_cache_gc_lock);
+                schedule_work(&flow_cache_gc_work);
         }
 }

-static void flow_cache_shrink(int cpu)
+static void __flow_cache_shrink(struct flow_cache *fc,
+                                struct flow_cache_percpu *fcp,
+                                int shrink_to)
 {
-        int shrink_to = flow_lwm / flow_hash_size;
+        struct flow_cache_entry *fle;
+        struct hlist_node *entry, *tmp;
+        LIST_HEAD(gc_list);
+        int i, deleted = 0;
+
+        for (i = 0; i < flow_cache_hash_size(fc); i++) {
+                int saved = 0;
+
+                hlist_for_each_entry_safe(fle, entry, tmp,
+                                          &fcp->hash_table[i], u.hlist) {
+                        if (saved < shrink_to &&
+                            flow_entry_valid(fle)) {
+                                saved++;
+                        } else {
+                                deleted++;
+                                hlist_del(&fle->u.hlist);
+                                list_add_tail(&fle->u.gc_list, &gc_list);
+                        }
+                }
+        }

-        __flow_cache_shrink(cpu, shrink_to);
+        flow_cache_queue_garbage(fcp, deleted, &gc_list);
 }

-static void flow_new_hash_rnd(int cpu)
+static void flow_cache_shrink(struct flow_cache *fc,
+                              struct flow_cache_percpu *fcp)
 {
-        get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
-        flow_hash_rnd_recalc(cpu) = 0;
+        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

-        __flow_cache_shrink(cpu, 0);
+        __flow_cache_shrink(fc, fcp, shrink_to);
 }

-static u32 flow_hash_code(struct flowi *key, int cpu)
+static void flow_new_hash_rnd(struct flow_cache *fc,
+                              struct flow_cache_percpu *fcp)
+{
+        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
+        fcp->hash_rnd_recalc = 0;
+        __flow_cache_shrink(fc, fcp, 0);
+}
+
+static u32 flow_hash_code(struct flow_cache *fc,
+                          struct flow_cache_percpu *fcp,
+                          struct flowi *key)
 {
         u32 *k = (u32 *) key;

-        return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
-                (flow_hash_size - 1));
+        return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+                & (flow_cache_hash_size(fc) - 1));
 }

 #if (BITS_PER_LONG == 64)
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
         return 0;
 }

-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
-                        flow_resolve_t resolver)
+struct flow_cache_object *
+flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+                  flow_resolve_t resolver, void *ctx)
 {
-        struct flow_cache_entry *fle, **head;
+        struct flow_cache *fc = &flow_cache_global;
+        struct flow_cache_percpu *fcp;
+        struct flow_cache_entry *fle, *tfle;
+        struct hlist_node *entry;
+        struct flow_cache_object *flo;
         unsigned int hash;
-        int cpu;

         local_bh_disable();
-        cpu = smp_processor_id();
+        fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

         fle = NULL;
+        flo = NULL;
         /* Packet really early in init?  Making flow_cache_init a
          * pre-smp initcall would solve this.  --RR */
-        if (!flow_table(cpu))
+        if (!fcp->hash_table)
                 goto nocache;

-        if (flow_hash_rnd_recalc(cpu))
-                flow_new_hash_rnd(cpu);
-        hash = flow_hash_code(key, cpu);
+        if (fcp->hash_rnd_recalc)
+                flow_new_hash_rnd(fc, fcp);

-        head = &flow_table(cpu)[hash];
-        for (fle = *head; fle; fle = fle->next) {
-                if (fle->family == family &&
-                    fle->dir == dir &&
-                    flow_key_compare(key, &fle->key) == 0) {
-                        if (fle->genid == atomic_read(&flow_cache_genid)) {
-                                void *ret = fle->object;
-
-                                if (ret)
-                                        atomic_inc(fle->object_ref);
-                                local_bh_enable();
-
-                                return ret;
-                        }
+        hash = flow_hash_code(fc, fcp, key);
+        hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
+                if (tfle->family == family &&
+                    tfle->dir == dir &&
+                    flow_key_compare(key, &tfle->key) == 0) {
+                        fle = tfle;
                         break;
                 }
         }

-        if (!fle) {
-                if (flow_count(cpu) > flow_hwm)
-                        flow_cache_shrink(cpu);
+        if (unlikely(!fle)) {
+                if (fcp->hash_count > fc->high_watermark)
+                        flow_cache_shrink(fc, fcp);

                 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                 if (fle) {
-                        fle->next = *head;
-                        *head = fle;
                         fle->family = family;
                         fle->dir = dir;
                         memcpy(&fle->key, key, sizeof(*key));
                         fle->object = NULL;
-                        flow_count(cpu)++;
+                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+                        fcp->hash_count++;
                 }
+        } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+                flo = fle->object;
+                if (!flo)
+                        goto ret_object;
+                flo = flo->ops->get(flo);
+                if (flo)
+                        goto ret_object;
+        } else if (fle->object) {
+                flo = fle->object;
+                flo->ops->delete(flo);
+                fle->object = NULL;
         }

 nocache:
-        {
-                int err;
-                void *obj;
-                atomic_t *obj_ref;
-
-                err = resolver(net, key, family, dir, &obj, &obj_ref);
-
-                if (fle && !err) {
-                        fle->genid = atomic_read(&flow_cache_genid);
-
-                        if (fle->object)
-                                atomic_dec(fle->object_ref);
-
-                        fle->object = obj;
-                        fle->object_ref = obj_ref;
-                        if (obj)
-                                atomic_inc(fle->object_ref);
-                }
-                local_bh_enable();
-
-                if (err)
-                        obj = ERR_PTR(err);
-                return obj;
+        flo = NULL;
+        if (fle) {
+                flo = fle->object;
+                fle->object = NULL;
         }
+        flo = resolver(net, key, family, dir, flo, ctx);
+        if (fle) {
+                fle->genid = atomic_read(&flow_cache_genid);
+                if (!IS_ERR(flo))
+                        fle->object = flo;
+                else
+                        fle->genid--;
+        } else {
+                if (flo && !IS_ERR(flo))
+                        flo->ops->delete(flo);
+        }
+ret_object:
+        local_bh_enable();
+        return flo;
 }

 static void flow_cache_flush_tasklet(unsigned long data)
 {
         struct flow_flush_info *info = (void *)data;
-        int i;
-        int cpu;
-
-        cpu = smp_processor_id();
-        for (i = 0; i < flow_hash_size; i++) {
-                struct flow_cache_entry *fle;
-
-                fle = flow_table(cpu)[i];
-                for (; fle; fle = fle->next) {
-                        unsigned genid = atomic_read(&flow_cache_genid);
-
-                        if (!fle->object || fle->genid == genid)
+        struct flow_cache *fc = info->cache;
+        struct flow_cache_percpu *fcp;
+        struct flow_cache_entry *fle;
+        struct hlist_node *entry, *tmp;
+        LIST_HEAD(gc_list);
+        int i, deleted = 0;
+
+        fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+        for (i = 0; i < flow_cache_hash_size(fc); i++) {
+                hlist_for_each_entry_safe(fle, entry, tmp,
+                                          &fcp->hash_table[i], u.hlist) {
+                        if (flow_entry_valid(fle))
                                 continue;

-                        fle->object = NULL;
-                        atomic_dec(fle->object_ref);
+                        deleted++;
+                        hlist_del(&fle->u.hlist);
+                        list_add_tail(&fle->u.gc_list, &gc_list);
                 }
         }

+        flow_cache_queue_garbage(fcp, deleted, &gc_list);
+
         if (atomic_dec_and_test(&info->cpuleft))
                 complete(&info->completion);
 }

-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
 static void flow_cache_flush_per_cpu(void *data)
 {
         struct flow_flush_info *info = data;
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
         struct tasklet_struct *tasklet;

         cpu = smp_processor_id();
-
-        tasklet = flow_flush_tasklet(cpu);
+        tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
         tasklet->data = (unsigned long)info;
         tasklet_schedule(tasklet);
 }
@@ -294,6 +341,7 @@ void flow_cache_flush(void)
         /* Don't want cpus going down or up during this. */
         get_online_cpus();
         mutex_lock(&flow_flush_sem);
+        info.cache = &flow_cache_global;
         atomic_set(&info.cpuleft, num_online_cpus());
         init_completion(&info.completion);

@@ -307,62 +355,75 @@ void flow_cache_flush(void)
         put_online_cpus();
 }

-static void __init flow_cache_cpu_prepare(int cpu)
+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+                                          struct flow_cache_percpu *fcp)
 {
-        struct tasklet_struct *tasklet;
-        unsigned long order;
-
-        for (order = 0;
-             (PAGE_SIZE << order) <
-             (sizeof(struct flow_cache_entry *)*flow_hash_size);
-             order++)
-                /* NOTHING */;
-
-        flow_table(cpu) = (struct flow_cache_entry **)
-                __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
-        if (!flow_table(cpu))
-                panic("NET: failed to allocate flow cache order %lu\n", order);
-
-        flow_hash_rnd_recalc(cpu) = 1;
-        flow_count(cpu) = 0;
-
-        tasklet = flow_flush_tasklet(cpu);
-        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
+        fcp->hash_table = (struct hlist_head *)
+                __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+        if (!fcp->hash_table)
+                panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+
+        fcp->hash_rnd_recalc = 1;
+        fcp->hash_count = 0;
+        tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
 }

 static int flow_cache_cpu(struct notifier_block *nfb,
                           unsigned long action,
                           void *hcpu)
 {
+        struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+        int cpu = (unsigned long) hcpu;
+        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+
         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
-                __flow_cache_shrink((unsigned long)hcpu, 0);
+                __flow_cache_shrink(fc, fcp, 0);
         return NOTIFY_OK;
 }

-static int __init flow_cache_init(void)
+static int flow_cache_init(struct flow_cache *fc)
 {
+        unsigned long order;
         int i;

-        flow_cachep = kmem_cache_create("flow_cache",
-                                        sizeof(struct flow_cache_entry),
-                                        0, SLAB_PANIC,
-                                        NULL);
-        flow_hash_shift = 10;
-        flow_lwm = 2 * flow_hash_size;
-        flow_hwm = 4 * flow_hash_size;
+        fc->hash_shift = 10;
+        fc->low_watermark = 2 * flow_cache_hash_size(fc);
+        fc->high_watermark = 4 * flow_cache_hash_size(fc);
+
+        for (order = 0;
+             (PAGE_SIZE << order) <
+             (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
+             order++)
+                /* NOTHING */;
+        fc->order = order;
+        fc->percpu = alloc_percpu(struct flow_cache_percpu);

-        setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0);
-        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
-        add_timer(&flow_hash_rnd_timer);
+        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+                    (unsigned long) fc);
+        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+        add_timer(&fc->rnd_timer);

         for_each_possible_cpu(i)
-                flow_cache_cpu_prepare(i);
+                flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
+
+        fc->hotcpu_notifier = (struct notifier_block){
+                .notifier_call = flow_cache_cpu,
+        };
+        register_hotcpu_notifier(&fc->hotcpu_notifier);

-        hotcpu_notifier(flow_cache_cpu, 0);
         return 0;
 }

-module_init(flow_cache_init);
+static int __init flow_cache_init_global(void)
+{
+        flow_cachep = kmem_cache_create("flow_cache",
+                                        sizeof(struct flow_cache_entry),
+                                        0, SLAB_PANIC, NULL);
+
+        return flow_cache_init(&flow_cache_global);
+}
+
+module_init(flow_cache_init_global);

 EXPORT_SYMBOL(flow_cache_genid);
 EXPORT_SYMBOL(flow_cache_lookup);
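
For context on the interface change in the diff above: flow_cache_lookup() now returns a struct flow_cache_object whose ops (get/check/delete) let the owning subsystem manage the object's lifetime, and resolvers receive the stale object plus an opaque ctx instead of filling in a void pointer and refcount. The sketch below is illustrative only and is not part of this commit: the ops signatures are inferred from how the diff calls them (include/net/flow.h carries the actual definitions in this series), and example_flow_object, example_ops, and example_resolver() are hypothetical names showing how a minimal resolver might satisfy the contract.

/*
 * Illustrative sketch only -- not part of this commit.
 * A minimal user of the flow_cache_object interface.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <net/flow.h>

struct example_flow_object {
        struct flow_cache_object flo;   /* embedded; the cache stores &obj->flo */
        atomic_t refcnt;
};

/* ->get: take a reference for the caller; returning NULL forces a re-resolve */
static struct flow_cache_object *example_get(struct flow_cache_object *flo)
{
        struct example_flow_object *obj =
                container_of(flo, struct example_flow_object, flo);

        atomic_inc(&obj->refcnt);
        return flo;
}

/* ->check: nonzero means the cached entry may still be used */
static int example_check(struct flow_cache_object *flo)
{
        return 1;
}

/* ->delete: drop one reference; free on the last put */
static void example_delete(struct flow_cache_object *flo)
{
        struct example_flow_object *obj =
                container_of(flo, struct example_flow_object, flo);

        if (atomic_dec_and_test(&obj->refcnt))
                kfree(obj);
}

static const struct flow_cache_ops example_ops = {
        .get    = example_get,
        .check  = example_check,
        .delete = example_delete,
};

/*
 * Resolver with the new signature: it receives any stale object so it can
 * release it, and returns the object to cache, or an ERR_PTR() on failure.
 */
static struct flow_cache_object *
example_resolver(struct net *net, struct flowi *key, u16 family, u8 dir,
                 struct flow_cache_object *old_obj, void *ctx)
{
        struct example_flow_object *obj;

        if (old_obj)
                old_obj->ops->delete(old_obj);

        obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
        if (!obj)
                return ERR_PTR(-ENOBUFS);

        obj->flo.ops = &example_ops;
        /* one reference kept by the cache entry, one handed to the caller,
         * matching how flow_cache_lookup() both stores and returns flo */
        atomic_set(&obj->refcnt, 2);
        return &obj->flo;
}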