author		Linus Torvalds <torvalds@linux-foundation.org>	2015-03-03 15:30:07 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-03-03 15:30:07 -0800
commit		789d7f60cdfdbef510025a2b988bba44cfcc96fe (patch)
tree		68b60928dcf79869b8fd926f516f20500d94dfb9 /lib
parent		13a7a6ac0a11197edcd0f756a035f472b42cdf8b (diff)
parent		f4f8e73850589008095b1da3c8c17cf68bd1c62a (diff)
download	linux-789d7f60cdfdbef510025a2b988bba44cfcc96fe.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) If an IPVS tunnel is created with a mixed-family destination
address, it cannot be removed. Fix from Alexey Andriyanov.
2) Fix module refcount underflow in netfilter's nft_compat, from Pablo
Neira Ayuso.
3) The generic statistics infrastructure can end up referencing variables
   on a function stack that has already been released, so always use
   dynamic allocation instead. Fix from Ignacy Gawędzki.
4) skb_copy_bits() return value test is inverted in ip_check_defrag().
5) Fix network namespace exit in openvswitch; we have to release all of
   the per-net vports. From Pravin B Shelar.
6) Fix signedness bug in CAIF's cfpkt_iterate(), from Dan Carpenter.
7) Fix rhashtable grow/shrink behavior: only expand during inserts and
   shrink during deletes. From Daniel Borkmann (the change is visible in
   the lib/ diff below).
8) Netdevice names with colons should never be allowed, because the colon
   serves as a separator. From Matthew Thode.
9) Use {,__}set_current_state() where appropriate, from Fabian
   Frederick (the usual idiom is sketched after this list).
10) Revert byte queue limits support in the r8169 driver; it's causing
    regressions we can't figure out.
11) tcp_should_expand_sndbuf() erroneously uses tp->packets_out to
    measure packets in flight; use tcp_packets_in_flight() instead.
    From Neal Cardwell (see the sketch after this list).
12) Fix accidental removal of support for Bluetooth in CSR-based Intel
    wireless cards. From Marcel Holtmann.
13) We accidentally added a behavioral change between native and compat
    tasks with respect to testing the MSG_CMSG_COMPAT bit. Just ignore it
    if the user happened to set it in a native binary, as that was always
    the behavior we had. From Catalin Marinas.
14) Check the genlmsg_unicast() return value in hwsim netlink tx frame
    handling, from Bob Copeland.
15) Fix stale ->radar_required setting in mac80211 that can prevent
starting new scans, from Eliad Peller.
16) Fix memory leak in nl80211 monitor, from Johannes Berg.
17) Fix race in TX index handling in xen-netback, from David Vrabel.
18) Don't enable interrupts in the amd-xgbe driver until all software
    state is ready for the interrupt handler to run. From Thomas
    Lendacky.
19) Add missing netlink_ns_capable() checks to rtnl_newlink(), from Eric
W Biederman.
20) The amount of header space needed in macvtap was not calculated
    properly; fix it, otherwise we splat past the beginning of the
    packet. From Eric Dumazet.
21) Fix bcmgenet TCP TX perf regression, from Jaedon Shin.
22) Don't open-code timer initialization and re-arming; use setup_timer()
    and mod_timer() instead. From Vaishali Thakkar (a before/after sketch
    follows this list).
23) Fix software maintained statistics in bcmgenet and systemport
drivers, from Florian Fainelli.
24) DMA descriptor updates in sh_eth need proper memory barriers, from
Ben Hutchings.
25) Don't do UDP Fragmentation Offload on RAW sockets, from Michal
Kubecek.
26) Openvswitch's non-masked set actions aren't serialized properly into
    netlink messages, fix from Joe Stringer.
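
For item 9, the usual sleep/wakeup idiom looks roughly like the sketch below; this is generic kernel boilerplate, not code from any of the merged patches, and "condition" is a placeholder. set_current_state() includes the memory barrier needed when the state change can race with a wakeup, while __set_current_state() is the cheaper form for cases without such a race (e.g. restoring TASK_RUNNING in the same context):

	/* Hypothetical wait loop illustrating the two helpers. */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);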
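
For item 11, tp->packets_out only counts segments sent and not yet cumulatively ACKed, so it overstates what is actually in the pipe once segments have been SACKed or marked lost. The helper the sndbuf-expansion check should use accounts for that; the following is a sketch of tcp_packets_in_flight() as defined in include/net/tcp.h around this time, not a quote of the patch itself:

	/* Sketch: segments actually still in flight. */
	static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
	{
		/* SACKed and lost segments are no longer in the pipe;
		 * retransmitted ones are.
		 */
		return tp->packets_out - (tp->sacked_out + tp->lost_out) +
		       tp->retrans_out;
	}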
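
For item 22, the conversion pattern is roughly the following; my_timer, my_timer_fn and priv are illustrative names, not code from the merged patches. At the time of this merge the timer callback takes an unsigned long cookie:

	/* Open-coded form being removed: */
	init_timer(&my_timer);
	my_timer.function = my_timer_fn;	/* void my_timer_fn(unsigned long data) */
	my_timer.data     = (unsigned long)priv;
	my_timer.expires  = jiffies + HZ;
	add_timer(&my_timer);

	/* Preferred form: */
	setup_timer(&my_timer, my_timer_fn, (unsigned long)priv);
	mod_timer(&my_timer, jiffies + HZ);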
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (116 commits)
openvswitch: Fix serialization of non-masked set actions.
gianfar: Reduce logging noise seen due to phy polling if link is down
ibmveth: Add function to enable live MAC address changes
net: bridge: add compile-time assert for cb struct size
udp: only allow UFO for packets from SOCK_DGRAM sockets
sh_eth: Really fix padding of short frames on TX
Revert "sh_eth: Enable Rx descriptor word 0 shift for r8a7790"
sh_eth: Fix RX recovery on R-Car in case of RX ring underrun
sh_eth: Ensure proper ordering of descriptor active bit write/read
net/mlx4_en: Disable GRO for incoming loopback/selftest packets
net/mlx4_core: Fix wrong mask and error flow for the update-qp command
net: systemport: fix software maintained statistics
net: bcmgenet: fix software maintained statistics
rxrpc: don't multiply with HZ twice
rxrpc: terminate retrans loop when sending of skb fails
net/hsr: Fix NULL pointer dereference and refcnt bugs when deleting a HSR interface.
net: pasemi: Use setup_timer and mod_timer
net: stmmac: Use setup_timer and mod_timer
net: 8390: axnet_cs: Use setup_timer and mod_timer
net: 8390: pcnet_cs: Use setup_timer and mod_timer
...
Diffstat (limited to 'lib')
-rw-r--r--	lib/rhashtable.c	62
-rw-r--r--	lib/test_rhashtable.c	11
2 files changed, 36 insertions(+), 37 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
-
 	if (tbl == NULL)
 		return NULL;
 
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
 				complete = false;
 
 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}
 
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	size_t size = tbl->size;
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl &&
-	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_wakeup_worker(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&iter->walker->list);
+	iter->walker->resize = false;
+
 	mutex_lock(&ht->mutex);
 	list_add(&iter->walker->list, &ht->walkers);
 	mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
 }
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	bucket_table_free(rht_dereference(ht->tbl, ht));
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 1dfeba73fc74..67c7593d1dd6 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -191,18 +191,18 @@ error:
 	return err;
 }
 
+static struct rhashtable ht;
+
 static int __init test_rht_init(void)
 {
-	struct rhashtable ht;
 	struct rhashtable_params params = {
 		.nelem_hint = TEST_HT_SIZE,
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
+		.max_shift = 1, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 	int err;
 
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
 	return err;
 }
 
+static void __exit test_rht_exit(void)
+{
+}
+
 module_init(test_rht_init);
+module_exit(test_rht_exit);
 
 MODULE_LICENSE("GPL v2");
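
One practical consequence of the lib/rhashtable.c change above shows up on the caller side: the grow/shrink decisions are now built into the library, so users no longer pass .grow_decision / .shrink_decision and automatic resizing is always active. A hypothetical caller after this change might look like the sketch below; struct my_obj, my_table and the key/node field names are illustrative only, modeled on lib/test_rhashtable.c above:

	/* Assumes <linux/rhashtable.h> and <linux/jhash.h>. */
	struct my_obj {
		int			key;
		struct rhash_head	node;
	};

	static struct rhashtable my_table;

	static int my_table_setup(void)
	{
		struct rhashtable_params params = {
			.nelem_hint	= 64,
			.head_offset	= offsetof(struct my_obj, node),
			.key_offset	= offsetof(struct my_obj, key),
			.key_len	= sizeof(int),
			.hashfn		= jhash,
			.nulls_base	= (3U << RHT_BASE_SHIFT),
		};

		/* No .grow_decision / .shrink_decision: resizing is automatic,
		 * driven by inserts (expand) and deletes (shrink).
		 */
		return rhashtable_init(&my_table, &params);
	}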