author     Herbert Xu <herbert@gondor.apana.org.au>  2015-03-18 20:01:15 +1100
committer  David S. Miller <davem@davemloft.net>     2015-03-18 12:46:40 -0400
commit     6aebd940840a4d3a0a8ffc5883d3892f4bd61e90 (patch)
tree       2f3e6778b2105a446ce41a09f7d29c078119d63f /lib/rhashtable.c
parent     a61bfa65facebd64403c94ebdab50323ce8942b2 (diff)
download   linux-6aebd940840a4d3a0a8ffc5883d3892f4bd61e90.tar.bz2
rhashtable: Remove shift from bucket_table
Keeping both size and shift is silly. We only need one.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
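To see why nothing is lost by dropping the shift, recall that rhashtable bucket tables are always sized to a power of two, so the size and the shift carry the same information: size == 1UL << shift and shift == ilog2(size). A minimal userspace sketch of that round trip follows (not kernel code; ilog2_pow2() is an illustrative stand-in for the kernel's ilog2()):

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2() on a power-of-two value. */
static unsigned int ilog2_pow2(unsigned long v)
{
	unsigned int shift = 0;

	while (v > 1) {
		v >>= 1;
		shift++;
	}
	return shift;
}

int main(void)
{
	unsigned int shift;

	/* For power-of-two table sizes, shift is fully recoverable from
	 * size, so storing both in struct bucket_table is redundant.
	 */
	for (shift = 1; shift <= 16; shift++) {
		unsigned long size = 1UL << shift;

		assert(ilog2_pow2(size) == shift);
	}
	printf("size and shift are interchangeable for power-of-two tables\n");
	return 0;
}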
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 09a7ada89ade..097400362467 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -162,7 +162,6 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	tbl->size = nbuckets;
-	tbl->shift = ilog2(nbuckets);
 
 	if (alloc_bucket_locks(ht, tbl) < 0) {
 		bucket_table_free(tbl);
@@ -189,7 +188,7 @@ static bool rht_grow_above_75(const struct rhashtable *ht,
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
-	       (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
+	       (!ht->p.max_shift || tbl->size < (1 << ht->p.max_shift));
 }
 
 /**
@@ -202,7 +201,7 @@ static bool rht_shrink_below_30(const struct rhashtable *ht,
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
-	       tbl->shift > ht->p.min_shift;
+	       tbl->size > (1 << ht->p.min_shift);
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
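The two hunks above swap the shift-based bounds for size-based ones. As a rough sanity check (plain userspace C with illustrative helper names, not the kernel code), the rewritten grow condition agrees with the old one whenever size == 1UL << shift; the same argument covers the shrink check against min_shift:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Old-style bound: compare the table's shift against max_shift. */
static bool can_grow_by_shift(unsigned int shift, unsigned int max_shift)
{
	return !max_shift || shift < max_shift;
}

/* New-style bound: compare the table's size against 1 << max_shift,
 * as the patch does once tbl->shift is gone.
 */
static bool can_grow_by_size(unsigned long size, unsigned int max_shift)
{
	return !max_shift || size < (1UL << max_shift);
}

int main(void)
{
	unsigned int max_shift = 10;	/* arbitrary example bound */
	unsigned int shift;

	for (shift = 1; shift <= 12; shift++) {
		unsigned long size = 1UL << shift;

		/* Both forms agree for every power-of-two table size. */
		assert(can_grow_by_shift(shift, max_shift) ==
		       can_grow_by_size(size, max_shift));
	}
	printf("shift-based and size-based checks are equivalent\n");
	return 0;
}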