author    Eric Dumazet <dada1@cosmosbay.com>    2007-12-21 01:49:07 -0800
committer David S. Miller <davem@davemloft.net> 2008-01-28 14:59:57 -0800
commit    b790cedd24a7f7d1639072b3faf35f1f56cb38ea (patch)
tree      b39d755577be2cc822b5340ba5e4d77a45a6b4a7 /net
parent    9cb5734e5b9b26097c7fa28a9c6426a204cc15e3 (diff)
[INET]: Avoid an integer divide in rt_garbage_collect()
Since 'goal' is a signed int, the compiler may emit an integer divide to compute goal/2. Using a right shift is OK here and less expensive.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/route.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1cc6c23cf758..933b093721ea 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -851,14 +851,14 @@ static int rt_garbage_collect(void)
equilibrium = ipv4_dst_ops.gc_thresh;
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
if (goal > 0) {
- equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
}
} else {
/* We are in dangerous area. Try to reduce cache really
* aggressively.
*/
- goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
+ goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
}
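
The reasoning in the commit message can be seen in a small standalone program. This is a minimal illustrative sketch, not part of the commit (only the variable name 'goal' is borrowed from the patch): it shows why a compiler cannot lower a signed goal / 2 to a bare arithmetic shift. C integer division truncates toward zero, while an arithmetic right shift rounds toward negative infinity, so the two disagree for negative odd values and the divide needs extra work unless the compiler can prove the operand is non-negative.

/*
 * Sketch: signed division by 2 vs. arithmetic right shift.
 * Build with: cc -O2 demo.c && ./a.out
 */
#include <stdio.h>

int main(void)
{
	int goal = -7;

	printf("goal / 2  = %d\n", goal / 2);	/* -3: truncates toward zero */
	printf("goal >> 1 = %d\n", goal >> 1);	/* -4: rounds toward -infinity */

	goal = 7;
	/* For non-negative values the two agree (both print 3). */
	printf("goal / 2  = %d, goal >> 1 = %d\n", goal / 2, goal >> 1);

	return 0;
}

On x86, for example, gcc typically compiles the signed divide by 2 as a shift plus a sign-bias correction, whereas goal >> 1 becomes a single arithmetic-shift instruction, which is the saving the patch is after; the commit message notes that the shift's different rounding is acceptable at these two call sites.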