author		Felix Fietkau <nbd@nbd.name>	2020-07-26 15:09:46 +0200
committer	Johannes Berg <johannes.berg@intel.com>	2020-07-31 09:24:24 +0200
commit		48a54f6bc456859dabbd1fbd805e233d260754cf (patch)
tree		5b4b89380a37fabd300e5e1dfaa83aae743dab27 /include/net/fq_impl.h
parent		3ff901cb5df1d2102e924d75d91347a2a3070fa5 (diff)
download	linux-48a54f6bc456859dabbd1fbd805e233d260754cf.tar.bz2
net/fq_impl: use skb_get_hash instead of skb_get_hash_perturb
This avoids unnecessarily regenerating the skb flow hash.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20200726130947.88145-1-nbd@nbd.name
[small commit message fixup]
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
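For background (not stated in the patch itself): skb_get_hash() returns the flow hash already cached in skb->hash when one is available (for example, supplied by the NIC or computed by an earlier call) and only dissects and hashes the packet otherwise, whereas skb_get_hash_perturb() recomputes the hash on every call using the caller-supplied perturbation key. Below is a rough, self-contained sketch of that compute-once-and-cache pattern; the sketch_* types and helpers are illustrative stand-ins, not kernel APIs, and the perturbed variant only loosely mimics the real helper, which mixes the perturbation into the hash key rather than XORing it in afterwards.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the relevant sk_buff fields. */
    struct sketch_skb {
        uint32_t hash;       /* cached flow hash */
        bool     hash_valid; /* set once a hash has been computed */
    };

    /* Stand-in for dissecting and hashing the flow keys (expensive). */
    static uint32_t sketch_hash_packet(const struct sketch_skb *skb)
    {
        (void)skb;
        return 0xdeadbeefu; /* pretend this took real work */
    }

    /* Pattern followed by skb_get_hash(): hash once, then reuse. */
    static uint32_t sketch_get_hash(struct sketch_skb *skb)
    {
        if (!skb->hash_valid) {
            skb->hash = sketch_hash_packet(skb);
            skb->hash_valid = true;
        }
        return skb->hash;
    }

    /* Pattern of skb_get_hash_perturb(): rehash on every call. */
    static uint32_t sketch_get_hash_perturb(const struct sketch_skb *skb,
                                            uint32_t perturbation)
    {
        return sketch_hash_packet(skb) ^ perturbation;
    }

    int main(void)
    {
        struct sketch_skb skb = { 0 };

        /* The second call hits the cache instead of rehashing. */
        printf("%08x\n", sketch_get_hash(&skb));
        printf("%08x\n", sketch_get_hash(&skb));
        /* The perturbed variant pays the hashing cost each time. */
        printf("%08x\n", sketch_get_hash_perturb(&skb, 0x12345678u));
        return 0;
    }

Dropping the perturbation is also what allows the second hunk below to remove the fq->perturbation seeding from fq_init().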
Diffstat (limited to 'include/net/fq_impl.h')
-rw-r--r--	include/net/fq_impl.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 38a9a3d1222b..e73d74d2fabf 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -108,7 +108,7 @@ begin:
 
 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
 {
-	u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
+	u32 hash = skb_get_hash(skb);
 
 	return reciprocal_scale(hash, fq->flows_cnt);
 }
@@ -308,7 +308,6 @@ static int fq_init(struct fq *fq, int flows_cnt)
 	INIT_LIST_HEAD(&fq->backlogs);
 	spin_lock_init(&fq->lock);
 	fq->flows_cnt = max_t(u32, flows_cnt, 1);
-	get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
 	fq->quantum = 300;
 	fq->limit = 8192;
 	fq->memory_limit = 16 << 20; /* 16 MBytes */
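For reference on the line left unchanged in the first hunk: reciprocal_scale() maps the full 32-bit hash onto the range [0, flows_cnt) using a multiply-and-high-shift instead of a modulo. A standalone sketch of that arithmetic is below; the helper itself lives in the kernel headers, and the input values here are made-up examples.

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's reciprocal_scale(): scale a
     * full-range 32-bit value into [0, ep_ro) without a division. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t flows_cnt = 4096;      /* example fq->flows_cnt */
        uint32_t hash = 0x9e3779b9u;    /* example skb flow hash */

        printf("flow idx = %u\n", reciprocal_scale(hash, flows_cnt));
        return 0;
    }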