author     Linus Torvalds <torvalds@linux-foundation.org>  2022-10-16 15:27:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-10-16 15:27:07 -0700
commit     f1947d7c8a61db1cb0ef909a6512ede0b1f2115b (patch)
tree       bbe7f785243bb692f243d08de8bc5ef4a82454d6 /net/sched
parent     8636df94ec917019c4cb744ba0a1f94cf9057790 (diff)
parent     de492c83cae0af72de370b9404aacda93dafcad5 (diff)
Merge tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random
Pull more random number generator updates from Jason Donenfeld:
 "This time with some large scale treewide cleanups.

  The intent of this pull is to clean up the way callers fetch random
  integers. The current rules for doing this right are:

   - If you want a secure or an insecure random u64, use get_random_u64()

   - If you want a secure or an insecure random u32, use get_random_u32()

     The old function prandom_u32() has been deprecated for a while now
     and is just a wrapper around get_random_u32(). Same for
     get_random_int().

   - If you want a secure or an insecure random u16, use get_random_u16()

   - If you want a secure or an insecure random u8, use get_random_u8()

   - If you want secure or insecure random bytes, use get_random_bytes().

     The old function prandom_bytes() has been deprecated for a while now
     and has long been a wrapper around get_random_bytes()

   - If you want a non-uniform random u32, u16, or u8 bounded by a
     certain open interval maximum, use prandom_u32_max()

     I say "non-uniform", because it doesn't do any rejection sampling or
     divisions. Hence, it stays within the prandom_*() namespace, not the
     get_random_*() namespace. I'm currently investigating a "uniform"
     function for 6.2. We'll see what comes of that.

  By applying these rules uniformly, we get several benefits:

   - By using prandom_u32_max() with an upper-bound that the compiler can
     prove at compile-time is ≤65536 or ≤256, internally get_random_u16()
     or get_random_u8() is used, which wastes fewer batched random bytes,
     and hence has higher throughput.

   - By using prandom_u32_max() instead of %, when the upper-bound is not
     a constant, division is still avoided, because prandom_u32_max()
     uses a faster multiplication-based trick instead.

   - By using get_random_u16() or get_random_u8() in cases where the
     return value is intended to indeed be a u16 or a u8, we waste fewer
     batched random bytes, and hence have higher throughput.

  This series was originally done by hand while I was on an airplane
  without Internet. Later, Kees and I worked on retroactively figuring
  out what could be done with Coccinelle and what had to be done
  manually, and then we split things up based on that. So while this
  touches a lot of files, the actual amount of code that's hand fiddled
  is comfortably small"

* tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  prandom: remove unused functions
  treewide: use get_random_bytes() when possible
  treewide: use get_random_u32() when possible
  treewide: use get_random_{u8,u16}() when possible, part 2
  treewide: use get_random_{u8,u16}() when possible, part 1
  treewide: use prandom_u32_max() when possible, part 2
  treewide: use prandom_u32_max() when possible, part 1
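As an aside on the "multiplication-based trick" mentioned above, a minimal
self-contained sketch of the idea follows (plain userspace C; the function
name bounded_random_u32 and the caller-supplied raw value are illustrative
assumptions, not the kernel helper itself). The widening multiply maps a
full-range 32-bit value into [0, bound) without a division; it is slightly
non-uniform unless bound divides 2^32, which is why the message keeps the
helper in the prandom_*() namespace rather than get_random_*().

    #include <stdint.h>

    /* Sketch of the divisionless bounding trick: take the high 32 bits of
     * the 64-bit product. For a given bound this behaves like "% bound" on
     * average, but compiles to a multiply and a shift instead of a divide.
     */
    static uint32_t bounded_random_u32(uint32_t raw, uint32_t bound)
    {
            return (uint32_t)(((uint64_t)raw * bound) >> 32);
    }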
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_gact.c     2
-rw-r--r--  net/sched/act_sample.c   2
-rw-r--r--  net/sched/sch_cake.c     8
-rw-r--r--  net/sched/sch_netem.c   22
-rw-r--r--  net/sched/sch_pie.c      2
-rw-r--r--  net/sched/sch_sfb.c      2
6 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index abe1bcc5c797..62d682b96b88 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -25,7 +25,7 @@ static struct tc_action_ops act_gact_ops;
static int gact_net_rand(struct tcf_gact *gact)
{
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
- if (prandom_u32() % gact->tcfg_pval)
+ if (prandom_u32_max(gact->tcfg_pval))
return gact->tcf_action;
return gact->tcfg_paction;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5ba36f70e3a1..7a25477f5d99 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -168,7 +168,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
psample_group = rcu_dereference_bh(s->psample_group);
/* randomly sample packets according to rate */
- if (psample_group && (prandom_u32() % s->rate == 0)) {
+ if (psample_group && (prandom_u32_max(s->rate) == 0)) {
if (!skb_at_tc_ingress(skb)) {
md.in_ifindex = skb->skb_iif;
md.out_ifindex = skb->dev->ifindex;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 55c6879d2c7e..817cd0695b35 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -573,7 +573,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
/* Simple BLUE implementation. Lack of ECN is deliberate. */
if (vars->p_drop)
- drop |= (prandom_u32() < vars->p_drop);
+ drop |= (get_random_u32() < vars->p_drop);
/* Overload the drop_next field as an activity timeout */
if (!vars->count)
@@ -2092,11 +2092,11 @@ retry:
WARN_ON(host_load > CAKE_QUEUES);
- /* The shifted prandom_u32() is a way to apply dithering to
- * avoid accumulating roundoff errors
+ /* The get_random_u16() is a way to apply dithering to avoid
+ * accumulating roundoff errors
*/
flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- (prandom_u32() >> 16)) >> 16;
+ get_random_u16()) >> 16;
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
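A side note on the dithering comment in the sch_cake.c hunk above, in case
the idiom is unfamiliar: the deficit increment is a Q16 fixed-point value,
and adding a uniform 16-bit value before the final right shift makes the
truncated fraction round up with matching probability, so rounding error
averages out across iterations instead of accumulating. A minimal sketch
under that reading (standalone C; dithered_q16_to_int is a hypothetical
name, not kernel code):

    #include <stdint.h>

    /* scaled_q16 carries 16 fractional bits. A plain ">> 16" always rounds
     * down; adding uniform noise in [0, 65535] first rounds up with
     * probability equal to the discarded fraction, so the expected result
     * equals the exact quotient (stochastic rounding / dithering).
     */
    static uint32_t dithered_q16_to_int(uint32_t scaled_q16, uint16_t noise)
    {
            return (scaled_q16 + noise) >> 16;
    }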
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 18f4273a835b..fb00ac40ecb7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -171,7 +171,7 @@ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
static void init_crandom(struct crndstate *state, unsigned long rho)
{
state->rho = rho;
- state->last = prandom_u32();
+ state->last = get_random_u32();
}
/* get_crandom - correlated random number generator
@@ -184,9 +184,9 @@ static u32 get_crandom(struct crndstate *state)
unsigned long answer;
if (!state || state->rho == 0) /* no correlation */
- return prandom_u32();
+ return get_random_u32();
- value = prandom_u32();
+ value = get_random_u32();
rho = (u64)state->rho + 1;
answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
state->last = answer;
@@ -200,7 +200,7 @@ static u32 get_crandom(struct crndstate *state)
static bool loss_4state(struct netem_sched_data *q)
{
struct clgstate *clg = &q->clg;
- u32 rnd = prandom_u32();
+ u32 rnd = get_random_u32();
/*
* Makes a comparison between rnd and the transition
@@ -268,15 +268,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
switch (clg->state) {
case GOOD_STATE:
- if (prandom_u32() < clg->a1)
+ if (get_random_u32() < clg->a1)
clg->state = BAD_STATE;
- if (prandom_u32() < clg->a4)
+ if (get_random_u32() < clg->a4)
return true;
break;
case BAD_STATE:
- if (prandom_u32() < clg->a2)
+ if (get_random_u32() < clg->a2)
clg->state = GOOD_STATE;
- if (prandom_u32() > clg->a3)
+ if (get_random_u32() > clg->a3)
return true;
}
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto finish_segs;
}
- skb->data[prandom_u32() % skb_headlen(skb)] ^=
- 1<<(prandom_u32() % 8);
+ skb->data[prandom_u32_max(skb_headlen(skb))] ^=
+ 1<<prandom_u32_max(8);
}
if (unlikely(sch->q.qlen >= sch->limit)) {
@@ -632,7 +632,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
if (!q->slot_dist)
next_delay = q->slot_config.min_delay +
- (prandom_u32() *
+ (get_random_u32() *
(q->slot_config.max_delay -
q->slot_config.min_delay) >> 32);
else
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 974038ba6c7b..265c238047a4 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -72,7 +72,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
if (vars->accu_prob >= (MAX_PROB / 2) * 17)
return true;
- prandom_bytes(&rnd, 8);
+ get_random_bytes(&rnd, 8);
if ((rnd >> BITS_PER_BYTE) < local_prob) {
vars->accu_prob = 0;
return true;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e2389fa3cff8..0366a1a029a9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -379,7 +379,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto enqueue;
}
- r = prandom_u32() & SFB_MAX_PROB;
+ r = get_random_u16() & SFB_MAX_PROB;
if (unlikely(r < p_min)) {
if (unlikely(p_min > SFB_MAX_PROB / 2)) {