author     Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 14:58:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-08-03 14:58:38 -0700
commit     e4cbce4d131753eca271d9d67f58c6377f27ad21 (patch)
tree       e08e3c8836cd7b9f800e209131aed70897f5fe07 /lib
parent     b34133fec882d2717f2d61a2a010edd3422368c8 (diff)
parent     949bcb8135a96a6923e676646bd29cbe69e8350f (diff)
download   linux-e4cbce4d131753eca271d9d67f58c6377f27ad21.tar.bz2
Merge tag 'sched-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - Improve uclamp performance by using a static key for the fast path

 - Add the "sched_util_clamp_min_rt_default" sysctl, to optimize for
   better power efficiency of RT tasks on battery-powered devices.
   (The default is to maximize performance & reduce RT latencies.)

 - Improve utime and stime tracking accuracy, which had a fixed boundary
   of error that created larger and larger relative errors as the values
   grew. This is now replaced with more precise arithmetic, using the
   new mul_u64_u64_div_u64() helper in math64.h.

 - Improve the deadline scheduler, such as making it capacity aware

 - Improve frequency-invariant scheduling

 - Misc cleanups in energy/power-aware scheduling

 - Add sched_update_nr_running tracepoint to track changes to nr_running

 - Documentation additions and updates

 - Misc cleanups and smaller fixes

* tag 'sched-core-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (54 commits)
  sched/doc: Factorize bits between sched-energy.rst & sched-capacity.rst
  sched/doc: Document capacity aware scheduling
  sched: Document arch_scale_*_capacity()
  arm, arm64: Fix selection of CONFIG_SCHED_THERMAL_PRESSURE
  Documentation/sysctl: Document uclamp sysctl knobs
  sched/uclamp: Add a new sysctl to control RT default boost value
  sched/uclamp: Fix a deadlock when enabling uclamp static key
  sched: Remove duplicated tick_nohz_full_enabled() check
  sched: Fix a typo in a comment
  sched/uclamp: Remove unnecessary mutex_init()
  arm, arm64: Select CONFIG_SCHED_THERMAL_PRESSURE
  sched: Cleanup SCHED_THERMAL_PRESSURE kconfig entry
  arch_topology, sched/core: Cleanup thermal pressure definition
  trace/events/sched.h: fix duplicated word
  linux/sched/mm.h: drop duplicated words in comments
  smp: Fix a potential usage of stale nr_cpus
  sched/fair: update_pick_idlest() Select group with lowest group_util when idle_cpus are equal
  sched: nohz: stop passing around unused "ticks" parameter.
  sched: Better document ttwu()
  sched: Add a tracepoint to track rq->nr_running
  ...
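The utime/stime accuracy item is the one backed by the lib/ changes below. A minimal sketch of the scaling it improves, hedged: this paraphrases the kernel/sched/cputime.c logic from the same merge rather than quoting it; only mul_u64_u64_div_u64() itself appears in this diff:

    #include <linux/math64.h>

    /*
     * Adjusted stime is rtime scaled by the stime:(stime + utime)
     * ratio.  With plain u64 arithmetic the product rtime * stime
     * overflows for long-running tasks; the new helper carries the
     * intermediate product at extended precision instead.
     */
    static u64 scale_stime_sketch(u64 stime, u64 utime, u64 rtime)
    {
            return mul_u64_u64_div_u64(stime, rtime, stime + utime);
    }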
Diffstat (limited to 'lib')
-rw-r--r--  lib/cpumask.c      16
-rw-r--r--  lib/math/div64.c   41
2 files changed, 52 insertions, 5 deletions
diff --git a/lib/cpumask.c b/lib/cpumask.c
index fb22fb266f93..85da6ab4fbb5 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,6 +6,7 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
+#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -205,22 +206,27 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu;
+	int cpu, hk_flags;
+	const struct cpumask *mask;
 
+	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
+	mask = housekeeping_cpumask(hk_flags);
 	/* Wrap: we always want a cpu. */
-	i %= num_online_cpus();
+	i %= cpumask_weight(mask);
 
 	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, cpu_online_mask)
+		for_each_cpu(cpu, mask) {
 			if (i-- == 0)
 				return cpu;
+		}
 	} else {
 		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
 			if (i-- == 0)
 				return cpu;
+		}
 
-		for_each_cpu(cpu, cpu_online_mask) {
+		for_each_cpu(cpu, mask) {
 			/* Skip NUMA nodes, done above. */
 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
 				continue;
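For context, cpumask_local_spread() is what drivers call to pick the i-th CPU for a queue or interrupt, preferring CPUs on the device's NUMA node; after this change it draws only from housekeeping CPUs, so isolated CPUs no longer get managed-IRQ work spread onto them. A sketch of the typical calling pattern (illustrative only: the function and variable names below are made up, not taken from this diff):

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /*
     * Spread per-queue IRQ affinity hints across CPUs near the
     * device's NUMA node; cpumask_local_spread() now returns
     * housekeeping CPUs only.
     */
    static void example_set_affinity_hints(int node, int nqueues,
                                           const int *irqs)
    {
            int i;

            for (i = 0; i < nqueues; i++) {
                    int cpu = cpumask_local_spread(i, node);

                    irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
            }
    }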
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 368ca7fd0d82..3952a07130d8 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -190,3 +190,44 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return __iter_div_u64_rem(dividend, divisor, remainder);
 }
 EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+	u64 res = 0, div, rem;
+	int shift;
+
+	/* can a * b overflow ? */
+	if (ilog2(a) + ilog2(b) > 62) {
+		/*
+		 * (b * a) / c is equal to
+		 *
+		 *      (b / c) * a +
+		 *      (b % c) * a / c
+		 *
+		 * if nothing overflows. Can the 1st multiplication
+		 * overflow? Yes, but we do not care: this can only
+		 * happen if the end result can't fit in u64 anyway.
+		 *
+		 * So the code below does
+		 *
+		 *      res = (b / c) * a;
+		 *      b = b % c;
+		 */
+		div = div64_u64_rem(b, c, &rem);
+		res = div * a;
+		b = rem;
+
+		shift = ilog2(a) + ilog2(b) - 62;
+		if (shift > 0) {
+			/* drop precision */
+			b >>= shift;
+			c >>= shift;
+			if (!c)
+				return res;
+		}
+	}
+
+	return res + div64_u64(a * b, c);
+}
+#endif
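To see what the helper buys, here is a hedged userspace sketch (not kernel code; unsigned __int128 stands in as a reference oracle for the extended-precision product) showing where the naive u64 expression wraps:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Reference: exact (a * b) / c via 128-bit math, which is what
     * mul_u64_u64_div_u64() computes when no precision is dropped.
     */
    static uint64_t mul_div_ref(uint64_t a, uint64_t b, uint64_t c)
    {
            return (uint64_t)(((unsigned __int128)a * b) / c);
    }

    int main(void)
    {
            uint64_t a = 20000000000ULL;    /* a * b = 2e19 > 2^64 */
            uint64_t b = 1000000000ULL;
            uint64_t c = 4000000000ULL;

            /* The naive u64 product wraps modulo 2^64 before dividing. */
            printf("naive  : %llu\n", (unsigned long long)(a * b / c));
            printf("128-bit: %llu\n", (unsigned long long)mul_div_ref(a, b, c));
            return 0;
    }

One caller from this same merge is the cputime scaling sketched above. Note also that when even the split (b / c) * a + (b % c) * a / c path would overflow, the helper shifts low bits out of b and c, so the result can be approximate in extreme cases, exactly as the "drop precision" comment says.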