author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 14:31:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 14:31:50 -0700
commit     e00d4135751bfe786a9e26b5560b185ce3f9f963 (patch)
tree       6945bd6749aab501000f7084e020aa201971e1ff  /kernel/sched/sched.h
parent     90489a72fba9529c85e051067ecb41183b8e982e (diff)
parent     08ae95f4fd3b38b257f5dc7e6507e071c27ba0d5 (diff)
download   linux-e00d4135751bfe786a9e26b5560b185ce3f9f963.tar.bz2
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Make nohz housekeeping processing more permissive and less
     intrusive to isolated CPUs

   - Decouple CPU-bound workqueue accounting from the scheduler and
     move it into the workqueue code.

   - Optimize topology building

   - Better handle quota and period overflows

   - Add more RCU annotations

   - Comment updates, misc cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  nohz_full: Allow the boot CPU to be nohz_full
  sched/isolation: Require a present CPU in housekeeping mask
  kernel/cpu: Allow non-zero CPU to be primary for suspend / kexec freeze
  power/suspend: Add function to disable secondaries for suspend
  sched/core: Allow the remote scheduler tick to be started on CPU0
  sched/nohz: Run NOHZ idle load balancer on HK_FLAG_MISC CPUs
  sched/debug: Fix spelling mistake "logaritmic" -> "logarithmic"
  sched/topology: Update init_sched_domains() comment
  cgroup/cpuset: Update stale generate_sched_domains() comments
  sched/core: Check quota and period overflow at usec to nsec conversion
  sched/core: Handle overflow in cpu_shares_write_u64
  sched/rt: Check integer overflow at usec to nsec conversion
  sched/core: Fix typo in comment
  sched/core: Make some functions static
  sched/core: Unify p->on_rq updates
  sched/core: Remove ttwu_activate()
  sched/core, workqueues: Distangle worker accounting from rq lock
  sched/fair: Remove unneeded prototype of capacity_of()
  sched/topology: Skip duplicate group rewrites in build_sched_groups()
  sched/topology: Fix build_sched_groups() comment
  ...
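The RCU-annotation part of this pull is what the sched.h hunks below reflect: RCU-protected pointers gain the __rcu sparse annotation so that the static checker can verify they are only touched through the RCU accessors. As a rough, illustrative sketch (not code from this merge; struct example_rd and the example_* helpers are hypothetical), an __rcu pointer is published with rcu_assign_pointer() and read through rcu_dereference() inside an RCU read-side critical section:

#include <linux/rcupdate.h>

struct perf_domain;

/* Illustrative structure only; it mirrors the kind of field annotated below. */
struct example_rd {
	struct perf_domain __rcu *pd;	/* sparse warns on a plain dereference */
};

static void example_publish(struct example_rd *rd, struct perf_domain *new_pd)
{
	/* Store with the release semantics that RCU readers rely on. */
	rcu_assign_pointer(rd->pd, new_pd);
}

static void example_read(struct example_rd *rd)
{
	struct perf_domain *pd;

	rcu_read_lock();
	/*
	 * rcu_dereference() strips the __rcu address space for sparse and
	 * pairs with rcu_assign_pointer() on the update side.
	 */
	pd = rcu_dereference(rd->pd);
	if (pd) {
		/* ... use pd while inside the read-side critical section ... */
	}
	rcu_read_unlock();
}

With the annotation in place, a sparse build (make C=1) flags any access that bypasses the RCU accessors, which is what the __rcu additions below enable for the scheduler's domain pointers and the cpufreq update hook.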
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efa686eeff26..b52ed1ada0be 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -780,7 +780,7 @@ struct root_domain {
* NULL-terminated list of performance domains intersecting with the
* CPUs of the rd. Protected by RCU.
*/
- struct perf_domain *pd;
+ struct perf_domain __rcu *pd;
};
extern struct root_domain def_root_domain;
@@ -869,8 +869,8 @@ struct rq {
atomic_t nr_iowait;
#ifdef CONFIG_SMP
- struct root_domain *rd;
- struct sched_domain *sd;
+ struct root_domain *rd;
+ struct sched_domain __rcu *sd;
unsigned long cpu_capacity;
unsigned long cpu_capacity_orig;
@@ -1324,13 +1324,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
return sd;
}
-DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
-DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
-DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
+DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;
struct sched_group_capacity {
@@ -2185,7 +2185,7 @@ static inline u64 irq_time_read(int cpu)
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
#ifdef CONFIG_CPU_FREQ
-DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
/**
* cpufreq_update_util - Take a note about CPU utilization changes.