author		Peter Zijlstra <peterz@infradead.org>	2021-03-03 16:45:41 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-05-12 11:43:27 +0200
commit		9ef7e7e33bcdb57be1afb28884053c28b5f05240 (patch)
tree		40e43fa4c6d82adf7cd39fbc1f5dfb868701165b /kernel/sched/core.c
parent		9edeaea1bc452372718837ed2ba775811baf1ba1 (diff)
download	linux-9ef7e7e33bcdb57be1afb28884053c28b5f05240.tar.bz2
sched: Optimize rq_lockp() usage
rq_lockp() includes a static_branch(), which is asm-goto, which is asm
volatile, which defeats regular CSE. This means that:

	if (!static_branch(&foo))
		return simple;

	if (static_branch(&foo) && cond)
		return complex;

doesn't fold and we get horrible code. Introduce __rq_lockp() without
the static_branch() on it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.316696988@infradead.org
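For context, the two helpers compared throughout the hunks below live in kernel/sched/sched.h, which this core.c-limited view does not show. The following is a minimal sketch of the split, assuming the core-scheduling layout from the parent commit (a shared rq->core->__lock, a per-rq core_enabled flag, and the __sched_core_enabled static key); the exact bodies are an approximation, not the literal patch:

	/*
	 * Keyed variant: sched_core_enabled() tests a static branch, which is
	 * asm-goto (asm volatile), so the compiler cannot CSE repeated calls.
	 */
	static inline raw_spinlock_t *rq_lockp(struct rq *rq)
	{
		if (sched_core_enabled(rq))
			return &rq->core->__lock;

		return &rq->__lock;
	}

	/*
	 * Plain variant: ordinary loads only, so back-to-back calls in the
	 * same function can be folded into one.
	 */
	static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
	{
		if (rq->core_enabled)
			return &rq->core->__lock;

		return &rq->__lock;
	}

The hunks below switch the lock/recheck loops (raw_spin_rq_lock_nested(), raw_spin_rq_trylock(), double_rq_lock()) and the lockdep annotations over to __rq_lockp(), i.e. the places where the lock pointer is sampled twice in quick succession and the fold actually matters.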
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 384b79363a39..42c1c88741c0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -300,9 +300,9 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 	}
 
 	for (;;) {
-		lock = rq_lockp(rq);
+		lock = __rq_lockp(rq);
 		raw_spin_lock_nested(lock, subclass);
-		if (likely(lock == rq_lockp(rq))) {
+		if (likely(lock == __rq_lockp(rq))) {
 			/* preempt_count *MUST* be > 1 */
 			preempt_enable_no_resched();
 			return;
@@ -325,9 +325,9 @@ bool raw_spin_rq_trylock(struct rq *rq)
 	}
 
 	for (;;) {
-		lock = rq_lockp(rq);
+		lock = __rq_lockp(rq);
 		ret = raw_spin_trylock(lock);
-		if (!ret || (likely(lock == rq_lockp(rq)))) {
+		if (!ret || (likely(lock == __rq_lockp(rq)))) {
 			preempt_enable();
 			return ret;
 		}
@@ -352,7 +352,7 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
 		swap(rq1, rq2);
 
 	raw_spin_rq_lock(rq1);
-	if (rq_lockp(rq1) == rq_lockp(rq2))
+	if (__rq_lockp(rq1) == __rq_lockp(rq2))
 		return;
 
 	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
@@ -2622,7 +2622,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	 * task_rq_lock().
 	 */
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-				      lockdep_is_held(rq_lockp(task_rq(p)))));
+				      lockdep_is_held(__rq_lockp(task_rq(p)))));
 #endif
 	/*
 	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
@@ -4248,7 +4248,7 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
 	 * do an early lockdep release here:
 	 */
 	rq_unpin_lock(rq, rf);
-	spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_);
+	spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
 	rq_lockp(rq)->owner = next;
@@ -4262,7 +4262,7 @@ static inline void finish_lock_switch(struct rq *rq)
 	 * fix up the runqueue lock - which gets 'carried over' from
 	 * prev into current:
 	 */
-	spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
+	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
 	__balance_callbacks(rq);
 	raw_spin_rq_unlock_irq(rq);
 }