 kernel/sched.c    |  2 ++
 kernel/sched_rt.c | 10 ++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3344ba776b97..c591abd9ca38 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -346,6 +346,7 @@ struct rt_rq {
 	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
+	int overloaded;
 };
 
 /*
@@ -6770,6 +6771,7 @@ void __init sched_init(void)
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
+		rq->rt.overloaded = 0;
 #endif
 
 		atomic_set(&rq->nr_iowait, 0);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a9d7d4408160..87d7b3ff3861 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(void)
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(struct rq *rq)
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -448,6 +450,9 @@ static int push_rt_task(struct rq *rq)
 
 	assert_spin_locked(&rq->lock);
 
+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -675,7 +680,7 @@ static void schedule_tail_balance_rt(struct rq *rq)
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -687,7 +692,8 @@ static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
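
In short, the patch caches the runqueue's RT-overload state in a new per-runqueue flag (rq->rt.overloaded), set in rt_set_overload() and cleared in rt_clear_overload(), and then uses that flag to short-circuit push_rt_task(), schedule_tail_balance_rt() and wakeup_balance_rt() when there is nothing worth pushing. The following is a minimal user-space C sketch of that short-circuit pattern, not kernel code; all model_* names are illustrative and not part of the kernel.

/*
 * Sketch of the idea in this patch: each runqueue caches an "overloaded"
 * flag, set when more than one migratable RT task is queued, so the push
 * path can bail out with a cheap test instead of scanning for a pushable
 * task every time.
 */
#include <stdio.h>

struct rt_rq_model {
	int rt_nr_running;   /* queued RT tasks */
	int overloaded;      /* cached "we may have tasks to push" state */
};

/* set when a second RT task is enqueued (cf. rt_set_overload()) */
static void model_set_overload(struct rt_rq_model *rt)
{
	rt->overloaded = 1;
}

/* cleared when we drop back to at most one RT task (cf. rt_clear_overload()) */
static void model_clear_overload(struct rt_rq_model *rt)
{
	rt->overloaded = 0;
}

/* cf. push_rt_task(): the cheap flag test short-circuits the expensive scan */
static int model_push_rt_task(struct rt_rq_model *rt)
{
	if (!rt->overloaded)
		return 0;	/* nothing worth pushing, done immediately */

	/*
	 * Here the kernel would pick_next_highest_task_rt() and try to
	 * move it to another CPU; we only print a marker.
	 */
	printf("scanning for a pushable RT task\n");
	return 1;
}

int main(void)
{
	struct rt_rq_model rt = { .rt_nr_running = 1, .overloaded = 0 };

	model_push_rt_task(&rt);	/* bails out: not overloaded */

	rt.rt_nr_running = 2;
	model_set_overload(&rt);
	model_push_rt_task(&rt);	/* now attempts a push */

	model_clear_overload(&rt);
	return 0;
}

The design point the flag captures: overload state changes only on enqueue/dequeue, so testing a cached int on the hot scheduling paths is far cheaper than re-deriving it (or scanning the runqueue) on every potential push.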