author     Linus Torvalds <torvalds@linux-foundation.org>  2013-12-02 10:13:44 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-12-02 10:13:44 -0800
commit     a0b57ca33ec1cd915ba49051512b3463fa44b4e3 (patch)
tree       a68f879562194bffd0774a3c8cac4d19c0c363b1
parent     e321ae4c207ce4c6c6812fc65e666efc325cc65e (diff)
parent     96739d6e548e16d76de39d059e1e39e70c187fff (diff)
download   linux-a0b57ca33ec1cd915ba49051512b3463fa44b4e3.tar.bz2
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Various smaller fixlets, all over the place"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/doc: Fix generation of device-drivers
  sched: Expose preempt_schedule_irq()
  sched: Fix a trivial typo in comments
  sched: Remove unused variable in 'struct sched_domain'
  sched: Avoid NULL dereference on sd_busy
  sched: Check sched_domain before computing group power
  MAINTAINERS: Update file patterns in the lockdep and scheduler entries
-rw-r--r--  MAINTAINERS              3
-rw-r--r--  include/linux/sched.h    2
-rw-r--r--  kernel/sched/core.c      8
-rw-r--r--  kernel/sched/fair.c     27
4 files changed, 29 insertions, 11 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index e9c7b50c612d..4afcfb4c892b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5267,7 +5267,7 @@ S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
F: include/linux/lockdep.h
-F: kernel/lockdep*
+F: kernel/locking/
LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -7391,7 +7391,6 @@ S: Maintained
F: kernel/sched/
F: include/linux/sched.h
F: include/uapi/linux/sched.h
-F: kernel/wait.c
F: include/linux/wait.h
SCORE ARCHITECTURE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e35d4b9e14a..768b037dfacb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -831,8 +831,6 @@ struct sched_domain {
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
- u64 last_update;
-
/* idle_balance() stats */
u64 max_newidle_lb_cost;
unsigned long next_decay_max_lb_cost;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f..e85cda20ab2b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
/*
* this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
exception_exit(prev_state);
}
-#endif /* CONFIG_PREEMPT */
-
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
- * If we dont want to free the old_rt yet then
+ * If we dont want to free the old_rd yet then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
if (sd) {
id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd));
- rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+ sd = sd->parent; /* sd_busy */
}
+ rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size;
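
For context on the "sched: Avoid NULL dereference on sd_busy" hunk above: after the change, per_cpu(sd_busy, cpu) is written on every call, getting sd->parent when a cache-sharing domain exists and NULL otherwise, instead of being left untouched in the NULL case. A minimal userspace sketch of that publication logic (simplified types, a plain store in place of rcu_assign_pointer(), a single pointer instead of per-CPU data; not the kernel code itself):

/* Userspace sketch only: simplified stand-ins for the kernel types. */
#include <stdio.h>
#include <stddef.h>

struct sched_domain {
	struct sched_domain *parent;
};

/* Stands in for per_cpu(sd_busy, cpu). */
static struct sched_domain *sd_busy;

/* Mirrors the fixed flow: sd_busy is published on every call. */
static void update_sd_busy(struct sched_domain *sd)
{
	if (sd)
		sd = sd->parent;	/* sd_busy */

	sd_busy = sd;			/* NULL when no cache-sharing domain exists */
}

int main(void)
{
	struct sched_domain top = { .parent = NULL };
	struct sched_domain llc = { .parent = &top };

	update_sd_busy(&llc);
	printf("sd_busy = %p\n", (void *)sd_busy);	/* points at top */

	update_sd_busy(NULL);
	printf("sd_busy = %p\n", (void *)sd_busy);	/* cleared to NULL */
	return 0;
}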
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027..fd773ade1a31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
*/
for_each_cpu(cpu, sched_group_cpus(sdg)) {
- struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+ struct sched_group_power *sgp;
+ struct rq *rq = cpu_rq(cpu);
- power_orig += sg->sgp->power_orig;
- power += sg->sgp->power;
+ /*
+ * build_sched_domains() -> init_sched_groups_power()
+ * gets here before we've attached the domains to the
+ * runqueues.
+ *
+ * Use power_of(), which is set irrespective of domains
+ * in update_cpu_power().
+ *
+ * This avoids power/power_orig from being 0 and
+ * causing divide-by-zero issues on boot.
+ *
+ * Runtime updates will correct power_orig.
+ */
+ if (unlikely(!rq->sd)) {
+ power_orig += power_of(cpu);
+ power += power_of(cpu);
+ continue;
+ }
+
+ sgp = rq->sd->groups->sgp;
+ power_orig += sgp->power_orig;
+ power += sgp->power;
}
} else {
/*
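
The kernel/sched/fair.c hunk above guards update_group_power() against rq->sd being NULL while build_sched_domains() is still attaching domains, falling back to the per-CPU baseline from power_of() so the summed power cannot be zero. A minimal userspace sketch of that fallback pattern (simplified types and a stand-in power_of() helper; not the kernel code itself):

/* Userspace sketch only: simplified stand-ins for the kernel types. */
#include <stdio.h>
#include <stddef.h>

struct sched_group_power { unsigned long power, power_orig; };
struct sched_group       { struct sched_group_power *sgp; };
struct sched_domain      { struct sched_group *groups; };
struct rq                { struct sched_domain *sd; unsigned long cpu_power; };

static unsigned long power_of(const struct rq *rq)
{
	/* Per-CPU baseline, valid irrespective of whether domains are attached. */
	return rq->cpu_power;
}

static void sum_group_power(struct rq *rqs, int nr_cpus,
			    unsigned long *power, unsigned long *power_orig)
{
	*power = 0;
	*power_orig = 0;

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		struct rq *rq = &rqs[cpu];

		if (!rq->sd) {
			/* Domains not attached yet: use the baseline. */
			*power += power_of(rq);
			*power_orig += power_of(rq);
			continue;
		}

		/* Domains attached: aggregate the group's power figures. */
		*power += rq->sd->groups->sgp->power;
		*power_orig += rq->sd->groups->sgp->power_orig;
	}
}

int main(void)
{
	/* Early-boot shape: no domains attached yet, baseline power only. */
	struct rq rqs[2] = {
		{ .sd = NULL, .cpu_power = 1024 },
		{ .sd = NULL, .cpu_power = 1024 },
	};
	unsigned long power, power_orig;

	sum_group_power(rqs, 2, &power, &power_orig);
	printf("power=%lu power_orig=%lu\n", power, power_orig);	/* 2048 2048, never 0 */
	return 0;
}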