author    Linus Torvalds <torvalds@linux-foundation.org>  2010-06-28 12:17:40 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-06-28 12:17:40 -0700
commit    e6cb6281ef8547fea1243b1c2a4e0f08d9b86ae1 (patch)
tree      4444be8141566dec3c88ff5fa7354cbe4cebccdc /kernel
parent    ab8aadbda7d59d4674ef614cba2a67c50667a6af (diff)
parent    8695159967957015f8dfb49315d6f88e111d90e0 (diff)
download  linux-e6cb6281ef8547fea1243b1c2a4e0f08d9b86ae1.tar.bz2
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
    sched: silence PROVE_RCU in sched_fork()
    idr: fix RCU lockdep splat in idr_get_next()
    rcu: apply RCU protection to wake_affine()
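
All three fixes address the same RCU lockdep checking (CONFIG_PROVE_RCU): an RCU-protected pointer may only be dereferenced inside an RCU read-side critical section, or under a lock that the accessor explicitly declares. As a rough illustration of the rule these patches satisfy (this is not code from the merge; my_data, global_ptr and read_value() are made-up names), a correctly annotated reader looks like this:

/* Minimal sketch of the rule PROVE_RCU enforces; all names are illustrative. */
#include <linux/rcupdate.h>

struct my_data {
	int value;
};

/* Published elsewhere with rcu_assign_pointer(), freed only after a grace period. */
static struct my_data *global_ptr;

static int read_value(void)
{
	struct my_data *p;
	int val = -1;

	rcu_read_lock();                    /* enter RCU read-side critical section */
	p = rcu_dereference(global_ptr);    /* PROVE_RCU verifies this happens inside one */
	if (p)
		val = p->value;
	rcu_read_unlock();

	return val;
}

Calling rcu_dereference() outside the rcu_read_lock()/rcu_read_unlock() pair triggers the kind of lockdep splat these three patches silence or fix.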
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c       |  9 +++++++++
-rw-r--r--  kernel/sched_fair.c  |  2 ++
2 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228dd..a2d215d132f6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2494,7 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
+	/*
+	 * The child is not yet in the pid-hash so no cgroup attach races,
+	 * and the cgroup is pinned to this child because cgroup_fork()
+	 * is run before sched_fork().
+	 *
+	 * Silence PROVE_RCU.
+	 */
+	rcu_read_lock();
 	set_task_cpu(p, cpu);
+	rcu_read_unlock();
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
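
The hunk above is the sched_fork() fix: set_task_cpu() ends up reading the task's group/cgroup state through RCU-checked accessors, so even though the freshly forked child cannot race with anything yet, PROVE_RCU complains unless a read-side section is held. The accessor side of that checking is typically written with rcu_dereference_check(); the sketch below is illustrative only (my_cfg, cfg_ptr, cfg_lock and get_cfg() are hypothetical, not scheduler code):

/* Hypothetical accessor documenting when its RCU-protected pointer may be read:
 * either inside rcu_read_lock(), or while holding cfg_lock.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct my_cfg {
	int setting;
};

static struct my_cfg *cfg_ptr;
static DEFINE_SPINLOCK(cfg_lock);

static struct my_cfg *get_cfg(void)
{
	return rcu_dereference_check(cfg_ptr, lockdep_is_held(&cfg_lock));
}

When a caller is safe for reasons lockdep cannot see, as in sched_fork() where the child is not yet visible to anyone else, the simplest fix is the one taken above: wrap the access in rcu_read_lock()/rcu_read_unlock().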
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eed35eded602..a878b5332daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within