| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 16:17:25 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 16:17:25 -0700 |
| commit | 2d722f6d5671794c0de0e29e3da75006ac086718 (patch) | |
| tree | 042c6320cfd771798f985e79aa0422cbe8096258 /arch | |
| parent | f0bb4c0ab064a8aeeffbda1cee380151a594eaab (diff) | |
| parent | 2fd1b487884310d0aa0c0640179dc7490ad86313 (diff) | |
| download | linux-2d722f6d5671794c0de0e29e3da75006ac086718.tar.bz2 | |
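The tarball above snapshots this exact tree; with git, the same state can be checked out directly. A minimal sketch — the clone URL is the usual kernel.org mainline repository, which this page does not state, so treat it as an assumption:

```sh
# Clone mainline (assumed URL) and check out exactly this merge commit.
git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
cd linux
git checkout 2d722f6d5671794c0de0e29e3da75006ac086718
```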
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes:
- load-calculation cleanups and improvements, by Alex Shi
- various nohz related tidying up of statisics, by Frederic
Weisbecker
- factor out /proc functions to kernel/sched/proc.c, by Paul
Gortmaker
- simplify the RT policy scheduler, by Kirill Tkhai
- various fixes and cleanups"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits)
sched/debug: Remove CONFIG_FAIR_GROUP_SCHED mask
sched/debug: Fix formatting of /proc/<PID>/sched
sched: Fix typo in struct sched_avg member description
sched/fair: Fix typo describing flags in enqueue_entity
sched/debug: Add load-tracking statistics to task
sched: Change get_rq_runnable_load() to static and inline
sched/tg: Remove tg.load_weight
sched/cfs_rq: Change atomic64_t removed_load to atomic_long_t
sched/tg: Use 'unsigned long' for load variable in task group
sched: Change cfs_rq load avg to unsigned long
sched: Consider runnable load average in move_tasks()
sched: Compute runnable load avg in cpu_load and cpu_avg_load_per_task
sched: Update cpu load after task_tick
sched: Fix sleep time double accounting in enqueue entity
sched: Set an initial value of runnable avg for new forked task
sched: Move a few runnable tg variables into CONFIG_SMP
Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"
sched: Don't mix use of typedef ctl_table and struct ctl_table
sched: Remove WARN_ON(!sd) from init_sched_groups_power()
sched: Fix memory leakage in build_sched_groups()
...
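cgit truncates the shortlog above to the first 20 of the 42 commits. The full list can be recovered locally from the two parent SHAs in the metadata table; a sketch, assuming the merge is present in your clone:

```sh
# Commits reachable from the pulled branch head (second parent) but not
# from pre-merge mainline (first parent) = the 42 commits of this pull.
git log --oneline \
    f0bb4c0ab064a8aeeffbda1cee380151a594eaab..2fd1b487884310d0aa0c0640179dc7490ad86313
```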
Diffstat (limited to 'arch')
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | arch/avr32/kernel/process.c | 2 |
| -rw-r--r-- | arch/cris/include/arch-v10/arch/bitops.h | 2 |
| -rw-r--r-- | arch/ia64/kernel/head.S | 2 |
| -rw-r--r-- | arch/mips/kernel/mips-mt-fpaff.c | 4 |
| -rw-r--r-- | arch/mips/kernel/scall32-o32.S | 5 |
| -rw-r--r-- | arch/powerpc/include/asm/mmu_context.h | 2 |
| -rw-r--r-- | arch/tile/include/asm/processor.h | 2 |
| -rw-r--r-- | arch/tile/kernel/stack.c | 2 |
| -rw-r--r-- | arch/um/kernel/sysrq.c | 2 |
9 files changed, 12 insertions, 11 deletions
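The stat above should be reproducible locally; a sketch, assuming (as cgit does by default for merges) that the diff is taken against the first parent:

```sh
# Diff pre-merge mainline against the merge result, restricted to arch/.
git diff --stat f0bb4c0ab064a8aeeffbda1cee380151a594eaab \
    2d722f6d5671794c0de0e29e3da75006ac086718 -- arch/
```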
```diff
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index e7b61494c312..c2731003edef 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -341,7 +341,7 @@ unsigned long get_wchan(struct task_struct *p)
 	 * is actually quite ugly. It might be possible to
 	 * determine the frame size automatically at build
 	 * time by doing this:
-	 *   - compile sched.c
+	 *   - compile sched/core.c
 	 *   - disassemble the resulting sched.o
 	 *   - look for 'sub sp,??' shortly after '<schedule>:'
 	 */
diff --git a/arch/cris/include/arch-v10/arch/bitops.h b/arch/cris/include/arch-v10/arch/bitops.h
index be85f6de25d3..03d9cfd92c8a 100644
--- a/arch/cris/include/arch-v10/arch/bitops.h
+++ b/arch/cris/include/arch-v10/arch/bitops.h
@@ -17,7 +17,7 @@ static inline unsigned long cris_swapnwbrlz(unsigned long w)
    in another register:
    !  __asm__ ("swapnwbr %2\n\tlz %2,%0"
    !	      : "=r,r" (res), "=r,X" (dummy) : "1,0" (w));
-   confuses gcc (sched.c, gcc from cris-dist-1.14).  */
+   confuses gcc (core.c, gcc from cris-dist-1.14).  */
 
 	unsigned long res;
 	__asm__ ("swapnwbr %0 \n\t"
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d3..991ca336b8a2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1035,7 +1035,7 @@ END(ia64_delay_loop)
  * Return a CPU-local timestamp in nano-seconds.  This timestamp is
  * NOT synchronized across CPUs its return value must never be
  * compared against the values returned on another CPU.  The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
  *
  * The return-value of sched_clock() is NOT supposed to wrap-around.
  * If it did, it would cause some scheduling hiccups (at the worst).
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index fd814e08c945..cb098628aee8 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -27,12 +27,12 @@ unsigned long mt_fpemul_threshold;
  * FPU affinity with the user's requested processor affinity.
  * This code is 98% identical with the sys_sched_setaffinity()
  * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
+ * updated when kernel/sched/core.c changes.
  */
 
 /*
  * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
  * cloned here.
  */
 static inline struct task_struct *find_process_by_pid(pid_t pid)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9b36424b03c5..e9127ec612ef 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -476,8 +476,9 @@ einval:	li	v0, -ENOSYS
 	/*
 	 * For FPU affinity scheduling on MIPS MT processors, we need to
 	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
-	 * in kernel/sched.c.  Considered only temporary we only support these
-	 * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+	 * in kernel/sched/core.c.  Considered only temporary we only support
+	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
+	 * atm.
 	 */
 	sys	mipsmt_sys_sched_setaffinity	3
 	sys	mipsmt_sys_sched_getaffinity	3
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index a73668a5f30d..b467530e2485 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,7 +38,7 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
 
 /*
  * switch_mm is the entry point called from the architecture independent
- * code in kernel/sched.c
+ * code in kernel/sched/core.c
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 2b70dfb1442e..b3f104953da2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -225,7 +225,7 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
 
 /*
  * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched.c, so don't work too hard.
+ * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
  */
 #define thread_saved_pc(t)	((t)->thread.pc)
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ed258b8ae320..af8dfc9665f6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -442,7 +442,7 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 		      regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched.c, with esp == NULL */
+/* This is called only from kernel/sched/core.c, with esp == NULL */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 7d101a2a1541..0dc4d1c6f98a 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -39,7 +39,7 @@ void show_trace(struct task_struct *task, unsigned long * stack)
 static const int kstack_depth_to_print = 24;
 
 /* This recently started being used in arch-independent code too, as in
- * kernel/sched.c.*/
+ * kernel/sched/core.c.*/
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	unsigned long *stack;
```
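The avr32 hunk above spells out a manual procedure for finding schedule()'s stack frame size: compile the scheduler, disassemble the object, and look for the stack adjustment after the '<schedule>:' label. A sketch of those steps as shell commands — the 'sub.*sp' pattern is avr32-specific and a cross-toolchain objdump would be needed for that architecture, so this is illustrative only:

```sh
# Build just the scheduler object, then print the disassembly from the
# '<schedule>:' label down to the first stack-pointer adjustment.
make kernel/sched/core.o
objdump -d kernel/sched/core.o | awk '/<schedule>:/,/sub.*sp/'
```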