author	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 08:33:28 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-06 08:44:54 -0800
commit	0db49b72bce26341274b74fd968501489a361ae3 (patch)
tree	cdb076827aefb38d719d4c42f8ef291c36072fa8 /include
parent	35b740e4662ef386f0c60e1b60aaf5b44db9914c (diff)
parent	1ac9bc6943edf7d181b4b1cc734981350d4f6bae (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  sched/tracing: Add a new tracepoint for sleeptime
  sched: Disable scheduler warnings during oopses
  sched: Fix cgroup movement of waking process
  sched: Fix cgroup movement of newly created process
  sched: Fix cgroup movement of forking process
  sched: Remove cfs bandwidth period check in tg_set_cfs_period()
  sched: Fix load-balance lock-breaking
  sched: Replace all_pinned with a generic flags field
  sched: Only queue remote wakeups when crossing cache boundaries
  sched: Add missing rcu_dereference() around ->real_parent usage
  [S390] fix cputime overflow in uptime_proc_show
  [S390] cputime: add sparse checking and cleanup
  sched: Mark parent and real_parent as __rcu
  sched, nohz: Fix missing RCU read lock
  sched, nohz: Set the NOHZ_BALANCE_KICK flag for idle load balancer
  sched, nohz: Fix the idle cpu check in nohz_idle_balance
  sched: Use jump_labels for sched_feat
  sched/accounting: Fix parameter passing in task_group_account_field
  sched/accounting: Fix user/system tick double accounting
  sched/accounting: Re-use scheduler statistics for the root cgroup
  ...

Fix up conflicts in
 - arch/ia64/include/asm/cputime.h, include/asm-generic/cputime.h
	usecs_to_cputime64() vs the sparse cleanups
 - kernel/sched/fair.c, kernel/time/tick-sched.c
	scheduler changes in multiple branches
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/cputime.h	65
-rw-r--r--	include/linux/kernel_stat.h	36
-rw-r--r--	include/linux/latencytop.h	3
-rw-r--r--	include/linux/sched.h	23
-rw-r--r--	include/trace/events/sched.h	57
5 files changed, 130 insertions(+), 54 deletions(-)
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 12a1764f612b..9a62937c56ca 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,71 +4,66 @@
#include <linux/time.h>
#include <linux/jiffies.h>
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
-#define cputime_zero (0UL)
#define cputime_one_jiffy jiffies_to_cputime(1)
-#define cputime_max ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b) ((__a) + (__b))
-#define cputime_sub(__a, __b) ((__a) - (__b))
-#define cputime_div(__a, __n) ((__a) / (__n))
-#define cputime_halve(__a) ((__a) >> 1)
-#define cputime_eq(__a, __b) ((__a) == (__b))
-#define cputime_gt(__a, __b) ((__a) > (__b))
-#define cputime_ge(__a, __b) ((__a) >= (__b))
-#define cputime_lt(__a, __b) ((__a) < (__b))
-#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__ct)
+#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) (__hz)
+#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
-#define cputime64_zero (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime64_to_jiffies64(__ct) (__ct)
-#define jiffies64_to_cputime64(__jif) (__jif)
-#define cputime_to_cputime64(__ct) ((u64) __ct)
-#define cputime64_gt(__a, __b) ((__a) > (__b))
+#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
-#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
/*
* Convert cputime to microseconds and back.
*/
-#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
-#define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000)
+#define cputime_to_usecs(__ct) \
+ jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec) \
+ jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
*/
-#define cputime_to_secs(jif) ((jif) / HZ)
-#define secs_to_cputime(sec) ((sec) * HZ)
+#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
/*
* Convert cputime to timespec and back.
*/
-#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val) \
+ jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val) \
+ jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to timeval and back.
*/
-#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val) \
+ jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val) \
+ jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to clock and back.
*/
-#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct) \
+ jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x) \
+ jiffies_to_cputime(clock_t_to_jiffies(__x))
/*
* Convert cputime64 to clock.
*/
-#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct) \
+ jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
#endif
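
The hunk above drops the cputime_add()/cputime_sub() helper zoo and instead marks cputime_t/cputime64_t as __nocast so sparse can catch accidental mixing with raw integers. A minimal sketch of what a caller now looks like, using only the macros defined in this header; it is illustrative and not part of the commit:

#include <linux/jiffies.h>
#include <asm-generic/cputime.h>

/* The explicit conversions carry the __force casts from the header;
 * handing raw jiffies to code that expects cputime_t is exactly what
 * the __nocast annotation lets sparse warn about. */
static inline cputime_t cputime_add_one_jiffy(cputime_t ct)
{
	unsigned long jif = cputime_to_jiffies(ct);	/* cast out of the nocast type */

	return jiffies_to_cputime(jif + 1);		/* and back in */
}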
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0cce2db580c3..2fbd9053c2df 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -6,6 +6,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/cputime.h>
@@ -15,21 +16,25 @@
* used by rstatd/perfmeter
*/
-struct cpu_usage_stat {
- cputime64_t user;
- cputime64_t nice;
- cputime64_t system;
- cputime64_t softirq;
- cputime64_t irq;
- cputime64_t idle;
- cputime64_t iowait;
- cputime64_t steal;
- cputime64_t guest;
- cputime64_t guest_nice;
+enum cpu_usage_stat {
+ CPUTIME_USER,
+ CPUTIME_NICE,
+ CPUTIME_SYSTEM,
+ CPUTIME_SOFTIRQ,
+ CPUTIME_IRQ,
+ CPUTIME_IDLE,
+ CPUTIME_IOWAIT,
+ CPUTIME_STEAL,
+ CPUTIME_GUEST,
+ CPUTIME_GUEST_NICE,
+ NR_STATS,
+};
+
+struct kernel_cpustat {
+ u64 cpustat[NR_STATS];
};
struct kernel_stat {
- struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
unsigned int irqs[NR_IRQS];
#endif
@@ -38,10 +43,13 @@ struct kernel_stat {
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
extern unsigned long long nr_context_switches(void);
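
With per-CPU time accounting now kept in a flat u64 array indexed by the cpu_usage_stat enum, readers pick fields out of kcpustat_cpu() instead of the old struct cpu_usage_stat members. A small illustrative sketch (not part of this commit), assuming only the accessors declared above:

#include <linux/kernel_stat.h>

/* Sum the idle and iowait time recorded for one CPU. */
static u64 idle_plus_iowait(int cpu)
{
	u64 *cpustat = kcpustat_cpu(cpu).cpustat;

	return cpustat[CPUTIME_IDLE] + cpustat[CPUTIME_IOWAIT];
}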
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index b0e99898527c..e23121f9d82a 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -10,6 +10,8 @@
#define _INCLUDE_GUARD_LATENCYTOP_H_
#include <linux/compiler.h>
+struct task_struct;
+
#ifdef CONFIG_LATENCYTOP
#define LT_SAVECOUNT 32
@@ -23,7 +25,6 @@ struct latency_record {
};
-struct task_struct;
extern int latencytop_enabled;
void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4a7e4d333a27..cf0eb342bcba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -273,9 +273,11 @@ extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
+extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
+static inline void set_cpu_sd_state_idle(void) { }
#endif
/*
@@ -483,8 +485,8 @@ struct task_cputime {
#define INIT_CPUTIME \
(struct task_cputime) { \
- .utime = cputime_zero, \
- .stime = cputime_zero, \
+ .utime = 0, \
+ .stime = 0, \
.sum_exec_runtime = 0, \
}
@@ -901,6 +903,10 @@ struct sched_group_power {
* single CPU.
*/
unsigned int power, power_orig;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
};
struct sched_group {
@@ -925,6 +931,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
return to_cpumask(sg->cpumask);
}
+/**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+static inline unsigned int group_first_cpu(struct sched_group *group)
+{
+ return cpumask_first(sched_group_cpus(group));
+}
+
struct sched_domain_attr {
int relax_domain_level;
};
@@ -1315,8 +1330,8 @@ struct task_struct {
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
- struct task_struct *real_parent; /* real parent process */
- struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+ struct task_struct __rcu *real_parent; /* real parent process */
+ struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
/*
* children/sibling forms the list of my natural children
*/
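
With parent and real_parent now annotated __rcu, lockless readers are expected to go through rcu_dereference() inside an RCU read-side critical section (a companion change in this series adds exactly that around a ->real_parent use). A hedged sketch of the expected access pattern, not code from this diff:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Read the real parent's pid without holding tasklist_lock. */
static pid_t real_parent_pid(struct task_struct *tsk)
{
	pid_t ppid;

	rcu_read_lock();
	ppid = rcu_dereference(tsk->real_parent)->pid;
	rcu_read_unlock();

	return ppid;
}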
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 959ff18b63b6..6ba596b07a72 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -331,6 +331,13 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_ARGS(tsk, delay));
/*
+ * Tracepoint for accounting blocked time (time the task is in uninterruptible).
+ */
+DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+ TP_PROTO(struct task_struct *tsk, u64 delay),
+ TP_ARGS(tsk, delay));
+
+/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
@@ -363,6 +370,56 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
+#ifdef CREATE_TRACE_POINTS
+static inline u64 trace_get_sleeptime(struct task_struct *tsk)
+{
+#ifdef CONFIG_SCHEDSTATS
+ u64 block, sleep;
+
+ block = tsk->se.statistics.block_start;
+ sleep = tsk->se.statistics.sleep_start;
+ tsk->se.statistics.block_start = 0;
+ tsk->se.statistics.sleep_start = 0;
+
+ return block ? block : sleep ? sleep : 0;
+#else
+ return 0;
+#endif
+}
+#endif
+
+/*
+ * Tracepoint for accounting sleeptime (time the task is sleeping
+ * or waiting for I/O).
+ */
+TRACE_EVENT(sched_stat_sleeptime,
+
+ TP_PROTO(struct task_struct *tsk, u64 now),
+
+ TP_ARGS(tsk, now),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( u64, sleeptime )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->sleeptime = trace_get_sleeptime(tsk);
+ __entry->sleeptime = __entry->sleeptime ?
+ now - __entry->sleeptime : 0;
+ )
+ TP_perf_assign(
+ __perf_count(__entry->sleeptime);
+ ),
+
+ TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
+ __entry->comm, __entry->pid,
+ (unsigned long long)__entry->sleeptime)
+);
+
/*
* Tracepoint for showing priority inheritance modifying a tasks
* priority.
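
The new sched_stat_sleeptime event above records how long a task was sleeping or blocked before it ran again. Its call site lives in the scheduler core, outside this include/-only diffstat; the sketch below only illustrates how such an event is typically emitted and is an assumption, not code from the commit (the helper name is hypothetical):

#include <linux/sched.h>
#include <trace/events/sched.h>

/* Hypothetical helper: report how long @prev was off the CPU, with the
 * caller-supplied runqueue clock passed as "now". */
static inline void report_sleeptime(struct task_struct *prev, u64 rq_clock)
{
	trace_sched_stat_sleeptime(prev, rq_clock);
}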