Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.instrumentation | 49
-rw-r--r--  kernel/Kconfig.preempt | 3
-rw-r--r--  kernel/Makefile | 9
-rw-r--r--  kernel/acct.c | 66
-rw-r--r--  kernel/audit.c | 30
-rw-r--r--  kernel/auditfilter.c | 4
-rw-r--r--  kernel/auditsc.c | 12
-rw-r--r--  kernel/capability.c | 204
-rw-r--r--  kernel/cgroup.c | 2805
-rw-r--r--  kernel/cgroup_debug.c | 97
-rw-r--r--  kernel/compat.c | 117
-rw-r--r--  kernel/cpu.c | 14
-rw-r--r--  kernel/cpu_acct.c | 186
-rw-r--r--  kernel/cpuset.c | 1736
-rw-r--r--  kernel/delayacct.c | 8
-rw-r--r--  kernel/die_notifier.c | 38
-rw-r--r--  kernel/dma.c | 8
-rw-r--r--  kernel/exec_domain.c | 2
-rw-r--r--  kernel/exit.c | 273
-rw-r--r--  kernel/fork.c | 181
-rw-r--r--  kernel/futex.c | 33
-rw-r--r--  kernel/futex_compat.c | 3
-rw-r--r--  kernel/hrtimer.c | 53
-rw-r--r--  kernel/irq/chip.c | 3
-rw-r--r--  kernel/irq/manage.c | 31
-rw-r--r--  kernel/itimer.c | 4
-rw-r--r--  kernel/kexec.c | 285
-rw-r--r--  kernel/kprobes.c | 62
-rw-r--r--  kernel/ksysfs.c | 18
-rw-r--r--  kernel/lockdep.c | 48
-rw-r--r--  kernel/lockdep_proc.c | 61
-rw-r--r--  kernel/marker.c | 525
-rw-r--r--  kernel/module.c | 198
-rw-r--r--  kernel/mutex.c | 35
-rw-r--r--  kernel/notifier.c | 539
-rw-r--r--  kernel/ns_cgroup.c | 100
-rw-r--r--  kernel/nsproxy.c | 78
-rw-r--r--  kernel/panic.c | 10
-rw-r--r--  kernel/params.c | 25
-rw-r--r--  kernel/pid.c | 353
-rw-r--r--  kernel/posix-cpu-timers.c | 12
-rw-r--r--  kernel/posix-timers.c | 30
-rw-r--r--  kernel/power/Kconfig | 11
-rw-r--r--  kernel/power/disk.c | 156
-rw-r--r--  kernel/power/main.c | 48
-rw-r--r--  kernel/power/power.h | 21
-rw-r--r--  kernel/power/process.c | 141
-rw-r--r--  kernel/power/snapshot.c | 53
-rw-r--r--  kernel/power/swsusp.c | 33
-rw-r--r--  kernel/power/user.c | 4
-rw-r--r--  kernel/printk.c | 127
-rw-r--r--  kernel/profile.c | 6
-rw-r--r--  kernel/ptrace.c | 18
-rw-r--r--  kernel/rcupdate.c | 9
-rw-r--r--  kernel/rcutorture.c | 10
-rw-r--r--  kernel/relay.c | 6
-rw-r--r--  kernel/resource.c | 26
-rw-r--r--  kernel/rtmutex-debug.c | 22
-rw-r--r--  kernel/rtmutex.c | 2
-rw-r--r--  kernel/sched.c | 1847
-rw-r--r--  kernel/sched_debug.c | 282
-rw-r--r--  kernel/sched_fair.c | 809
-rw-r--r--  kernel/sched_idletask.c | 8
-rw-r--r--  kernel/sched_rt.c | 19
-rw-r--r--  kernel/sched_stats.h | 36
-rw-r--r--  kernel/signal.c | 91
-rw-r--r--  kernel/softirq.c | 4
-rw-r--r--  kernel/softlockup.c | 54
-rw-r--r--  kernel/sys.c | 587
-rw-r--r--  kernel/sys_ni.c | 4
-rw-r--r--  kernel/sysctl.c | 373
-rw-r--r--  kernel/sysctl_check.c | 1588
-rw-r--r--  kernel/taskstats.c | 68
-rw-r--r--  kernel/time.c | 21
-rw-r--r--  kernel/time/Kconfig | 5
-rw-r--r--  kernel/time/Makefile | 2
-rw-r--r--  kernel/time/clockevents.c | 3
-rw-r--r--  kernel/time/clocksource.c | 22
-rw-r--r--  kernel/time/tick-broadcast.c | 51
-rw-r--r--  kernel/time/tick-common.c | 5
-rw-r--r--  kernel/time/tick-sched.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 12
-rw-r--r--  kernel/timer.c | 16
-rw-r--r--  kernel/tsacct.c | 4
-rw-r--r--  kernel/user.c | 260
-rw-r--r--  kernel/workqueue.c | 38
86 files changed, 11050 insertions, 4202 deletions
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
new file mode 100644
index 000000000000..f5f2c769d95e
--- /dev/null
+++ b/kernel/Kconfig.instrumentation
@@ -0,0 +1,49 @@
+menuconfig INSTRUMENTATION
+ bool "Instrumentation Support"
+ default y
+ ---help---
+ Say Y here to get to see options related to performance measurement,
+ system-wide debugging, and testing. This option alone does not add any
+ kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled. If you're trying to debug the kernel itself, go see the
+ Kernel Hacking menu.
+
+if INSTRUMENTATION
+
+config PROFILING
+ bool "Profiling support (EXPERIMENTAL)"
+ help
+ Say Y here to enable the extended profiling support mechanisms used
+ by profilers such as OProfile.
+
+config OPROFILE
+ tristate "OProfile system profiling (EXPERIMENTAL)"
+ depends on PROFILING
+ depends on ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64
+ help
+ OProfile is a profiling system capable of profiling the
+ whole system, including the kernel, kernel modules, libraries,
+ and applications.
+
+ If unsure, say N.
+
+config KPROBES
+ bool "Kprobes"
+ depends on KALLSYMS && MODULES
+ depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
+config MARKERS
+ bool "Activate markers"
+ help
+ Place an empty function call at each marker site. Can be
+ dynamically changed for a probe function.
+
+endif # INSTRUMENTATION
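
The KPROBES entry above mentions register_kprobe() without showing how a probe is wired up. As an aside for readers new to the interface, a minimal probe module takes roughly the shape below; this is an illustrative sketch against the 2.6.24-era kprobes API, not part of this patch, and the probed symbol "do_fork" plus every other identifier is invented for the example.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Probe point named by symbol; "do_fork" is only an example target. */
static struct kprobe example_kp = {
        .symbol_name = "do_fork",
};

/* Runs just before the probed instruction executes. */
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit: %s\n", p->symbol_name);
        return 0;
}

static int __init example_init(void)
{
        example_kp.pre_handler = example_pre;
        return register_kprobe(&example_kp);  /* 0 on success, -errno on failure */
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
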
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 6b066632e40c..c64ce9c14207 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -63,6 +63,3 @@ config PREEMPT_BKL
Say Y here if you are building a kernel for a desktop system.
Say N if you are unsure.
-config PREEMPT_NOTIFIERS
- bool
-
diff --git a/kernel/Makefile b/kernel/Makefile
index 2a999836ca18..05c3e6df8597 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \
- utsname.o
+ hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
+ utsname.o sysctl_check.o notifier.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
@@ -36,7 +36,11 @@ obj-$(CONFIG_PM) += power/
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_COMPAT) += compat.o
+obj-$(CONFIG_CGROUPS) += cgroup.o
+obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
obj-$(CONFIG_CPUSETS) += cpuset.o
+obj-$(CONFIG_CGROUP_CPUACCT) += cpu_acct.o
+obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
@@ -51,6 +55,7 @@ obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
+obj-$(CONFIG_MARKERS) += marker.o
ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f0f8b2ba72..fce53d8df8a7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -329,16 +329,16 @@ static comp_t encode_comp_t(unsigned long value)
}
/*
- * If we need to round up, do it (and handle overflow correctly).
- */
+ * If we need to round up, do it (and handle overflow correctly).
+ */
if (rnd && (++value > MAXFRACT)) {
value >>= EXPSIZE;
exp++;
}
/*
- * Clean it up and polish it off.
- */
+ * Clean it up and polish it off.
+ */
exp <<= MANTSIZE; /* Shift the exponent into place */
exp += value; /* and add on the mantissa. */
return exp;
@@ -361,30 +361,30 @@ static comp_t encode_comp_t(unsigned long value)
static comp2_t encode_comp2_t(u64 value)
{
- int exp, rnd;
-
- exp = (value > (MAXFRACT2>>1));
- rnd = 0;
- while (value > MAXFRACT2) {
- rnd = value & 1;
- value >>= 1;
- exp++;
- }
-
- /*
- * If we need to round up, do it (and handle overflow correctly).
- */
- if (rnd && (++value > MAXFRACT2)) {
- value >>= 1;
- exp++;
- }
-
- if (exp > MAXEXP2) {
- /* Overflow. Return largest representable number instead. */
- return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1;
- } else {
- return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
- }
+ int exp, rnd;
+
+ exp = (value > (MAXFRACT2>>1));
+ rnd = 0;
+ while (value > MAXFRACT2) {
+ rnd = value & 1;
+ value >>= 1;
+ exp++;
+ }
+
+ /*
+ * If we need to round up, do it (and handle overflow correctly).
+ */
+ if (rnd && (++value > MAXFRACT2)) {
+ value >>= 1;
+ exp++;
+ }
+
+ if (exp > MAXEXP2) {
+ /* Overflow. Return largest representable number instead. */
+ return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1;
+ } else {
+ return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
+ }
}
#endif
@@ -501,14 +501,14 @@ static void do_acct_process(struct file *file)
ac.ac_swaps = encode_comp_t(0);
/*
- * Kernel segment override to datasegment and write it
- * to the accounting file.
- */
+ * Kernel segment override to datasegment and write it
+ * to the accounting file.
+ */
fs = get_fs();
set_fs(KERNEL_DS);
/*
- * Accounting records are not subject to resource limits.
- */
+ * Accounting records are not subject to resource limits.
+ */
flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
file->f_op->write(file, (char *)&ac,
diff --git a/kernel/audit.c b/kernel/audit.c
index eb0f9165b401..6977ea57a7e2 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -664,11 +664,11 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (sid) {
if (selinux_sid_to_string(
sid, &ctx, &len)) {
- audit_log_format(ab,
+ audit_log_format(ab,
" ssid=%u", sid);
/* Maybe call audit_panic? */
} else
- audit_log_format(ab,
+ audit_log_format(ab,
" subj=%s", ctx);
kfree(ctx);
}
@@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
sig_data->pid = audit_sig_pid;
memcpy(sig_data->ctx, ctx, len);
kfree(ctx);
- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
+ audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
0, 0, sig_data, sizeof(*sig_data) + len);
kfree(sig_data);
break;
@@ -847,18 +847,10 @@ static void audit_receive_skb(struct sk_buff *skb)
}
/* Receive messages from netlink socket. */
-static void audit_receive(struct sock *sk, int length)
+static void audit_receive(struct sk_buff *skb)
{
- struct sk_buff *skb;
- unsigned int qlen;
-
mutex_lock(&audit_cmd_mutex);
-
- for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
- skb = skb_dequeue(&sk->sk_receive_queue);
- audit_receive_skb(skb);
- kfree_skb(skb);
- }
+ audit_receive_skb(skb);
mutex_unlock(&audit_cmd_mutex);
}
@@ -876,8 +868,8 @@ static int __init audit_init(void)
printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled");
- audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive,
- NULL, THIS_MODULE);
+ audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
+ audit_receive, NULL, THIS_MODULE);
if (!audit_sock)
audit_panic("cannot initialize netlink socket");
else
@@ -1013,7 +1005,7 @@ unsigned int audit_serial(void)
return ret;
}
-static inline void audit_get_stamp(struct audit_context *ctx,
+static inline void audit_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial)
{
if (ctx)
@@ -1064,7 +1056,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
if (gfp_mask & __GFP_WAIT)
reserve = 0;
else
- reserve = 5; /* Allow atomic callers to go up to five
+ reserve = 5; /* Allow atomic callers to go up to five
entries over the normal backlog limit */
while (audit_backlog_limit
@@ -1327,7 +1319,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_format(ab, "<too long>");
- } else
+ } else
audit_log_untrustedstring(ab, p);
kfree(path);
}
@@ -1373,7 +1365,7 @@ void audit_log_end(struct audit_buffer *ab)
* audit_log_vformat, and audit_log_end. It may be called
* in any context.
*/
-void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
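
The audit_init()/audit_receive() hunks above track this release's netlink API change: netlink_kernel_create() now takes the network namespace plus a per-skb input callback, and the core hands the callback one sk_buff at a time, so the old skb_dequeue() loop over sk->sk_receive_queue goes away. A condensed sketch of the new registration pattern follows; it is illustrative only, the protocol choice and all example_* names are invented, and only the netlink helpers themselves come from the kernel API.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *example_nlsk;

/* Called by the netlink core once per received sk_buff. */
static void example_nl_input(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
        printk(KERN_INFO "netlink message, type %d\n", nlh->nlmsg_type);
}

static int __init example_nl_init(void)
{
        /* &init_net: the initial network namespace, as in the hunk above */
        example_nlsk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, 0,
                                             example_nl_input, NULL,
                                             THIS_MODULE);
        return example_nlsk ? 0 : -ENOMEM;
}
module_init(example_nl_init);
MODULE_LICENSE("GPL");
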
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 359645cff5b2..df66a21fb360 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1498,7 +1498,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
* auditctl to read from it... which isn't ever going to
* happen if we're actually running in the context of auditctl
* trying to _send_ the stuff */
-
+
dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
if (!dest)
return -ENOMEM;
@@ -1678,7 +1678,7 @@ int audit_filter_type(int type)
{
struct audit_entry *e;
int result = 0;
-
+
rcu_read_lock();
if (list_empty(&audit_filter_list[AUDIT_FILTER_TYPE]))
goto unlock_and_return;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 04f3ffb8d9d4..e19b5a33aede 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -45,7 +45,6 @@
#include <linux/init.h>
#include <asm/types.h>
#include <asm/atomic.h>
-#include <asm/types.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
@@ -321,7 +320,7 @@ static int audit_filter_rules(struct task_struct *tsk,
result = audit_comparator(tsk->personality, f->op, f->val);
break;
case AUDIT_ARCH:
- if (ctx)
+ if (ctx)
result = audit_comparator(ctx->arch, f->op, f->val);
break;
@@ -899,7 +898,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
if (context->personality != PER_LINUX)
audit_log_format(ab, " per=%lx", context->personality);
if (context->return_valid)
- audit_log_format(ab, " success=%s exit=%ld",
+ audit_log_format(ab, " success=%s exit=%ld",
(context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
context->return_code);
@@ -1136,8 +1135,8 @@ void audit_free(struct task_struct *tsk)
return;
/* Check for system calls that do not go through the exit
- * function (e.g., exit_group), then free context block.
- * We use GFP_ATOMIC here because we might be doing this
+ * function (e.g., exit_group), then free context block.
+ * We use GFP_ATOMIC here because we might be doing this
* in the context of the idle thread */
/* that can happen only if we are called from do_exit() */
if (context->in_syscall && context->auditable)
@@ -1317,7 +1316,7 @@ void __audit_getname(const char *name)
context->pwdmnt = mntget(current->fs->pwdmnt);
read_unlock(&current->fs->lock);
}
-
+
}
/* audit_putname - intercept a putname request
@@ -1525,6 +1524,7 @@ add_names:
context->names[idx].ino = (unsigned long)-1;
}
}
+EXPORT_SYMBOL_GPL(__audit_inode_child);
/**
* auditsc_get_stamp - get local copies of audit_context values
diff --git a/kernel/capability.c b/kernel/capability.c
index c8d3c7762034..efbd9cdce132 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -3,23 +3,18 @@
*
* Copyright (C) 1997 Andrew Main <zefram@fysh.org>
*
- * Integrated into 2.1.97+, Andrew G. Morgan <morgan@transmeta.com>
+ * Integrated into 2.1.97+, Andrew G. Morgan <morgan@kernel.org>
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
- */
+ */
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
-
-EXPORT_SYMBOL(securebits);
-EXPORT_SYMBOL(cap_bset);
-
/*
* This lock protects task->cap_* for all tasks including current.
* Locking rule: acquire this prior to tasklist_lock.
@@ -43,49 +38,49 @@ static DEFINE_SPINLOCK(task_capability_lock);
*/
asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
- int ret = 0;
- pid_t pid;
- __u32 version;
- struct task_struct *target;
- struct __user_cap_data_struct data;
-
- if (get_user(version, &header->version))
- return -EFAULT;
-
- if (version != _LINUX_CAPABILITY_VERSION) {
- if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
- return -EFAULT;
- return -EINVAL;
- }
+ int ret = 0;
+ pid_t pid;
+ __u32 version;
+ struct task_struct *target;
+ struct __user_cap_data_struct data;
+
+ if (get_user(version, &header->version))
+ return -EFAULT;
+
+ if (version != _LINUX_CAPABILITY_VERSION) {
+ if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+ return -EFAULT;
+ return -EINVAL;
+ }
- if (get_user(pid, &header->pid))
- return -EFAULT;
+ if (get_user(pid, &header->pid))
+ return -EFAULT;
- if (pid < 0)
- return -EINVAL;
+ if (pid < 0)
+ return -EINVAL;
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
+ spin_lock(&task_capability_lock);
+ read_lock(&tasklist_lock);
- if (pid && pid != current->pid) {
- target = find_task_by_pid(pid);
- if (!target) {
- ret = -ESRCH;
- goto out;
- }
- } else
- target = current;
+ if (pid && pid != task_pid_vnr(current)) {
+ target = find_task_by_vpid(pid);
+ if (!target) {
+ ret = -ESRCH;
+ goto out;
+ }
+ } else
+ target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
out:
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
+ read_unlock(&tasklist_lock);
+ spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret && copy_to_user(dataptr, &data, sizeof data))
+ return -EFAULT;
- return ret;
+ return ret;
}
/*
@@ -101,7 +96,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
int found = 0;
struct pid *pgrp;
- pgrp = find_pid(pgrp_nr);
+ pgrp = find_vpid(pgrp_nr);
do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
target = g;
while_each_thread(g, target) {
@@ -118,7 +113,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
if (!found)
- ret = 0;
+ ret = 0;
return ret;
}
@@ -135,7 +130,7 @@ static inline int cap_set_all(kernel_cap_t *effective,
int found = 0;
do_each_thread(g, target) {
- if (target == current || is_init(target))
+ if (target == current || is_container_init(target->group_leader))
continue;
found = 1;
if (security_capset_check(target, effective, inheritable,
@@ -172,68 +167,68 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
- kernel_cap_t inheritable, permitted, effective;
- __u32 version;
- struct task_struct *target;
- int ret;
- pid_t pid;
-
- if (get_user(version, &header->version))
- return -EFAULT;
-
- if (version != _LINUX_CAPABILITY_VERSION) {
- if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
- return -EFAULT;
- return -EINVAL;
- }
-
- if (get_user(pid, &header->pid))
- return -EFAULT;
-
- if (pid && pid != current->pid && !capable(CAP_SETPCAP))
- return -EPERM;
-
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
- return -EFAULT;
-
- spin_lock(&task_capability_lock);
- read_lock(&tasklist_lock);
-
- if (pid > 0 && pid != current->pid) {
- target = find_task_by_pid(pid);
- if (!target) {
- ret = -ESRCH;
- goto out;
- }
- } else
- target = current;
-
- ret = 0;
-
- /* having verified that the proposed changes are legal,
- we now put them into effect. */
- if (pid < 0) {
- if (pid == -1) /* all procs other than current and init */
- ret = cap_set_all(&effective, &inheritable, &permitted);
-
- else /* all procs in process group */
- ret = cap_set_pg(-pid, &effective, &inheritable,
- &permitted);
- } else {
- ret = security_capset_check(target, &effective, &inheritable,
- &permitted);
- if (!ret)
- security_capset_set(target, &effective, &inheritable,
- &permitted);
- }
+ kernel_cap_t inheritable, permitted, effective;
+ __u32 version;
+ struct task_struct *target;
+ int ret;
+ pid_t pid;
+
+ if (get_user(version, &header->version))
+ return -EFAULT;
+
+ if (version != _LINUX_CAPABILITY_VERSION) {
+ if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
+ return -EFAULT;
+ return -EINVAL;
+ }
+
+ if (get_user(pid, &header->pid))
+ return -EFAULT;
+
+ if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
+ return -EPERM;
+
+ if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
+ copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
+ copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ return -EFAULT;
+
+ spin_lock(&task_capability_lock);
+ read_lock(&tasklist_lock);
+
+ if (pid > 0 && pid != task_pid_vnr(current)) {
+ target = find_task_by_vpid(pid);
+ if (!target) {
+ ret = -ESRCH;
+ goto out;
+ }
+ } else
+ target = current;
+
+ ret = 0;
+
+ /* having verified that the proposed changes are legal,
+ we now put them into effect. */
+ if (pid < 0) {
+ if (pid == -1) /* all procs other than current and init */
+ ret = cap_set_all(&effective, &inheritable, &permitted);
+
+ else /* all procs in process group */
+ ret = cap_set_pg(-pid, &effective, &inheritable,
+ &permitted);
+ } else {
+ ret = security_capset_check(target, &effective, &inheritable,
+ &permitted);
+ if (!ret)
+ security_capset_set(target, &effective, &inheritable,
+ &permitted);
+ }
out:
- read_unlock(&tasklist_lock);
- spin_unlock(&task_capability_lock);
+ read_unlock(&tasklist_lock);
+ spin_unlock(&task_capability_lock);
- return ret;
+ return ret;
}
int __capable(struct task_struct *t, int cap)
@@ -244,7 +239,6 @@ int __capable(struct task_struct *t, int cap)
}
return 0;
}
-EXPORT_SYMBOL(__capable);
int capable(int cap)
{
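
The capability.c conversion above is part of the pid-namespace work merged here: comparisons against current->pid become task_pid_vnr(current), and find_task_by_pid() becomes find_task_by_vpid(), so the pid supplied by userspace is interpreted in the caller's pid namespace rather than the initial one. A minimal sketch of that lookup pattern follows; it is illustrative only, the helper name is invented, and it assumes the caller holds RCU or tasklist_lock exactly as sys_capget()/sys_capset() do.

#include <linux/sched.h>
#include <linux/pid.h>

/*
 * Resolve a userspace-supplied pid the way the patched syscalls do:
 * 0 (or the caller's own virtual pid) means "current"; anything else
 * is looked up in the caller's pid namespace.
 */
static struct task_struct *example_resolve_vpid(pid_t pid)
{
        if (!pid || pid == task_pid_vnr(current))
                return current;
        return find_task_by_vpid(pid);  /* NULL if no such task in this namespace */
}
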
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
new file mode 100644
index 000000000000..5987dccdb2a0
--- /dev/null
+++ b/kernel/cgroup.c
@@ -0,0 +1,2805 @@
+/*
+ * kernel/cgroup.c
+ *
+ * Generic process-grouping system.
+ *
+ * Based originally on the cpuset system, extracted by Paul Menage
+ * Copyright (C) 2006 Google, Inc
+ *
+ * Copyright notices from the original cpuset code:
+ * --------------------------------------------------
+ * Copyright (C) 2003 BULL SA.
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ *
+ * Portions derived from Patrick Mochel's sysfs code.
+ * sysfs is Copyright (c) 2001-3 Patrick Mochel
+ *
+ * 2003-10-10 Written by Simon Derr.
+ * 2003-10-22 Updates by Stephen Hemminger.
+ * 2004 May-July Rework by Paul Jackson.
+ * ---------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <linux/cgroup.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/backing-dev.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/magic.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sort.h>
+#include <linux/kmod.h>
+#include <linux/delayacct.h>
+#include <linux/cgroupstats.h>
+
+#include <asm/atomic.h>
+
+static DEFINE_MUTEX(cgroup_mutex);
+
+/* Generate an array of cgroup subsystem pointers */
+#define SUBSYS(_x) &_x ## _subsys,
+
+static struct cgroup_subsys *subsys[] = {
+#include <linux/cgroup_subsys.h>
+};
+
+/*
+ * A cgroupfs_root represents the root of a cgroup hierarchy,
+ * and may be associated with a superblock to form an active
+ * hierarchy
+ */
+struct cgroupfs_root {
+ struct super_block *sb;
+
+ /*
+ * The bitmask of subsystems intended to be attached to this
+ * hierarchy
+ */
+ unsigned long subsys_bits;
+
+ /* The bitmask of subsystems currently attached to this hierarchy */
+ unsigned long actual_subsys_bits;
+
+ /* A list running through the attached subsystems */
+ struct list_head subsys_list;
+
+ /* The root cgroup for this hierarchy */
+ struct cgroup top_cgroup;
+
+ /* Tracks how many cgroups are currently defined in hierarchy.*/
+ int number_of_cgroups;
+
+ /* A list running through the mounted hierarchies */
+ struct list_head root_list;
+
+ /* Hierarchy-specific flags */
+ unsigned long flags;
+
+ /* The path to use for release notifications. No locking
+ * between setting and use - so if userspace updates this
+ * while child cgroups exist, you could miss a
+ * notification. We ensure that it's always a valid
+ * NUL-terminated string */
+ char release_agent_path[PATH_MAX];
+};
+
+
+/*
+ * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
+ * subsystems that are otherwise unattached - it never has more than a
+ * single cgroup, and all tasks are part of that cgroup.
+ */
+static struct cgroupfs_root rootnode;
+
+/* The list of hierarchy roots */
+
+static LIST_HEAD(roots);
+static int root_count;
+
+/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
+#define dummytop (&rootnode.top_cgroup)
+
+/* This flag indicates whether tasks in the fork and exit paths should
+ * take callback_mutex and check for fork/exit handlers to call. This
+ * avoids us having to do extra work in the fork/exit path if none of the
+ * subsystems need to be called.
+ */
+static int need_forkexit_callback;
+
+/* bits in struct cgroup flags field */
+enum {
+ /* Control Group is dead */
+ CGRP_REMOVED,
+ /* Control Group has previously had a child cgroup or a task,
+ * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+ CGRP_RELEASABLE,
+ /* Control Group requires release notifications to userspace */
+ CGRP_NOTIFY_ON_RELEASE,
+};
+
+/* convenient tests for these bits */
+inline int cgroup_is_removed(const struct cgroup *cgrp)
+{
+ return test_bit(CGRP_REMOVED, &cgrp->flags);
+}
+
+/* bits in struct cgroupfs_root flags field */
+enum {
+ ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
+};
+
+inline int cgroup_is_releasable(const struct cgroup *cgrp)
+{
+ const int bits =
+ (1 << CGRP_RELEASABLE) |
+ (1 << CGRP_NOTIFY_ON_RELEASE);
+ return (cgrp->flags & bits) == bits;
+}
+
+inline int notify_on_release(const struct cgroup *cgrp)
+{
+ return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+}
+
+/*
+ * for_each_subsys() allows you to iterate on each subsystem attached to
+ * an active hierarchy
+ */
+#define for_each_subsys(_root, _ss) \
+list_for_each_entry(_ss, &_root->subsys_list, sibling)
+
+/* for_each_root() allows you to iterate across the active hierarchies */
+#define for_each_root(_root) \
+list_for_each_entry(_root, &roots, root_list)
+
+/* the list of cgroups eligible for automatic release. Protected by
+ * release_list_lock */
+static LIST_HEAD(release_list);
+static DEFINE_SPINLOCK(release_list_lock);
+static void cgroup_release_agent(struct work_struct *work);
+static DECLARE_WORK(release_agent_work, cgroup_release_agent);
+static void check_for_release(struct cgroup *cgrp);
+
+/* Link structure for associating css_set objects with cgroups */
+struct cg_cgroup_link {
+ /*
+ * List running through cg_cgroup_links associated with a
+ * cgroup, anchored on cgroup->css_sets
+ */
+ struct list_head cgrp_link_list;
+ /*
+ * List running through cg_cgroup_links pointing at a
+ * single css_set object, anchored on css_set->cg_links
+ */
+ struct list_head cg_link_list;
+ struct css_set *cg;
+};
+
+/* The default css_set - used by init and its children prior to any
+ * hierarchies being mounted. It contains a pointer to the root state
+ * for each subsystem. Also used to anchor the list of css_sets. Not
+ * reference-counted, to improve performance when child cgroups
+ * haven't been created.
+ */
+
+static struct css_set init_css_set;
+static struct cg_cgroup_link init_css_set_link;
+
+/* css_set_lock protects the list of css_set objects, and the
+ * chain of tasks off each css_set. Nests outside task->alloc_lock
+ * due to cgroup_iter_start() */
+static DEFINE_RWLOCK(css_set_lock);
+static int css_set_count;
+
+/* We don't maintain the lists running through each css_set to its
+ * task until after the first call to cgroup_iter_start(). This
+ * reduces the fork()/exit() overhead for people who have cgroups
+ * compiled into their kernel but not actually in use */
+static int use_task_css_set_links;
+
+/* When we create or destroy a css_set, the operation simply
+ * takes/releases a reference count on all the cgroups referenced
+ * by subsystems in this css_set. This can end up multiple-counting
+ * some cgroups, but that's OK - the ref-count is just a
+ * busy/not-busy indicator; ensuring that we only count each cgroup
+ * once would require taking a global lock to ensure that no
+ * subsystems moved between hierarchies while we were doing so.
+ *
+ * Possible TODO: decide at boot time based on the number of
+ * registered subsystems and the number of CPUs or NUMA nodes whether
+ * it's better for performance to ref-count every subsystem, or to
+ * take a global lock and only add one ref count to each hierarchy.
+ */
+
+/*
+ * unlink a css_set from the list and free it
+ */
+static void unlink_css_set(struct css_set *cg)
+{
+ write_lock(&css_set_lock);
+ list_del(&cg->list);
+ css_set_count--;
+ while (!list_empty(&cg->cg_links)) {
+ struct cg_cgroup_link *link;
+ link = list_entry(cg->cg_links.next,
+ struct cg_cgroup_link, cg_link_list);
+ list_del(&link->cg_link_list);
+ list_del(&link->cgrp_link_list);
+ kfree(link);
+ }
+ write_unlock(&css_set_lock);
+}
+
+static void __release_css_set(struct kref *k, int taskexit)
+{
+ int i;
+ struct css_set *cg = container_of(k, struct css_set, ref);
+
+ unlink_css_set(cg);
+
+ rcu_read_lock();
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup *cgrp = cg->subsys[i]->cgroup;
+ if (atomic_dec_and_test(&cgrp->count) &&
+ notify_on_release(cgrp)) {
+ if (taskexit)
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
+ }
+ rcu_read_unlock();
+ kfree(cg);
+}
+
+static void release_css_set(struct kref *k)
+{
+ __release_css_set(k, 0);
+}
+
+static void release_css_set_taskexit(struct kref *k)
+{
+ __release_css_set(k, 1);
+}
+
+/*
+ * refcounted get/put for css_set objects
+ */
+static inline void get_css_set(struct css_set *cg)
+{
+ kref_get(&cg->ref);
+}
+
+static inline void put_css_set(struct css_set *cg)
+{
+ kref_put(&cg->ref, release_css_set);
+}
+
+static inline void put_css_set_taskexit(struct css_set *cg)
+{
+ kref_put(&cg->ref, release_css_set_taskexit);
+}
+
+/*
+ * find_existing_css_set() is a helper for
+ * find_css_set(), and checks to see whether an existing
+ * css_set is suitable. This currently walks a linked-list for
+ * simplicity; a later patch will use a hash table for better
+ * performance
+ *
+ * oldcg: the cgroup group that we're using before the cgroup
+ * transition
+ *
+ * cgrp: the cgroup that we're moving into
+ *
+ * template: location in which to build the desired set of subsystem
+ * state objects for the new cgroup group
+ */
+
+static struct css_set *find_existing_css_set(
+ struct css_set *oldcg,
+ struct cgroup *cgrp,
+ struct cgroup_subsys_state *template[])
+{
+ int i;
+ struct cgroupfs_root *root = cgrp->root;
+ struct list_head *l = &init_css_set.list;
+
+ /* Built the set of subsystem state objects that we want to
+ * see in the new css_set */
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ if (root->subsys_bits & (1ull << i)) {
+ /* Subsystem is in this hierarchy. So we want
+ * the subsystem state from the new
+ * cgroup */
+ template[i] = cgrp->subsys[i];
+ } else {
+ /* Subsystem is not in this hierarchy, so we
+ * don't want to change the subsystem state */
+ template[i] = oldcg->subsys[i];
+ }
+ }
+
+ /* Look through existing cgroup groups to find one to reuse */
+ do {
+ struct css_set *cg =
+ list_entry(l, struct css_set, list);
+
+ if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
+ /* All subsystems matched */
+ return cg;
+ }
+ /* Try the next cgroup group */
+ l = l->next;
+ } while (l != &init_css_set.list);
+
+ /* No existing cgroup group matched */
+ return NULL;
+}
+
+/*
+ * allocate_cg_links() allocates "count" cg_cgroup_link structures
+ * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
+ * success or a negative error
+ */
+
+static int allocate_cg_links(int count, struct list_head *tmp)
+{
+ struct cg_cgroup_link *link;
+ int i;
+ INIT_LIST_HEAD(tmp);
+ for (i = 0; i < count; i++) {
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ while (!list_empty(tmp)) {
+ link = list_entry(tmp->next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ kfree(link);
+ }
+ return -ENOMEM;
+ }
+ list_add(&link->cgrp_link_list, tmp);
+ }
+ return 0;
+}
+
+static void free_cg_links(struct list_head *tmp)
+{
+ while (!list_empty(tmp)) {
+ struct cg_cgroup_link *link;
+ link = list_entry(tmp->next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ kfree(link);
+ }
+}
+
+/*
+ * find_css_set() takes an existing cgroup group and a
+ * cgroup object, and returns a css_set object that's
+ * equivalent to the old group, but with the given cgroup
+ * substituted into the appropriate hierarchy. Must be called with
+ * cgroup_mutex held
+ */
+
+static struct css_set *find_css_set(
+ struct css_set *oldcg, struct cgroup *cgrp)
+{
+ struct css_set *res;
+ struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
+ int i;
+
+ struct list_head tmp_cg_links;
+ struct cg_cgroup_link *link;
+
+ /* First see if we already have a cgroup group that matches
+ * the desired set */
+ write_lock(&css_set_lock);
+ res = find_existing_css_set(oldcg, cgrp, template);
+ if (res)
+ get_css_set(res);
+ write_unlock(&css_set_lock);
+
+ if (res)
+ return res;
+
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+ /* Allocate all the cg_cgroup_link objects that we'll need */
+ if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
+ kfree(res);
+ return NULL;
+ }
+
+ kref_init(&res->ref);
+ INIT_LIST_HEAD(&res->cg_links);
+ INIT_LIST_HEAD(&res->tasks);
+
+ /* Copy the set of subsystem state objects generated in
+ * find_existing_css_set() */
+ memcpy(res->subsys, template, sizeof(res->subsys));
+
+ write_lock(&css_set_lock);
+ /* Add reference counts and links from the new css_set. */
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup *cgrp = res->subsys[i]->cgroup;
+ struct cgroup_subsys *ss = subsys[i];
+ atomic_inc(&cgrp->count);
+ /*
+ * We want to add a link once per cgroup, so we
+ * only do it for the first subsystem in each
+ * hierarchy
+ */
+ if (ss->root->subsys_list.next == &ss->sibling) {
+ BUG_ON(list_empty(&tmp_cg_links));
+ link = list_entry(tmp_cg_links.next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ list_add(&link->cgrp_link_list, &cgrp->css_sets);
+ link->cg = res;
+ list_add(&link->cg_link_list, &res->cg_links);
+ }
+ }
+ if (list_empty(&rootnode.subsys_list)) {
+ link = list_entry(tmp_cg_links.next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ list_add(&link->cgrp_link_list, &dummytop->css_sets);
+ link->cg = res;
+ list_add(&link->cg_link_list, &res->cg_links);
+ }
+
+ BUG_ON(!list_empty(&tmp_cg_links));
+
+ /* Link this cgroup group into the list */
+ list_add(&res->list, &init_css_set.list);
+ css_set_count++;
+ INIT_LIST_HEAD(&res->tasks);
+ write_unlock(&css_set_lock);
+
+ return res;
+}
+
+/*
+ * There is one global cgroup mutex. We also require taking
+ * task_lock() when dereferencing a task's cgroup subsys pointers.
+ * See "The task_lock() exception", at the end of this comment.
+ *
+ * A task must hold cgroup_mutex to modify cgroups.
+ *
+ * Any task can increment and decrement the count field without lock.
+ * So in general, code holding cgroup_mutex can't rely on the count
+ * field not changing. However, if the count goes to zero, then only
+ * attach_task() can increment it again. Because a count of zero
+ * means that no tasks are currently attached, therefore there is no
+ * way a task attached to that cgroup can fork (the other way to
+ * increment the count). So code holding cgroup_mutex can safely
+ * assume that if the count is zero, it will stay zero. Similarly, if
+ * a task holds cgroup_mutex on a cgroup with zero count, it
+ * knows that the cgroup won't be removed, as cgroup_rmdir()
+ * needs that mutex.
+ *
+ * The cgroup_common_file_write handler for operations that modify
+ * the cgroup hierarchy holds cgroup_mutex across the entire operation,
+ * single threading all such cgroup modifications across the system.
+ *
+ * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
+ * (usually) take cgroup_mutex. These are the two most performance
+ * critical pieces of code here. The exception occurs on cgroup_exit(),
+ * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
+ * is taken, and if the cgroup count is zero, a usermode call made
+ * to /sbin/cgroup_release_agent with the name of the cgroup (path
+ * relative to the root of cgroup file system) as the argument.
+ *
+ * A cgroup can only be deleted if both its 'count' of using tasks
+ * is zero, and its list of 'children' cgroups is empty. Since all
+ * tasks in the system use _some_ cgroup, and since there is always at
+ * least one task in the system (init, pid == 1), therefore, top_cgroup
+ * always has either children cgroups and/or using tasks. So we don't
+ * need a special hack to ensure that top_cgroup cannot be deleted.
+ *
+ * The task_lock() exception
+ *
+ * The need for this exception arises from the action of
+ * attach_task(), which overwrites one task's cgroup pointer with
+ * another. It does so using cgroup_mutex, however there are
+ * several performance critical places that need to reference
+ * task->cgroup without the expense of grabbing a system global
+ * mutex. Therefore except as noted below, when dereferencing or, as
+ * in attach_task(), modifying a task's cgroup pointer we use
+ * task_lock(), which acts on a spinlock (task->alloc_lock) already in
+ * the task_struct routinely used for such matters.
+ *
+ * P.S. One more locking exception. RCU is used to guard the
+ * update of a task's cgroup pointer by attach_task()
+ */
+
+/**
+ * cgroup_lock - lock out any changes to cgroup structures
+ *
+ */
+
+void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+/**
+ * cgroup_unlock - release lock on cgroup changes
+ *
+ * Undo the lock taken in a previous cgroup_lock() call.
+ */
+
+void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
+/*
+ * A couple of forward declarations required, due to cyclic reference loop:
+ * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
+ * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
+ * -> cgroup_mkdir.
+ */
+
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
+static int cgroup_populate_dir(struct cgroup *cgrp);
+static struct inode_operations cgroup_dir_inode_operations;
+static struct file_operations proc_cgroupstats_operations;
+
+static struct backing_dev_info cgroup_backing_dev_info = {
+ .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
+ }
+ return inode;
+}
+
+static void cgroup_diput(struct dentry *dentry, struct inode *inode)
+{
+ /* is dentry a directory ? if so, kfree() associated cgroup */
+ if (S_ISDIR(inode->i_mode)) {
+ struct cgroup *cgrp = dentry->d_fsdata;
+ BUG_ON(!(cgroup_is_removed(cgrp)));
+ /* It's possible for external users to be holding css
+ * reference counts on a cgroup; css_put() needs to
+ * be able to access the cgroup after decrementing
+ * the reference count in order to know if it needs to
+ * queue the cgroup to be handled by the release
+ * agent */
+ synchronize_rcu();
+ kfree(cgrp);
+ }
+ iput(inode);
+}
+
+static void remove_dir(struct dentry *d)
+{
+ struct dentry *parent = dget(d->d_parent);
+
+ d_delete(d);
+ simple_rmdir(parent->d_inode, d);
+ dput(parent);
+}
+
+static void cgroup_clear_directory(struct dentry *dentry)
+{
+ struct list_head *node;
+
+ BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+ spin_lock(&dcache_lock);
+ node = dentry->d_subdirs.next;
+ while (node != &dentry->d_subdirs) {
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+ list_del_init(node);
+ if (d->d_inode) {
+ /* This should never be called on a cgroup
+ * directory with child cgroups */
+ BUG_ON(d->d_inode->i_mode & S_IFDIR);
+ d = dget_locked(d);
+ spin_unlock(&dcache_lock);
+ d_delete(d);
+ simple_unlink(dentry->d_inode, d);
+ dput(d);
+ spin_lock(&dcache_lock);
+ }
+ node = dentry->d_subdirs.next;
+ }
+ spin_unlock(&dcache_lock);
+}
+
+/*
+ * NOTE : the dentry must have been dget()'ed
+ */
+static void cgroup_d_remove_dir(struct dentry *dentry)
+{
+ cgroup_clear_directory(dentry);
+
+ spin_lock(&dcache_lock);
+ list_del_init(&dentry->d_u.d_child);
+ spin_unlock(&dcache_lock);
+ remove_dir(dentry);
+}
+
+static int rebind_subsystems(struct cgroupfs_root *root,
+ unsigned long final_bits)
+{
+ unsigned long added_bits, removed_bits;
+ struct cgroup *cgrp = &root->top_cgroup;
+ int i;
+
+ removed_bits = root->actual_subsys_bits & ~final_bits;
+ added_bits = final_bits & ~root->actual_subsys_bits;
+ /* Check that any added subsystems are currently free */
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ unsigned long long bit = 1ull << i;
+ struct cgroup_subsys *ss = subsys[i];
+ if (!(bit & added_bits))
+ continue;
+ if (ss->root != &rootnode) {
+ /* Subsystem isn't free */
+ return -EBUSY;
+ }
+ }
+
+ /* Currently we don't handle adding/removing subsystems when
+ * any child cgroups exist. This is theoretically supportable
+ * but involves complex error handling, so it's being left until
+ * later */
+ if (!list_empty(&cgrp->children))
+ return -EBUSY;
+
+ /* Process each subsystem */
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ unsigned long bit = 1UL << i;
+ if (bit & added_bits) {
+ /* We're binding this subsystem to this hierarchy */
+ BUG_ON(cgrp->subsys[i]);
+ BUG_ON(!dummytop->subsys[i]);
+ BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
+ cgrp->subsys[i] = dummytop->subsys[i];
+ cgrp->subsys[i]->cgroup = cgrp;
+ list_add(&ss->sibling, &root->subsys_list);
+ rcu_assign_pointer(ss->root, root);
+ if (ss->bind)
+ ss->bind(ss, cgrp);
+
+ } else if (bit & removed_bits) {
+ /* We're removing this subsystem */
+ BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
+ BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
+ if (ss->bind)
+ ss->bind(ss, dummytop);
+ dummytop->subsys[i]->cgroup = dummytop;
+ cgrp->subsys[i] = NULL;
+ rcu_assign_pointer(subsys[i]->root, &rootnode);
+ list_del(&ss->sibling);
+ } else if (bit & final_bits) {
+ /* Subsystem state should already exist */
+ BUG_ON(!cgrp->subsys[i]);
+ } else {
+ /* Subsystem state shouldn't exist */
+ BUG_ON(cgrp->subsys[i]);
+ }
+ }
+ root->subsys_bits = root->actual_subsys_bits = final_bits;
+ synchronize_rcu();
+
+ return 0;
+}
+
+static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
+{
+ struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
+ struct cgroup_subsys *ss;
+
+ mutex_lock(&cgroup_mutex);
+ for_each_subsys(root, ss)
+ seq_printf(seq, ",%s", ss->name);
+ if (test_bit(ROOT_NOPREFIX, &root->flags))
+ seq_puts(seq, ",noprefix");
+ if (strlen(root->release_agent_path))
+ seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+ mutex_unlock(&cgroup_mutex);
+ return 0;
+}
+
+struct cgroup_sb_opts {
+ unsigned long subsys_bits;
+ unsigned long flags;
+ char *release_agent;
+};
+
+/* Convert a hierarchy specifier into a bitmask of subsystems and
+ * flags. */
+static int parse_cgroupfs_options(char *data,
+ struct cgroup_sb_opts *opts)
+{
+ char *token, *o = data ?: "all";
+
+ opts->subsys_bits = 0;
+ opts->flags = 0;
+ opts->release_agent = NULL;
+
+ while ((token = strsep(&o, ",")) != NULL) {
+ if (!*token)
+ return -EINVAL;
+ if (!strcmp(token, "all")) {
+ opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1;
+ } else if (!strcmp(token, "noprefix")) {
+ set_bit(ROOT_NOPREFIX, &opts->flags);
+ } else if (!strncmp(token, "release_agent=", 14)) {
+ /* Specifying two release agents is forbidden */
+ if (opts->release_agent)
+ return -EINVAL;
+ opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (!opts->release_agent)
+ return -ENOMEM;
+ strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
+ opts->release_agent[PATH_MAX - 1] = 0;
+ } else {
+ struct cgroup_subsys *ss;
+ int i;
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ ss = subsys[i];
+ if (!strcmp(token, ss->name)) {
+ set_bit(i, &opts->subsys_bits);
+ break;
+ }
+ }
+ if (i == CGROUP_SUBSYS_COUNT)
+ return -ENOENT;
+ }
+ }
+
+ /* We can't have an empty hierarchy */
+ if (!opts->subsys_bits)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cgroup_remount(struct super_block *sb, int *flags, char *data)
+{
+ int ret = 0;
+ struct cgroupfs_root *root = sb->s_fs_info;
+ struct cgroup *cgrp = &root->top_cgroup;
+ struct cgroup_sb_opts opts;
+
+ mutex_lock(&cgrp->dentry->d_inode->i_mutex);
+ mutex_lock(&cgroup_mutex);
+
+ /* See what subsystems are wanted */
+ ret = parse_cgroupfs_options(data, &opts);
+ if (ret)
+ goto out_unlock;
+
+ /* Don't allow flags to change at remount */
+ if (opts.flags != root->flags) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = rebind_subsystems(root, opts.subsys_bits);
+
+ /* (re)populate subsystem files */
+ if (!ret)
+ cgroup_populate_dir(cgrp);
+
+ if (opts.release_agent)
+ strcpy(root->release_agent_path, opts.release_agent);
+ out_unlock:
+ if (opts.release_agent)
+ kfree(opts.release_agent);
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+ return ret;
+}
+
+static struct super_operations cgroup_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .show_options = cgroup_show_options,
+ .remount_fs = cgroup_remount,
+};
+
+static void init_cgroup_root(struct cgroupfs_root *root)
+{
+ struct cgroup *cgrp = &root->top_cgroup;
+ INIT_LIST_HEAD(&root->subsys_list);
+ INIT_LIST_HEAD(&root->root_list);
+ root->number_of_cgroups = 1;
+ cgrp->root = root;
+ cgrp->top_cgroup = cgrp;
+ INIT_LIST_HEAD(&cgrp->sibling);
+ INIT_LIST_HEAD(&cgrp->children);
+ INIT_LIST_HEAD(&cgrp->css_sets);
+ INIT_LIST_HEAD(&cgrp->release_list);
+}
+
+static int cgroup_test_super(struct super_block *sb, void *data)
+{
+ struct cgroupfs_root *new = data;
+ struct cgroupfs_root *root = sb->s_fs_info;
+
+ /* First check subsystems */
+ if (new->subsys_bits != root->subsys_bits)
+ return 0;
+
+ /* Next check flags */
+ if (new->flags != root->flags)
+ return 0;
+
+ return 1;
+}
+
+static int cgroup_set_super(struct super_block *sb, void *data)
+{
+ int ret;
+ struct cgroupfs_root *root = data;
+
+ ret = set_anon_super(sb, NULL);
+ if (ret)
+ return ret;
+
+ sb->s_fs_info = root;
+ root->sb = sb;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = CGROUP_SUPER_MAGIC;
+ sb->s_op = &cgroup_ops;
+
+ return 0;
+}
+
+static int cgroup_get_rootdir(struct super_block *sb)
+{
+ struct inode *inode =
+ cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
+ struct dentry *dentry;
+
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &cgroup_dir_inode_operations;
+ /* directories start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ dentry = d_alloc_root(inode);
+ if (!dentry) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ sb->s_root = dentry;
+ return 0;
+}
+
+static int cgroup_get_sb(struct file_system_type *fs_type,
+ int flags, const char *unused_dev_name,
+ void *data, struct vfsmount *mnt)
+{
+ struct cgroup_sb_opts opts;
+ int ret = 0;
+ struct super_block *sb;
+ struct cgroupfs_root *root;
+ struct list_head tmp_cg_links, *l;
+ INIT_LIST_HEAD(&tmp_cg_links);
+
+ /* First find the desired set of subsystems */
+ ret = parse_cgroupfs_options(data, &opts);
+ if (ret) {
+ if (opts.release_agent)
+ kfree(opts.release_agent);
+ return ret;
+ }
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return -ENOMEM;
+
+ init_cgroup_root(root);
+ root->subsys_bits = opts.subsys_bits;
+ root->flags = opts.flags;
+ if (opts.release_agent) {
+ strcpy(root->release_agent_path, opts.release_agent);
+ kfree(opts.release_agent);
+ }
+
+ sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);
+
+ if (IS_ERR(sb)) {
+ kfree(root);
+ return PTR_ERR(sb);
+ }
+
+ if (sb->s_fs_info != root) {
+ /* Reusing an existing superblock */
+ BUG_ON(sb->s_root == NULL);
+ kfree(root);
+ root = NULL;
+ } else {
+ /* New superblock */
+ struct cgroup *cgrp = &root->top_cgroup;
+ struct inode *inode;
+
+ BUG_ON(sb->s_root != NULL);
+
+ ret = cgroup_get_rootdir(sb);
+ if (ret)
+ goto drop_new_super;
+ inode = sb->s_root->d_inode;
+
+ mutex_lock(&inode->i_mutex);
+ mutex_lock(&cgroup_mutex);
+
+ /*
+ * We're accessing css_set_count without locking
+ * css_set_lock here, but that's OK - it can only be
+ * increased by someone holding cgroup_lock, and
+ * that's us. The worst that can happen is that we
+ * have some link structures left over
+ */
+ ret = allocate_cg_links(css_set_count, &tmp_cg_links);
+ if (ret) {
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+ goto drop_new_super;
+ }
+
+ ret = rebind_subsystems(root, root->subsys_bits);
+ if (ret == -EBUSY) {
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&inode->i_mutex);
+ goto drop_new_super;
+ }
+
+ /* EBUSY should be the only error here */
+ BUG_ON(ret);
+
+ list_add(&root->root_list, &roots);
+ root_count++;
+
+ sb->s_root->d_fsdata = &root->top_cgroup;
+ root->top_cgroup.dentry = sb->s_root;
+
+ /* Link the top cgroup in this hierarchy into all
+ * the css_set objects */
+ write_lock(&css_set_lock);
+ l = &init_css_set.list;
+ do {
+ struct css_set *cg;
+ struct cg_cgroup_link *link;
+ cg = list_entry(l, struct css_set, list);
+ BUG_ON(list_empty(&tmp_cg_links));
+ link = list_entry(tmp_cg_links.next,
+ struct cg_cgroup_link,
+ cgrp_link_list);
+ list_del(&link->cgrp_link_list);
+ link->cg = cg;
+ list_add(&link->cgrp_link_list,
+ &root->top_cgroup.css_sets);
+ list_add(&link->cg_link_list, &cg->cg_links);
+ l = l->next;
+ } while (l != &init_css_set.list);
+ write_unlock(&css_set_lock);
+
+ free_cg_links(&tmp_cg_links);
+
+ BUG_ON(!list_empty(&cgrp->sibling));
+ BUG_ON(!list_empty(&cgrp->children));
+ BUG_ON(root->number_of_cgroups != 1);
+
+ cgroup_populate_dir(cgrp);
+ mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&cgroup_mutex);
+ }
+
+ return simple_set_mnt(mnt, sb);
+
+ drop_new_super:
+ up_write(&sb->s_umount);
+ deactivate_super(sb);
+ free_cg_links(&tmp_cg_links);
+ return ret;
+}
+
+static void cgroup_kill_sb(struct super_block *sb) {
+ struct cgroupfs_root *root = sb->s_fs_info;
+ struct cgroup *cgrp = &root->top_cgroup;
+ int ret;
+
+ BUG_ON(!root);
+
+ BUG_ON(root->number_of_cgroups != 1);
+ BUG_ON(!list_empty(&cgrp->children));
+ BUG_ON(!list_empty(&cgrp->sibling));
+
+ mutex_lock(&cgroup_mutex);
+
+ /* Rebind all subsystems back to the default hierarchy */
+ ret = rebind_subsystems(root, 0);
+ /* Shouldn't be able to fail ... */
+ BUG_ON(ret);
+
+ /*
+ * Release all the links from css_sets to this hierarchy's
+ * root cgroup
+ */
+ write_lock(&css_set_lock);
+ while (!list_empty(&cgrp->css_sets)) {
+ struct cg_cgroup_link *link;
+ link = list_entry(cgrp->css_sets.next,
+ struct cg_cgroup_link, cgrp_link_list);
+ list_del(&link->cg_link_list);
+ list_del(&link->cgrp_link_list);
+ kfree(link);
+ }
+ write_unlock(&css_set_lock);
+
+ if (!list_empty(&root->root_list)) {
+ list_del(&root->root_list);
+ root_count--;
+ }
+ mutex_unlock(&cgroup_mutex);
+
+ kfree(root);
+ kill_litter_super(sb);
+}
+
+static struct file_system_type cgroup_fs_type = {
+ .name = "cgroup",
+ .get_sb = cgroup_get_sb,
+ .kill_sb = cgroup_kill_sb,
+};
+
+static inline struct cgroup *__d_cgrp(struct dentry *dentry)
+{
+ return dentry->d_fsdata;
+}
+
+static inline struct cftype *__d_cft(struct dentry *dentry)
+{
+ return dentry->d_fsdata;
+}
+
+/*
+ * Called with cgroup_mutex held. Writes path of cgroup into buf.
+ * Returns 0 on success, -errno on error.
+ */
+int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
+{
+ char *start;
+
+ if (cgrp == dummytop) {
+ /*
+ * Inactive subsystems have no dentry for their root
+ * cgroup
+ */
+ strcpy(buf, "/");
+ return 0;
+ }
+
+ start = buf + buflen;
+
+ *--start = '\0';
+ for (;;) {
+ int len = cgrp->dentry->d_name.len;
+ if ((start -= len) < buf)
+ return -ENAMETOOLONG;
+ memcpy(start, cgrp->dentry->d_name.name, len);
+ cgrp = cgrp->parent;
+ if (!cgrp)
+ break;
+ if (!cgrp->parent)
+ continue;
+ if (--start < buf)
+ return -ENAMETOOLONG;
+ *start = '/';
+ }
+ memmove(buf, start, buf + buflen - start);
+ return 0;
+}
+
+/*
+ * Return the first subsystem attached to a cgroup's hierarchy, and
+ * its subsystem id.
+ */
+
+static void get_first_subsys(const struct cgroup *cgrp,
+ struct cgroup_subsys_state **css, int *subsys_id)
+{
+ const struct cgroupfs_root *root = cgrp->root;
+ const struct cgroup_subsys *test_ss;
+ BUG_ON(list_empty(&root->subsys_list));
+ test_ss = list_entry(root->subsys_list.next,
+ struct cgroup_subsys, sibling);
+ if (css) {
+ *css = cgrp->subsys[test_ss->subsys_id];
+ BUG_ON(!*css);
+ }
+ if (subsys_id)
+ *subsys_id = test_ss->subsys_id;
+}
+
+/*
+ * Attach task 'tsk' to cgroup 'cgrp'
+ *
+ * Call holding cgroup_mutex. May take task_lock of
+ * the task 'pid' during call.
+ */
+static int attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+{
+ int retval = 0;
+ struct cgroup_subsys *ss;
+ struct cgroup *oldcgrp;
+ struct css_set *cg = tsk->cgroups;
+ struct css_set *newcg;
+ struct cgroupfs_root *root = cgrp->root;
+ int subsys_id;
+
+ get_first_subsys(cgrp, NULL, &subsys_id);
+
+ /* Nothing to do if the task is already in that cgroup */
+ oldcgrp = task_cgroup(tsk, subsys_id);
+ if (cgrp == oldcgrp)
+ return 0;
+
+ for_each_subsys(root, ss) {
+ if (ss->can_attach) {
+ retval = ss->can_attach(ss, cgrp, tsk);
+ if (retval) {
+ return retval;
+ }
+ }
+ }
+
+ /*
+ * Locate or allocate a new css_set for this task,
+ * based on its final set of cgroups
+ */
+ newcg = find_css_set(cg, cgrp);
+ if (!newcg) {
+ return -ENOMEM;
+ }
+
+ task_lock(tsk);
+ if (tsk->flags & PF_EXITING) {
+ task_unlock(tsk);
+ put_css_set(newcg);
+ return -ESRCH;
+ }
+ rcu_assign_pointer(tsk->cgroups, newcg);
+ task_unlock(tsk);
+
+ /* Update the css_set linked lists if we're using them */
+ write_lock(&css_set_lock);
+ if (!list_empty(&tsk->cg_list)) {
+ list_del(&tsk->cg_list);
+ list_add(&tsk->cg_list, &newcg->tasks);
+ }
+ write_unlock(&css_set_lock);
+
+ for_each_subsys(root, ss) {
+ if (ss->attach) {
+ ss->attach(ss, cgrp, oldcgrp, tsk);
+ }
+ }
+ set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+ synchronize_rcu();
+ put_css_set(cg);
+ return 0;
+}
+
+/*
+ * Attach task with pid 'pid' to cgroup 'cgrp'. Call with
+ * cgroup_mutex, may take task_lock of task
+ */
+static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
+{
+ pid_t pid;
+ struct task_struct *tsk;
+ int ret;
+
+ if (sscanf(pidbuf, "%d", &pid) != 1)
+ return -EIO;
+
+ if (pid) {
+ rcu_read_lock();
+ tsk = find_task_by_pid(pid);
+ if (!tsk || tsk->flags & PF_EXITING) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+ get_task_struct(tsk);
+ rcu_read_unlock();
+
+ if ((current->euid) && (current->euid != tsk->uid)
+ && (current->euid != tsk->suid)) {
+ put_task_struct(tsk);
+ return -EACCES;
+ }
+ } else {
+ tsk = current;
+ get_task_struct(tsk);
+ }
+
+ ret = attach_task(cgrp, tsk);
+ put_task_struct(tsk);
+ return ret;
+}
+
+/* The various types of files and directories in a cgroup file system */
+
+enum cgroup_filetype {
+ FILE_ROOT,
+ FILE_DIR,
+ FILE_TASKLIST,
+ FILE_NOTIFY_ON_RELEASE,
+ FILE_RELEASABLE,
+ FILE_RELEASE_AGENT,
+};
+
+static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft,
+ struct file *file,
+ const char __user *userbuf,
+ size_t nbytes, loff_t *unused_ppos)
+{
+ char buffer[64];
+ int retval = 0;
+ u64 val;
+ char *end;
+
+ if (!nbytes)
+ return -EINVAL;
+ if (nbytes >= sizeof(buffer))
+ return -E2BIG;
+ if (copy_from_user(buffer, userbuf, nbytes))
+ return -EFAULT;
+
+ buffer[nbytes] = 0; /* nul-terminate */
+
+ /* strip newline if necessary */
+ if (nbytes && (buffer[nbytes-1] == '\n'))
+ buffer[nbytes-1] = 0;
+ val = simple_strtoull(buffer, &end, 0);
+ if (*end)
+ return -EINVAL;
+
+ /* Pass to subsystem */
+ retval = cft->write_uint(cgrp, cft, val);
+ if (!retval)
+ retval = nbytes;
+ return retval;
+}
+
+static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
+ struct cftype *cft,
+ struct file *file,
+ const char __user *userbuf,
+ size_t nbytes, loff_t *unused_ppos)
+{
+ enum cgroup_filetype type = cft->private;
+ char *buffer;
+ int retval = 0;
+
+ if (nbytes >= PATH_MAX)
+ return -E2BIG;
+
+ /* +1 for nul-terminator */
+ buffer = kmalloc(nbytes + 1, GFP_KERNEL);
+ if (buffer == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(buffer, userbuf, nbytes)) {
+ retval = -EFAULT;
+ goto out1;
+ }
+ buffer[nbytes] = 0; /* nul-terminate */
+
+ mutex_lock(&cgroup_mutex);
+
+ if (cgroup_is_removed(cgrp)) {
+ retval = -ENODEV;
+ goto out2;
+ }
+
+ switch (type) {
+ case FILE_TASKLIST:
+ retval = attach_task_by_pid(cgrp, buffer);
+ break;
+ case FILE_NOTIFY_ON_RELEASE:
+ clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+ if (simple_strtoul(buffer, NULL, 10) != 0)
+ set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ else
+ clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ break;
+ case FILE_RELEASE_AGENT:
+ {
+ struct cgroupfs_root *root = cgrp->root;
+ /* Strip trailing newline */
+ if (nbytes && (buffer[nbytes-1] == '\n')) {
+ buffer[nbytes-1] = 0;
+ }
+ if (nbytes < sizeof(root->release_agent_path)) {
+ /* We never write anything other than '\0'
+ * into the last char of release_agent_path,
+ * so it always remains a NUL-terminated
+ * string */
+ strncpy(root->release_agent_path, buffer, nbytes);
+ root->release_agent_path[nbytes] = 0;
+ } else {
+ retval = -ENOSPC;
+ }
+ break;
+ }
+ default:
+ retval = -EINVAL;
+ goto out2;
+ }
+
+ if (retval == 0)
+ retval = nbytes;
+out2:
+ mutex_unlock(&cgroup_mutex);
+out1:
+ kfree(buffer);
+ return retval;
+}
+
+static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+
+ if (!cft)
+ return -ENODEV;
+ if (cft->write)
+ return cft->write(cgrp, cft, file, buf, nbytes, ppos);
+ if (cft->write_uint)
+ return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos);
+ return -EINVAL;
+}
+
+static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft,
+ struct file *file,
+ char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ char tmp[64];
+ u64 val = cft->read_uint(cgrp, cft);
+ int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+}
+
+static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
+ struct cftype *cft,
+ struct file *file,
+ char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ enum cgroup_filetype type = cft->private;
+ char *page;
+ ssize_t retval = 0;
+ char *s;
+
+ if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ s = page;
+
+ switch (type) {
+ case FILE_RELEASE_AGENT:
+ {
+ struct cgroupfs_root *root;
+ size_t n;
+ mutex_lock(&cgroup_mutex);
+ root = cgrp->root;
+ n = strnlen(root->release_agent_path,
+ sizeof(root->release_agent_path));
+ n = min(n, (size_t) PAGE_SIZE);
+ strncpy(s, root->release_agent_path, n);
+ mutex_unlock(&cgroup_mutex);
+ s += n;
+ break;
+ }
+ default:
+ retval = -EINVAL;
+ goto out;
+ }
+ *s++ = '\n';
+
+ retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
+out:
+ free_page((unsigned long)page);
+ return retval;
+}
+
+static ssize_t cgroup_file_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+
+ if (!cft)
+ return -ENODEV;
+
+ if (cft->read)
+ return cft->read(cgrp, cft, file, buf, nbytes, ppos);
+ if (cft->read_uint)
+ return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos);
+ return -EINVAL;
+}
+
+static int cgroup_file_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct cftype *cft;
+
+ err = generic_file_open(inode, file);
+ if (err)
+ return err;
+
+ cft = __d_cft(file->f_dentry);
+ if (!cft)
+ return -ENODEV;
+ if (cft->open)
+ err = cft->open(inode, file);
+ else
+ err = 0;
+
+ return err;
+}
+
+static int cgroup_file_release(struct inode *inode, struct file *file)
+{
+ struct cftype *cft = __d_cft(file->f_dentry);
+ if (cft->release)
+ return cft->release(inode, file);
+ return 0;
+}
+
+/*
+ * cgroup_rename - Only allow simple rename of directories in place.
+ */
+static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ if (!S_ISDIR(old_dentry->d_inode->i_mode))
+ return -ENOTDIR;
+ if (new_dentry->d_inode)
+ return -EEXIST;
+ if (old_dir != new_dir)
+ return -EIO;
+ return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
+static struct file_operations cgroup_file_operations = {
+ .read = cgroup_file_read,
+ .write = cgroup_file_write,
+ .llseek = generic_file_llseek,
+ .open = cgroup_file_open,
+ .release = cgroup_file_release,
+};
+
+static struct inode_operations cgroup_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = cgroup_mkdir,
+ .rmdir = cgroup_rmdir,
+ .rename = cgroup_rename,
+};
+
+static int cgroup_create_file(struct dentry *dentry, int mode,
+ struct super_block *sb)
+{
+ static struct dentry_operations cgroup_dops = {
+ .d_iput = cgroup_diput,
+ };
+
+ struct inode *inode;
+
+ if (!dentry)
+ return -ENOENT;
+ if (dentry->d_inode)
+ return -EEXIST;
+
+ inode = cgroup_new_inode(mode, sb);
+ if (!inode)
+ return -ENOMEM;
+
+ if (S_ISDIR(mode)) {
+ inode->i_op = &cgroup_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+
+ /* start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+
+ /* start with the directory inode held, so that we can
+ * populate it without racing with another mkdir */
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
+ } else if (S_ISREG(mode)) {
+ inode->i_size = 0;
+ inode->i_fop = &cgroup_file_operations;
+ }
+ dentry->d_op = &cgroup_dops;
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+ return 0;
+}
+
+/*
+ * cgroup_create_dir - create a directory for an object.
+ * cgrp: the cgroup we create the directory for. It must have a
+ * valid ->parent field, and its ->dentry field is filled in here.
+ * dentry: dentry of the new cgroup
+ * mode: mode to set on the new directory.
+ */
+static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
+ int mode)
+{
+ struct dentry *parent;
+ int error = 0;
+
+ parent = cgrp->parent->dentry;
+ error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
+ if (!error) {
+ dentry->d_fsdata = cgrp;
+ inc_nlink(parent->d_inode);
+ cgrp->dentry = dentry;
+ dget(dentry);
+ }
+ dput(dentry);
+
+ return error;
+}
+
+int cgroup_add_file(struct cgroup *cgrp,
+ struct cgroup_subsys *subsys,
+ const struct cftype *cft)
+{
+ struct dentry *dir = cgrp->dentry;
+ struct dentry *dentry;
+ int error;
+
+ char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
+ if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
+ strcpy(name, subsys->name);
+ strcat(name, ".");
+ }
+ strcat(name, cft->name);
+ BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
+ dentry = lookup_one_len(name, dir, strlen(name));
+ if (!IS_ERR(dentry)) {
+ error = cgroup_create_file(dentry, 0644 | S_IFREG,
+ cgrp->root->sb);
+ if (!error)
+ dentry->d_fsdata = (void *)cft;
+ dput(dentry);
+ } else
+ error = PTR_ERR(dentry);
+ return error;
+}
+
+int cgroup_add_files(struct cgroup *cgrp,
+ struct cgroup_subsys *subsys,
+ const struct cftype cft[],
+ int count)
+{
+ int i, err;
+ for (i = 0; i < count; i++) {
+ err = cgroup_add_file(cgrp, subsys, &cft[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/* Count the number of tasks in a cgroup. */
+
+int cgroup_task_count(const struct cgroup *cgrp)
+{
+ int count = 0;
+ struct list_head *l;
+
+ read_lock(&css_set_lock);
+ l = cgrp->css_sets.next;
+ while (l != &cgrp->css_sets) {
+ struct cg_cgroup_link *link =
+ list_entry(l, struct cg_cgroup_link, cgrp_link_list);
+ count += atomic_read(&link->cg->ref.refcount);
+ l = l->next;
+ }
+ read_unlock(&css_set_lock);
+ return count;
+}
+
+/*
+ * Advance a cgroup iterator to the next css_set that has attached
+ * tasks, positioning it at the start of that css_set's task list.
+ */
+static void cgroup_advance_iter(struct cgroup *cgrp,
+ struct cgroup_iter *it)
+{
+ struct list_head *l = it->cg_link;
+ struct cg_cgroup_link *link;
+ struct css_set *cg;
+
+ /* Advance to the next non-empty css_set */
+ do {
+ l = l->next;
+ if (l == &cgrp->css_sets) {
+ it->cg_link = NULL;
+ return;
+ }
+ link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
+ cg = link->cg;
+ } while (list_empty(&cg->tasks));
+ it->cg_link = l;
+ it->task = cg->tasks.next;
+}
+
+void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+{
+ /*
+ * The first time anyone tries to iterate across a cgroup,
+ * we need to enable the list linking each css_set to its
+ * tasks, and fix up all existing tasks.
+ */
+ if (!use_task_css_set_links) {
+ struct task_struct *p, *g;
+ write_lock(&css_set_lock);
+ use_task_css_set_links = 1;
+ do_each_thread(g, p) {
+ task_lock(p);
+ if (list_empty(&p->cg_list))
+ list_add(&p->cg_list, &p->cgroups->tasks);
+ task_unlock(p);
+ } while_each_thread(g, p);
+ write_unlock(&css_set_lock);
+ }
+ read_lock(&css_set_lock);
+ it->cg_link = &cgrp->css_sets;
+ cgroup_advance_iter(cgrp, it);
+}
+
+struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
+ struct cgroup_iter *it)
+{
+ struct task_struct *res;
+ struct list_head *l = it->task;
+
+ /* If the iterator's cg_link is NULL, we have run out of tasks */
+ if (!it->cg_link)
+ return NULL;
+ res = list_entry(l, struct task_struct, cg_list);
+ /* Advance iterator to find next entry */
+ l = l->next;
+ if (l == &res->cgroups->tasks) {
+ /* We reached the end of this task list - move on to
+ * the next cg_cgroup_link */
+ cgroup_advance_iter(cgrp, it);
+ } else {
+ it->task = l;
+ }
+ return res;
+}
+
+void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+{
+ read_unlock(&css_set_lock);
+}
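The three functions above are the intended way to walk every task attached to a cgroup. Note that css_set_lock is read-held from cgroup_iter_start() to cgroup_iter_end(), so the loop body must not sleep. A minimal usage sketch (foo_visit() is an assumed callback), following the same pattern as pid_array_load() and cgroupstats_build() below:

    static void foo_for_each_task(struct cgroup *cgrp)
    {
            struct cgroup_iter it;
            struct task_struct *tsk;

            cgroup_iter_start(cgrp, &it);
            while ((tsk = cgroup_iter_next(cgrp, &it)))
                    foo_visit(tsk);         /* must not sleep */
            cgroup_iter_end(cgrp, &it);
    }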
+
+/*
+ * Stuff for reading the 'tasks' file.
+ *
+ * Reading this file can return large amounts of data if a cgroup has
+ * *lots* of attached tasks. So it may need several calls to read(),
+ * but we cannot guarantee that the information we produce is correct
+ * unless we produce it entirely atomically.
+ *
+ * Upon tasks file open(), a struct ctr_struct is allocated holding a
+ * pointer to a buffer (also allocated here). The struct ctr_struct *
+ * is stored in file->private_data. Its resources will be freed by
+ * release() when the file is closed. The buffer holds the formatted
+ * PIDs and is then used by read().
+ */
+struct ctr_struct {
+ char *buf;
+ int bufsz;
+};
+
+/*
+ * Load into 'pidarray' up to 'npids' of the tasks using cgroup
+ * 'cgrp'. Return actual number of pids loaded. No need to
+ * task_lock(p) when reading out p->cgroup, since we're in an RCU
+ * read section, so the css_set can't go away, and is
+ * immutable after creation.
+ */
+static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
+{
+ int n = 0;
+ struct cgroup_iter it;
+ struct task_struct *tsk;
+ cgroup_iter_start(cgrp, &it);
+ while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ if (unlikely(n == npids))
+ break;
+ pidarray[n++] = task_pid_nr(tsk);
+ }
+ cgroup_iter_end(cgrp, &it);
+ return n;
+}
+
+/**
+ * cgroupstats_build - build and fill cgroupstats so that taskstats can
+ * export it to user space.
+ *
+ * @stats: cgroupstats to fill information into
+ * @dentry: a dentry entry belonging to the cgroup for which stats have
+ * been requested.
+ */
+int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
+{
+ int ret = -EINVAL;
+ struct cgroup *cgrp;
+ struct cgroup_iter it;
+ struct task_struct *tsk;
+ /*
+ * Validate dentry by checking the superblock operations
+ */
+ if (dentry->d_sb->s_op != &cgroup_ops)
+ goto err;
+
+ ret = 0;
+ cgrp = dentry->d_fsdata;
+ rcu_read_lock();
+
+ cgroup_iter_start(cgrp, &it);
+ while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ switch (tsk->state) {
+ case TASK_RUNNING:
+ stats->nr_running++;
+ break;
+ case TASK_INTERRUPTIBLE:
+ stats->nr_sleeping++;
+ break;
+ case TASK_UNINTERRUPTIBLE:
+ stats->nr_uninterruptible++;
+ break;
+ case TASK_STOPPED:
+ stats->nr_stopped++;
+ break;
+ default:
+ if (delayacct_is_task_waiting_on_io(tsk))
+ stats->nr_io_wait++;
+ break;
+ }
+ }
+ cgroup_iter_end(cgrp, &it);
+
+ rcu_read_unlock();
+err:
+ return ret;
+}
+
+static int cmppid(const void *a, const void *b)
+{
+ return *(pid_t *)a - *(pid_t *)b;
+}
+
+/*
+ * Convert array 'a' of 'npids' pid_t's to a string of newline separated
+ * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
+ * count 'cnt' of how many chars would be written if buf were large enough.
+ */
+static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < npids; i++)
+ cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
+ return cnt;
+}
+
+/*
+ * Handle an open on the 'tasks' file. Prepare a buffer listing the
+ * process IDs of the tasks currently attached to the cgroup being opened.
+ *
+ * Does not require any specific cgroup mutexes, and does not take any.
+ */
+static int cgroup_tasks_open(struct inode *unused, struct file *file)
+{
+ struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+ struct ctr_struct *ctr;
+ pid_t *pidarray;
+ int npids;
+ char c;
+
+ if (!(file->f_mode & FMODE_READ))
+ return 0;
+
+ ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
+ if (!ctr)
+ goto err0;
+
+ /*
+ * If cgroup gets more users after we read count, we won't have
+ * enough space - tough. This race is indistinguishable to the
+ * caller from the case that the additional cgroup users didn't
+ * show up until sometime later on.
+ */
+ npids = cgroup_task_count(cgrp);
+ if (npids) {
+ pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
+ if (!pidarray)
+ goto err1;
+
+ npids = pid_array_load(pidarray, npids, cgrp);
+ sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
+
+ /* Call pid_array_to_buf() twice, first just to get bufsz */
+ ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
+ ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
+ if (!ctr->buf)
+ goto err2;
+ ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
+
+ kfree(pidarray);
+ } else {
+ ctr->buf = NULL;
+ ctr->bufsz = 0;
+ }
+ file->private_data = ctr;
+ return 0;
+
+err2:
+ kfree(pidarray);
+err1:
+ kfree(ctr);
+err0:
+ return -ENOMEM;
+}
+
+static ssize_t cgroup_tasks_read(struct cgroup *cgrp,
+ struct cftype *cft,
+ struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct ctr_struct *ctr = file->private_data;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
+}
+
+static int cgroup_tasks_release(struct inode *unused_inode,
+ struct file *file)
+{
+ struct ctr_struct *ctr;
+
+ if (file->f_mode & FMODE_READ) {
+ ctr = file->private_data;
+ kfree(ctr->buf);
+ kfree(ctr);
+ }
+ return 0;
+}
+
+static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
+ struct cftype *cft)
+{
+ return notify_on_release(cgrp);
+}
+
+static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft)
+{
+ return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+}
+
+/*
+ * for the common functions, 'private' gives the type of file
+ */
+static struct cftype files[] = {
+ {
+ .name = "tasks",
+ .open = cgroup_tasks_open,
+ .read = cgroup_tasks_read,
+ .write = cgroup_common_file_write,
+ .release = cgroup_tasks_release,
+ .private = FILE_TASKLIST,
+ },
+
+ {
+ .name = "notify_on_release",
+ .read_uint = cgroup_read_notify_on_release,
+ .write = cgroup_common_file_write,
+ .private = FILE_NOTIFY_ON_RELEASE,
+ },
+
+ {
+ .name = "releasable",
+ .read_uint = cgroup_read_releasable,
+ .private = FILE_RELEASABLE,
+ }
+};
+
+static struct cftype cft_release_agent = {
+ .name = "release_agent",
+ .read = cgroup_common_file_read,
+ .write = cgroup_common_file_write,
+ .private = FILE_RELEASE_AGENT,
+};
+
+static int cgroup_populate_dir(struct cgroup *cgrp)
+{
+ int err;
+ struct cgroup_subsys *ss;
+
+ /* First clear out any existing files */
+ cgroup_clear_directory(cgrp->dentry);
+
+ err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
+ if (err < 0)
+ return err;
+
+ if (cgrp == cgrp->top_cgroup) {
+ if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
+ return err;
+ }
+
+ for_each_subsys(cgrp->root, ss) {
+ if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void init_cgroup_css(struct cgroup_subsys_state *css,
+ struct cgroup_subsys *ss,
+ struct cgroup *cgrp)
+{
+ css->cgroup = cgrp;
+ atomic_set(&css->refcnt, 0);
+ css->flags = 0;
+ if (cgrp == dummytop)
+ set_bit(CSS_ROOT, &css->flags);
+ BUG_ON(cgrp->subsys[ss->subsys_id]);
+ cgrp->subsys[ss->subsys_id] = css;
+}
+
+/*
+ * cgroup_create - create a cgroup
+ * parent: cgroup that will be parent of the new cgroup.
+ * name: name of the new cgroup. Will be strcpy'ed.
+ * mode: mode to set on new inode
+ *
+ * Must be called with the mutex on the parent inode held
+ */
+
+static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
+ int mode)
+{
+ struct cgroup *cgrp;
+ struct cgroupfs_root *root = parent->root;
+ int err = 0;
+ struct cgroup_subsys *ss;
+ struct super_block *sb = root->sb;
+
+ cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
+ if (!cgrp)
+ return -ENOMEM;
+
+ /* Grab a reference on the superblock so the hierarchy doesn't
+ * get deleted on unmount if there are child cgroups. This
+ * can be done outside cgroup_mutex, since the sb can't
+ * disappear while someone has an open control file on the
+ * fs */
+ atomic_inc(&sb->s_active);
+
+ mutex_lock(&cgroup_mutex);
+
+ cgrp->flags = 0;
+ INIT_LIST_HEAD(&cgrp->sibling);
+ INIT_LIST_HEAD(&cgrp->children);
+ INIT_LIST_HEAD(&cgrp->css_sets);
+ INIT_LIST_HEAD(&cgrp->release_list);
+
+ cgrp->parent = parent;
+ cgrp->root = parent->root;
+ cgrp->top_cgroup = parent->top_cgroup;
+
+ for_each_subsys(root, ss) {
+ struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+ if (IS_ERR(css)) {
+ err = PTR_ERR(css);
+ goto err_destroy;
+ }
+ init_cgroup_css(css, ss, cgrp);
+ }
+
+ list_add(&cgrp->sibling, &cgrp->parent->children);
+ root->number_of_cgroups++;
+
+ err = cgroup_create_dir(cgrp, dentry, mode);
+ if (err < 0)
+ goto err_remove;
+
+ /* The cgroup directory was pre-locked for us */
+ BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
+
+ err = cgroup_populate_dir(cgrp);
+ /* If err < 0, we have a half-filled directory - oh well ;) */
+
+ mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+
+ return 0;
+
+ err_remove:
+
+ list_del(&cgrp->sibling);
+ root->number_of_cgroups--;
+
+ err_destroy:
+
+ for_each_subsys(root, ss) {
+ if (cgrp->subsys[ss->subsys_id])
+ ss->destroy(ss, cgrp);
+ }
+
+ mutex_unlock(&cgroup_mutex);
+
+ /* Release the reference count that we took on the superblock */
+ deactivate_super(sb);
+
+ kfree(cgrp);
+ return err;
+}
+
+static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ struct cgroup *c_parent = dentry->d_parent->d_fsdata;
+
+ /* the vfs holds inode->i_mutex already */
+ return cgroup_create(c_parent, dentry, mode | S_IFDIR);
+}
+
+static inline int cgroup_has_css_refs(struct cgroup *cgrp)
+{
+ /* Check the reference count on each subsystem. Since we
+ * already established that there are no tasks in the
+ * cgroup, if the css refcount is also 0, then there should
+ * be no outstanding references, so the subsystem is safe to
+ * destroy. We scan across all subsystems rather than using
+ * the per-hierarchy linked list of mounted subsystems since
+ * we can be called via check_for_release() with no
+ * synchronization other than RCU, and the subsystem linked
+ * list isn't RCU-safe */
+ int i;
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ struct cgroup_subsys_state *css;
+ /* Skip subsystems not in this hierarchy */
+ if (ss->root != cgrp->root)
+ continue;
+ css = cgrp->subsys[ss->subsys_id];
+ /* When called from check_for_release() it's possible
+ * that by this point the cgroup has been removed
+ * and the css deleted. But a false-positive doesn't
+ * matter, since it can only happen if the cgroup
+ * has been deleted and hence no longer needs the
+ * release agent to be called anyway. */
+ if (css && atomic_read(&css->refcnt)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
+{
+ struct cgroup *cgrp = dentry->d_fsdata;
+ struct dentry *d;
+ struct cgroup *parent;
+ struct cgroup_subsys *ss;
+ struct super_block *sb;
+ struct cgroupfs_root *root;
+
+ /* the vfs holds both inode->i_mutex already */
+
+ mutex_lock(&cgroup_mutex);
+ if (atomic_read(&cgrp->count) != 0) {
+ mutex_unlock(&cgroup_mutex);
+ return -EBUSY;
+ }
+ if (!list_empty(&cgrp->children)) {
+ mutex_unlock(&cgroup_mutex);
+ return -EBUSY;
+ }
+
+ parent = cgrp->parent;
+ root = cgrp->root;
+ sb = root->sb;
+
+ if (cgroup_has_css_refs(cgrp)) {
+ mutex_unlock(&cgroup_mutex);
+ return -EBUSY;
+ }
+
+ for_each_subsys(root, ss) {
+ if (cgrp->subsys[ss->subsys_id])
+ ss->destroy(ss, cgrp);
+ }
+
+ spin_lock(&release_list_lock);
+ set_bit(CGRP_REMOVED, &cgrp->flags);
+ if (!list_empty(&cgrp->release_list))
+ list_del(&cgrp->release_list);
+ spin_unlock(&release_list_lock);
+ /* delete my sibling from parent->children */
+ list_del(&cgrp->sibling);
+ spin_lock(&cgrp->dentry->d_lock);
+ d = dget(cgrp->dentry);
+ cgrp->dentry = NULL;
+ spin_unlock(&d->d_lock);
+
+ cgroup_d_remove_dir(d);
+ dput(d);
+ root->number_of_cgroups--;
+
+ set_bit(CGRP_RELEASABLE, &parent->flags);
+ check_for_release(parent);
+
+ mutex_unlock(&cgroup_mutex);
+ /* Drop the active superblock reference that we took when we
+ * created the cgroup */
+ deactivate_super(sb);
+ return 0;
+}
+
+static void cgroup_init_subsys(struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+ struct list_head *l;
+ printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
+
+ /* Create the top cgroup state for this subsystem */
+ ss->root = &rootnode;
+ css = ss->create(ss, dummytop);
+ /* We don't handle early failures gracefully */
+ BUG_ON(IS_ERR(css));
+ init_cgroup_css(css, ss, dummytop);
+
+ /* Update all css_set objects to contain a subsys
+ * pointer to this state - since the subsystem is
+ * newly registered, all tasks and hence all css_set
+ * objects are in the subsystem's top cgroup. */
+ write_lock(&css_set_lock);
+ l = &init_css_set.list;
+ do {
+ struct css_set *cg =
+ list_entry(l, struct css_set, list);
+ cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
+ l = l->next;
+ } while (l != &init_css_set.list);
+ write_unlock(&css_set_lock);
+
+ /* If this subsystem requested that it be notified with fork
+ * events, we should send it one now for every process in the
+ * system */
+ if (ss->fork) {
+ struct task_struct *g, *p;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ ss->fork(ss, p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+ }
+
+ need_forkexit_callback |= ss->fork || ss->exit;
+
+ ss->active = 1;
+}
+
+/**
+ * cgroup_init_early - initialize cgroups at system boot, and
+ * initialize any subsystems that request early init.
+ */
+int __init cgroup_init_early(void)
+{
+ int i;
+ kref_init(&init_css_set.ref);
+ kref_get(&init_css_set.ref);
+ INIT_LIST_HEAD(&init_css_set.list);
+ INIT_LIST_HEAD(&init_css_set.cg_links);
+ INIT_LIST_HEAD(&init_css_set.tasks);
+ css_set_count = 1;
+ init_cgroup_root(&rootnode);
+ list_add(&rootnode.root_list, &roots);
+ root_count = 1;
+ init_task.cgroups = &init_css_set;
+
+ init_css_set_link.cg = &init_css_set;
+ list_add(&init_css_set_link.cgrp_link_list,
+ &rootnode.top_cgroup.css_sets);
+ list_add(&init_css_set_link.cg_link_list,
+ &init_css_set.cg_links);
+
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+
+ BUG_ON(!ss->name);
+ BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
+ BUG_ON(!ss->create);
+ BUG_ON(!ss->destroy);
+ if (ss->subsys_id != i) {
+ printk(KERN_ERR "Subsys %s id == %d\n",
+ ss->name, ss->subsys_id);
+ BUG();
+ }
+
+ if (ss->early_init)
+ cgroup_init_subsys(ss);
+ }
+ return 0;
+}
+
+/**
+ * cgroup_init - register cgroup filesystem and /proc file, and
+ * initialize any subsystems that didn't request early init.
+ */
+int __init cgroup_init(void)
+{
+ int err;
+ int i;
+ struct proc_dir_entry *entry;
+
+ err = bdi_init(&cgroup_backing_dev_info);
+ if (err)
+ return err;
+
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (!ss->early_init)
+ cgroup_init_subsys(ss);
+ }
+
+ err = register_filesystem(&cgroup_fs_type);
+ if (err < 0)
+ goto out;
+
+ entry = create_proc_entry("cgroups", 0, NULL);
+ if (entry)
+ entry->proc_fops = &proc_cgroupstats_operations;
+
+out:
+ if (err)
+ bdi_destroy(&cgroup_backing_dev_info);
+
+ return err;
+}
+
+/*
+ * proc_cgroup_show()
+ * - Print task's cgroup paths into seq_file, one line for each hierarchy
+ * - Used for /proc/<pid>/cgroup.
+ * - No need to task_lock(tsk) on this tsk->cgroups reference, as it
+ * doesn't really matter if tsk->cgroups changes after we read it,
+ * and we take cgroup_mutex, keeping attach_task() from changing it
+ * anyway. No need to check that tsk->cgroups != NULL, thanks to
+ * the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
+ * cgroup to top_cgroup.
+ */
+
+/* TODO: Use a proper seq_file iterator */
+static int proc_cgroup_show(struct seq_file *m, void *v)
+{
+ struct pid *pid;
+ struct task_struct *tsk;
+ char *buf;
+ int retval;
+ struct cgroupfs_root *root;
+
+ retval = -ENOMEM;
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ retval = -ESRCH;
+ pid = m->private;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk)
+ goto out_free;
+
+ retval = 0;
+
+ mutex_lock(&cgroup_mutex);
+
+ for_each_root(root) {
+ struct cgroup_subsys *ss;
+ struct cgroup *cgrp;
+ int subsys_id;
+ int count = 0;
+
+ /* Skip this hierarchy if it has no active subsystems */
+ if (!root->actual_subsys_bits)
+ continue;
+ for_each_subsys(root, ss)
+ seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+ seq_putc(m, ':');
+ get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
+ cgrp = task_cgroup(tsk, subsys_id);
+ retval = cgroup_path(cgrp, buf, PAGE_SIZE);
+ if (retval < 0)
+ goto out_unlock;
+ seq_puts(m, buf);
+ seq_putc(m, '\n');
+ }
+
+out_unlock:
+ mutex_unlock(&cgroup_mutex);
+ put_task_struct(tsk);
+out_free:
+ kfree(buf);
+out:
+ return retval;
+}
+
+static int cgroup_open(struct inode *inode, struct file *file)
+{
+ struct pid *pid = PROC_I(inode)->pid;
+ return single_open(file, proc_cgroup_show, pid);
+}
+
+struct file_operations proc_cgroup_operations = {
+ .open = cgroup_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
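Given the loop above, each line of /proc/<pid>/cgroup names the comma-separated subsystems bound to one active hierarchy, a colon, and the task's cgroup path within that hierarchy. A hypothetical example (subsystem names and paths are illustrative only):

    cpuset:/batch/nightly
    cpu,cpuacct:/interactive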
+
+/* Display information about each subsystem and each hierarchy */
+static int proc_cgroupstats_show(struct seq_file *m, void *v)
+{
+ int i;
+ struct cgroupfs_root *root;
+
+ seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n");
+ mutex_lock(&cgroup_mutex);
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ seq_printf(m, "%s\t%lu\t%d\n",
+ ss->name, ss->root->subsys_bits,
+ ss->root->number_of_cgroups);
+ }
+ mutex_unlock(&cgroup_mutex);
+ return 0;
+}
+
+static int cgroupstats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_cgroupstats_show, 0);
+}
+
+static struct file_operations proc_cgroupstats_operations = {
+ .open = cgroupstats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
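Correspondingly, /proc/cgroups shows one row per registered subsystem: its name, the subsys_bits bitmask of the hierarchy it is attached to (printed from ss->root->subsys_bits), and the number of cgroups in that hierarchy. A hypothetical rendering (values are illustrative):

    #subsys_name    hierarchy       num_cgroups
    cpuset          1               4
    debug           0               1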
+
+/**
+ * cgroup_fork - attach a newly forked task to its parent's cgroup.
+ * @tsk: pointer to task_struct of forking parent process.
+ *
+ * Description: A task inherits its parent's cgroup at fork().
+ *
+ * A pointer to the shared css_set was automatically copied in
+ * fork.c by dup_task_struct(). However, we ignore that copy, since
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer. attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced css_set to be removed and freed.
+ *
+ * At the point that cgroup_fork() is called, 'current' is the parent
+ * task, and the passed argument 'child' points to the child task.
+ */
+void cgroup_fork(struct task_struct *child)
+{
+ task_lock(current);
+ child->cgroups = current->cgroups;
+ get_css_set(child->cgroups);
+ task_unlock(current);
+ INIT_LIST_HEAD(&child->cg_list);
+}
+
+/**
+ * cgroup_fork_callbacks - called on a new task very soon before
+ * adding it to the tasklist. No need to take any locks since no one
+ * can be operating on this task.
+ */
+void cgroup_fork_callbacks(struct task_struct *child)
+{
+ if (need_forkexit_callback) {
+ int i;
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss->fork)
+ ss->fork(ss, child);
+ }
+ }
+}
+
+/**
+ * cgroup_post_fork - called on a new task after adding it to the
+ * task list. Adds the task to the list running through its css_set
+ * if necessary. Has to be after the task is visible on the task list
+ * in case we race with the first call to cgroup_iter_start() - to
+ * guarantee that the new task ends up on its list. */
+void cgroup_post_fork(struct task_struct *child)
+{
+ if (use_task_css_set_links) {
+ write_lock(&css_set_lock);
+ if (list_empty(&child->cg_list))
+ list_add(&child->cg_list, &child->cgroups->tasks);
+ write_unlock(&css_set_lock);
+ }
+}
+
+/**
+ * cgroup_exit - detach cgroup from exiting task
+ * @tsk: pointer to task_struct of exiting process
+ *
+ * Description: Detach cgroup from @tsk and release it.
+ *
+ * Note that cgroups marked notify_on_release force every task in
+ * them to take the global cgroup_mutex when exiting.
+ * This could impact scaling on very large systems. Be reluctant to
+ * use notify_on_release cgroups where very high task exit scaling
+ * is required on large systems.
+ *
+ * the_top_cgroup_hack:
+ *
+ * Set the exiting task's cgroup to the root cgroup (top_cgroup).
+ *
+ * We call cgroup_exit() while the task is still competent to
+ * handle notify_on_release(), then leave the task attached to the
+ * root cgroup in each hierarchy for the remainder of its exit.
+ *
+ * To do this properly, we would increment the reference count on
+ * top_cgroup, and near the very end of the kernel/exit.c do_exit()
+ * code we would add a second cgroup function call, to drop that
+ * reference. This would just create an unnecessary hot spot on
+ * the top_cgroup reference count, to no avail.
+ *
+ * Normally, holding a reference to a cgroup without bumping its
+ * count is unsafe. The cgroup could go away, or someone could
+ * attach us to a different cgroup, decrementing the count on
+ * the first cgroup that we never incremented. But in this case,
+ * top_cgroup isn't going away, and either task has PF_EXITING set,
+ * which wards off any attach_task() attempts, or task is a failed
+ * fork, never visible to attach_task.
+ *
+ */
+void cgroup_exit(struct task_struct *tsk, int run_callbacks)
+{
+ int i;
+ struct css_set *cg;
+
+ if (run_callbacks && need_forkexit_callback) {
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss->exit)
+ ss->exit(ss, tsk);
+ }
+ }
+
+ /*
+ * Unlink from the css_set task list if necessary.
+ * Optimistically check cg_list before taking
+ * css_set_lock
+ */
+ if (!list_empty(&tsk->cg_list)) {
+ write_lock(&css_set_lock);
+ if (!list_empty(&tsk->cg_list))
+ list_del(&tsk->cg_list);
+ write_unlock(&css_set_lock);
+ }
+
+ /* Reassign the task to the init_css_set. */
+ task_lock(tsk);
+ cg = tsk->cgroups;
+ tsk->cgroups = &init_css_set;
+ task_unlock(tsk);
+ if (cg)
+ put_css_set_taskexit(cg);
+}
+
+/**
+ * cgroup_clone - duplicate the current cgroup in the hierarchy
+ * that the given subsystem is attached to, and move this task into
+ * the new child
+ */
+int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
+{
+ struct dentry *dentry;
+ int ret = 0;
+ char nodename[MAX_CGROUP_TYPE_NAMELEN];
+ struct cgroup *parent, *child;
+ struct inode *inode;
+ struct css_set *cg;
+ struct cgroupfs_root *root;
+ struct cgroup_subsys *ss;
+
+ /* We shouldn't be called by an unregistered subsystem */
+ BUG_ON(!subsys->active);
+
+ /* First figure out what hierarchy and cgroup we're dealing
+ * with, and pin them so we can drop cgroup_mutex */
+ mutex_lock(&cgroup_mutex);
+ again:
+ root = subsys->root;
+ if (root == &rootnode) {
+ printk(KERN_INFO
+ "Not cloning cgroup for unused subsystem %s\n",
+ subsys->name);
+ mutex_unlock(&cgroup_mutex);
+ return 0;
+ }
+ cg = tsk->cgroups;
+ parent = task_cgroup(tsk, subsys->subsys_id);
+
+ snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
+
+ /* Pin the hierarchy */
+ atomic_inc(&parent->root->sb->s_active);
+
+ /* Keep the cgroup alive */
+ get_css_set(cg);
+ mutex_unlock(&cgroup_mutex);
+
+ /* Now do the VFS work to create a cgroup */
+ inode = parent->dentry->d_inode;
+
+ /* Hold the parent directory mutex across this operation to
+ * stop anyone else deleting the new cgroup */
+ mutex_lock(&inode->i_mutex);
+ dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
+ if (IS_ERR(dentry)) {
+ printk(KERN_INFO
+ "Couldn't allocate dentry for %s: %ld\n", nodename,
+ PTR_ERR(dentry));
+ ret = PTR_ERR(dentry);
+ goto out_release;
+ }
+
+ /* Create the cgroup directory, which also creates the cgroup */
+ ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
+ child = __d_cgrp(dentry);
+ dput(dentry);
+ if (ret) {
+ printk(KERN_INFO
+ "Failed to create cgroup %s: %d\n", nodename,
+ ret);
+ goto out_release;
+ }
+
+ if (!child) {
+ printk(KERN_INFO
+ "Couldn't find new cgroup %s\n", nodename);
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
+ /* The cgroup now exists. Retake cgroup_mutex and check
+ * that we're still in the same state that we thought we
+ * were. */
+ mutex_lock(&cgroup_mutex);
+ if ((root != subsys->root) ||
+ (parent != task_cgroup(tsk, subsys->subsys_id))) {
+ /* Aargh, we raced ... */
+ mutex_unlock(&inode->i_mutex);
+ put_css_set(cg);
+
+ deactivate_super(parent->root->sb);
+ /* The cgroup is still accessible in the VFS, but
+ * we're not going to try to rmdir() it at this
+ * point. */
+ printk(KERN_INFO
+ "Race in cgroup_clone() - leaking cgroup %s\n",
+ nodename);
+ goto again;
+ }
+
+ /* do any required auto-setup */
+ for_each_subsys(root, ss) {
+ if (ss->post_clone)
+ ss->post_clone(ss, child);
+ }
+
+ /* All seems fine. Finish by moving the task into the new cgroup */
+ ret = attach_task(child, tsk);
+ mutex_unlock(&cgroup_mutex);
+
+ out_release:
+ mutex_unlock(&inode->i_mutex);
+
+ mutex_lock(&cgroup_mutex);
+ put_css_set(cg);
+ mutex_unlock(&cgroup_mutex);
+ deactivate_super(parent->root->sb);
+ return ret;
+}
+
+/*
+ * See if "cgrp" is a descendant of the current task's cgroup in
+ * the appropriate hierarchy
+ *
+ * If we are sending in dummytop, then presumably we are creating
+ * the top cgroup in the subsystem.
+ *
+ * Called only by the ns (nsproxy) cgroup.
+ */
+int cgroup_is_descendant(const struct cgroup *cgrp)
+{
+ int ret;
+ struct cgroup *target;
+ int subsys_id;
+
+ if (cgrp == dummytop)
+ return 1;
+
+ get_first_subsys(cgrp, NULL, &subsys_id);
+ target = task_cgroup(current, subsys_id);
+ while (cgrp != target && cgrp != cgrp->top_cgroup)
+ cgrp = cgrp->parent;
+ ret = (cgrp == target);
+ return ret;
+}
+
+static void check_for_release(struct cgroup *cgrp)
+{
+ /* All of these checks rely on RCU to keep the cgroup
+ * structure alive */
+ if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
+ && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
+ /* Control Group is currently removable. If it's not
+ * already queued for a userspace notification, queue
+ * it now */
+ int need_schedule_work = 0;
+ spin_lock(&release_list_lock);
+ if (!cgroup_is_removed(cgrp) &&
+ list_empty(&cgrp->release_list)) {
+ list_add(&cgrp->release_list, &release_list);
+ need_schedule_work = 1;
+ }
+ spin_unlock(&release_list_lock);
+ if (need_schedule_work)
+ schedule_work(&release_agent_work);
+ }
+}
+
+void __css_put(struct cgroup_subsys_state *css)
+{
+ struct cgroup *cgrp = css->cgroup;
+ rcu_read_lock();
+ if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) {
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * Notify userspace when a cgroup is released, by running the
+ * configured release agent with the name of the cgroup (path
+ * relative to the root of cgroup file system) as the argument.
+ *
+ * Most likely, this user command will try to rmdir this cgroup.
+ *
+ * This races with the possibility that some other task will be
+ * attached to this cgroup before it is removed, or that some other
+ * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
+ * The presumed 'rmdir' will fail quietly if this cgroup is no longer
+ * unused, and this cgroup will be reprieved from its death sentence,
+ * to continue to serve a useful existence. Next time it's released,
+ * we will get notified again, if it still has 'notify_on_release' set.
+ *
+ * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
+ * means only wait until the task is successfully execve()'d. The
+ * separate release agent task is forked by call_usermodehelper(),
+ * then control in this thread returns here, without waiting for the
+ * release agent task. We don't bother to wait because the caller of
+ * this routine has no use for the exit status of the release agent
+ * task, so no sense holding our caller up for that.
+ *
+ */
+
+static void cgroup_release_agent(struct work_struct *work)
+{
+ BUG_ON(work != &release_agent_work);
+ mutex_lock(&cgroup_mutex);
+ spin_lock(&release_list_lock);
+ while (!list_empty(&release_list)) {
+ char *argv[3], *envp[3];
+ int i;
+ char *pathbuf;
+ struct cgroup *cgrp = list_entry(release_list.next,
+ struct cgroup,
+ release_list);
+ list_del_init(&cgrp->release_list);
+ spin_unlock(&release_list_lock);
+ pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pathbuf) {
+ spin_lock(&release_list_lock);
+ continue;
+ }
+
+ if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) {
+ kfree(pathbuf);
+ spin_lock(&release_list_lock);
+ continue;
+ }
+
+ i = 0;
+ argv[i++] = cgrp->root->release_agent_path;
+ argv[i++] = (char *)pathbuf;
+ argv[i] = NULL;
+
+ i = 0;
+ /* minimal command environment */
+ envp[i++] = "HOME=/";
+ envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[i] = NULL;
+
+ /* Drop the lock while we invoke the usermode helper,
+ * since the exec could involve hitting disk and hence
+ * be a slow process */
+ mutex_unlock(&cgroup_mutex);
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ kfree(pathbuf);
+ mutex_lock(&cgroup_mutex);
+ spin_lock(&release_list_lock);
+ }
+ spin_unlock(&release_list_lock);
+ mutex_unlock(&cgroup_mutex);
+}
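To make the hand-off above concrete: the helper is invoked as '<release_agent_path> /<cgroup path>' with only the minimal HOME/PATH environment built above, and will typically (though not necessarily) try to rmdir the now-unused group. A hypothetical minimal agent, assuming the hierarchy is mounted at /dev/cgroup:

    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            char path[4096];

            if (argc < 2)
                    return 1;
            /* argv[1] is the cgroup's path relative to the hierarchy root. */
            snprintf(path, sizeof(path), "/dev/cgroup%s", argv[1]);
            return rmdir(path) ? 1 : 0;
    }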
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
new file mode 100644
index 000000000000..37301e877cb0
--- /dev/null
+++ b/kernel/cgroup_debug.c
@@ -0,0 +1,97 @@
+/*
+ * kernel/cgroup_debug.c - Example cgroup subsystem that
+ * exposes debug info
+ *
+ * Copyright (C) Google Inc, 2007
+ *
+ * Developed by Paul Menage (menage@google.com)
+ *
+ */
+
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <asm/atomic.h>
+
+static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
+
+ if (!css)
+ return ERR_PTR(-ENOMEM);
+
+ return css;
+}
+
+static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ kfree(cont->subsys[debug_subsys_id]);
+}
+
+static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
+{
+ return atomic_read(&cont->count);
+}
+
+static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
+{
+ u64 count;
+
+ cgroup_lock();
+ count = cgroup_task_count(cont);
+ cgroup_unlock();
+ return count;
+}
+
+static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
+{
+ return (u64)(long)current->cgroups;
+}
+
+static u64 current_css_set_refcount_read(struct cgroup *cont,
+ struct cftype *cft)
+{
+ u64 count;
+
+ rcu_read_lock();
+ count = atomic_read(&current->cgroups->ref.refcount);
+ rcu_read_unlock();
+ return count;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "cgroup_refcount",
+ .read_uint = cgroup_refcount_read,
+ },
+ {
+ .name = "taskcount",
+ .read_uint = taskcount_read,
+ },
+
+ {
+ .name = "current_css_set",
+ .read_uint = current_css_set_read,
+ },
+
+ {
+ .name = "current_css_set_refcount",
+ .read_uint = current_css_set_refcount_read,
+ },
+};
+
+static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+struct cgroup_subsys debug_subsys = {
+ .name = "debug",
+ .create = debug_create,
+ .destroy = debug_destroy,
+ .populate = debug_populate,
+ .subsys_id = debug_subsys_id,
+};
diff --git a/kernel/compat.c b/kernel/compat.c
index 3bae3742c2aa..42a1ed4b61b1 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -40,62 +40,27 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
-static long compat_nanosleep_restart(struct restart_block *restart)
-{
- unsigned long expire = restart->arg0, now = jiffies;
- struct compat_timespec __user *rmtp;
-
- /* Did it expire while we handled signals? */
- if (!time_after(expire, now))
- return 0;
-
- expire = schedule_timeout_interruptible(expire - now);
- if (expire == 0)
- return 0;
-
- rmtp = (struct compat_timespec __user *)restart->arg1;
- if (rmtp) {
- struct compat_timespec ct;
- struct timespec t;
-
- jiffies_to_timespec(expire, &t);
- ct.tv_sec = t.tv_sec;
- ct.tv_nsec = t.tv_nsec;
- if (copy_to_user(rmtp, &ct, sizeof(ct)))
- return -EFAULT;
- }
- /* The 'restart' block is already filled in */
- return -ERESTART_RESTARTBLOCK;
-}
-
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp)
+ struct compat_timespec __user *rmtp)
{
- struct timespec t;
- struct restart_block *restart;
- unsigned long expire;
+ struct timespec tu, rmt;
+ long ret;
- if (get_compat_timespec(&t, rqtp))
+ if (get_compat_timespec(&tu, rqtp))
return -EFAULT;
- if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
+ if (!timespec_valid(&tu))
return -EINVAL;
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
- expire = schedule_timeout_interruptible(expire);
- if (expire == 0)
- return 0;
+ ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
+ CLOCK_MONOTONIC);
- if (rmtp) {
- jiffies_to_timespec(expire, &t);
- if (put_compat_timespec(&t, rmtp))
+ if (ret && rmtp) {
+ if (put_compat_timespec(&rmt, rmtp))
return -EFAULT;
}
- restart = &current_thread_info()->restart_block;
- restart->fn = compat_nanosleep_restart;
- restart->arg0 = jiffies + expire;
- restart->arg1 = (unsigned long) rmtp;
- return -ERESTART_RESTARTBLOCK;
+
+ return ret;
}
static inline long get_compat_itimerval(struct itimerval *o,
@@ -247,8 +212,8 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
int ret;
mm_segment_t old_fs = get_fs ();
- if (resource >= RLIM_NLIMITS)
- return -EINVAL;
+ if (resource >= RLIM_NLIMITS)
+ return -EINVAL;
if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
__get_user(r.rlim_cur, &rlim->rlim_cur) ||
@@ -477,21 +442,21 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
int get_compat_itimerspec(struct itimerspec *dst,
const struct compat_itimerspec __user *src)
-{
+{
if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
get_compat_timespec(&dst->it_value, &src->it_value))
return -EFAULT;
return 0;
-}
+}
int put_compat_itimerspec(struct compat_itimerspec __user *dst,
const struct itimerspec *src)
-{
+{
if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
put_compat_timespec(&src->it_value, &dst->it_value))
return -EFAULT;
return 0;
-}
+}
long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
@@ -512,9 +477,9 @@ long compat_sys_timer_create(clockid_t which_clock,
}
long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
+ struct compat_itimerspec __user *new,
struct compat_itimerspec __user *old)
-{
+{
long err;
mm_segment_t oldfs;
struct itimerspec newts, oldts;
@@ -522,58 +487,58 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
if (!new)
return -EINVAL;
if (get_compat_itimerspec(&newts, new))
- return -EFAULT;
+ return -EFAULT;
oldfs = get_fs();
set_fs(KERNEL_DS);
err = sys_timer_settime(timer_id, flags,
(struct itimerspec __user *) &newts,
(struct itimerspec __user *) &oldts);
- set_fs(oldfs);
+ set_fs(oldfs);
if (!err && old && put_compat_itimerspec(old, &oldts))
return -EFAULT;
return err;
-}
+}
long compat_sys_timer_gettime(timer_t timer_id,
struct compat_itimerspec __user *setting)
-{
+{
long err;
mm_segment_t oldfs;
- struct itimerspec ts;
+ struct itimerspec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
err = sys_timer_gettime(timer_id,
- (struct itimerspec __user *) &ts);
- set_fs(oldfs);
+ (struct itimerspec __user *) &ts);
+ set_fs(oldfs);
if (!err && put_compat_itimerspec(setting, &ts))
return -EFAULT;
return err;
-}
+}
long compat_sys_clock_settime(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
if (get_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
oldfs = get_fs();
- set_fs(KERNEL_DS);
+ set_fs(KERNEL_DS);
err = sys_clock_settime(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
return err;
-}
+}
long compat_sys_clock_gettime(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -581,16 +546,16 @@ long compat_sys_clock_gettime(clockid_t which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
if (!err && put_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
return err;
-}
+}
long compat_sys_clock_getres(clockid_t which_clock,
struct compat_timespec __user *tp)
{
long err;
mm_segment_t oldfs;
- struct timespec ts;
+ struct timespec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
@@ -598,9 +563,9 @@ long compat_sys_clock_getres(clockid_t which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
if (!err && tp && put_compat_timespec(&ts, tp))
- return -EFAULT;
+ return -EFAULT;
return err;
-}
+}
static long compat_clock_nanosleep_restart(struct restart_block *restart)
{
@@ -632,10 +597,10 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
{
long err;
mm_segment_t oldfs;
- struct timespec in, out;
+ struct timespec in, out;
struct restart_block *restart;
- if (get_compat_timespec(&in, rqtp))
+ if (get_compat_timespec(&in, rqtp))
return -EFAULT;
oldfs = get_fs();
@@ -654,8 +619,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
restart->fn = compat_clock_nanosleep_restart;
restart->arg1 = (unsigned long) rmtp;
}
- return err;
-}
+ return err;
+}
/*
* We currently only need the following fields from the sigevent
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 38033db8d8ec..6b3a0c15144f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -98,7 +98,8 @@ static inline void check_for_tasks(int cpu)
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
(state = %ld, flags = %x) \n",
- p->comm, p->pid, cpu, p->state, p->flags);
+ p->comm, task_pid_nr(p), cpu,
+ p->state, p->flags);
}
write_unlock_irq(&tasklist_lock);
}
@@ -150,6 +151,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
+ nr_calls--;
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
@@ -233,6 +235,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-1, &nr_calls);
if (ret == NOTIFY_BAD) {
+ nr_calls--;
printk("%s: attempt to bring up CPU %u failed\n",
__FUNCTION__, cpu);
ret = -EINVAL;
@@ -262,6 +265,15 @@ out_notify:
int __cpuinit cpu_up(unsigned int cpu)
{
int err = 0;
+ if (!cpu_isset(cpu, cpu_possible_map)) {
+ printk(KERN_ERR "can't online cpu %d because it is not "
+ "configured as may-hotadd at boot time\n", cpu);
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+ printk(KERN_ERR "please check additional_cpus= boot "
+ "parameter\n");
+#endif
+ return -EINVAL;
+ }
mutex_lock(&cpu_add_remove_lock);
if (cpu_hotplug_disabled)
diff --git a/kernel/cpu_acct.c b/kernel/cpu_acct.c
new file mode 100644
index 000000000000..731e47e7f164
--- /dev/null
+++ b/kernel/cpu_acct.c
@@ -0,0 +1,186 @@
+/*
+ * kernel/cpu_acct.c - CPU accounting cgroup subsystem
+ *
+ * Copyright (C) Google Inc, 2006
+ *
+ * Developed by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com)
+ *
+ */
+
+/*
+ * Example cgroup subsystem for reporting total CPU usage of tasks in a
+ * cgroup, along with percentage load over a time interval
+ */
+
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+#include <linux/rcupdate.h>
+
+#include <asm/div64.h>
+
+struct cpuacct {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+ /* total time used by this class */
+ cputime64_t time;
+
+ /* time when next load calculation occurs */
+ u64 next_interval_check;
+
+ /* time used in current period */
+ cputime64_t current_interval_time;
+
+ /* time used in last period */
+ cputime64_t last_interval_time;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+static inline struct cpuacct *task_ca(struct task_struct *task)
+{
+ return container_of(task_subsys_state(task, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+#define INTERVAL (HZ * 10)
+
+static inline u64 next_interval_boundary(u64 now)
+{
+ /* calculate the next interval boundary beyond the
+ * current time */
+ do_div(now, INTERVAL);
+ return (now + 1) * INTERVAL;
+}
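A worked example of the boundary computation, assuming HZ == 1000 so that INTERVAL is 10000 jiffies: for now == 123456, do_div() leaves the quotient 12 in 'now' and the function returns (12 + 1) * 10000 == 130000, the first interval boundary strictly after the current time.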
+
+static struct cgroup_subsys_state *cpuacct_create(
+ struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+ if (!ca)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_init(&ca->lock);
+ ca->next_interval_check = next_interval_boundary(get_jiffies_64());
+ return &ca->css;
+}
+
+static void cpuacct_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ kfree(cgroup_ca(cont));
+}
+
+/* Lazily update the load calculation if necessary. Called with ca locked */
+static void cpuusage_update(struct cpuacct *ca)
+{
+ u64 now = get_jiffies_64();
+
+ /* If we're not due for an update, return */
+ if (ca->next_interval_check > now)
+ return;
+
+ if (ca->next_interval_check <= (now - INTERVAL)) {
+ /* If it's been more than an interval since the last
+ * check, then catch up - the last interval must have
+ * been zero load */
+ ca->last_interval_time = 0;
+ ca->next_interval_check = next_interval_boundary(now);
+ } else {
+ /* If a steal takes the last interval time negative,
+ * then we just ignore it */
+ if ((s64)ca->current_interval_time > 0)
+ ca->last_interval_time = ca->current_interval_time;
+ else
+ ca->last_interval_time = 0;
+ ca->next_interval_check += INTERVAL;
+ }
+ ca->current_interval_time = 0;
+}
+
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+ u64 time;
+
+ spin_lock_irq(&ca->lock);
+ cpuusage_update(ca);
+ time = cputime64_to_jiffies64(ca->time);
+ spin_unlock_irq(&ca->lock);
+
+ /* Convert 64-bit jiffies to milliseconds */
+ time *= 1000;
+ do_div(time, HZ);
+ return time;
+}
+
+static u64 load_read(struct cgroup *cont, struct cftype *cft)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+ u64 time;
+
+ /* Find the time used in the previous interval */
+ spin_lock_irq(&ca->lock);
+ cpuusage_update(ca);
+ time = cputime64_to_jiffies64(ca->last_interval_time);
+ spin_unlock_irq(&ca->lock);
+
+ /* Convert time to a percentage, to give the load in the
+ * previous period */
+ time *= 100;
+ do_div(time, INTERVAL);
+
+ return time;
+}
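For example, under the same HZ == 1000 assumption: if tasks in the group used 2.5 seconds of CPU during the last completed 10-second interval, last_interval_time converts to 2500 jiffies and the file reports 2500 * 100 / 10000 == 25, i.e. 25% load over that interval.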
+
+static struct cftype files[] = {
+ {
+ .name = "usage",
+ .read_uint = cpuusage_read,
+ },
+ {
+ .name = "load",
+ .read_uint = load_read,
+ }
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+void cpuacct_charge(struct task_struct *task, cputime_t cputime)
+{
+ struct cpuacct *ca;
+ unsigned long flags;
+
+ if (!cpuacct_subsys.active)
+ return;
+ rcu_read_lock();
+ ca = task_ca(task);
+ if (ca) {
+ spin_lock_irqsave(&ca->lock, flags);
+ cpuusage_update(ca);
+ ca->time = cputime64_add(ca->time, cputime);
+ ca->current_interval_time =
+ cputime64_add(ca->current_interval_time, cputime);
+ spin_unlock_irqrestore(&ca->lock, flags);
+ }
+ rcu_read_unlock();
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+ .name = "cpuacct",
+ .create = cpuacct_create,
+ .destroy = cpuacct_destroy,
+ .populate = cpuacct_populate,
+ .subsys_id = cpuacct_subsys_id,
+};
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 57e6448b171e..50f5dc463688 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -4,7 +4,8 @@
* Processor and Memory placement constraints for sets of tasks.
*
* Copyright (C) 2003 BULL SA.
- * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ * Copyright (C) 2004-2007 Silicon Graphics, Inc.
+ * Copyright (C) 2006 Google, Inc
*
* Portions derived from Patrick Mochel's sysfs code.
* sysfs is Copyright (c) 2001-3 Patrick Mochel
@@ -12,6 +13,7 @@
* 2003-10-10 Written by Simon Derr.
* 2003-10-22 Updates by Stephen Hemminger.
* 2004 May-July Rework by Paul Jackson.
+ * 2006 Rework by Paul Menage to use generic cgroups
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
@@ -36,6 +38,7 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
+#include <linux/prio_heap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
@@ -52,8 +55,7 @@
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>
-
-#define CPUSET_SUPER_MAGIC 0x27e0eb
+#include <linux/kfifo.h>
/*
* Tracks how many cpusets are currently defined in system.
@@ -62,6 +64,10 @@
*/
int number_of_cpusets __read_mostly;
+/* Retrieve the cpuset from a cgroup */
+struct cgroup_subsys cpuset_subsys;
+struct cpuset;
+
/* See "Frequency meter" comments, below. */
struct fmeter {
@@ -72,24 +78,13 @@ struct fmeter {
};
struct cpuset {
+ struct cgroup_subsys_state css;
+
unsigned long flags; /* "unsigned long" so bitops work */
cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
- /*
- * Count is atomic so can incr (fork) or decr (exit) without a lock.
- */
- atomic_t count; /* count tasks using this cpuset */
-
- /*
- * We link our 'sibling' struct into our parents 'children'.
- * Our children link their 'sibling' into our 'children'.
- */
- struct list_head sibling; /* my parents children */
- struct list_head children; /* my children */
-
struct cpuset *parent; /* my parent */
- struct dentry *dentry; /* cpuset fs entry */
/*
* Copy of global cpuset_mems_generation as of the most
@@ -98,15 +93,32 @@ struct cpuset {
int mems_generation;
struct fmeter fmeter; /* memory_pressure filter */
+
+ /* partition number for rebuild_sched_domains() */
+ int pn;
};
+/* Retrieve the cpuset for a cgroup */
+static inline struct cpuset *cgroup_cs(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
+ struct cpuset, css);
+}
+
+/* Retrieve the cpuset for a task */
+static inline struct cpuset *task_cs(struct task_struct *task)
+{
+ return container_of(task_subsys_state(task, cpuset_subsys_id),
+ struct cpuset, css);
+}
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
CS_MEMORY_MIGRATE,
- CS_REMOVED,
- CS_NOTIFY_ON_RELEASE,
+ CS_SCHED_LOAD_BALANCE,
CS_SPREAD_PAGE,
CS_SPREAD_SLAB,
} cpuset_flagbits_t;
@@ -122,14 +134,9 @@ static inline int is_mem_exclusive(const struct cpuset *cs)
return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}
-static inline int is_removed(const struct cpuset *cs)
+static inline int is_sched_load_balance(const struct cpuset *cs)
{
- return test_bit(CS_REMOVED, &cs->flags);
-}
-
-static inline int notify_on_release(const struct cpuset *cs)
-{
- return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
+ return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}
static inline int is_memory_migrate(const struct cpuset *cs)
@@ -172,14 +179,8 @@ static struct cpuset top_cpuset = {
.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
.cpus_allowed = CPU_MASK_ALL,
.mems_allowed = NODE_MASK_ALL,
- .count = ATOMIC_INIT(0),
- .sibling = LIST_HEAD_INIT(top_cpuset.sibling),
- .children = LIST_HEAD_INIT(top_cpuset.children),
};
-static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb;
-
/*
* We have two global cpuset mutexes below. They can nest.
* It is ok to first take manage_mutex, then nest callback_mutex. We also
@@ -263,297 +264,33 @@ static struct super_block *cpuset_sb;
* the routine cpuset_update_task_memory_state().
*/
-static DEFINE_MUTEX(manage_mutex);
static DEFINE_MUTEX(callback_mutex);
-/*
- * A couple of forward declarations required, due to cyclic reference loop:
- * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
- * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
- */
-
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
-
-static struct backing_dev_info cpuset_backing_dev_info = {
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
-};
-
-static struct inode *cpuset_new_inode(mode_t mode)
-{
- struct inode *inode = new_inode(cpuset_sb);
-
- if (inode) {
- inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
- }
- return inode;
-}
-
-static void cpuset_diput(struct dentry *dentry, struct inode *inode)
-{
- /* is dentry a directory ? if so, kfree() associated cpuset */
- if (S_ISDIR(inode->i_mode)) {
- struct cpuset *cs = dentry->d_fsdata;
- BUG_ON(!(is_removed(cs)));
- kfree(cs);
- }
- iput(inode);
-}
-
-static struct dentry_operations cpuset_dops = {
- .d_iput = cpuset_diput,
-};
-
-static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
-{
- struct dentry *d = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(d))
- d->d_op = &cpuset_dops;
- return d;
-}
-
-static void remove_dir(struct dentry *d)
-{
- struct dentry *parent = dget(d->d_parent);
-
- d_delete(d);
- simple_rmdir(parent->d_inode, d);
- dput(parent);
-}
-
-/*
- * NOTE : the dentry must have been dget()'ed
- */
-static void cpuset_d_remove_dir(struct dentry *dentry)
-{
- struct list_head *node;
-
- spin_lock(&dcache_lock);
- node = dentry->d_subdirs.next;
- while (node != &dentry->d_subdirs) {
- struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
- list_del_init(node);
- if (d->d_inode) {
- d = dget_locked(d);
- spin_unlock(&dcache_lock);
- d_delete(d);
- simple_unlink(dentry->d_inode, d);
- dput(d);
- spin_lock(&dcache_lock);
- }
- node = dentry->d_subdirs.next;
- }
- list_del_init(&dentry->d_u.d_child);
- spin_unlock(&dcache_lock);
- remove_dir(dentry);
-}
-
-static struct super_operations cpuset_ops = {
- .statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
-};
-
-static int cpuset_fill_super(struct super_block *sb, void *unused_data,
- int unused_silent)
-{
- struct inode *inode;
- struct dentry *root;
-
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = CPUSET_SUPER_MAGIC;
- sb->s_op = &cpuset_ops;
- cpuset_sb = sb;
-
- inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
- if (inode) {
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- /* directories start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
- } else {
- return -ENOMEM;
- }
-
- root = d_alloc_root(inode);
- if (!root) {
- iput(inode);
- return -ENOMEM;
- }
- sb->s_root = root;
- return 0;
-}
-
+/* This is ugly, but preserves the userspace API for existing cpuset
+ * users. If someone tries to mount the "cpuset" filesystem, we
+ * silently switch it to mount "cgroup" instead. */
static int cpuset_get_sb(struct file_system_type *fs_type,
int flags, const char *unused_dev_name,
void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
+ struct file_system_type *cgroup_fs = get_fs_type("cgroup");
+ int ret = -ENODEV;
+ if (cgroup_fs) {
+ char mountopts[] =
+ "cpuset,noprefix,"
+ "release_agent=/sbin/cpuset_release_agent";
+ ret = cgroup_fs->get_sb(cgroup_fs, flags,
+ unused_dev_name, mountopts, mnt);
+ put_filesystem(cgroup_fs);
+ }
+ return ret;
}
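
As a rough userspace illustration of the redirection above: mounting the legacy "cpuset" filesystem now behaves like a cgroup mount with that fixed option string. The sketch below is not part of this patch; the /dev/cpuset mount point and the pre-existing directory are assumptions, and the program needs CAP_SYS_ADMIN.

/* Hypothetical userspace illustration only: the explicit cgroup mount
 * that a legacy "mount -t cpuset cpuset /dev/cpuset" is rewritten into. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("cpuset", "/dev/cpuset", "cgroup", 0,
		  "cpuset,noprefix,release_agent=/sbin/cpuset_release_agent"))
		perror("mount");
	return 0;
}
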
static struct file_system_type cpuset_fs_type = {
.name = "cpuset",
.get_sb = cpuset_get_sb,
- .kill_sb = kill_litter_super,
-};
-
-/* struct cftype:
- *
- * The files in the cpuset filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
- *
- * When reading/writing to a file:
- * - the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
- * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
- */
-
-struct cftype {
- char *name;
- int private;
- int (*open) (struct inode *inode, struct file *file);
- ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos);
- int (*write) (struct file *file, const char __user *buf, size_t nbytes,
- loff_t *ppos);
- int (*release) (struct inode *inode, struct file *file);
};
-static inline struct cpuset *__d_cs(struct dentry *dentry)
-{
- return dentry->d_fsdata;
-}
-
-static inline struct cftype *__d_cft(struct dentry *dentry)
-{
- return dentry->d_fsdata;
-}
-
-/*
- * Call with manage_mutex held. Writes path of cpuset into buf.
- * Returns 0 on success, -errno on error.
- */
-
-static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
-{
- char *start;
-
- start = buf + buflen;
-
- *--start = '\0';
- for (;;) {
- int len = cs->dentry->d_name.len;
- if ((start -= len) < buf)
- return -ENAMETOOLONG;
- memcpy(start, cs->dentry->d_name.name, len);
- cs = cs->parent;
- if (!cs)
- break;
- if (!cs->parent)
- continue;
- if (--start < buf)
- return -ENAMETOOLONG;
- *start = '/';
- }
- memmove(buf, start, buf + buflen - start);
- return 0;
-}
-
-/*
- * Notify userspace when a cpuset is released, by running
- * /sbin/cpuset_release_agent with the name of the cpuset (path
- * relative to the root of cpuset file system) as the argument.
- *
- * Most likely, this user command will try to rmdir this cpuset.
- *
- * This races with the possibility that some other task will be
- * attached to this cpuset before it is removed, or that some other
- * user task will 'mkdir' a child cpuset of this cpuset. That's ok.
- * The presumed 'rmdir' will fail quietly if this cpuset is no longer
- * unused, and this cpuset will be reprieved from its death sentence,
- * to continue to serve a useful existence. Next time it's released,
- * we will get notified again, if it still has 'notify_on_release' set.
- *
- * The final arg to call_usermodehelper() is 0, which means don't
- * wait. The separate /sbin/cpuset_release_agent task is forked by
- * call_usermodehelper(), then control in this thread returns here,
- * without waiting for the release agent task. We don't bother to
- * wait because the caller of this routine has no use for the exit
- * status of the /sbin/cpuset_release_agent task, so no sense holding
- * our caller up for that.
- *
- * When we had only one cpuset mutex, we had to call this
- * without holding it, to avoid deadlock when call_usermodehelper()
- * allocated memory. With two locks, we could now call this while
- * holding manage_mutex, but we still don't, so as to minimize
- * the time manage_mutex is held.
- */
-
-static void cpuset_release_agent(const char *pathbuf)
-{
- char *argv[3], *envp[3];
- int i;
-
- if (!pathbuf)
- return;
-
- i = 0;
- argv[i++] = "/sbin/cpuset_release_agent";
- argv[i++] = (char *)pathbuf;
- argv[i] = NULL;
-
- i = 0;
- /* minimal command environment */
- envp[i++] = "HOME=/";
- envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp[i] = NULL;
-
- call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
- kfree(pathbuf);
-}
-
-/*
- * Either cs->count of using tasks transitioned to zero, or the
- * cs->children list of child cpusets just became empty. If this
- * cs is notify_on_release() and now both the user count is zero and
- * the list of children is empty, prepare cpuset path in a kmalloc'd
- * buffer, to be returned via ppathbuf, so that the caller can invoke
- * cpuset_release_agent() with it later on, once manage_mutex is dropped.
- * Call here with manage_mutex held.
- *
- * This check_for_release() routine is responsible for kmalloc'ing
- * pathbuf. The above cpuset_release_agent() is responsible for
- * kfree'ing pathbuf. The caller of these routines is responsible
- * for providing a pathbuf pointer, initialized to NULL, then
- * calling check_for_release() with manage_mutex held and the address
- * of the pathbuf pointer, then dropping manage_mutex, then calling
- * cpuset_release_agent() with pathbuf, as set by check_for_release().
- */
-
-static void check_for_release(struct cpuset *cs, char **ppathbuf)
-{
- if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
- list_empty(&cs->children)) {
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!buf)
- return;
- if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
- kfree(buf);
- else
- *ppathbuf = buf;
- }
-}
-
/*
* Return in *pmask the portion of a cpusets's cpus_allowed that
* are online. If none are online, walk up the cpuset hierarchy
@@ -581,26 +318,28 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
/*
* Return in *pmask the portion of a cpusets's mems_allowed that
- * are online. If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online mems. If we get
- * all the way to the top and still haven't found any online mems,
- * return node_online_map.
+ * are online, with memory. If none are online with memory, walk
+ * up the cpuset hierarchy until we find one that does have some
+ * online mems. If we get all the way to the top and still haven't
+ * found any online mems, return node_states[N_HIGH_MEMORY].
*
* One way or another, we guarantee to return some non-empty subset
- * of node_online_map.
+ * of node_states[N_HIGH_MEMORY].
*
* Call with callback_mutex held.
*/
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
- while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
+ while (cs && !nodes_intersects(cs->mems_allowed,
+ node_states[N_HIGH_MEMORY]))
cs = cs->parent;
if (cs)
- nodes_and(*pmask, cs->mems_allowed, node_online_map);
+ nodes_and(*pmask, cs->mems_allowed,
+ node_states[N_HIGH_MEMORY]);
else
- *pmask = node_online_map;
- BUG_ON(!nodes_intersects(*pmask, node_online_map));
+ *pmask = node_states[N_HIGH_MEMORY];
+ BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}
/**
@@ -651,20 +390,19 @@ void cpuset_update_task_memory_state(void)
struct task_struct *tsk = current;
struct cpuset *cs;
- if (tsk->cpuset == &top_cpuset) {
+ if (task_cs(tsk) == &top_cpuset) {
/* Don't need rcu for top_cpuset. It's never freed. */
my_cpusets_mem_gen = top_cpuset.mems_generation;
} else {
rcu_read_lock();
- cs = rcu_dereference(tsk->cpuset);
- my_cpusets_mem_gen = cs->mems_generation;
+ my_cpusets_mem_gen = task_cs(current)->mems_generation;
rcu_read_unlock();
}
if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
mutex_lock(&callback_mutex);
task_lock(tsk);
- cs = tsk->cpuset; /* Maybe changed when task not locked */
+ cs = task_cs(tsk); /* Maybe changed when task not locked */
guarantee_online_mems(cs, &tsk->mems_allowed);
tsk->cpuset_mems_generation = cs->mems_generation;
if (is_spread_page(cs))
@@ -719,11 +457,12 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
+ struct cgroup *cont;
struct cpuset *c, *par;
/* Each of our child cpusets must be a subset of us */
- list_for_each_entry(c, &cur->children, sibling) {
- if (!is_cpuset_subset(c, trial))
+ list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+ if (!is_cpuset_subset(cgroup_cs(cont), trial))
return -EBUSY;
}
@@ -738,7 +477,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
return -EACCES;
/* If either I or some sibling (!= me) is exclusive, we can't overlap */
- list_for_each_entry(c, &par->children, sibling) {
+ list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
+ c = cgroup_cs(cont);
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -749,62 +489,247 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
return -EINVAL;
}
+ /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
+ if (cgroup_task_count(cur->css.cgroup)) {
+ if (cpus_empty(trial->cpus_allowed) ||
+ nodes_empty(trial->mems_allowed)) {
+ return -ENOSPC;
+ }
+ }
+
return 0;
}
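
Seen from userspace, the new check means that clearing the cpus or mems of a cpuset that still has attached tasks fails with ENOSPC rather than silently emptying the mask. A hypothetical illustration follows; the /dev/cpuset/foo path and the unprefixed "cpus" file name assume the legacy-style mount handled by cpuset_get_sb() above.

/* Hypothetical illustration: with tasks attached, writing an empty
 * list to a cpuset's "cpus" file is rejected with ENOSPC by
 * validate_change(). The mount point and cpuset name are made up. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cpuset/foo/cpus", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "\n", 1) < 0)
		printf("write failed: %s (expect ENOSPC while tasks remain)\n",
		       strerror(errno));
	close(fd);
	return 0;
}
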
/*
- * For a given cpuset cur, partition the system as follows
- * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
- * exclusive child cpusets
- * b. All cpus in the current cpuset's cpus_allowed that are not part of any
- * exclusive child cpusets
- * Build these two partitions by calling partition_sched_domains
- *
- * Call with manage_mutex held. May nest a call to the
- * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
- * Must not be called holding callback_mutex, because we must
- * not call lock_cpu_hotplug() while holding callback_mutex.
+ * Helper routine for rebuild_sched_domains().
+ * Do cpusets a, b have overlapping cpus_allowed masks?
*/
-static void update_cpu_domains(struct cpuset *cur)
+static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
- struct cpuset *c, *par = cur->parent;
- cpumask_t pspan, cspan;
+ return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+}
- if (par == NULL || cpus_empty(cur->cpus_allowed))
- return;
+/*
+ * rebuild_sched_domains()
+ *
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
+ *
+ * This routine builds a partial partition of the system's CPUs
+ * (the set of non-overlapping cpumask_t's in the array 'doms'
+ * below), and passes that partial partition to the kernel/sched.c
+ * partition_sched_domains() routine, which will rebuild the
+ * scheduler's load balancing domains (sched domains) as specified
+ * by that partial partition. A 'partial partition' is a set of
+ * non-overlapping subsets whose union is a subset of the system's CPUs.
+ *
+ * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * for a background explanation of this.
+ *
+ * Does not return errors, on the theory that the callers of this
+ * routine would rather not worry about failures to rebuild sched
+ * domains when operating in the severe memory shortage situations
+ * that could cause allocation failures below.
+ *
+ * Call with cgroup_mutex held. May take callback_mutex during
+ * call due to the kfifo_alloc() and kmalloc() calls. May nest
+ * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
+ * Must not be called holding callback_mutex, because we must not
+ * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere
+ * the kernel nests callback_mutex inside lock_cpu_hotplug() calls.
+ * So the reverse nesting would risk an ABBA deadlock.
+ *
+ * The three key local variables below are:
+ * q - a kfifo queue of cpuset pointers, used to implement a
+ * top-down scan of all cpusets. This scan loads a pointer
+ * to each cpuset marked is_sched_load_balance into the
+ * array 'csa'. For our purposes, rebuilding the scheduler's
+ * sched domains, we can ignore !is_sched_load_balance cpusets.
+ * csa - (for CpuSet Array) Array of pointers to all the cpusets
+ * that need to be load balanced, for convenient iterative
+ * access by the subsequent code that finds the best partition,
+ * i.e. the set of domains (subsets) of CPUs such that the
+ * cpus_allowed of every cpuset marked is_sched_load_balance
+ * is a subset of one of these domains, while there are as
+ * many such domains as possible, each as small as possible.
+ * doms - Conversion of 'csa' to an array of cpumasks, for passing to
+ * the kernel/sched.c routine partition_sched_domains() in a
+ * convenient format, that can be easily compared to the prior
+ * value to determine what partition elements (sched domains)
+ * were changed (added or removed.)
+ *
+ * Finding the best partition (set of domains):
+ * The triple nested loops below over i, j, k scan over the
+ * load balanced cpusets (using the array of cpuset pointers in
+ * csa[]) looking for pairs of cpusets that have overlapping
+ * cpus_allowed, but which don't yet have the same 'pn' partition
+ * number, and gives them the same partition number. It keeps
+ * looping on the 'restart' label until it can no longer find
+ * any such pairs.
+ *
+ * The union of the cpus_allowed masks from the set of
+ * all cpusets having the same 'pn' value then form the one
+ * element of the partition (one sched domain) to be passed to
+ * partition_sched_domains().
+ */
- /*
- * Get all cpus from parent's cpus_allowed not part of exclusive
- * children
- */
- pspan = par->cpus_allowed;
- list_for_each_entry(c, &par->children, sibling) {
- if (is_cpu_exclusive(c))
- cpus_andnot(pspan, pspan, c->cpus_allowed);
+static void rebuild_sched_domains(void)
+{
+ struct kfifo *q; /* queue of cpusets to be scanned */
+ struct cpuset *cp; /* scans q */
+ struct cpuset **csa; /* array of all cpuset ptrs */
+ int csn; /* how many cpuset ptrs in csa so far */
+ int i, j, k; /* indices for partition finding loops */
+ cpumask_t *doms; /* resulting partition; i.e. sched domains */
+ int ndoms; /* number of sched domains in result */
+ int nslot; /* next empty doms[] cpumask_t slot */
+
+ q = NULL;
+ csa = NULL;
+ doms = NULL;
+
+ /* Special case for the 99% of systems with one, full, sched domain */
+ if (is_sched_load_balance(&top_cpuset)) {
+ ndoms = 1;
+ doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!doms)
+ goto rebuild;
+ *doms = top_cpuset.cpus_allowed;
+ goto rebuild;
}
- if (!is_cpu_exclusive(cur)) {
- cpus_or(pspan, pspan, cur->cpus_allowed);
- if (cpus_equal(pspan, cur->cpus_allowed))
- return;
- cspan = CPU_MASK_NONE;
- } else {
- if (cpus_empty(pspan))
- return;
- cspan = cur->cpus_allowed;
- /*
- * Get all cpus from current cpuset's cpus_allowed not part
- * of exclusive children
- */
- list_for_each_entry(c, &cur->children, sibling) {
- if (is_cpu_exclusive(c))
- cpus_andnot(cspan, cspan, c->cpus_allowed);
+
+ q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+ if (IS_ERR(q))
+ goto done;
+ csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
+ if (!csa)
+ goto done;
+ csn = 0;
+
+ cp = &top_cpuset;
+ __kfifo_put(q, (void *)&cp, sizeof(cp));
+ while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+ struct cgroup *cont;
+ struct cpuset *child; /* scans child cpusets of cp */
+ if (is_sched_load_balance(cp))
+ csa[csn++] = cp;
+ list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+ child = cgroup_cs(cont);
+ __kfifo_put(q, (void *)&child, sizeof(cp));
+ }
+ }
+
+ for (i = 0; i < csn; i++)
+ csa[i]->pn = i;
+ ndoms = csn;
+
+restart:
+ /* Find the best partition (set of sched domains) */
+ for (i = 0; i < csn; i++) {
+ struct cpuset *a = csa[i];
+ int apn = a->pn;
+
+ for (j = 0; j < csn; j++) {
+ struct cpuset *b = csa[j];
+ int bpn = b->pn;
+
+ if (apn != bpn && cpusets_overlap(a, b)) {
+ for (k = 0; k < csn; k++) {
+ struct cpuset *c = csa[k];
+
+ if (c->pn == bpn)
+ c->pn = apn;
+ }
+ ndoms--; /* one less element */
+ goto restart;
+ }
+ }
+ }
+
+ /* Convert <csn, csa> to <ndoms, doms> */
+ doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+ if (!doms)
+ goto rebuild;
+
+ for (nslot = 0, i = 0; i < csn; i++) {
+ struct cpuset *a = csa[i];
+ int apn = a->pn;
+
+ if (apn >= 0) {
+ cpumask_t *dp = doms + nslot;
+
+ if (nslot == ndoms) {
+ static int warnings = 10;
+ if (warnings) {
+ printk(KERN_WARNING
+ "rebuild_sched_domains confused:"
+ " nslot %d, ndoms %d, csn %d, i %d,"
+ " apn %d\n",
+ nslot, ndoms, csn, i, apn);
+ warnings--;
+ }
+ continue;
+ }
+
+ cpus_clear(*dp);
+ for (j = i; j < csn; j++) {
+ struct cpuset *b = csa[j];
+
+ if (apn == b->pn) {
+ cpus_or(*dp, *dp, b->cpus_allowed);
+ b->pn = -1;
+ }
+ }
+ nslot++;
}
}
+ BUG_ON(nslot != ndoms);
+rebuild:
+ /* Have scheduler rebuild sched domains */
lock_cpu_hotplug();
- partition_sched_domains(&pspan, &cspan);
+ partition_sched_domains(ndoms, doms);
unlock_cpu_hotplug();
+
+done:
+ if (q && !IS_ERR(q))
+ kfifo_free(q);
+ kfree(csa);
+ /* Don't kfree(doms) -- partition_sched_domains() does that. */
+}
+
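
A minimal standalone sketch of the partition-number merge above, using plain unsigned masks in place of cpumask_t and a handful of made-up cpusets; it reproduces only the i/j/k merging logic, not the kfifo scan, locking, or the doms[] conversion.

/* Standalone sketch of the pn-merging step in rebuild_sched_domains().
 * Cpusets whose cpu masks overlap are folded into the same partition
 * number; the count of distinct numbers is the number of sched domains.
 * The masks and the cpuset count are invented for the example. */
#include <stdio.h>

#define NCS 4

int main(void)
{
	unsigned long cpus[NCS] = { 0x03, 0x06, 0x30, 0xc0 }; /* example masks */
	int pn[NCS];
	int i, j, k, ndoms = NCS;

	for (i = 0; i < NCS; i++)
		pn[i] = i;
restart:
	for (i = 0; i < NCS; i++) {
		for (j = 0; j < NCS; j++) {
			if (pn[i] != pn[j] && (cpus[i] & cpus[j])) {
				for (k = 0; k < NCS; k++)
					if (pn[k] == pn[j])
						pn[k] = pn[i];
				ndoms--;	/* one less sched domain */
				goto restart;
			}
		}
	}
	for (i = 0; i < NCS; i++)
		printf("cpuset %d: mask 0x%02lx pn %d\n", i, cpus[i], pn[i]);
	printf("ndoms = %d\n", ndoms);	/* 3 for the masks above */
	return 0;
}

With the example masks, the first two cpusets overlap and collapse into one partition, while the last two stay separate, leaving three sched domains.
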
+static inline int started_after_time(struct task_struct *t1,
+ struct timespec *time,
+ struct task_struct *t2)
+{
+ int start_diff = timespec_compare(&t1->start_time, time);
+ if (start_diff > 0) {
+ return 1;
+ } else if (start_diff < 0) {
+ return 0;
+ } else {
+ /*
+ * Arbitrarily, if two processes started at the same
+ * time, we'll say that the lower pointer value
+ * started first. Note that t2 may have exited by now
+ * so this may not be a valid pointer any longer, but
+ * that's fine - it still serves to distinguish
+ * between two tasks started (effectively)
+ * simultaneously.
+ */
+ return t1 > t2;
+ }
+}
+
+static inline int started_after(void *p1, void *p2)
+{
+ struct task_struct *t1 = p1;
+ struct task_struct *t2 = p2;
+ return started_after_time(t1, &t2->start_time, t2);
}
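
A small userspace sketch of the same ordering rule, with dummy structs standing in for task_struct and a local timespec comparison standing in for timespec_compare(); the point is only that ties on start_time are broken consistently by address, so the ordering stays total.

/* Userspace sketch of the started_after_time() rule: order by start
 * time, break exact ties by address. fake_task and ts_compare() are
 * local stand-ins, not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fake_task {
	struct timespec start_time;
};

static int ts_compare(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec < b->tv_sec ? -1 : 1;
	if (a->tv_nsec != b->tv_nsec)
		return a->tv_nsec < b->tv_nsec ? -1 : 1;
	return 0;
}

static int started_after(const struct fake_task *t1, const struct fake_task *t2)
{
	int diff = ts_compare(&t1->start_time, &t2->start_time);

	if (diff)
		return diff > 0;
	return (uintptr_t)t1 > (uintptr_t)t2;	/* arbitrary, but consistent */
}

int main(void)
{
	struct fake_task a = { .start_time = { .tv_sec = 100, .tv_nsec = 0 } };
	struct fake_task b = { .start_time = { .tv_sec = 100, .tv_nsec = 0 } };

	/* Equal start times: exactly one direction of the comparison holds. */
	printf("a after b: %d, b after a: %d\n",
	       started_after(&a, &b), started_after(&b, &a));
	return 0;
}
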
/*
@@ -814,7 +739,15 @@ static void update_cpu_domains(struct cpuset *cur)
static int update_cpumask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
- int retval, cpus_unchanged;
+ int retval, i;
+ int is_load_balanced;
+ struct cgroup_iter it;
+ struct cgroup *cgrp = cs->css.cgroup;
+ struct task_struct *p, *dropped;
+ /* Never dereference latest_task, since it's not refcounted */
+ struct task_struct *latest_task = NULL;
+ struct ptr_heap heap;
+ struct timespec latest_time = { 0, 0 };
/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
if (cs == &top_cpuset)
@@ -823,11 +756,13 @@ static int update_cpumask(struct cpuset *cs, char *buf)
trialcs = *cs;
/*
- * We allow a cpuset's cpus_allowed to be empty; if it has attached
- * tasks, we'll catch it later when we validate the change and return
- * -ENOSPC.
+ * An empty cpus_allowed is ok iff there are no tasks in the cpuset.
+ * Since cpulist_parse() fails on an empty mask, we special case
+ * that parsing. The validate_change() call ensures that cpusets
+ * with tasks have cpus.
*/
- if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+ buf = strstrip(buf);
+ if (!*buf) {
cpus_clear(trialcs.cpus_allowed);
} else {
retval = cpulist_parse(buf, trialcs.cpus_allowed);
@@ -835,18 +770,79 @@ static int update_cpumask(struct cpuset *cs, char *buf)
return retval;
}
cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
- /* cpus_allowed cannot be empty for a cpuset with attached tasks. */
- if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed))
- return -ENOSPC;
retval = validate_change(cs, &trialcs);
if (retval < 0)
return retval;
- cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
+
+ /* Nothing to do if the cpus didn't change */
+ if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
+ return 0;
+ retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
+ if (retval)
+ return retval;
+
+ is_load_balanced = is_sched_load_balance(&trialcs);
+
mutex_lock(&callback_mutex);
cs->cpus_allowed = trialcs.cpus_allowed;
mutex_unlock(&callback_mutex);
- if (is_cpu_exclusive(cs) && !cpus_unchanged)
- update_cpu_domains(cs);
+
+ again:
+ /*
+ * Scan tasks in the cpuset, and update the cpumasks of any
+ * that need an update. Since we can't call set_cpus_allowed()
+ * while holding tasklist_lock, gather tasks to be processed
+ * in a heap structure. If the statically-sized heap fills up,
+ * overflow tasks that started later, and in future iterations
+ * only consider tasks that started after the latest task in
+ * the previous pass. This guarantees forward progress and
+ * that we don't miss any tasks.
+ */
+ heap.size = 0;
+ cgroup_iter_start(cgrp, &it);
+ while ((p = cgroup_iter_next(cgrp, &it))) {
+ /* Only affect tasks that don't have the right cpus_allowed */
+ if (cpus_equal(p->cpus_allowed, cs->cpus_allowed))
+ continue;
+ /*
+ * Only process tasks that started after the last task
+ * we processed
+ */
+ if (!started_after_time(p, &latest_time, latest_task))
+ continue;
+ dropped = heap_insert(&heap, p);
+ if (dropped == NULL) {
+ get_task_struct(p);
+ } else if (dropped != p) {
+ get_task_struct(p);
+ put_task_struct(dropped);
+ }
+ }
+ cgroup_iter_end(cgrp, &it);
+ if (heap.size) {
+ for (i = 0; i < heap.size; i++) {
+ struct task_struct *p = heap.ptrs[i];
+ if (i == 0) {
+ latest_time = p->start_time;
+ latest_task = p;
+ }
+ set_cpus_allowed(p, cs->cpus_allowed);
+ put_task_struct(p);
+ }
+ /*
+ * If we had to process any tasks at all, scan again
+ * in case some of them were in the middle of forking
+ * children that didn't notice the new cpumask
+ * restriction. Not the most efficient way to do it,
+ * but it avoids having to take callback_mutex in the
+ * fork path
+ */
+ goto again;
+ }
+ heap_free(&heap);
+ if (is_load_balanced)
+ rebuild_sched_domains();
+
return 0;
}
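
The bounded-heap rescan above can be illustrated with a self-contained sketch that keeps only the forward-progress idea: each pass handles at most a fixed number of the earliest remaining keys, later keys overflow the batch, and the next pass starts strictly after the latest key already handled. Everything below (integer keys, the batch size, the linear max search in place of the kernel's ptr_heap) is invented for illustration.

/* Self-contained sketch of the bounded-batch rescan in update_cpumask():
 * keep at most HEAP_CAP of the earliest remaining keys per pass (later
 * keys "overflow" the batch, as in heap_insert()), then rescan starting
 * strictly after the latest key already handled. */
#include <stdio.h>

#define NITEMS   10
#define HEAP_CAP 3

int main(void)
{
	int start[NITEMS] = { 7, 3, 9, 1, 8, 2, 6, 4, 10, 5 };
	int latest = 0;		/* highest key processed so far */
	int pass = 0;

	for (;;) {
		int batch[HEAP_CAP];
		int n = 0, i, j, pass_max;

		/* One scan: keep the HEAP_CAP smallest keys > latest. */
		for (i = 0; i < NITEMS; i++) {
			int v = start[i], m = 0;

			if (v <= latest)
				continue;	/* done in an earlier pass */
			if (n < HEAP_CAP) {
				batch[n++] = v;
				continue;
			}
			for (j = 1; j < HEAP_CAP; j++)	/* find batch max */
				if (batch[j] > batch[m])
					m = j;
			if (v < batch[m])
				batch[m] = v;	/* overflow the later key */
		}
		if (!n)
			break;			/* nothing left to process */

		pass_max = batch[0];
		printf("pass %d:", ++pass);
		for (i = 0; i < n; i++) {	/* "process" the batch */
			printf(" %d", batch[i]);
			if (batch[i] > pass_max)
				pass_max = batch[i];
		}
		printf("\n");
		latest = pass_max;	/* guarantees forward progress */
	}
	return 0;
}

Running it walks the ten keys in waves of at most three, strictly increasing between passes, which is the same property the cpuset code relies on to terminate without missing tasks.
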
@@ -895,7 +891,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
mutex_lock(&callback_mutex);
- guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+ guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
mutex_unlock(&callback_mutex);
}
@@ -913,46 +909,50 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
* their mempolicies to the cpusets new mems_allowed.
*/
+static void *cpuset_being_rebound;
+
static int update_nodemask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
nodemask_t oldmem;
- struct task_struct *g, *p;
+ struct task_struct *p;
struct mm_struct **mmarray;
int i, n, ntasks;
int migrate;
int fudge;
int retval;
+ struct cgroup_iter it;
- /* top_cpuset.mems_allowed tracks node_online_map; it's read-only */
+ /*
+ * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
+ * it's read-only.
+ */
if (cs == &top_cpuset)
return -EACCES;
trialcs = *cs;
/*
- * We allow a cpuset's mems_allowed to be empty; if it has attached
- * tasks, we'll catch it later when we validate the change and return
- * -ENOSPC.
+ * An empty mems_allowed is ok iff there are no tasks in the cpuset.
+ * Since nodelist_parse() fails on an empty mask, we special case
+ * that parsing. The validate_change() call ensures that cpusets
+ * with tasks have memory.
*/
- if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+ buf = strstrip(buf);
+ if (!*buf) {
nodes_clear(trialcs.mems_allowed);
} else {
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
goto done;
}
- nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
+ nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
+ node_states[N_HIGH_MEMORY]);
oldmem = cs->mems_allowed;
if (nodes_equal(oldmem, trialcs.mems_allowed)) {
retval = 0; /* Too easy - nothing to do */
goto done;
}
- /* mems_allowed cannot be empty for a cpuset with attached tasks. */
- if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) {
- retval = -ENOSPC;
- goto done;
- }
retval = validate_change(cs, &trialcs);
if (retval < 0)
goto done;
@@ -962,7 +962,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
+ cpuset_being_rebound = cs; /* causes mpol_copy() rebind */
fudge = 10; /* spare mmarray[] slots */
fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
@@ -976,13 +976,13 @@ static int update_nodemask(struct cpuset *cs, char *buf)
* enough mmarray[] w/o using GFP_ATOMIC.
*/
while (1) {
- ntasks = atomic_read(&cs->count); /* guess */
+ ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
ntasks += fudge;
mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
if (!mmarray)
goto done;
read_lock(&tasklist_lock); /* block fork */
- if (atomic_read(&cs->count) <= ntasks)
+ if (cgroup_task_count(cs->css.cgroup) <= ntasks)
break; /* got enough */
read_unlock(&tasklist_lock); /* try again */
kfree(mmarray);
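
The sizing loop above follows a common pattern: guess the count without the lock, allocate, then re-check the count under the lock and retry if the guess fell short. A hedged userspace sketch of that pattern, with a pthread mutex and a plain integer standing in for tasklist_lock and cgroup_task_count():

/* Illustrative sketch of the mmarray[] allocation pattern: guess a
 * size, allocate with no lock held, then take the lock and verify the
 * guess still covers the live count; otherwise drop the lock, free,
 * and retry. The counter and mutex are stand-ins, not kernel APIs. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static int live_count = 42;	/* e.g. tasks attached to a cpuset */

int main(void)
{
	int fudge = 10;		/* spare slots in case the count grows */
	void **array;
	int guess;

	for (;;) {
		guess = live_count + fudge;	/* unlocked read: just a guess */
		array = malloc(guess * sizeof(*array));
		if (!array)
			return 1;
		pthread_mutex_lock(&count_lock);
		if (live_count <= guess)
			break;			/* got enough; keep the lock */
		pthread_mutex_unlock(&count_lock);
		free(array);			/* guess went stale; try again */
	}
	printf("allocated %d slots for %d entries\n", guess, live_count);
	/* ... fill array[] while holding count_lock ... */
	pthread_mutex_unlock(&count_lock);
	free(array);
	return 0;
}
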
@@ -991,21 +991,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
n = 0;
/* Load up mmarray[] with mm reference for each task in cpuset. */
- do_each_thread(g, p) {
+ cgroup_iter_start(cs->css.cgroup, &it);
+ while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
struct mm_struct *mm;
if (n >= ntasks) {
printk(KERN_WARNING
"Cpuset mempolicy rebind incomplete.\n");
- continue;
+ break;
}
- if (p->cpuset != cs)
- continue;
mm = get_task_mm(p);
if (!mm)
continue;
mmarray[n++] = mm;
- } while_each_thread(g, p);
+ }
+ cgroup_iter_end(cs->css.cgroup, &it);
read_unlock(&tasklist_lock);
/*
@@ -1033,12 +1033,17 @@ static int update_nodemask(struct cpuset *cs, char *buf)
/* We're done rebinding vma's to this cpusets new mems_allowed. */
kfree(mmarray);
- set_cpuset_being_rebound(NULL);
+ cpuset_being_rebound = NULL;
retval = 0;
done:
return retval;
}
+int current_cpuset_is_being_rebound(void)
+{
+ return task_cs(current) == cpuset_being_rebound;
+}
+
/*
* Call with manage_mutex held.
*/
@@ -1055,6 +1060,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
+ * CS_SCHED_LOAD_BALANCE,
* CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
* CS_SPREAD_PAGE, CS_SPREAD_SLAB)
* cs: the cpuset to update
@@ -1067,7 +1073,8 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
int turning_on;
struct cpuset trialcs;
- int err, cpu_exclusive_changed;
+ int err;
+ int cpus_nonempty, balance_flag_changed;
turning_on = (simple_strtoul(buf, NULL, 10) != 0);
@@ -1080,14 +1087,18 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
err = validate_change(cs, &trialcs);
if (err < 0)
return err;
- cpu_exclusive_changed =
- (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
+
+ cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
+ balance_flag_changed = (is_sched_load_balance(cs) !=
+ is_sched_load_balance(&trialcs));
+
mutex_lock(&callback_mutex);
cs->flags = trialcs.flags;
mutex_unlock(&callback_mutex);
- if (cpu_exclusive_changed)
- update_cpu_domains(cs);
+ if (cpus_nonempty && balance_flag_changed)
+ rebuild_sched_domains();
+
return 0;
}
@@ -1189,85 +1200,34 @@ static int fmeter_getrate(struct fmeter *fmp)
return val;
}
-/*
- * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly
- * writing the path of the old cpuset in 'ppathbuf' if it needs to be
- * notified on release.
- *
- * Call holding manage_mutex. May take callback_mutex and task_lock of
- * the task 'pid' during call.
- */
-
-static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
+static int cpuset_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *cont, struct task_struct *tsk)
{
- pid_t pid;
- struct task_struct *tsk;
- struct cpuset *oldcs;
- cpumask_t cpus;
- nodemask_t from, to;
- struct mm_struct *mm;
- int retval;
+ struct cpuset *cs = cgroup_cs(cont);
- if (sscanf(pidbuf, "%d", &pid) != 1)
- return -EIO;
if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
- if (pid) {
- read_lock(&tasklist_lock);
-
- tsk = find_task_by_pid(pid);
- if (!tsk || tsk->flags & PF_EXITING) {
- read_unlock(&tasklist_lock);
- return -ESRCH;
- }
-
- get_task_struct(tsk);
- read_unlock(&tasklist_lock);
-
- if ((current->euid) && (current->euid != tsk->uid)
- && (current->euid != tsk->suid)) {
- put_task_struct(tsk);
- return -EACCES;
- }
- } else {
- tsk = current;
- get_task_struct(tsk);
- }
+ return security_task_setscheduler(tsk, 0, NULL);
+}
- retval = security_task_setscheduler(tsk, 0, NULL);
- if (retval) {
- put_task_struct(tsk);
- return retval;
- }
+static void cpuset_attach(struct cgroup_subsys *ss,
+ struct cgroup *cont, struct cgroup *oldcont,
+ struct task_struct *tsk)
+{
+ cpumask_t cpus;
+ nodemask_t from, to;
+ struct mm_struct *mm;
+ struct cpuset *cs = cgroup_cs(cont);
+ struct cpuset *oldcs = cgroup_cs(oldcont);
mutex_lock(&callback_mutex);
-
- task_lock(tsk);
- oldcs = tsk->cpuset;
- /*
- * After getting 'oldcs' cpuset ptr, be sure still not exiting.
- * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
- * then fail this attach_task(), to avoid breaking top_cpuset.count.
- */
- if (tsk->flags & PF_EXITING) {
- task_unlock(tsk);
- mutex_unlock(&callback_mutex);
- put_task_struct(tsk);
- return -ESRCH;
- }
- atomic_inc(&cs->count);
- rcu_assign_pointer(tsk->cpuset, cs);
- task_unlock(tsk);
-
guarantee_online_cpus(cs, &cpus);
set_cpus_allowed(tsk, cpus);
+ mutex_unlock(&callback_mutex);
from = oldcs->mems_allowed;
to = cs->mems_allowed;
-
- mutex_unlock(&callback_mutex);
-
mm = get_task_mm(tsk);
if (mm) {
mpol_rebind_mm(mm, &to);
@@ -1276,44 +1236,36 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
mmput(mm);
}
- put_task_struct(tsk);
- synchronize_rcu();
- if (atomic_dec_and_test(&oldcs->count))
- check_for_release(oldcs, ppathbuf);
- return 0;
}
/* The various types of files and directories in a cpuset file system */
typedef enum {
- FILE_ROOT,
- FILE_DIR,
FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
- FILE_NOTIFY_ON_RELEASE,
+ FILE_SCHED_LOAD_BALANCE,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
FILE_SPREAD_SLAB,
- FILE_TASKLIST,
} cpuset_filetype_t;
-static ssize_t cpuset_common_file_write(struct file *file,
+static ssize_t cpuset_common_file_write(struct cgroup *cont,
+ struct cftype *cft,
+ struct file *file,
const char __user *userbuf,
size_t nbytes, loff_t *unused_ppos)
{
- struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
- struct cftype *cft = __d_cft(file->f_path.dentry);
+ struct cpuset *cs = cgroup_cs(cont);
cpuset_filetype_t type = cft->private;
char *buffer;
- char *pathbuf = NULL;
int retval = 0;
/* Crude upper limit on largest legitimate cpulist user might write. */
- if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
+ if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
return -E2BIG;
/* +1 for nul-terminator */
@@ -1326,9 +1278,9 @@ static ssize_t cpuset_common_file_write(struct file *file,
}
buffer[nbytes] = 0; /* nul-terminate */
- mutex_lock(&manage_mutex);
+ cgroup_lock();
- if (is_removed(cs)) {
+ if (cgroup_is_removed(cont)) {
retval = -ENODEV;
goto out2;
}
@@ -1346,8 +1298,8 @@ static ssize_t cpuset_common_file_write(struct file *file,
case FILE_MEM_EXCLUSIVE:
retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
break;
- case FILE_NOTIFY_ON_RELEASE:
- retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
+ case FILE_SCHED_LOAD_BALANCE:
+ retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
break;
case FILE_MEMORY_MIGRATE:
retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
@@ -1366,9 +1318,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
cs->mems_generation = cpuset_mems_generation++;
break;
- case FILE_TASKLIST:
- retval = attach_task(cs, buffer, &pathbuf);
- break;
default:
retval = -EINVAL;
goto out2;
@@ -1377,30 +1326,12 @@ static ssize_t cpuset_common_file_write(struct file *file,
if (retval == 0)
retval = nbytes;
out2:
- mutex_unlock(&manage_mutex);
- cpuset_release_agent(pathbuf);
+ cgroup_unlock();
out1:
kfree(buffer);
return retval;
}
-static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- ssize_t retval = 0;
- struct cftype *cft = __d_cft(file->f_path.dentry);
- if (!cft)
- return -ENODEV;
-
- /* special function ? */
- if (cft->write)
- retval = cft->write(file, buf, nbytes, ppos);
- else
- retval = cpuset_common_file_write(file, buf, nbytes, ppos);
-
- return retval;
-}
-
/*
* These ascii lists should be read in a single call, by using a user
* buffer large enough to hold the entire map. If read in smaller
@@ -1435,17 +1366,19 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
return nodelist_scnprintf(page, PAGE_SIZE, mask);
}
-static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup *cont,
+ struct cftype *cft,
+ struct file *file,
+ char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
- struct cftype *cft = __d_cft(file->f_path.dentry);
- struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
+ struct cpuset *cs = cgroup_cs(cont);
cpuset_filetype_t type = cft->private;
char *page;
ssize_t retval = 0;
char *s;
- if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+ if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
return -ENOMEM;
s = page;
@@ -1463,8 +1396,8 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
case FILE_MEM_EXCLUSIVE:
*s++ = is_mem_exclusive(cs) ? '1' : '0';
break;
- case FILE_NOTIFY_ON_RELEASE:
- *s++ = notify_on_release(cs) ? '1' : '0';
+ case FILE_SCHED_LOAD_BALANCE:
+ *s++ = is_sched_load_balance(cs) ? '1' : '0';
break;
case FILE_MEMORY_MIGRATE:
*s++ = is_memory_migrate(cs) ? '1' : '0';
@@ -1493,390 +1426,150 @@ out:
return retval;
}
-static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
-{
- ssize_t retval = 0;
- struct cftype *cft = __d_cft(file->f_path.dentry);
- if (!cft)
- return -ENODEV;
- /* special function ? */
- if (cft->read)
- retval = cft->read(file, buf, nbytes, ppos);
- else
- retval = cpuset_common_file_read(file, buf, nbytes, ppos);
- return retval;
-}
-static int cpuset_file_open(struct inode *inode, struct file *file)
-{
- int err;
- struct cftype *cft;
-
- err = generic_file_open(inode, file);
- if (err)
- return err;
-
- cft = __d_cft(file->f_path.dentry);
- if (!cft)
- return -ENODEV;
- if (cft->open)
- err = cft->open(inode, file);
- else
- err = 0;
-
- return err;
-}
-
-static int cpuset_file_release(struct inode *inode, struct file *file)
-{
- struct cftype *cft = __d_cft(file->f_path.dentry);
- if (cft->release)
- return cft->release(inode, file);
- return 0;
-}
-
-/*
- * cpuset_rename - Only allow simple rename of directories in place.
- */
-static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
-{
- if (!S_ISDIR(old_dentry->d_inode->i_mode))
- return -ENOTDIR;
- if (new_dentry->d_inode)
- return -EEXIST;
- if (old_dir != new_dir)
- return -EIO;
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
-}
-
-static const struct file_operations cpuset_file_operations = {
- .read = cpuset_file_read,
- .write = cpuset_file_write,
- .llseek = generic_file_llseek,
- .open = cpuset_file_open,
- .release = cpuset_file_release,
-};
-
-static const struct inode_operations cpuset_dir_inode_operations = {
- .lookup = simple_lookup,
- .mkdir = cpuset_mkdir,
- .rmdir = cpuset_rmdir,
- .rename = cpuset_rename,
-};
-
-static int cpuset_create_file(struct dentry *dentry, int mode)
-{
- struct inode *inode;
-
- if (!dentry)
- return -ENOENT;
- if (dentry->d_inode)
- return -EEXIST;
-
- inode = cpuset_new_inode(mode);
- if (!inode)
- return -ENOMEM;
-
- if (S_ISDIR(mode)) {
- inode->i_op = &cpuset_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
-
- /* start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
- } else if (S_ISREG(mode)) {
- inode->i_size = 0;
- inode->i_fop = &cpuset_file_operations;
- }
-
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
- return 0;
-}
-
-/*
- * cpuset_create_dir - create a directory for an object.
- * cs: the cpuset we create the directory for.
- * It must have a valid ->parent field
- * And we are going to fill its ->dentry field.
- * name: The name to give to the cpuset directory. Will be copied.
- * mode: mode to set on new directory.
- */
-
-static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
-{
- struct dentry *dentry = NULL;
- struct dentry *parent;
- int error = 0;
-
- parent = cs->parent->dentry;
- dentry = cpuset_get_dentry(parent, name);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- error = cpuset_create_file(dentry, S_IFDIR | mode);
- if (!error) {
- dentry->d_fsdata = cs;
- inc_nlink(parent->d_inode);
- cs->dentry = dentry;
- }
- dput(dentry);
-
- return error;
-}
-
-static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
-{
- struct dentry *dentry;
- int error;
-
- mutex_lock(&dir->d_inode->i_mutex);
- dentry = cpuset_get_dentry(dir, cft->name);
- if (!IS_ERR(dentry)) {
- error = cpuset_create_file(dentry, 0644 | S_IFREG);
- if (!error)
- dentry->d_fsdata = (void *)cft;
- dput(dentry);
- } else
- error = PTR_ERR(dentry);
- mutex_unlock(&dir->d_inode->i_mutex);
- return error;
-}
-
-/*
- * Stuff for reading the 'tasks' file.
- *
- * Reading this file can return large amounts of data if a cpuset has
- * *lots* of attached tasks. So it may need several calls to read(),
- * but we cannot guarantee that the information we produce is correct
- * unless we produce it entirely atomically.
- *
- * Upon tasks file open(), a struct ctr_struct is allocated, that
- * will have a pointer to an array (also allocated here). The struct
- * ctr_struct * is stored in file->private_data. Its resources will
- * be freed by release() when the file is closed. The array is used
- * to sprintf the PIDs and then used by read().
- */
-
-/* cpusets_tasks_read array */
-
-struct ctr_struct {
- char *buf;
- int bufsz;
-};
-
-/*
- * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
- * Return actual number of pids loaded. No need to task_lock(p)
- * when reading out p->cpuset, as we don't really care if it changes
- * on the next cycle, and we are not going to try to dereference it.
- */
-static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
-{
- int n = 0;
- struct task_struct *g, *p;
-
- read_lock(&tasklist_lock);
-
- do_each_thread(g, p) {
- if (p->cpuset == cs) {
- if (unlikely(n == npids))
- goto array_full;
- pidarray[n++] = p->pid;
- }
- } while_each_thread(g, p);
-
-array_full:
- read_unlock(&tasklist_lock);
- return n;
-}
-
-static int cmppid(const void *a, const void *b)
-{
- return *(pid_t *)a - *(pid_t *)b;
-}
-
-/*
- * Convert array 'a' of 'npids' pid_t's to a string of newline separated
- * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
- * count 'cnt' of how many chars would be written if buf were large enough.
- */
-static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
-{
- int cnt = 0;
- int i;
-
- for (i = 0; i < npids; i++)
- cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
- return cnt;
-}
-
-/*
- * Handle an open on 'tasks' file. Prepare a buffer listing the
- * process id's of tasks currently attached to the cpuset being opened.
- *
- * Does not require any specific cpuset mutexes, and does not take any.
- */
-static int cpuset_tasks_open(struct inode *unused, struct file *file)
-{
- struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
- struct ctr_struct *ctr;
- pid_t *pidarray;
- int npids;
- char c;
-
- if (!(file->f_mode & FMODE_READ))
- return 0;
-
- ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
- if (!ctr)
- goto err0;
-
- /*
- * If cpuset gets more users after we read count, we won't have
- * enough space - tough. This race is indistinguishable to the
- * caller from the case that the additional cpuset users didn't
- * show up until sometime later on.
- */
- npids = atomic_read(&cs->count);
- pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
- if (!pidarray)
- goto err1;
-
- npids = pid_array_load(pidarray, npids, cs);
- sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
-
- /* Call pid_array_to_buf() twice, first just to get bufsz */
- ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
- ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
- if (!ctr->buf)
- goto err2;
- ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
-
- kfree(pidarray);
- file->private_data = ctr;
- return 0;
-
-err2:
- kfree(pidarray);
-err1:
- kfree(ctr);
-err0:
- return -ENOMEM;
-}
-
-static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- struct ctr_struct *ctr = file->private_data;
-
- return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
-}
-
-static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
-{
- struct ctr_struct *ctr;
-
- if (file->f_mode & FMODE_READ) {
- ctr = file->private_data;
- kfree(ctr->buf);
- kfree(ctr);
- }
- return 0;
-}
/*
* for the common functions, 'private' gives the type of file
*/
-static struct cftype cft_tasks = {
- .name = "tasks",
- .open = cpuset_tasks_open,
- .read = cpuset_tasks_read,
- .release = cpuset_tasks_release,
- .private = FILE_TASKLIST,
-};
-
static struct cftype cft_cpus = {
.name = "cpus",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_CPULIST,
};
static struct cftype cft_mems = {
.name = "mems",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_MEMLIST,
};
static struct cftype cft_cpu_exclusive = {
.name = "cpu_exclusive",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_CPU_EXCLUSIVE,
};
static struct cftype cft_mem_exclusive = {
.name = "mem_exclusive",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_MEM_EXCLUSIVE,
};
-static struct cftype cft_notify_on_release = {
- .name = "notify_on_release",
- .private = FILE_NOTIFY_ON_RELEASE,
+static struct cftype cft_sched_load_balance = {
+ .name = "sched_load_balance",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
+ .private = FILE_SCHED_LOAD_BALANCE,
};
static struct cftype cft_memory_migrate = {
.name = "memory_migrate",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_MEMORY_MIGRATE,
};
static struct cftype cft_memory_pressure_enabled = {
.name = "memory_pressure_enabled",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_MEMORY_PRESSURE_ENABLED,
};
static struct cftype cft_memory_pressure = {
.name = "memory_pressure",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_MEMORY_PRESSURE,
};
static struct cftype cft_spread_page = {
.name = "memory_spread_page",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_SPREAD_PAGE,
};
static struct cftype cft_spread_slab = {
.name = "memory_spread_slab",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
.private = FILE_SPREAD_SLAB,
};
-static int cpuset_populate_dir(struct dentry *cs_dentry)
+static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
int err;
- if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
- return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
return err;
- if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
+ if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
return err;
+ /* memory_pressure_enabled is in root cpuset only */
+ if (err == 0 && !cont->parent)
+ err = cgroup_add_file(cont, ss,
+ &cft_memory_pressure_enabled);
return 0;
}
/*
+ * post_clone() is called at the end of cgroup_clone().
+ * 'cgroup' was just created automatically as a result of
+ * a cgroup_clone(), and the current task is about to
+ * be moved into 'cgroup'.
+ *
+ * Currently we refuse to set up the cgroup - thereby
+ * refusing to let the task enter it, and as a result refusing
+ * the sys_unshare() or clone() which initiated it - if any
+ * sibling cpusets have exclusive cpus or mem.
+ *
+ * If this becomes a problem for some users who wish to
+ * allow that scenario, then cpuset_post_clone() could be
+ * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
+ * (and likewise for mems) to the new cgroup.
+ */
+static void cpuset_post_clone(struct cgroup_subsys *ss,
+ struct cgroup *cgroup)
+{
+ struct cgroup *parent, *child;
+ struct cpuset *cs, *parent_cs;
+
+ parent = cgroup->parent;
+ list_for_each_entry(child, &parent->children, sibling) {
+ cs = cgroup_cs(child);
+ if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
+ return;
+ }
+ cs = cgroup_cs(cgroup);
+ parent_cs = cgroup_cs(parent);
+
+ cs->mems_allowed = parent_cs->mems_allowed;
+ cs->cpus_allowed = parent_cs->cpus_allowed;
+ return;
+}
+
+/*
* cpuset_create - create a cpuset
* parent: cpuset that will be parent of the new cpuset.
* name: name of the new cpuset. Will be strcpy'ed.
@@ -1885,124 +1578,77 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
* Must be called with the mutex on the parent inode held
*/
-static long cpuset_create(struct cpuset *parent, const char *name, int mode)
+static struct cgroup_subsys_state *cpuset_create(
+ struct cgroup_subsys *ss,
+ struct cgroup *cont)
{
struct cpuset *cs;
- int err;
+ struct cpuset *parent;
+ if (!cont->parent) {
+ /* This is early initialization for the top cgroup */
+ top_cpuset.mems_generation = cpuset_mems_generation++;
+ return &top_cpuset.css;
+ }
+ parent = cgroup_cs(cont->parent);
cs = kmalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- mutex_lock(&manage_mutex);
cpuset_update_task_memory_state();
cs->flags = 0;
- if (notify_on_release(parent))
- set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
set_bit(CS_SPREAD_SLAB, &cs->flags);
+ set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cs->cpus_allowed = CPU_MASK_NONE;
cs->mems_allowed = NODE_MASK_NONE;
- atomic_set(&cs->count, 0);
- INIT_LIST_HEAD(&cs->sibling);
- INIT_LIST_HEAD(&cs->children);
cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
cs->parent = parent;
-
- mutex_lock(&callback_mutex);
- list_add(&cs->sibling, &cs->parent->children);
number_of_cpusets++;
- mutex_unlock(&callback_mutex);
-
- err = cpuset_create_dir(cs, name, mode);
- if (err < 0)
- goto err;
-
- /*
- * Release manage_mutex before cpuset_populate_dir() because it
- * will down() this new directory's i_mutex and if we race with
- * another mkdir, we might deadlock.
- */
- mutex_unlock(&manage_mutex);
-
- err = cpuset_populate_dir(cs->dentry);
- /* If err < 0, we have a half-filled directory - oh well ;) */
- return 0;
-err:
- list_del(&cs->sibling);
- mutex_unlock(&manage_mutex);
- kfree(cs);
- return err;
-}
-
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
- struct cpuset *c_parent = dentry->d_parent->d_fsdata;
-
- /* the vfs holds inode->i_mutex already */
- return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
+ return &cs->css ;
}
/*
* Locking note on the strange update_flag() call below:
*
- * If the cpuset being removed is marked cpu_exclusive, then simulate
- * turning cpu_exclusive off, which will call update_cpu_domains().
- * The lock_cpu_hotplug() call in update_cpu_domains() must not be
- * made while holding callback_mutex. Elsewhere the kernel nests
- * callback_mutex inside lock_cpu_hotplug() calls. So the reverse
- * nesting would risk an ABBA deadlock.
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains(). The lock_cpu_hotplug()
+ * call in rebuild_sched_domains() must not be made while holding
+ * callback_mutex. Elsewhere the kernel nests callback_mutex inside
+ * lock_cpu_hotplug() calls. So the reverse nesting would risk an
+ * ABBA deadlock.
*/
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
+static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
- struct cpuset *cs = dentry->d_fsdata;
- struct dentry *d;
- struct cpuset *parent;
- char *pathbuf = NULL;
-
- /* the vfs holds both inode->i_mutex already */
+ struct cpuset *cs = cgroup_cs(cont);
- mutex_lock(&manage_mutex);
cpuset_update_task_memory_state();
- if (atomic_read(&cs->count) > 0) {
- mutex_unlock(&manage_mutex);
- return -EBUSY;
- }
- if (!list_empty(&cs->children)) {
- mutex_unlock(&manage_mutex);
- return -EBUSY;
- }
- if (is_cpu_exclusive(cs)) {
- int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
- if (retval < 0) {
- mutex_unlock(&manage_mutex);
- return retval;
- }
- }
- parent = cs->parent;
- mutex_lock(&callback_mutex);
- set_bit(CS_REMOVED, &cs->flags);
- list_del(&cs->sibling); /* delete my sibling from parent->children */
- spin_lock(&cs->dentry->d_lock);
- d = dget(cs->dentry);
- cs->dentry = NULL;
- spin_unlock(&d->d_lock);
- cpuset_d_remove_dir(d);
- dput(d);
+
+ if (is_sched_load_balance(cs))
+ update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+
number_of_cpusets--;
- mutex_unlock(&callback_mutex);
- if (list_empty(&parent->children))
- check_for_release(parent, &pathbuf);
- mutex_unlock(&manage_mutex);
- cpuset_release_agent(pathbuf);
- return 0;
+ kfree(cs);
}
+struct cgroup_subsys cpuset_subsys = {
+ .name = "cpuset",
+ .create = cpuset_create,
+ .destroy = cpuset_destroy,
+ .can_attach = cpuset_can_attach,
+ .attach = cpuset_attach,
+ .populate = cpuset_populate,
+ .post_clone = cpuset_post_clone,
+ .subsys_id = cpuset_subsys_id,
+ .early_init = 1,
+};
+
/*
* cpuset_init_early - just enough so that the calls to
* cpuset_update_task_memory_state() in early init code
@@ -2011,13 +1657,11 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
int __init cpuset_init_early(void)
{
- struct task_struct *tsk = current;
-
- tsk->cpuset = &top_cpuset;
- tsk->cpuset->mems_generation = cpuset_mems_generation++;
+ top_cpuset.mems_generation = cpuset_mems_generation++;
return 0;
}
+
/**
* cpuset_init - initialize cpusets at system boot
*
@@ -2026,39 +1670,21 @@ int __init cpuset_init_early(void)
int __init cpuset_init(void)
{
- struct dentry *root;
- int err;
+ int err = 0;
top_cpuset.cpus_allowed = CPU_MASK_ALL;
top_cpuset.mems_allowed = NODE_MASK_ALL;
fmeter_init(&top_cpuset.fmeter);
top_cpuset.mems_generation = cpuset_mems_generation++;
-
- init_task.cpuset = &top_cpuset;
+ set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
err = register_filesystem(&cpuset_fs_type);
if (err < 0)
- goto out;
- cpuset_mount = kern_mount(&cpuset_fs_type);
- if (IS_ERR(cpuset_mount)) {
- printk(KERN_ERR "cpuset: could not mount!\n");
- err = PTR_ERR(cpuset_mount);
- cpuset_mount = NULL;
- goto out;
- }
- root = cpuset_mount->mnt_sb->s_root;
- root->d_fsdata = &top_cpuset;
- inc_nlink(root->d_inode);
- top_cpuset.dentry = root;
- root->d_inode->i_op = &cpuset_dir_inode_operations;
+ return err;
+
number_of_cpusets = 1;
- err = cpuset_populate_dir(root);
- /* memory_pressure_enabled is in root cpuset only */
- if (err == 0)
- err = cpuset_add_file(root, &cft_memory_pressure_enabled);
-out:
- return err;
+ return 0;
}
/*
@@ -2084,10 +1710,12 @@ out:
static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
{
+ struct cgroup *cont;
struct cpuset *c;
/* Each of our child cpusets mems must be online */
- list_for_each_entry(c, &cur->children, sibling) {
+ list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+ c = cgroup_cs(cont);
guarantee_online_cpus_mems_in_subtree(c);
if (!cpus_empty(c->cpus_allowed))
guarantee_online_cpus(c, &c->cpus_allowed);
@@ -2098,8 +1726,9 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
- * cpu_online_map and node_online_map. Force the top cpuset to track
- * whats online after any CPU or memory node hotplug or unplug event.
+ * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
+ * track what's online after any CPU or memory node hotplug or unplug
+ * event.
*
* To ensure that we don't remove a CPU or node from the top cpuset
* that is currently in use by a child cpuset (which would violate
@@ -2114,15 +1743,15 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
static void common_cpu_mem_hotplug_unplug(void)
{
- mutex_lock(&manage_mutex);
+ cgroup_lock();
mutex_lock(&callback_mutex);
guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
- top_cpuset.mems_allowed = node_online_map;
+ top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
mutex_unlock(&callback_mutex);
- mutex_unlock(&manage_mutex);
+ cgroup_unlock();
}
/*
@@ -2135,8 +1764,8 @@ static void common_cpu_mem_hotplug_unplug(void)
* cpu_online_map on each CPU hotplug (cpuhp) event.
*/
-static int cpuset_handle_cpuhp(struct notifier_block *nb,
- unsigned long phase, void *cpu)
+static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
+ unsigned long phase, void *unused_cpu)
{
if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
return NOTIFY_DONE;
@@ -2147,8 +1776,9 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Keep top_cpuset.mems_allowed tracking node_online_map.
- * Call this routine anytime after you change node_online_map.
+ * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
+ * Call this routine anytime after you change
+ * node_states[N_HIGH_MEMORY].
* See also the previous routine cpuset_handle_cpuhp().
*/
@@ -2167,115 +1797,13 @@ void cpuset_track_online_nodes(void)
void __init cpuset_init_smp(void)
{
top_cpuset.cpus_allowed = cpu_online_map;
- top_cpuset.mems_allowed = node_online_map;
+ top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
hotcpu_notifier(cpuset_handle_cpuhp, 0);
}
/**
- * cpuset_fork - attach newly forked task to its parents cpuset.
- * @tsk: pointer to task_struct of forking parent process.
- *
- * Description: A task inherits its parent's cpuset at fork().
- *
- * A pointer to the shared cpuset was automatically copied in fork.c
- * by dup_task_struct(). However, we ignore that copy, since it was
- * not made under the protection of task_lock(), so might no longer be
- * a valid cpuset pointer. attach_task() might have already changed
- * current->cpuset, allowing the previously referenced cpuset to
- * be removed and freed. Instead, we task_lock(current) and copy
- * its present value of current->cpuset for our freshly forked child.
- *
- * At the point that cpuset_fork() is called, 'current' is the parent
- * task, and the passed argument 'child' points to the child task.
- **/
-
-void cpuset_fork(struct task_struct *child)
-{
- task_lock(current);
- child->cpuset = current->cpuset;
- atomic_inc(&child->cpuset->count);
- task_unlock(current);
-}
-
-/**
- * cpuset_exit - detach cpuset from exiting task
- * @tsk: pointer to task_struct of exiting process
- *
- * Description: Detach cpuset from @tsk and release it.
- *
- * Note that cpusets marked notify_on_release force every task in
- * them to take the global manage_mutex mutex when exiting.
- * This could impact scaling on very large systems. Be reluctant to
- * use notify_on_release cpusets where very high task exit scaling
- * is required on large systems.
- *
- * Don't even think about derefencing 'cs' after the cpuset use count
- * goes to zero, except inside a critical section guarded by manage_mutex
- * or callback_mutex. Otherwise a zero cpuset use count is a license to
- * any other task to nuke the cpuset immediately, via cpuset_rmdir().
- *
- * This routine has to take manage_mutex, not callback_mutex, because
- * it is holding that mutex while calling check_for_release(),
- * which calls kmalloc(), so can't be called holding callback_mutex().
- *
- * the_top_cpuset_hack:
- *
- * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
- *
- * Don't leave a task unable to allocate memory, as that is an
- * accident waiting to happen should someone add a callout in
- * do_exit() after the cpuset_exit() call that might allocate.
- * If a task tries to allocate memory with an invalid cpuset,
- * it will oops in cpuset_update_task_memory_state().
- *
- * We call cpuset_exit() while the task is still competent to
- * handle notify_on_release(), then leave the task attached to
- * the root cpuset (top_cpuset) for the remainder of its exit.
- *
- * To do this properly, we would increment the reference count on
- * top_cpuset, and near the very end of the kernel/exit.c do_exit()
- * code we would add a second cpuset function call, to drop that
- * reference. This would just create an unnecessary hot spot on
- * the top_cpuset reference count, to no avail.
- *
- * Normally, holding a reference to a cpuset without bumping its
- * count is unsafe. The cpuset could go away, or someone could
- * attach us to a different cpuset, decrementing the count on
- * the first cpuset that we never incremented. But in this case,
- * top_cpuset isn't going away, and either task has PF_EXITING set,
- * which wards off any attach_task() attempts, or task is a failed
- * fork, never visible to attach_task.
- *
- * Another way to do this would be to set the cpuset pointer
- * to NULL here, and check in cpuset_update_task_memory_state()
- * for a NULL pointer. This hack avoids that NULL check, for no
- * cost (other than this way too long comment ;).
- **/
-
-void cpuset_exit(struct task_struct *tsk)
-{
- struct cpuset *cs;
-
- task_lock(current);
- cs = tsk->cpuset;
- tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */
- task_unlock(current);
-
- if (notify_on_release(cs)) {
- char *pathbuf = NULL;
-
- mutex_lock(&manage_mutex);
- if (atomic_dec_and_test(&cs->count))
- check_for_release(cs, &pathbuf);
- mutex_unlock(&manage_mutex);
- cpuset_release_agent(pathbuf);
- } else {
- atomic_dec(&cs->count);
- }
-}
-/**
* cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
* @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
*
@@ -2290,10 +1818,23 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
cpumask_t mask;
mutex_lock(&callback_mutex);
+ mask = cpuset_cpus_allowed_locked(tsk);
+ mutex_unlock(&callback_mutex);
+
+ return mask;
+}
+
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
+ * Must be called with callback_mutex held.
+ **/
+cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+{
+ cpumask_t mask;
+
task_lock(tsk);
- guarantee_online_cpus(tsk->cpuset, &mask);
+ guarantee_online_cpus(task_cs(tsk), &mask);
task_unlock(tsk);
- mutex_unlock(&callback_mutex);
return mask;
}
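
A minimal sketch of why the split above helps: a caller that already holds
callback_mutex for several queries can use the _locked variant directly
instead of re-taking the mutex per call. Illustrative only --
sample_two_tasks() is a made-up name, and callback_mutex is local to
kernel/cpuset.c, so such a helper would have to live in that file.

static void sample_two_tasks(struct task_struct *a, struct task_struct *b,
			     cpumask_t *ma, cpumask_t *mb)
{
	/* One lock round-trip covers both queries. */
	mutex_lock(&callback_mutex);
	*ma = cpuset_cpus_allowed_locked(a);
	*mb = cpuset_cpus_allowed_locked(b);
	mutex_unlock(&callback_mutex);
}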
@@ -2309,7 +1850,7 @@ void cpuset_init_current_mems_allowed(void)
*
* Description: Returns the nodemask_t mems_allowed of the cpuset
* attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of node_online_map, even if this means going outside the
+ * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
* tasks cpuset.
**/
@@ -2319,7 +1860,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
mutex_lock(&callback_mutex);
task_lock(tsk);
- guarantee_online_mems(tsk->cpuset, &mask);
+ guarantee_online_mems(task_cs(tsk), &mask);
task_unlock(tsk);
mutex_unlock(&callback_mutex);
@@ -2450,7 +1991,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
mutex_lock(&callback_mutex);
task_lock(current);
- cs = nearest_exclusive_ancestor(current->cpuset);
+ cs = nearest_exclusive_ancestor(task_cs(current));
task_unlock(current);
allowed = node_isset(node, cs->mems_allowed);
@@ -2491,12 +2032,12 @@ int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
node = zone_to_nid(z);
if (node_isset(node, current->mems_allowed))
return 1;
- /*
- * Allow tasks that have access to memory reserves because they have
- * been OOM killed to get memory anywhere.
- */
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
- return 1;
+ /*
+ * Allow tasks that have access to memory reserves because they have
+ * been OOM killed to get memory anywhere.
+ */
+ if (unlikely(test_thread_flag(TIF_MEMDIE)))
+ return 1;
return 0;
}
@@ -2566,41 +2107,20 @@ int cpuset_mem_spread_node(void)
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
/**
- * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
- * @p: pointer to task_struct of some other task.
- *
- * Description: Return true if the nearest mem_exclusive ancestor
- * cpusets of tasks @p and current overlap. Used by oom killer to
- * determine if task @p's memory usage might impact the memory
- * available to the current task.
- *
- * Call while holding callback_mutex.
+ * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
+ * @tsk1: pointer to task_struct of some task.
+ * @tsk2: pointer to task_struct of some other task.
+ *
+ * Description: Return true if @tsk1's mems_allowed intersects the
+ * mems_allowed of @tsk2. Used by the OOM killer to determine if
+ * one of the task's memory usage might impact the memory available
+ * to the other.
**/
-int cpuset_excl_nodes_overlap(const struct task_struct *p)
+int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+ const struct task_struct *tsk2)
{
- const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
- int overlap = 1; /* do cpusets overlap? */
-
- task_lock(current);
- if (current->flags & PF_EXITING) {
- task_unlock(current);
- goto done;
- }
- cs1 = nearest_exclusive_ancestor(current->cpuset);
- task_unlock(current);
-
- task_lock((struct task_struct *)p);
- if (p->flags & PF_EXITING) {
- task_unlock((struct task_struct *)p);
- goto done;
- }
- cs2 = nearest_exclusive_ancestor(p->cpuset);
- task_unlock((struct task_struct *)p);
-
- overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
-done:
- return overlap;
+ return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
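
A hedged sketch, not part of this patch, of the kind of check the new helper
enables on the OOM path: killing a candidate only helps the allocating task
if their allowed memory nodes overlap. can_kill_relieve_current() is a
made-up name.

static int can_kill_relieve_current(struct task_struct *candidate)
{
	/* No node overlap: freeing the candidate's memory would not help us. */
	return cpuset_mems_allowed_intersects(current, candidate);
}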
/*
@@ -2631,14 +2151,12 @@ int cpuset_memory_pressure_enabled __read_mostly;
void __cpuset_memory_pressure_bump(void)
{
- struct cpuset *cs;
-
task_lock(current);
- cs = current->cpuset;
- fmeter_markevent(&cs->fmeter);
+ fmeter_markevent(&task_cs(current)->fmeter);
task_unlock(current);
}
+#ifdef CONFIG_PROC_PID_CPUSET
/*
* proc_cpuset_show()
* - Print tasks cpuset path into seq_file.
@@ -2650,11 +2168,12 @@ void __cpuset_memory_pressure_bump(void)
* the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks
* cpuset to top_cpuset.
*/
-static int proc_cpuset_show(struct seq_file *m, void *v)
+static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
struct pid *pid;
struct task_struct *tsk;
char *buf;
+ struct cgroup_subsys_state *css;
int retval;
retval = -ENOMEM;
@@ -2669,15 +2188,15 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
goto out_free;
retval = -EINVAL;
- mutex_lock(&manage_mutex);
-
- retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
+ cgroup_lock();
+ css = task_subsys_state(tsk, cpuset_subsys_id);
+ retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
if (retval < 0)
goto out_unlock;
seq_puts(m, buf);
seq_putc(m, '\n');
out_unlock:
- mutex_unlock(&manage_mutex);
+ cgroup_unlock();
put_task_struct(tsk);
out_free:
kfree(buf);
@@ -2697,6 +2216,7 @@ const struct file_operations proc_cpuset_operations = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif /* CONFIG_PROC_PID_CPUSET */
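
The userspace view of proc_cpuset_show() is unchanged by the switch to
cgroup_path(): reading /proc/<pid>/cpuset still yields the cpuset path.
A hedged userspace sketch, assuming a kernel built with
CONFIG_PROC_PID_CPUSET=y:

#include <stdio.h>

int main(void)
{
	char path[256];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (!f)
		return 1;
	if (fgets(path, sizeof(path), f))
		printf("cpuset path: %s", path);	/* "/" for the root cpuset */
	fclose(f);
	return 0;
}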
/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 81e697829633..10e43fd8b721 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -115,11 +115,17 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
tmp += timespec_to_ns(&ts);
d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
+ tmp = (s64)d->cpu_scaled_run_real_total;
+ cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+ tmp += timespec_to_ns(&ts);
+ d->cpu_scaled_run_real_total =
+ (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+
/*
* No locking available for sched_info (and too expensive to add one)
* Mitigate by taking snapshot of values
*/
- t1 = tsk->sched_info.pcnt;
+ t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
t3 = tsk->sched_info.cpu_time;
diff --git a/kernel/die_notifier.c b/kernel/die_notifier.c
deleted file mode 100644
index 0d98827887a7..000000000000
--- a/kernel/die_notifier.c
+++ /dev/null
@@ -1,38 +0,0 @@
-
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/vmalloc.h>
-#include <linux/kdebug.h>
-
-
-static ATOMIC_NOTIFIER_HEAD(die_chain);
-
-int notify_die(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig,
-
- };
-
- return atomic_notifier_call_chain(&die_chain, val, &args);
-}
-
-int register_die_notifier(struct notifier_block *nb)
-{
- vmalloc_sync_all();
- return atomic_notifier_chain_register(&die_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_die_notifier);
-
-int unregister_die_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&die_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_die_notifier);
-
-
diff --git a/kernel/dma.c b/kernel/dma.c
index 937b13ca33ba..6a82bb716dac 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -20,7 +20,7 @@
#include <asm/dma.h>
#include <asm/system.h>
-
+
/* A note on resource allocation:
*
@@ -95,7 +95,7 @@ void free_dma(unsigned int dmanr)
if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
return;
- }
+ }
} /* free_dma */
@@ -121,8 +121,8 @@ static int proc_dma_show(struct seq_file *m, void *v)
for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
if (dma_chan_busy[i].lock) {
- seq_printf(m, "%2d: %s\n", i,
- dma_chan_busy[i].device_id);
+ seq_printf(m, "%2d: %s\n", i,
+ dma_chan_busy[i].device_id);
}
}
return 0;
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 3c2eaea66b1e..a9e6bad9f706 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -57,7 +57,7 @@ lookup_exec_domain(u_long personality)
{
struct exec_domain * ep;
u_long pers = personality(personality);
-
+
read_lock(&exec_domains_lock);
for (ep = exec_domains; ep; ep = ep->next) {
if (pers >= ep->pers_low && pers <= ep->pers_high)
diff --git a/kernel/exit.c b/kernel/exit.c
index 993369ee94d1..f1aec27f1df0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,7 +31,7 @@
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
-#include <linux/cpuset.h>
+#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
@@ -44,7 +44,6 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -93,10 +92,9 @@ static void __exit_signal(struct task_struct *tsk)
* If there is any task waiting for the group exit
* then notify it:
*/
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
+ if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
+
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
@@ -111,6 +109,7 @@ static void __exit_signal(struct task_struct *tsk)
*/
sig->utime = cputime_add(sig->utime, tsk->utime);
sig->stime = cputime_add(sig->stime, tsk->stime);
+ sig->gtime = cputime_add(sig->gtime, tsk->gtime);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
@@ -149,6 +148,7 @@ void release_task(struct task_struct * p)
int zap_leader;
repeat:
atomic_dec(&p->user->processes);
+ proc_flush_task(p);
write_lock_irq(&tasklist_lock);
ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -176,7 +176,6 @@ repeat:
}
write_unlock_irq(&tasklist_lock);
- proc_flush_task(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
@@ -222,7 +221,7 @@ static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignor
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
if (p == ignored_task
|| p->exit_state
- || is_init(p->real_parent))
+ || is_global_init(p->real_parent))
continue;
if (task_pgrp(p->real_parent) != pgrp &&
task_session(p->real_parent) == task_session(p)) {
@@ -300,14 +299,14 @@ void __set_special_pids(pid_t session, pid_t pgrp)
{
struct task_struct *curr = current->group_leader;
- if (process_session(curr) != session) {
+ if (task_session_nr(curr) != session) {
detach_pid(curr, PIDTYPE_SID);
- set_signal_session(curr->signal, session);
+ set_task_session(curr, session);
attach_pid(curr, PIDTYPE_SID, find_pid(session));
}
- if (process_group(curr) != pgrp) {
+ if (task_pgrp_nr(curr) != pgrp) {
detach_pid(curr, PIDTYPE_PGID);
- curr->signal->pgrp = pgrp;
+ set_task_pgrp(curr, pgrp);
attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
}
}
@@ -401,11 +400,12 @@ void daemonize(const char *name, ...)
current->fs = fs;
atomic_inc(&fs->count);
- exit_task_namespaces(current);
- current->nsproxy = init_task.nsproxy;
- get_task_namespaces(current);
+ if (current->nsproxy != init_task.nsproxy) {
+ get_nsproxy(init_task.nsproxy);
+ switch_task_namespaces(current, init_task.nsproxy);
+ }
- exit_files(current);
+ exit_files(current);
current->files = init_task.files;
atomic_inc(&current->files->count);
@@ -493,7 +493,7 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
}
EXPORT_SYMBOL(reset_files_struct);
-static inline void __exit_files(struct task_struct *tsk)
+static void __exit_files(struct task_struct *tsk)
{
struct files_struct * files = tsk->files;
@@ -510,7 +510,7 @@ void exit_files(struct task_struct *tsk)
__exit_files(tsk);
}
-static inline void __put_fs_struct(struct fs_struct *fs)
+static void __put_fs_struct(struct fs_struct *fs)
{
/* No need to hold fs->lock if we are killing it */
if (atomic_dec_and_test(&fs->count)) {
@@ -531,7 +531,7 @@ void put_fs_struct(struct fs_struct *fs)
__put_fs_struct(fs);
}
-static inline void __exit_fs(struct task_struct *tsk)
+static void __exit_fs(struct task_struct *tsk)
{
struct fs_struct * fs = tsk->fs;
@@ -592,17 +592,6 @@ static void exit_mm(struct task_struct * tsk)
mmput(mm);
}
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
-{
- /*
- * Make sure we're not reparenting to ourselves and that
- * the parent is not a zombie.
- */
- BUG_ON(p == reaper || reaper->exit_state);
- p->real_parent = reaper;
-}
-
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
@@ -677,19 +666,22 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
* the child reaper process (ie "init") in our pid
* space.
*/
-static void
-forget_original_parent(struct task_struct *father, struct list_head *to_release)
+static void forget_original_parent(struct task_struct *father)
{
- struct task_struct *p, *reaper = father;
- struct list_head *_p, *_n;
+ struct task_struct *p, *n, *reaper = father;
+ struct list_head ptrace_dead;
+
+ INIT_LIST_HEAD(&ptrace_dead);
+
+ write_lock_irq(&tasklist_lock);
do {
reaper = next_thread(reaper);
if (reaper == father) {
- reaper = child_reaper(father);
+ reaper = task_child_reaper(father);
break;
}
- } while (reaper->exit_state);
+ } while (reaper->flags & PF_EXITING);
/*
* There are only two places where our children can be:
@@ -699,9 +691,8 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
*
* Search them and reparent children.
*/
- list_for_each_safe(_p, _n, &father->children) {
+ list_for_each_entry_safe(p, n, &father->children, sibling) {
int ptrace;
- p = list_entry(_p, struct task_struct, sibling);
ptrace = p->ptrace;
@@ -710,7 +701,7 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
if (father == p->real_parent) {
/* reparent with a reaper, real father it's us */
- choose_new_parent(p, reaper);
+ p->real_parent = reaper;
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
@@ -727,13 +718,23 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release)
* while it was being traced by us, to be able to see it in wait4.
*/
if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
- list_add(&p->ptrace_list, to_release);
+ list_add(&p->ptrace_list, &ptrace_dead);
}
- list_for_each_safe(_p, _n, &father->ptrace_children) {
- p = list_entry(_p, struct task_struct, ptrace_list);
- choose_new_parent(p, reaper);
+
+ list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
+ p->real_parent = reaper;
reparent_thread(p, father, 1);
}
+
+ write_unlock_irq(&tasklist_lock);
+ BUG_ON(!list_empty(&father->children));
+ BUG_ON(!list_empty(&father->ptrace_children));
+
+ list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
+ list_del_init(&p->ptrace_list);
+ release_task(p);
+ }
+
}
/*
@@ -744,7 +745,6 @@ static void exit_notify(struct task_struct *tsk)
{
int state;
struct task_struct *t;
- struct list_head ptrace_dead, *_p, *_n;
struct pid *pgrp;
if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
@@ -758,17 +758,13 @@ static void exit_notify(struct task_struct *tsk)
* Now we'll wake all the threads in the group just to make
* sure someone gets all the pending signals.
*/
- read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
if (!signal_pending(t) && !(t->flags & PF_EXITING))
recalc_sigpending_and_wake(t);
spin_unlock_irq(&tsk->sighand->siglock);
- read_unlock(&tasklist_lock);
}
- write_lock_irq(&tasklist_lock);
-
/*
* This does two things:
*
@@ -777,12 +773,10 @@ static void exit_notify(struct task_struct *tsk)
* as a result of our exiting, and if they have any stopped
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
+ forget_original_parent(tsk);
+ exit_task_namespaces(tsk);
- INIT_LIST_HEAD(&ptrace_dead);
- forget_original_parent(tsk, &ptrace_dead);
- BUG_ON(!list_empty(&tsk->children));
- BUG_ON(!list_empty(&tsk->ptrace_children));
-
+ write_lock_irq(&tasklist_lock);
/*
* Check to see if any process groups have become orphaned
* as a result of our exiting, and if they have any stopped
@@ -792,9 +786,8 @@ static void exit_notify(struct task_struct *tsk)
* and we were the only connection outside, so our pgrp
* is about to become orphaned.
*/
-
t = tsk->real_parent;
-
+
pgrp = task_pgrp(tsk);
if ((task_pgrp(t) != pgrp) &&
(task_session(t) == task_session(tsk)) &&
@@ -807,7 +800,7 @@ static void exit_notify(struct task_struct *tsk)
/* Let father know we died
*
* Thread signals are configurable, but you aren't going to use
- * that to send signals to arbitary processes.
+ * that to send signals to arbitrary processes.
* That stops right now.
*
* If the parent exec id doesn't match the exec id we saved
@@ -841,13 +834,12 @@ static void exit_notify(struct task_struct *tsk)
state = EXIT_DEAD;
tsk->exit_state = state;
- write_unlock_irq(&tasklist_lock);
+ if (thread_group_leader(tsk) &&
+ tsk->signal->notify_count < 0 &&
+ tsk->signal->group_exit_task)
+ wake_up_process(tsk->signal->group_exit_task);
- list_for_each_safe(_p, _n, &ptrace_dead) {
- list_del_init(_p);
- t = list_entry(_p, struct task_struct, ptrace_list);
- release_task(t);
- }
+ write_unlock_irq(&tasklist_lock);
/* If the process is dead, release it - nobody will wait for it */
if (state == EXIT_DEAD)
@@ -882,6 +874,39 @@ static void check_stack_usage(void)
static inline void check_stack_usage(void) {}
#endif
+static inline void exit_child_reaper(struct task_struct *tsk)
+{
+ if (likely(tsk->group_leader != task_child_reaper(tsk)))
+ return;
+
+ if (tsk->nsproxy->pid_ns == &init_pid_ns)
+ panic("Attempted to kill init!");
+
+ /*
+ * @tsk is the last thread in the 'cgroup-init' and is exiting.
+ * Terminate all remaining processes in the namespace and reap them
+ * before exiting @tsk.
+ *
+ * Note that @tsk (the last thread of cgroup-init) may not necessarily
+ * be the child-reaper (i.e. the main thread of cgroup-init) of the
+ * namespace, i.e. the child_reaper may have already exited.
+ *
+ * Even after a child_reaper exits, we let it inherit orphaned children,
+ * because pid_ns->child_reaper remains valid as long as there is
+ * at least one living sub-thread in the cgroup-init.
+ *
+ * This living sub-thread of the cgroup-init will be notified when
+ * a child inherited by the 'child-reaper' exits (do_notify_parent()
+ * uses __group_send_sig_info()). Further, when reaping child processes,
+ * do_wait() iterates over the children of all living sub-threads.
+ *
+ * That is, even though the 'child_reaper' thread is listed as the parent
+ * of the orphaned children, any living sub-thread in the cgroup-init can
+ * perform the role of the child_reaper.
+ */
+ zap_pid_ns_processes(tsk->nsproxy->pid_ns);
+}
+
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
@@ -895,13 +920,6 @@ fastcall NORET_TYPE void do_exit(long code)
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk == child_reaper(tsk))) {
- if (tsk->nsproxy->pid_ns != &init_pid_ns)
- tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
- else
- panic("Attempted to kill init!");
- }
-
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
@@ -931,17 +949,17 @@ fastcall NORET_TYPE void do_exit(long code)
schedule();
}
+ tsk->flags |= PF_EXITING;
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
- spin_lock_irq(&tsk->pi_lock);
- tsk->flags |= PF_EXITING;
- spin_unlock_irq(&tsk->pi_lock);
+ smp_mb();
+ spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
- current->comm, current->pid,
+ current->comm, task_pid_nr(current),
preempt_count());
acct_update_integrals(tsk);
@@ -951,16 +969,19 @@ fastcall NORET_TYPE void do_exit(long code)
}
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+ exit_child_reaper(tsk);
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
}
acct_collect(code, group_dead);
+#ifdef CONFIG_FUTEX
if (unlikely(tsk->robust_list))
exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list))
compat_exit_robust_list(tsk);
#endif
+#endif
if (group_dead)
tty_audit_exit();
if (unlikely(tsk->audit_context))
@@ -978,7 +999,7 @@ fastcall NORET_TYPE void do_exit(long code)
__exit_fs(tsk);
check_stack_usage();
exit_thread();
- cpuset_exit(tsk);
+ cgroup_exit(tsk, 1);
exit_keys(tsk);
if (group_dead && tsk->signal->leader)
@@ -989,12 +1010,12 @@ fastcall NORET_TYPE void do_exit(long code)
module_put(tsk->binfmt->module);
proc_exit_connector(tsk);
- exit_task_namespaces(tsk);
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+#ifdef CONFIG_FUTEX
/*
* This must happen late, after the PID is not
* hashed anymore:
@@ -1003,6 +1024,7 @@ fastcall NORET_TYPE void do_exit(long code)
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
+#endif
/*
* Make sure we are holding no locks:
*/
@@ -1090,15 +1112,17 @@ asmlinkage void sys_exit_group(int error_code)
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
int err;
+ struct pid_namespace *ns;
+ ns = current->nsproxy->pid_ns;
if (pid > 0) {
- if (p->pid != pid)
+ if (task_pid_nr_ns(p, ns) != pid)
return 0;
} else if (!pid) {
- if (process_group(p) != process_group(current))
+ if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
return 0;
} else if (pid != -1) {
- if (process_group(p) != -pid)
+ if (task_pgrp_nr_ns(p, ns) != -pid)
return 0;
}
@@ -1167,11 +1191,13 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
int __user *stat_addr, struct rusage __user *ru)
{
unsigned long state;
- int retval;
- int status;
+ int retval, status, traced;
+ struct pid_namespace *ns;
+
+ ns = current->nsproxy->pid_ns;
if (unlikely(noreap)) {
- pid_t pid = p->pid;
+ pid_t pid = task_pid_nr_ns(p, ns);
uid_t uid = p->uid;
int exit_code = p->exit_code;
int why, status;
@@ -1202,15 +1228,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
BUG_ON(state != EXIT_DEAD);
return 0;
}
- if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
- /*
- * This can only happen in a race with a ptraced thread
- * dying on another processor.
- */
- return 0;
- }
- if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ /* traced means p->ptrace, but not vice versa */
+ traced = (p->real_parent != p->parent);
+
+ if (likely(!traced)) {
struct signal_struct *psig;
struct signal_struct *sig;
@@ -1242,6 +1264,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
cputime_add(p->stime,
cputime_add(sig->stime,
sig->cstime)));
+ psig->cgtime =
+ cputime_add(psig->cgtime,
+ cputime_add(p->gtime,
+ cputime_add(sig->gtime,
+ sig->cgtime)));
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
@@ -1289,38 +1316,33 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
retval = put_user(status, &infop->si_status);
}
if (!retval && infop)
- retval = put_user(p->pid, &infop->si_pid);
+ retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
- if (retval) {
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- return retval;
- }
- retval = p->pid;
- if (p->real_parent != p->parent) {
+ if (!retval)
+ retval = task_pid_nr_ns(p, ns);
+
+ if (traced) {
write_lock_irq(&tasklist_lock);
- /* Double-check with lock held. */
- if (p->real_parent != p->parent) {
- __ptrace_unlink(p);
- // TODO: is this safe?
- p->exit_state = EXIT_ZOMBIE;
- /*
- * If this is not a detached task, notify the parent.
- * If it's still not detached after that, don't release
- * it now.
- */
+ /* We dropped tasklist, ptracer could die and untrace */
+ ptrace_unlink(p);
+ /*
+ * If this is not a detached task, notify the parent.
+ * If it's still not detached after that, don't release
+ * it now.
+ */
+ if (p->exit_signal != -1) {
+ do_notify_parent(p, p->exit_signal);
if (p->exit_signal != -1) {
- do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1)
- p = NULL;
+ p->exit_state = EXIT_ZOMBIE;
+ p = NULL;
}
}
write_unlock_irq(&tasklist_lock);
}
if (p != NULL)
release_task(p);
- BUG_ON(!retval);
+
return retval;
}
@@ -1335,11 +1357,12 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
+ struct pid_namespace *ns;
if (!p->exit_code)
return 0;
if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
- p->signal && p->signal->group_stop_count > 0)
+ p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
* We won't report until all threads have stopped.
@@ -1353,11 +1376,12 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
+ ns = current->nsproxy->pid_ns;
get_task_struct(p);
read_unlock(&tasklist_lock);
if (unlikely(noreap)) {
- pid_t pid = p->pid;
+ pid_t pid = task_pid_nr_ns(p, ns);
uid_t uid = p->uid;
int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
@@ -1428,11 +1452,11 @@ bail_ref:
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
- retval = put_user(p->pid, &infop->si_pid);
+ retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
if (!retval)
- retval = p->pid;
+ retval = task_pid_nr_ns(p, ns);
put_task_struct(p);
BUG_ON(!retval);
@@ -1452,9 +1476,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
int retval;
pid_t pid;
uid_t uid;
-
- if (unlikely(!p->signal))
- return 0;
+ struct pid_namespace *ns;
if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
return 0;
@@ -1469,7 +1491,8 @@ static int wait_task_continued(struct task_struct *p, int noreap,
p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
spin_unlock_irq(&p->sighand->siglock);
- pid = p->pid;
+ ns = current->nsproxy->pid_ns;
+ pid = task_pid_nr_ns(p, ns);
uid = p->uid;
get_task_struct(p);
read_unlock(&tasklist_lock);
@@ -1480,7 +1503,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
if (!retval && stat_addr)
retval = put_user(0xffff, stat_addr);
if (!retval)
- retval = p->pid;
+ retval = task_pid_nr_ns(p, ns);
} else {
retval = wait_noreap_copyout(p, pid, uid,
CLD_CONTINUED, SIGCONT,
@@ -1529,12 +1552,9 @@ repeat:
tsk = current;
do {
struct task_struct *p;
- struct list_head *_p;
int ret;
- list_for_each(_p,&tsk->children) {
- p = list_entry(_p, struct task_struct, sibling);
-
+ list_for_each_entry(p, &tsk->children, sibling) {
ret = eligible_child(pid, options, p);
if (!ret)
continue;
@@ -1616,9 +1636,8 @@ check_continued:
}
}
if (!flag) {
- list_for_each(_p, &tsk->ptrace_children) {
- p = list_entry(_p, struct task_struct,
- ptrace_list);
+ list_for_each_entry(p, &tsk->ptrace_children,
+ ptrace_list) {
if (!eligible_child(pid, options, p))
continue;
flag = 1;
diff --git a/kernel/fork.c b/kernel/fork.c
index 33f12f48684a..ddafdfac9456 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -29,7 +29,7 @@
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
-#include <linux/cpuset.h>
+#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
@@ -50,6 +50,7 @@
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
+#include <linux/proc_fs.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -107,6 +108,7 @@ static struct kmem_cache *mm_cachep;
void free_task(struct task_struct *tsk)
{
+ prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
@@ -115,7 +117,7 @@ EXPORT_SYMBOL(free_task);
void __put_task_struct(struct task_struct *tsk)
{
- WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
+ WARN_ON(!tsk->exit_state);
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
@@ -163,6 +165,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
{
struct task_struct *tsk;
struct thread_info *ti;
+ int err;
prepare_to_copy(orig);
@@ -178,6 +181,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
*tsk = *orig;
tsk->stack = ti;
+
+ err = prop_local_init_single(&tsk->dirties);
+ if (err) {
+ free_thread_info(ti);
+ free_task_struct(tsk);
+ return NULL;
+ }
+
setup_thread_stack(tsk, orig);
#ifdef CONFIG_CC_STACKPROTECTOR
@@ -195,7 +206,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
}
#ifdef CONFIG_MMU
-static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, **pprev;
struct rb_node **rb_link, *rb_parent;
@@ -258,7 +269,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
get_file(file);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
-
+
/* insert tmp into the share list, just after mpnt */
spin_lock(&file->f_mapping->i_mmap_lock);
tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -321,7 +332,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -573,7 +584,7 @@ fail_nomem:
return retval;
}
-static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
+static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
/* We don't need to lock fs - think why ;-) */
@@ -605,7 +616,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
EXPORT_SYMBOL_GPL(copy_fs_struct);
-static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
if (clone_flags & CLONE_FS) {
atomic_inc(&current->fs->count);
@@ -728,8 +739,8 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
/* compute the remainder to be cleared */
size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
- /* This is long word aligned thus could use a optimized version */
- memset(new_fds, 0, size);
+ /* This is long-word aligned, thus could use an optimized version */
+ memset(new_fds, 0, size);
if (new_fdt->max_fds > open_files) {
int left = (new_fdt->max_fds-open_files)/8;
@@ -808,7 +819,7 @@ int unshare_files(void)
EXPORT_SYMBOL(unshare_files);
-static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
@@ -831,7 +842,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
kmem_cache_free(sighand_cachep, sighand);
}
-static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
+static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
int ret;
@@ -877,6 +888,8 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
sig->tty_old_pgrp = NULL;
sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+ sig->gtime = cputime_zero;
+ sig->cgtime = cputime_zero;
sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
@@ -911,7 +924,7 @@ void __cleanup_signal(struct signal_struct *sig)
kmem_cache_free(signal_cachep, sig);
}
-static inline void cleanup_signal(struct task_struct *tsk)
+static void cleanup_signal(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
@@ -921,7 +934,7 @@ static inline void cleanup_signal(struct task_struct *tsk)
__cleanup_signal(sig);
}
-static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
+static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
@@ -930,16 +943,17 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
if (!(clone_flags & CLONE_PTRACE))
p->ptrace = 0;
p->flags = new_flags;
+ clear_freeze_flag(p);
}
asmlinkage long sys_set_tid_address(int __user *tidptr)
{
current->clear_child_tid = tidptr;
- return current->pid;
+ return task_pid_vnr(current);
}
-static inline void rt_mutex_init_task(struct task_struct *p)
+static void rt_mutex_init_task(struct task_struct *p)
{
spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
@@ -960,12 +974,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size,
- int __user *parent_tidptr,
int __user *child_tidptr,
struct pid *pid)
{
int retval;
- struct task_struct *p = NULL;
+ struct task_struct *p;
+ int cgroup_callbacks_done = 0;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -1029,12 +1043,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
- p->pid = pid_nr(pid);
- retval = -EFAULT;
- if (clone_flags & CLONE_PARENT_SETTID)
- if (put_user(p->pid, parent_tidptr))
- goto bad_fork_cleanup_delays_binfmt;
-
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
@@ -1045,6 +1053,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->utime = cputime_zero;
p->stime = cputime_zero;
+ p->gtime = cputime_zero;
+ p->utimescaled = cputime_zero;
+ p->stimescaled = cputime_zero;
#ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */
@@ -1055,28 +1066,29 @@ static struct task_struct *copy_process(unsigned long clone_flags,
task_io_accounting_init(p);
acct_clear_integrals(p);
- p->it_virt_expires = cputime_zero;
+ p->it_virt_expires = cputime_zero;
p->it_prof_expires = cputime_zero;
- p->it_sched_expires = 0;
- INIT_LIST_HEAD(&p->cpu_timers[0]);
- INIT_LIST_HEAD(&p->cpu_timers[1]);
- INIT_LIST_HEAD(&p->cpu_timers[2]);
+ p->it_sched_expires = 0;
+ INIT_LIST_HEAD(&p->cpu_timers[0]);
+ INIT_LIST_HEAD(&p->cpu_timers[1]);
+ INIT_LIST_HEAD(&p->cpu_timers[2]);
p->lock_depth = -1; /* -1 = no lock */
do_posix_clock_monotonic_gettime(&p->start_time);
p->real_start_time = p->start_time;
monotonic_to_bootbased(&p->real_start_time);
+#ifdef CONFIG_SECURITY
p->security = NULL;
+#endif
p->io_context = NULL;
- p->io_wait = NULL;
p->audit_context = NULL;
- cpuset_fork(p);
+ cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_copy(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
- goto bad_fork_cleanup_cpuset;
+ goto bad_fork_cleanup_cgroup;
}
mpol_fix_fork_child_flag(p);
#endif
@@ -1109,10 +1121,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->blocked_on = NULL; /* not blocked yet */
#endif
- p->tgid = p->pid;
- if (clone_flags & CLONE_THREAD)
- p->tgid = current->tgid;
-
if ((retval = security_task_alloc(p)))
goto bad_fork_cleanup_policy;
if ((retval = audit_alloc(p)))
@@ -1138,18 +1146,37 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (retval)
goto bad_fork_cleanup_namespaces;
+ if (pid != &init_struct_pid) {
+ retval = -ENOMEM;
+ pid = alloc_pid(task_active_pid_ns(p));
+ if (!pid)
+ goto bad_fork_cleanup_namespaces;
+
+ if (clone_flags & CLONE_NEWPID) {
+ retval = pid_ns_prepare_proc(task_active_pid_ns(p));
+ if (retval < 0)
+ goto bad_fork_free_pid;
+ }
+ }
+
+ p->pid = pid_nr(pid);
+ p->tgid = p->pid;
+ if (clone_flags & CLONE_THREAD)
+ p->tgid = current->tgid;
+
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
+#ifdef CONFIG_FUTEX
p->robust_list = NULL;
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
-
+#endif
/*
* sigaltstack should be cleared when sharing the same VM
*/
@@ -1186,6 +1213,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
+ /* Now that the task is set up, run cgroup callbacks if
+ * necessary. We need to run them before the task is visible
+ * on the tasklist. */
+ cgroup_fork_callbacks(p);
+ cgroup_callbacks_done = 1;
+
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
@@ -1223,12 +1256,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* A fatal signal pending means that current will exit, so the new
* thread can't slip out of an OOM kill (or normal SIGKILL).
*/
- recalc_sigpending();
+ recalc_sigpending();
if (signal_pending(current)) {
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
- goto bad_fork_cleanup_namespaces;
+ goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
@@ -1257,11 +1290,22 @@ static struct task_struct *copy_process(unsigned long clone_flags,
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
- p->signal->tty = current->signal->tty;
- p->signal->pgrp = process_group(current);
- set_signal_session(p->signal, process_session(current));
- attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
- attach_pid(p, PIDTYPE_SID, task_session(current));
+ if (clone_flags & CLONE_NEWPID) {
+ p->nsproxy->pid_ns->child_reaper = p;
+ p->signal->tty = NULL;
+ set_task_pgrp(p, p->pid);
+ set_task_session(p, p->pid);
+ attach_pid(p, PIDTYPE_PGID, pid);
+ attach_pid(p, PIDTYPE_SID, pid);
+ } else {
+ p->signal->tty = current->signal->tty;
+ set_task_pgrp(p, task_pgrp_nr(current));
+ set_task_session(p, task_session_nr(current));
+ attach_pid(p, PIDTYPE_PGID,
+ task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID,
+ task_session(current));
+ }
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
@@ -1274,8 +1318,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
+ cgroup_post_fork(p);
return p;
+bad_fork_free_pid:
+ if (pid != &init_struct_pid)
+ free_pid(pid);
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_keys:
@@ -1300,10 +1348,9 @@ bad_fork_cleanup_security:
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_free(p->mempolicy);
-bad_fork_cleanup_cpuset:
+bad_fork_cleanup_cgroup:
#endif
- cpuset_exit(p);
-bad_fork_cleanup_delays_binfmt:
+ cgroup_exit(p, cgroup_callbacks_done);
delayacct_tsk_free(p);
if (p->binfmt)
module_put(p->binfmt->module);
@@ -1330,7 +1377,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
struct task_struct *task;
struct pt_regs regs;
- task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
+ task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
&init_struct_pid);
if (!IS_ERR(task))
init_idle(task, cpu);
@@ -1338,7 +1385,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
return task;
}
-static inline int fork_traceflag (unsigned clone_flags)
+static int fork_traceflag(unsigned clone_flags)
{
if (clone_flags & CLONE_UNTRACED)
return 0;
@@ -1369,19 +1416,16 @@ long do_fork(unsigned long clone_flags,
{
struct task_struct *p;
int trace = 0;
- struct pid *pid = alloc_pid();
long nr;
- if (!pid)
- return -EAGAIN;
- nr = pid->nr;
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
clone_flags |= CLONE_PTRACE;
}
- p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
+ p = copy_process(clone_flags, stack_start, regs, stack_size,
+ child_tidptr, NULL);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
@@ -1389,6 +1433,17 @@ long do_fork(unsigned long clone_flags,
if (!IS_ERR(p)) {
struct completion vfork;
+ /*
+ * calling task_pid_nr_ns() here would be enough on its own,
+ * but this check improves optimisation of the regular fork() path
+ */
+ nr = (clone_flags & CLONE_NEWPID) ?
+ task_pid_nr_ns(p, current->nsproxy->pid_ns) :
+ task_pid_vnr(p);
+
+ if (clone_flags & CLONE_PARENT_SETTID)
+ put_user(nr, parent_tidptr);
+
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
@@ -1422,7 +1477,6 @@ long do_fork(unsigned long clone_flags,
}
}
} else {
- free_pid(pid);
nr = PTR_ERR(p);
}
return nr;
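
The return-value change above is visible from userspace: with CLONE_NEWPID
the parent gets the child's pid as numbered in the parent's namespace, while
the child sees itself as pid 1 in the new namespace. A hedged userspace
sketch (needs CAP_SYS_ADMIN; the CLONE_NEWPID fallback define is only for
older libc headers):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef CLONE_NEWPID
#define CLONE_NEWPID 0x20000000	/* value from linux/sched.h */
#endif

static int child(void *arg)
{
	/* Inside the new pid namespace this prints 1. */
	printf("child sees itself as pid %d\n", getpid());
	return 0;
}

int main(void)
{
	char *stack = malloc(64 * 1024);
	pid_t pid;

	if (!stack)
		return 1;
	pid = clone(child, stack + 64 * 1024, CLONE_NEWPID | SIGCHLD, NULL);
	if (pid < 0)
		return 1;
	/* The parent sees the pid in its own namespace, as returned by do_fork(). */
	printf("parent sees child as pid %d\n", pid);
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}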
@@ -1432,8 +1486,7 @@ long do_fork(unsigned long clone_flags,
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-static void sighand_ctor(void *data, struct kmem_cache *cachep,
- unsigned long flags)
+static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
struct sighand_struct *sighand = data;
@@ -1468,7 +1521,7 @@ void __init proc_caches_init(void)
* Check constraints on flags passed to the unshare system call and
* force unsharing of additional process context as appropriate.
*/
-static inline void check_unshare_flags(unsigned long *flags_ptr)
+static void check_unshare_flags(unsigned long *flags_ptr)
{
/*
* If unsharing a thread from a thread group, must also
@@ -1600,7 +1653,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
struct files_struct *fd, *new_fd = NULL;
struct sem_undo_list *new_ulist = NULL;
- struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
+ struct nsproxy *new_nsproxy = NULL;
check_unshare_flags(&unshare_flags);
@@ -1608,7 +1661,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
err = -EINVAL;
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
- CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER))
+ CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
+ CLONE_NEWNET))
goto bad_unshare_out;
if ((err = unshare_thread(unshare_flags)))
@@ -1629,14 +1683,13 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {
- task_lock(current);
-
if (new_nsproxy) {
- old_nsproxy = current->nsproxy;
- current->nsproxy = new_nsproxy;
- new_nsproxy = old_nsproxy;
+ switch_task_namespaces(current, new_nsproxy);
+ new_nsproxy = NULL;
}
+ task_lock(current);
+
if (new_fs) {
fs = current->fs;
current->fs = new_fs;
diff --git a/kernel/futex.c b/kernel/futex.c
index fcc94e7b4086..32710451dc20 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -52,6 +52,10 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
+#include <linux/magic.h>
+#include <linux/pid.h>
+#include <linux/nsproxy.h>
+
#include <asm/futex.h>
#include "rtmutex_common.h"
@@ -292,7 +296,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
*/
void drop_futex_key_refs(union futex_key *key)
{
- if (key->both.ptr == 0)
+ if (!key->both.ptr)
return;
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
@@ -442,8 +446,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
struct task_struct *p;
rcu_read_lock();
- p = find_task_by_pid(pid);
-
+ p = find_task_by_vpid(pid);
if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
p = ERR_PTR(-ESRCH);
else
@@ -652,7 +655,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!(uval & FUTEX_OWNER_DIED)) {
int ret = 0;
- newval = FUTEX_WAITERS | new_owner->pid;
+ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
@@ -1045,7 +1048,7 @@ static int unqueue_me(struct futex_q *q)
retry:
lock_ptr = q->lock_ptr;
barrier();
- if (lock_ptr != 0) {
+ if (lock_ptr != NULL) {
spin_lock(lock_ptr);
/*
* q->lock_ptr can change between reading it and
@@ -1105,7 +1108,7 @@ static void unqueue_me_pi(struct futex_q *q)
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
struct task_struct *curr)
{
- u32 newtid = curr->pid | FUTEX_WAITERS;
+ u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, curval, newval;
int ret;
@@ -1367,7 +1370,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed.
*/
- newval = current->pid;
+ newval = task_pid_vnr(current);
curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
@@ -1378,7 +1381,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* Detect deadlocks. In case of REQUEUE_PI this is a valid
* situation and we return success to user space.
*/
- if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
+ if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
ret = -EDEADLK;
goto out_unlock_release_sem;
}
@@ -1407,7 +1410,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
*/
if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
/* Keep the OWNER_DIED bit */
- newval = (curval & ~FUTEX_TID_MASK) | current->pid;
+ newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
ownerdied = 0;
lock_taken = 1;
}
@@ -1586,7 +1589,7 @@ retry:
/*
* We release only a lock we actually own:
*/
- if ((uval & FUTEX_TID_MASK) != current->pid)
+ if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
/*
* First take all the futex related locks:
@@ -1607,7 +1610,7 @@ retry_unlocked:
* anyone else up:
*/
if (!(uval & FUTEX_OWNER_DIED))
- uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0);
+ uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
if (unlikely(uval == -EFAULT))
@@ -1616,7 +1619,7 @@ retry_unlocked:
* Rare case: we managed to release the lock atomically,
* no need to wake anyone else up:
*/
- if (unlikely(uval == current->pid))
+ if (unlikely(uval == task_pid_vnr(current)))
goto out_unlock;
/*
@@ -1853,7 +1856,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
ret = -ESRCH;
rcu_read_lock();
- p = find_task_by_pid(pid);
+ p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
ret = -EPERM;
@@ -1886,7 +1889,7 @@ retry:
if (get_user(uval, uaddr))
return -1;
- if ((uval & FUTEX_TID_MASK) == curr->pid) {
+ if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
/*
* Ok, this dying thread is truly holding a futex
* of interest. Set the OWNER_DIED bit atomically
@@ -2080,7 +2083,7 @@ static int futexfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
+ return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
}
static struct file_system_type futex_fs_type = {
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 2c2e2954b713..00b572666cc7 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/compat.h>
+#include <linux/nsproxy.h>
#include <linux/futex.h>
#include <asm/uaccess.h>
@@ -124,7 +125,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
ret = -ESRCH;
read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
+ p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
ret = -EPERM;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c21ca6bfaa66..b2b2c2b0a49b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -277,6 +277,30 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
}
EXPORT_SYMBOL_GPL(ktime_add_ns);
+
+/**
+ * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
+ * @kt: minuend
+ * @nsec: the scalar nsec value to subtract
+ *
+ * Returns the subtraction of @nsec from @kt in ktime_t format
+ */
+ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
+{
+ ktime_t tmp;
+
+ if (likely(nsec < NSEC_PER_SEC)) {
+ tmp.tv64 = nsec;
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+ return ktime_sub(kt, tmp);
+}
+
+EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */
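
ktime_sub_ns() mirrors the existing ktime_add_ns() for the non-scalar ktime
representation. A small hedged sketch of the intended use; the two-millisecond
expiry and 500-microsecond slack are illustrative values, not from this patch:

static ktime_t expiry_with_slack(void)
{
	/* Two milliseconds from now, pulled back by 500us of slack. */
	ktime_t expires = ktime_add_ns(ktime_get(), 2 * NSEC_PER_MSEC);

	return ktime_sub_ns(expires, 500 * NSEC_PER_USEC);
}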
/*
@@ -1262,8 +1286,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
struct hrtimer_sleeper t;
- struct timespec __user *rmtp;
- struct timespec tu;
+ struct timespec *rmtp;
ktime_t time;
restart->fn = do_no_restart_syscall;
@@ -1274,14 +1297,12 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
return 0;
- rmtp = (struct timespec __user *) restart->arg1;
+ rmtp = (struct timespec *)restart->arg1;
if (rmtp) {
time = ktime_sub(t.timer.expires, t.timer.base->get_time());
if (time.tv64 <= 0)
return 0;
- tu = ktime_to_timespec(time);
- if (copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ *rmtp = ktime_to_timespec(time);
}
restart->fn = hrtimer_nanosleep_restart;
@@ -1290,12 +1311,11 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
return -ERESTART_RESTARTBLOCK;
}
-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp,
const enum hrtimer_mode mode, const clockid_t clockid)
{
struct restart_block *restart;
struct hrtimer_sleeper t;
- struct timespec tu;
ktime_t rem;
hrtimer_init(&t.timer, clockid, mode);
@@ -1311,9 +1331,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
if (rem.tv64 <= 0)
return 0;
- tu = ktime_to_timespec(rem);
- if (copy_to_user(rmtp, &tu, sizeof(tu)))
- return -EFAULT;
+ *rmtp = ktime_to_timespec(rem);
}
restart = &current_thread_info()->restart_block;
@@ -1329,7 +1347,8 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
- struct timespec tu;
+ struct timespec tu, rmt;
+ int ret;
if (copy_from_user(&tu, rqtp, sizeof(tu)))
return -EFAULT;
@@ -1337,7 +1356,15 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
if (!timespec_valid(&tu))
return -EINVAL;
- return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
+ CLOCK_MONOTONIC);
+
+ if (ret && rmtp) {
+ if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
+ return -EFAULT;
+ }
+
+ return ret;
}
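[Editor's note] The point of this rework is that hrtimer_nanosleep() now fills a kernel-space timespec and the syscall layer performs the user copy itself. A hedged sketch of how another caller, e.g. a compat wrapper, could follow the same pattern; the function name is illustrative, while get_compat_timespec()/put_compat_timespec() are the existing compat helpers.

#include <linux/compat.h>
#include <linux/hrtimer.h>
#include <linux/time.h>

/* Illustrative compat-style wrapper, not part of this patch. */
static long compat_nanosleep_example(struct compat_timespec __user *urqtp,
				     struct compat_timespec __user *urmtp)
{
	struct timespec rqt, rmt;
	long ret;

	if (get_compat_timespec(&rqt, urqtp))
		return -EFAULT;
	if (!timespec_valid(&rqt))
		return -EINVAL;

	/* hrtimer_nanosleep() only touches kernel memory now */
	ret = hrtimer_nanosleep(&rqt, urmtp ? &rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);

	/* the copy-out, in whatever ABI the caller needs, happens here */
	if (ret && urmtp && put_compat_timespec(&rmt, urmtp))
		return -EFAULT;

	return ret;
}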
/*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f1a73f0b54e7..9b5dff6b3f6a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -503,7 +503,6 @@ out_unlock:
spin_unlock(&desc->lock);
}
-#ifdef CONFIG_SMP
/**
* handle_percpu_IRQ - Per CPU local irq handler
* @irq: the interrupt number
@@ -529,8 +528,6 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
desc->chip->eoi(irq);
}
-#endif /* CONFIG_SMP */
-
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7230d914eaa2..80eab7a04205 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -405,7 +405,6 @@ void free_irq(unsigned int irq, void *dev_id)
struct irq_desc *desc;
struct irqaction **p;
unsigned long flags;
- irqreturn_t (*handler)(int, void *) = NULL;
WARN_ON(in_interrupt());
if (irq >= NR_IRQS)
@@ -445,8 +444,21 @@ void free_irq(unsigned int irq, void *dev_id)
/* Make sure it's not being used on another CPU */
synchronize_irq(irq);
- if (action->flags & IRQF_SHARED)
- handler = action->handler;
+#ifdef CONFIG_DEBUG_SHIRQ
+ /*
+ * It's a shared IRQ -- the driver ought to be
+ * prepared for it to happen even now that it's
+ * being freed, so let's make sure.... We do
+ * this after actually deregistering it, to
+ * make sure that a 'real' IRQ doesn't run in
+ * parallel with our fake one.
+ */
+ if (action->flags & IRQF_SHARED) {
+ local_irq_save(flags);
+ action->handler(irq, dev_id);
+ local_irq_restore(flags);
+ }
+#endif
kfree(action);
return;
}
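[Editor's note] The CONFIG_DEBUG_SHIRQ call above means a shared handler can be invoked one extra time after the driver has asked for the IRQ to be freed. A minimal sketch of a handler written defensively for that case; the device structure and register offsets are illustrative, only irqreturn_t/IRQ_NONE/IRQ_HANDLED and readl()/writel() are real kernel API.

#include <linux/interrupt.h>
#include <linux/io.h>

/* Illustrative device state and register layout. */
struct example_dev {
	void __iomem *regs;
};
#define EXAMPLE_IRQ_STATUS	0x00	/* illustrative offset */
#define EXAMPLE_IRQ_ACK		0x04	/* illustrative offset */

/* Shared handler: must tolerate a spurious call with no pending work. */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	u32 status = readl(dev->regs + EXAMPLE_IRQ_STATUS);

	if (!status)			/* not ours, or already quiesced */
		return IRQ_NONE;

	writel(status, dev->regs + EXAMPLE_IRQ_ACK);
	return IRQ_HANDLED;
}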
@@ -454,19 +466,6 @@ void free_irq(unsigned int irq, void *dev_id)
spin_unlock_irqrestore(&desc->lock, flags);
return;
}
-#ifdef CONFIG_DEBUG_SHIRQ
- if (handler) {
- /*
- * It's a shared IRQ -- the driver ought to be prepared for it
- * to happen even now it's being freed, so let's make sure....
- * We do this after actually deregistering it, to make sure that
- * a 'real' IRQ doesn't run in parallel with our fake
- */
- local_irq_save(flags);
- handler(irq, dev_id);
- local_irq_restore(flags);
- }
-#endif
}
EXPORT_SYMBOL(free_irq);
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 3205e8e114fa..2fab344dbf56 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -130,7 +130,7 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
enum hrtimer_restart it_real_fn(struct hrtimer *timer)
{
struct signal_struct *sig =
- container_of(timer, struct signal_struct, real_timer);
+ container_of(timer, struct signal_struct, real_timer);
send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
@@ -291,6 +291,6 @@ asmlinkage long sys_setitimer(int which,
return error;
if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
- return -EFAULT;
+ return -EFAULT;
return 0;
}
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 25db14b89e82..aa74a1ef2da8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -17,21 +17,30 @@
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
-#include <linux/syscalls.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
+#include <linux/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/numa.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/semaphore.h>
+#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t* crash_notes;
+/* vmcoreinfo stuff */
+unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -42,7 +51,7 @@ struct resource crashk_res = {
int kexec_should_crash(struct task_struct *p)
{
- if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops)
+ if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
return 1;
return 0;
}
@@ -776,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image,
size_t uchunk, mchunk;
page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
@@ -835,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image,
size_t uchunk, mchunk;
page = pfn_to_page(maddr >> PAGE_SHIFT);
- if (page == 0) {
+ if (!page) {
result = -ENOMEM;
goto out;
}
@@ -1061,6 +1070,7 @@ void crash_kexec(struct pt_regs *regs)
if (kexec_crash_image) {
struct pt_regs fixed_regs;
crash_setup_regs(&fixed_regs, regs);
+ crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
machine_kexec(kexec_crash_image);
}
@@ -1135,3 +1145,270 @@ static int __init crash_notes_memory_init(void)
return 0;
}
module_init(crash_notes_memory_init)
+
+
+/*
+ * parsing the "crashkernel" commandline
+ *
+ * this code is intended to be called from architecture specific code
+ */
+
+
+/*
+ * This function parses command lines in the format
+ *
+ * crashkernel=ramsize-range:size[,...][@offset]
+ *
+ * The function returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_mem(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ char *cur = cmdline, *tmp;
+
+ /* for each entry of the comma-separated list */
+ do {
+ unsigned long long start, end = ULLONG_MAX, size;
+
+ /* get the start of the range */
+ start = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warning("crashkernel: Memory value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (*cur != '-') {
+ pr_warning("crashkernel: '-' expected\n");
+ return -EINVAL;
+ }
+ cur++;
+
+ /* if no ':' is here, then we read the end */
+ if (*cur != ':') {
+ end = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warning("crashkernel: Memory "
+ "value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (end <= start) {
+ pr_warning("crashkernel: end <= start\n");
+ return -EINVAL;
+ }
+ }
+
+ if (*cur != ':') {
+ pr_warning("crashkernel: ':' expected\n");
+ return -EINVAL;
+ }
+ cur++;
+
+ size = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warning("Memory value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (size >= system_ram) {
+ pr_warning("crashkernel: invalid size\n");
+ return -EINVAL;
+ }
+
+ /* match ? */
+ if (system_ram >= start && system_ram <= end) {
+ *crash_size = size;
+ break;
+ }
+ } while (*cur++ == ',');
+
+ if (*crash_size > 0) {
+ while (*cur != ' ' && *cur != '@')
+ cur++;
+ if (*cur == '@') {
+ cur++;
+ *crash_base = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warning("Memory value expected "
+ "after '@'\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function parses "simple" (old) crashkernel command lines like
+ *
+ * crashkernel=size[@offset]
+ *
+ * It returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_simple(char *cmdline,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ char *cur = cmdline;
+
+ *crash_size = memparse(cmdline, &cur);
+ if (cmdline == cur) {
+ pr_warning("crashkernel: memory value expected\n");
+ return -EINVAL;
+ }
+
+ if (*cur == '@')
+ *crash_base = memparse(cur+1, &cur);
+
+ return 0;
+}
+
+/*
+ * This function is the entry point for command line parsing and should be
+ * called from the arch-specific code.
+ */
+int __init parse_crashkernel(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ char *p = cmdline, *ck_cmdline = NULL;
+ char *first_colon, *first_space;
+
+ BUG_ON(!crash_size || !crash_base);
+ *crash_size = 0;
+ *crash_base = 0;
+
+ /* find crashkernel and use the last one if there are more */
+ p = strstr(p, "crashkernel=");
+ while (p) {
+ ck_cmdline = p;
+ p = strstr(p+1, "crashkernel=");
+ }
+
+ if (!ck_cmdline)
+ return -EINVAL;
+
+ ck_cmdline += 12; /* strlen("crashkernel=") */
+
+ /*
+ * if the commandline contains a ':', then that's the extended
+ * syntax -- if not, it must be the classic syntax
+ */
+ first_colon = strchr(ck_cmdline, ':');
+ first_space = strchr(ck_cmdline, ' ');
+ if (first_colon && (!first_space || first_colon < first_space))
+ return parse_crashkernel_mem(ck_cmdline, system_ram,
+ crash_size, crash_base);
+ else
+ return parse_crashkernel_simple(ck_cmdline, crash_size,
+ crash_base);
+
+ return 0;
+}
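[Editor's note] A hedged sketch of the intended arch-specific caller; the function and variable names below are illustrative, but parse_crashkernel() and crashk_res come from this file, and both accepted syntaxes are shown in the comment.

/*
 * Illustrative arch-side reservation. Both syntaxes handled above are
 * accepted on the command line, e.g.:
 *
 *   crashkernel=64M@16M                     (classic)
 *   crashkernel=512M-2G:64M,2G-:128M@16M    (extended, range based)
 */
static void __init example_reserve_crashkernel(unsigned long long total_mem)
{
	unsigned long long crash_size, crash_base;
	int ret;

	/* boot_command_line is the saved kernel command line */
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret || !crash_size)
		return;

	/* arch code would reserve the memory range here */
	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
}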
+
+
+
+void crash_save_vmcoreinfo(void)
+{
+ u32 *buf;
+
+ if (!vmcoreinfo_size)
+ return;
+
+ vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+
+ buf = (u32 *)vmcoreinfo_note;
+
+ buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
+ vmcoreinfo_size);
+
+ final_note(buf);
+}
+
+void vmcoreinfo_append_str(const char *fmt, ...)
+{
+ va_list args;
+ char buf[0x50];
+ int r;
+
+ va_start(args, fmt);
+ r = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (r + vmcoreinfo_size > vmcoreinfo_max_size)
+ r = vmcoreinfo_max_size - vmcoreinfo_size;
+
+ memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
+
+ vmcoreinfo_size += r;
+}
+
+/*
+ * provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
+{}
+
+unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
+{
+ return __pa((unsigned long)(char *)&vmcoreinfo_note);
+}
+
+static int __init crash_save_vmcoreinfo_init(void)
+{
+ vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release);
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE);
+
+ VMCOREINFO_SYMBOL(init_uts_ns);
+ VMCOREINFO_SYMBOL(node_online_map);
+ VMCOREINFO_SYMBOL(swapper_pg_dir);
+ VMCOREINFO_SYMBOL(_stext);
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ VMCOREINFO_SYMBOL(mem_map);
+ VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#ifdef CONFIG_SPARSEMEM
+ VMCOREINFO_SYMBOL(mem_section);
+ VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ VMCOREINFO_SIZE(mem_section);
+ VMCOREINFO_OFFSET(mem_section, section_mem_map);
+#endif
+ VMCOREINFO_SIZE(page);
+ VMCOREINFO_SIZE(pglist_data);
+ VMCOREINFO_SIZE(zone);
+ VMCOREINFO_SIZE(free_area);
+ VMCOREINFO_SIZE(list_head);
+ VMCOREINFO_TYPEDEF_SIZE(nodemask_t);
+ VMCOREINFO_OFFSET(page, flags);
+ VMCOREINFO_OFFSET(page, _count);
+ VMCOREINFO_OFFSET(page, mapping);
+ VMCOREINFO_OFFSET(page, lru);
+ VMCOREINFO_OFFSET(pglist_data, node_zones);
+ VMCOREINFO_OFFSET(pglist_data, nr_zones);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+ VMCOREINFO_OFFSET(pglist_data, node_mem_map);
+#endif
+ VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
+ VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
+ VMCOREINFO_OFFSET(pglist_data, node_id);
+ VMCOREINFO_OFFSET(zone, free_area);
+ VMCOREINFO_OFFSET(zone, vm_stat);
+ VMCOREINFO_OFFSET(zone, spanned_pages);
+ VMCOREINFO_OFFSET(free_area, free_list);
+ VMCOREINFO_OFFSET(list_head, next);
+ VMCOREINFO_OFFSET(list_head, prev);
+ VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_NUMBER(NR_FREE_PAGES);
+
+ arch_crash_save_vmcoreinfo();
+
+ return 0;
+}
+
+module_init(crash_save_vmcoreinfo_init)
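[Editor's note] arch_crash_save_vmcoreinfo() above is a weak hook, so architectures can append their own entries. A hedged sketch of such an override; the symbols chosen (node_data, MAX_NUMNODES) are illustrative of what a NUMA architecture might export, while the VMCOREINFO_* macros are the ones used in this file. Userspace dump tools locate the resulting note through the physical address/size pair exported via /sys/kernel/vmcoreinfo (see the ksysfs.c hunk below).

/* Illustrative architecture override of the weak default above. */
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}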
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c541..e3a5d817ac9b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
-static struct notifier_block kprobe_page_fault_nb = {
- .notifier_call = kprobe_exceptions_notify,
- .priority = 0x7fffffff /* we need to notified first */
-};
-
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
- if (!ret)
- atomic_inc(&kprobe_count);
goto out;
}
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled) {
- if (atomic_add_return(1, &kprobe_count) == \
- (ARCH_INACTIVE_KPROBE_COUNT + 1))
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
+ if (kprobe_enabled)
arch_arm_kprobe(p);
- }
+
out:
mutex_unlock(&kprobe_mutex);
@@ -658,16 +646,6 @@ valid_p:
}
mutex_unlock(&kprobe_mutex);
}
-
- /* Call unregister_page_fault_notifier()
- * if no probes are active
- */
- mutex_lock(&kprobe_mutex);
- if (atomic_add_return(-1, &kprobe_count) == \
- ARCH_INACTIVE_KPROBE_COUNT)
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
- mutex_unlock(&kprobe_mutex);
- return;
}
static struct notifier_block kprobe_exceptions_nb = {
@@ -738,6 +716,18 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
int ret = 0;
struct kretprobe_instance *inst;
int i;
+ void *addr = rp->kp.addr;
+
+ if (kretprobe_blacklist_size) {
+ if (addr == NULL)
+ kprobe_lookup_name(rp->kp.symbol_name, addr);
+ addr += rp->kp.offset;
+
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ if (kretprobe_blacklist[i].addr == addr)
+ return -EINVAL;
+ }
+ }
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
@@ -815,7 +805,17 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
- atomic_set(&kprobe_count, 0);
+
+ if (kretprobe_blacklist_size) {
+ /* lookup the function address from its name */
+ for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+ kprobe_lookup_name(kretprobe_blacklist[i].name,
+ kretprobe_blacklist[i].addr);
+ if (!kretprobe_blacklist[i].addr)
+ printk("kretprobe: lookup failed: %s\n",
+ kretprobe_blacklist[i].name);
+ }
+ }
/* By default, kprobes are enabled */
kprobe_enabled = true;
@@ -921,13 +921,6 @@ static void __kprobes enable_all_kprobes(void)
if (kprobe_enabled)
goto already_enabled;
- /*
- * Re-register the page fault notifier only if there are any
- * active probes at the time of enabling kprobes globally
- */
- if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
- register_page_fault_notifier(&kprobe_page_fault_nb);
-
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +961,7 @@ static void __kprobes disable_all_kprobes(void)
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
-
- mutex_lock(&kprobe_mutex);
- /* Unconditionally unregister the page_fault notifier */
- unregister_page_fault_notifier(&kprobe_page_fault_nb);
+ return;
already_disabled:
mutex_unlock(&kprobe_mutex);
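[Editor's note] The kretprobe_blacklist handling above expects each architecture to provide a table of functions that must never be kretprobed; init_kprobes() resolves the addresses by name at boot. A hedged sketch of such a table -- the struct name (kretprobe_blackpoint) and the chosen entry are assumptions for illustration, matching the name/addr usage visible in the hunks above.

#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative per-arch blacklist, terminated by a NULL name. */
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", },	/* switches tasks without switching stacks */
	{NULL, NULL}		/* terminator */
};
const size_t kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);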
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index d0e5c48e18c7..65daa5373ca6 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kexec.h>
+#include <linux/sched.h>
#define KERNEL_ATTR_RO(_name) \
static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
@@ -60,6 +61,15 @@ static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
return sprintf(page, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
+
+static ssize_t vmcoreinfo_show(struct kset *kset, char *page)
+{
+ return sprintf(page, "%lx %x\n",
+ paddr_vmcoreinfo_note(),
+ (unsigned int)vmcoreinfo_max_size);
+}
+KERNEL_ATTR_RO(vmcoreinfo);
+
#endif /* CONFIG_KEXEC */
/*
@@ -95,6 +105,7 @@ static struct attribute * kernel_attrs[] = {
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
+ &vmcoreinfo_attr.attr,
#endif
NULL
};
@@ -116,6 +127,13 @@ static int __init ksysfs_init(void)
&notes_attr);
}
+ /*
+ * Create the "/sys/kernel/uids" directory and the root user's
+ * directory under it.
+ */
+ if (!error)
+ error = uids_kobject_init();
+
return error;
}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 734da579ad13..55fe0c7cd95f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -511,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr)
int i, depth = curr->lockdep_depth;
if (!depth) {
- printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
+ printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
return;
}
printk("%d lock%s held by %s/%d:\n",
- depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
+ depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
@@ -904,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
print_kernel_version();
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(check_source);
printk("\nbut task is already holding lock:\n");
print_lock(check_target);
@@ -1085,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr,
print_kernel_version();
printk( "------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
- curr->comm, curr->pid,
+ curr->comm, task_pid_nr(curr),
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
curr->hardirqs_enabled,
@@ -1237,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
print_kernel_version();
printk( "---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(next);
printk("\nbut task is already holding lock:\n");
print_lock(prev);
@@ -1521,7 +1521,7 @@ cache_hit:
}
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
- struct held_lock *hlock, int chain_head)
+ struct held_lock *hlock, int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
- lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+ lookup_chain_cache(chain_key, hlock->class)) {
/*
* Check whether last held lock:
*
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
#else
static inline int validate_chain(struct task_struct *curr,
struct lockdep_map *lock, struct held_lock *hlock,
- int chain_head)
+ int chain_head, u64 chain_key)
{
return 1;
}
@@ -1641,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
usage_str[prev_bit], usage_str[new_bit]);
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
- curr->comm, curr->pid,
+ curr->comm, task_pid_nr(curr),
trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
trace_hardirqs_enabled(curr),
@@ -1694,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
print_kernel_version();
printk( "---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lock(this);
if (forwards)
printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, id);
- curr->curr_chain_key = chain_key;
- if (!validate_chain(curr, lock, hlock, chain_head))
+ if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
return 0;
+ curr->curr_chain_key = chain_key;
curr->lockdep_depth++;
check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
@@ -2487,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
printk( "[ BUG: bad unlock balance detected! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is trying to release lock (",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
@@ -2737,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
printk( "[ BUG: bad contention detected! ]\n");
printk( "---------------------------------\n");
printk("%s/%d is trying to contend lock (",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
@@ -3072,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
printk( "[ BUG: held lock freed! ]\n");
printk( "-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
- curr->comm, curr->pid, mem_from, mem_to-1);
+ curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
lockdep_print_held_locks(curr);
@@ -3125,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr)
printk( "[ BUG: lock held at task exit time! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
- curr->comm, curr->pid);
+ curr->comm, task_pid_nr(curr));
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+ struct task_struct *curr = current;
+
+ if (unlikely(curr->lockdep_depth)) {
+ if (!debug_locks_off())
+ return;
+ printk("\n================================================\n");
+ printk( "[ BUG: lock held when returning to user space! ]\n");
+ printk( "------------------------------------------------\n");
+ printk("%s/%d is leaving the kernel with locks still held!\n",
+ curr->comm, curr->pid);
+ lockdep_print_held_locks(curr);
+ }
+}
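[Editor's note] lockdep_sys_exit() fires when a task returns to user space with locks still held. An illustrative example of the bug class it catches; example_mutex and the ioctl-like shape are hypothetical, only mutex_lock()/mutex_unlock() are real API.

#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

/* Buggy error path: returns to user space with example_mutex held. */
static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	mutex_lock(&example_mutex);

	if (cmd == 0xdead)
		return -EINVAL;		/* BUG: missing mutex_unlock() */

	mutex_unlock(&example_mutex);
	return 0;
}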
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index c851b2dcc685..8a135bd163c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,28 +25,38 @@
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct lock_class *class = v;
+ struct lock_class *class;
(*pos)++;
- if (class->lock_entry.next != &all_lock_classes)
- class = list_entry(class->lock_entry.next, struct lock_class,
- lock_entry);
- else
- class = NULL;
- m->private = class;
+ if (v == SEQ_START_TOKEN)
+ class = m->private;
+ else {
+ class = v;
+
+ if (class->lock_entry.next != &all_lock_classes)
+ class = list_entry(class->lock_entry.next,
+ struct lock_class, lock_entry);
+ else
+ class = NULL;
+ }
return class;
}
static void *l_start(struct seq_file *m, loff_t *pos)
{
- struct lock_class *class = m->private;
+ struct lock_class *class;
+ loff_t i = 0;
- if (&class->lock_entry == all_lock_classes.next)
- seq_printf(m, "all lock classes:\n");
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- return class;
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
+ if (++i == *pos)
+ return class;
+ }
+ return NULL;
}
static void l_stop(struct seq_file *m, void *v)
@@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class)
static int l_show(struct seq_file *m, void *v)
{
unsigned long nr_forward_deps, nr_backward_deps;
- struct lock_class *class = m->private;
+ struct lock_class *class = v;
struct lock_list *entry;
char c1, c2, c3, c4;
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(m, "all lock classes:\n");
+ return 0;
+ }
+
seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
seq_printf(m, " OPS:%8ld", class->ops);
@@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
{
struct lock_stat_seq *data = m->private;
- if (data->iter == data->stats)
- seq_header(m);
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
- if (data->iter == data->iter_end)
+ data->iter = data->stats + *pos;
+ if (data->iter >= data->iter_end)
data->iter = NULL;
return data->iter;
@@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++;
- data->iter = v;
- data->iter++;
+ if (v == SEQ_START_TOKEN)
+ data->iter = data->stats;
+ else {
+ data->iter = v;
+ data->iter++;
+ }
+
if (data->iter == data->iter_end)
data->iter = NULL;
@@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v)
static int ls_show(struct seq_file *m, void *v)
{
- struct lock_stat_seq *data = m->private;
+ if (v == SEQ_START_TOKEN)
+ seq_header(m);
+ else
+ seq_stats(m, v);
- seq_stats(m, data->iter);
return 0;
}
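[Editor's note] The conversion above is the standard seq_file idiom: ->start() returns SEQ_START_TOKEN for position zero so that ->show() can emit a header row, and iteration is restarted from *pos rather than cached in m->private. A minimal, self-contained sketch of that idiom with illustrative names.

#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *example_items[] = { "foo", "bar", "baz" };

static void *example_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;		/* ->show() prints the header */
	if (*pos <= ARRAY_SIZE(example_items))
		return (void *)example_items[*pos - 1];
	return NULL;
}

static int example_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "all example items:\n");
		return 0;
	}
	seq_printf(m, "%s\n", (const char *)v);
	return 0;
}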
diff --git a/kernel/marker.c b/kernel/marker.c
new file mode 100644
index 000000000000..ccb48d9a3657
--- /dev/null
+++ b/kernel/marker.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2007 Mathieu Desnoyers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/jhash.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/marker.h>
+#include <linux/err.h>
+
+extern struct marker __start___markers[];
+extern struct marker __stop___markers[];
+
+/*
+ * module_mutex nests inside markers_mutex. Markers mutex protects the builtin
+ * and module markers, the hash table and deferred_sync.
+ */
+static DEFINE_MUTEX(markers_mutex);
+
+/*
+ * Marker deferred synchronization.
+ * Upon marker probe_unregister, we delay the call to synchronize_sched() to
+ * accelerate mass unregistration (only when there is no more reference to a
+ * given module do we call synchronize_sched()). However, we need to make sure
+ * every critical region has ended before we re-arm a marker that has been
+ * unregistered and then registered back with a different probe data.
+ */
+static int deferred_sync;
+
+/*
+ * Marker hash table, containing the active markers.
+ * Protected by module_mutex.
+ */
+#define MARKER_HASH_BITS 6
+#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
+
+struct marker_entry {
+ struct hlist_node hlist;
+ char *format;
+ marker_probe_func *probe;
+ void *private;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ char name[0]; /* Contains name'\0'format'\0' */
+};
+
+static struct hlist_head marker_table[MARKER_TABLE_SIZE];
+
+/**
+ * __mark_empty_function - Empty probe callback
+ * @mdata: pointer of type const struct marker
+ * @fmt: format string
+ * @...: variable argument list
+ *
+ * Empty callback provided as a probe to the markers. By providing this to a
+ * disabled marker, we make sure the execution flow is always valid even
+ * though changing the function pointer and enabling the marker are two
+ * distinct operations that modify the execution flow of preemptible code.
+ */
+void __mark_empty_function(const struct marker *mdata, void *private,
+ const char *fmt, ...)
+{
+}
+EXPORT_SYMBOL_GPL(__mark_empty_function);
+
+/*
+ * Get marker if the marker is present in the marker hash table.
+ * Must be called with markers_mutex held.
+ * Returns NULL if not present.
+ */
+static struct marker_entry *get_marker(const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ u32 hash = jhash(name, strlen(name), 0);
+
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the marker to the marker hash table. Must be called with markers_mutex
+ * held.
+ */
+static int add_marker(const char *name, const char *format,
+ marker_probe_func *probe, void *private)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ size_t name_len = strlen(name) + 1;
+ size_t format_len = 0;
+ u32 hash = jhash(name, name_len-1, 0);
+
+ if (format)
+ format_len = strlen(format) + 1;
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name)) {
+ printk(KERN_NOTICE
+ "Marker %s busy, probe %p already installed\n",
+ name, e->probe);
+ return -EBUSY; /* Already there */
+ }
+ }
+ /*
+ * Using kmalloc here to allocate a variable length element. Could
+ * cause some memory fragmentation if overused.
+ */
+ e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
+ GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+ memcpy(&e->name[0], name, name_len);
+ if (format) {
+ e->format = &e->name[name_len];
+ memcpy(e->format, format, format_len);
+ trace_mark(core_marker_format, "name %s format %s",
+ e->name, e->format);
+ } else
+ e->format = NULL;
+ e->probe = probe;
+ e->private = private;
+ e->refcount = 0;
+ hlist_add_head(&e->hlist, head);
+ return 0;
+}
+
+/*
+ * Remove the marker from the marker hash table. Must be called with
+ * markers_mutex held.
+ */
+static void *remove_marker(const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ int found = 0;
+ size_t len = strlen(name) + 1;
+ void *private = NULL;
+ u32 hash = jhash(name, len-1, 0);
+
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ private = e->private;
+ hlist_del(&e->hlist);
+ kfree(e);
+ }
+ return private;
+}
+
+/*
+ * Set the mark_entry format to the format found in the element.
+ */
+static int marker_set_format(struct marker_entry **entry, const char *format)
+{
+ struct marker_entry *e;
+ size_t name_len = strlen((*entry)->name) + 1;
+ size_t format_len = strlen(format) + 1;
+
+ e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
+ GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+ memcpy(&e->name[0], (*entry)->name, name_len);
+ e->format = &e->name[name_len];
+ memcpy(e->format, format, format_len);
+ e->probe = (*entry)->probe;
+ e->private = (*entry)->private;
+ e->refcount = (*entry)->refcount;
+ hlist_add_before(&e->hlist, &(*entry)->hlist);
+ hlist_del(&(*entry)->hlist);
+ kfree(*entry);
+ *entry = e;
+ trace_mark(core_marker_format, "name %s format %s",
+ e->name, e->format);
+ return 0;
+}
+
+/*
+ * Sets the probe callback corresponding to one marker.
+ */
+static int set_marker(struct marker_entry **entry, struct marker *elem)
+{
+ int ret;
+ WARN_ON(strcmp((*entry)->name, elem->name) != 0);
+
+ if ((*entry)->format) {
+ if (strcmp((*entry)->format, elem->format) != 0) {
+ printk(KERN_NOTICE
+ "Format mismatch for probe %s "
+ "(%s), marker (%s)\n",
+ (*entry)->name,
+ (*entry)->format,
+ elem->format);
+ return -EPERM;
+ }
+ } else {
+ ret = marker_set_format(entry, elem->format);
+ if (ret)
+ return ret;
+ }
+ elem->call = (*entry)->probe;
+ elem->private = (*entry)->private;
+ elem->state = 1;
+ return 0;
+}
+
+/*
+ * Disable a marker and its probe callback.
+ * Note: only a synchronize_sched() issued after setting elem->call to the
+ * empty function ensures that the original callback is no longer in use; this
+ * is guaranteed by the preemption disabling around the call site.
+ */
+static void disable_marker(struct marker *elem)
+{
+ elem->state = 0;
+ elem->call = __mark_empty_function;
+ /*
+ * Leave the private data and id there, because removal is racy and
+ * should be done only after a synchronize_sched(). These are never used
+ * until the next initialization anyway.
+ */
+}
+
+/**
+ * marker_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ * @probe_module: module address of the probe being updated
+ * @refcount: number of references left to the given probe_module (out)
+ *
+ * Updates the probe callback corresponding to a range of markers.
+ * Must be called with markers_mutex held.
+ */
+void marker_update_probe_range(struct marker *begin,
+ struct marker *end, struct module *probe_module,
+ int *refcount)
+{
+ struct marker *iter;
+ struct marker_entry *mark_entry;
+
+ for (iter = begin; iter < end; iter++) {
+ mark_entry = get_marker(iter->name);
+ if (mark_entry && mark_entry->refcount) {
+ set_marker(&mark_entry, iter);
+ /*
+ * ignore error, continue
+ */
+ if (probe_module)
+ if (probe_module ==
+ __module_text_address((unsigned long)mark_entry->probe))
+ (*refcount)++;
+ } else {
+ disable_marker(iter);
+ }
+ }
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ * Issues a synchronize_sched() when no reference to the module passed
+ * as parameter is found in the probes so the probe module can be
+ * safely unloaded from now on.
+ */
+static void marker_update_probes(struct module *probe_module)
+{
+ int refcount = 0;
+
+ mutex_lock(&markers_mutex);
+ /* Core kernel markers */
+ marker_update_probe_range(__start___markers,
+ __stop___markers, probe_module, &refcount);
+ /* Markers in modules. */
+ module_update_markers(probe_module, &refcount);
+ if (probe_module && refcount == 0) {
+ synchronize_sched();
+ deferred_sync = 0;
+ }
+ mutex_unlock(&markers_mutex);
+}
+
+/**
+ * marker_probe_register - Connect a probe to a marker
+ * @name: marker name
+ * @format: format string
+ * @probe: probe handler
+ * @private: probe private data
+ *
+ * The private data must be a valid allocated memory address, or NULL.
+ * Returns 0 if ok, error value on error.
+ */
+int marker_probe_register(const char *name, const char *format,
+ marker_probe_func *probe, void *private)
+{
+ struct marker_entry *entry;
+ int ret = 0, need_update = 0;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ if (entry && entry->refcount) {
+ ret = -EBUSY;
+ goto end;
+ }
+ if (deferred_sync) {
+ synchronize_sched();
+ deferred_sync = 0;
+ }
+ ret = add_marker(name, format, probe, private);
+ if (ret)
+ goto end;
+ need_update = 1;
+end:
+ mutex_unlock(&markers_mutex);
+ if (need_update)
+ marker_update_probes(NULL);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(marker_probe_register);
+
+/**
+ * marker_probe_unregister - Disconnect a probe from a marker
+ * @name: marker name
+ *
+ * Returns the private data given to marker_probe_register, or an ERR_PTR().
+ */
+void *marker_probe_unregister(const char *name)
+{
+ struct module *probe_module;
+ struct marker_entry *entry;
+ void *private;
+ int need_update = 0;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ if (!entry) {
+ private = ERR_PTR(-ENOENT);
+ goto end;
+ }
+ entry->refcount = 0;
+ /* In which module is the probe handler? */
+ probe_module = __module_text_address((unsigned long)entry->probe);
+ private = remove_marker(name);
+ deferred_sync = 1;
+ need_update = 1;
+end:
+ mutex_unlock(&markers_mutex);
+ if (need_update)
+ marker_update_probes(probe_module);
+ return private;
+}
+EXPORT_SYMBOL_GPL(marker_probe_unregister);
+
+/**
+ * marker_probe_unregister_private_data - Disconnect a probe from a marker
+ * @private: probe private data
+ *
+ * Unregister a marker by providing the registered private data.
+ * Returns the private data given to marker_probe_register, or an ERR_PTR().
+ */
+void *marker_probe_unregister_private_data(void *private)
+{
+ struct module *probe_module;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *entry;
+ int found = 0;
+ unsigned int i;
+ int need_update = 0;
+
+ mutex_lock(&markers_mutex);
+ for (i = 0; i < MARKER_TABLE_SIZE; i++) {
+ head = &marker_table[i];
+ hlist_for_each_entry(entry, node, head, hlist) {
+ if (entry->private == private) {
+ found = 1;
+ goto iter_end;
+ }
+ }
+ }
+iter_end:
+ if (!found) {
+ private = ERR_PTR(-ENOENT);
+ goto end;
+ }
+ entry->refcount = 0;
+ /* In which module is the probe handler? */
+ probe_module = __module_text_address((unsigned long)entry->probe);
+ private = remove_marker(entry->name);
+ deferred_sync = 1;
+ need_update = 1;
+end:
+ mutex_unlock(&markers_mutex);
+ if (need_update)
+ marker_update_probes(probe_module);
+ return private;
+}
+EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
+
+/**
+ * marker_arm - Arm a marker
+ * @name: marker name
+ *
+ * Activate a marker. It keeps a reference count of how many times the
+ * marker has been armed/disarmed.
+ * Returns 0 if ok, error value on error.
+ */
+int marker_arm(const char *name)
+{
+ struct marker_entry *entry;
+ int ret = 0, need_update = 0;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ if (!entry) {
+ ret = -ENOENT;
+ goto end;
+ }
+ /*
+ * Only need to update probes when refcount passes from 0 to 1.
+ */
+ if (entry->refcount++)
+ goto end;
+ need_update = 1;
+end:
+ mutex_unlock(&markers_mutex);
+ if (need_update)
+ marker_update_probes(NULL);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(marker_arm);
+
+/**
+ * marker_disarm - Disarm a marker
+ * @name: marker name
+ *
+ * Disarm a marker. It keeps a reference count of how many times the marker
+ * has been armed/disarmed.
+ * Returns 0 if ok, error value on error.
+ */
+int marker_disarm(const char *name)
+{
+ struct marker_entry *entry;
+ int ret = 0, need_update = 0;
+
+ mutex_lock(&markers_mutex);
+ entry = get_marker(name);
+ if (!entry) {
+ ret = -ENOENT;
+ goto end;
+ }
+ /*
+ * Only permit decrementing the refcount if it is higher than 0.
+ * Do probe update only on 1 -> 0 transition.
+ */
+ if (entry->refcount) {
+ if (--entry->refcount)
+ goto end;
+ } else {
+ ret = -EPERM;
+ goto end;
+ }
+ need_update = 1;
+end:
+ mutex_unlock(&markers_mutex);
+ if (need_update)
+ marker_update_probes(NULL);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(marker_disarm);
+
+/**
+ * marker_get_private_data - Get a marker's probe private data
+ * @name: marker name
+ *
+ * Returns the private data pointer, or an ERR_PTR.
+ * The private data pointer should _only_ be dereferenced if the caller is the
+ * owner of the data, or its content could vanish. This is mostly used to
+ * confirm that a caller is the owner of a registered probe.
+ */
+void *marker_get_private_data(const char *name)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct marker_entry *e;
+ size_t name_len = strlen(name) + 1;
+ u32 hash = jhash(name, name_len-1, 0);
+ int found = 0;
+
+ head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
+ hlist_for_each_entry(e, node, head, hlist) {
+ if (!strcmp(name, e->name)) {
+ found = 1;
+ return e->private;
+ }
+ }
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(marker_get_private_data);
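[Editor's note] A hedged usage sketch of the API introduced by this file: a marker is placed with trace_mark() at the instrumentation site, and a probe module connects and arms it. The subsystem/event name, format string and probe body are illustrative; trace_mark(), marker_probe_register() and marker_arm() are the interfaces defined by this patch.

#include <linux/init.h>
#include <linux/marker.h>
#include <linux/module.h>

/* At the instrumented site (value is illustrative): */
static void example_do_something(int value)
{
	trace_mark(subsys_eventname, "value %d", value);
}

/* In the probe module: */
static void probe_subsys_eventname(const struct marker *mdata,
				   void *private, const char *fmt, ...)
{
	/* fmt is "value %d"; a real probe would walk the va_list here */
}

static int __init example_probe_init(void)
{
	int ret;

	ret = marker_probe_register("subsys_eventname", "value %d",
				    probe_subsys_eventname, NULL);
	if (ret)
		return ret;
	/* register only stores the probe; arming actually enables it */
	return marker_arm("subsys_eventname");
}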
diff --git a/kernel/module.c b/kernel/module.c
index db0ead0363e2..3202c9950073 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -104,7 +105,7 @@ void __module_put_and_exit(struct module *mod, long code)
do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
-
+
/* Find a module section: 0 means not found. */
static unsigned int find_sec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -178,7 +179,7 @@ static unsigned long __find_symbol(const char *name,
struct module *mod;
const struct kernel_symbol *ks;
- /* Core kernel first. */
+ /* Core kernel first. */
*owner = NULL;
ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
if (ks) {
@@ -230,7 +231,7 @@ static unsigned long __find_symbol(const char *name,
return ks->value;
}
- /* Now try modules. */
+ /* Now try modules. */
list_for_each_entry(mod, &modules, list) {
*owner = mod;
ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
@@ -284,7 +285,7 @@ static unsigned long __find_symbol(const char *name,
}
}
DEBUGP("Failed to find symbol %s\n", name);
- return 0;
+ return 0;
}
/* Search for module by name: must hold module_mutex. */
@@ -440,7 +441,7 @@ static int percpu_modinit(void)
}
return 0;
-}
+}
__initcall(percpu_modinit);
#else /* ... !CONFIG_SMP */
static inline void *percpu_modalloc(unsigned long size, unsigned long align,
@@ -482,8 +483,8 @@ static int modinfo_##field##_exists(struct module *mod) \
} \
static void free_modinfo_##field(struct module *mod) \
{ \
- kfree(mod->field); \
- mod->field = NULL; \
+ kfree(mod->field); \
+ mod->field = NULL; \
} \
static struct module_attribute modinfo_##field = { \
.attr = { .name = __stringify(field), .mode = 0444 }, \
@@ -692,8 +693,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
}
/* If it has an init func, it must have an exit func to unload */
- if ((mod->init != NULL && mod->exit == NULL)
- || mod->unsafe) {
+ if (mod->init && !mod->exit) {
forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
@@ -741,11 +741,6 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
seq_printf(m, "%s,", use->module_which_uses->name);
}
- if (mod->unsafe) {
- printed_something = 1;
- seq_printf(m, "[unsafe],");
- }
-
if (mod->init != NULL && mod->exit == NULL) {
printed_something = 1;
seq_printf(m, "[permanent],");
@@ -995,7 +990,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
struct module_sect_attrs *sect_attrs;
struct module_sect_attr *sattr;
struct attribute **gattr;
-
+
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
if (sechdrs[i].sh_flags & SHF_ALLOC)
@@ -1053,6 +1048,100 @@ static void remove_sect_attrs(struct module *mod)
}
}
+/*
+ * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
+ */
+
+struct module_notes_attrs {
+ struct kobject *dir;
+ unsigned int notes;
+ struct bin_attribute attrs[0];
+};
+
+static ssize_t module_notes_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ /*
+ * The caller checked the pos and count against our size.
+ */
+ memcpy(buf, bin_attr->private + pos, count);
+ return count;
+}
+
+static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
+ unsigned int i)
+{
+ if (notes_attrs->dir) {
+ while (i-- > 0)
+ sysfs_remove_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]);
+ kobject_del(notes_attrs->dir);
+ }
+ kfree(notes_attrs);
+}
+
+static void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *secstrings, Elf_Shdr *sechdrs)
+{
+ unsigned int notes, loaded, i;
+ struct module_notes_attrs *notes_attrs;
+ struct bin_attribute *nattr;
+
+ /* Count notes sections and allocate structures. */
+ notes = 0;
+ for (i = 0; i < nsect; i++)
+ if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+ (sechdrs[i].sh_type == SHT_NOTE))
+ ++notes;
+
+ if (notes == 0)
+ return;
+
+ notes_attrs = kzalloc(sizeof(*notes_attrs)
+ + notes * sizeof(notes_attrs->attrs[0]),
+ GFP_KERNEL);
+ if (notes_attrs == NULL)
+ return;
+
+ notes_attrs->notes = notes;
+ nattr = &notes_attrs->attrs[0];
+ for (loaded = i = 0; i < nsect; ++i) {
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+ if (sechdrs[i].sh_type == SHT_NOTE) {
+ nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+ nattr->attr.mode = S_IRUGO;
+ nattr->size = sechdrs[i].sh_size;
+ nattr->private = (void *) sechdrs[i].sh_addr;
+ nattr->read = module_notes_read;
+ ++nattr;
+ }
+ ++loaded;
+ }
+
+ notes_attrs->dir = kobject_add_dir(&mod->mkobj.kobj, "notes");
+ if (!notes_attrs->dir)
+ goto out;
+
+ for (i = 0; i < notes; ++i)
+ if (sysfs_create_bin_file(notes_attrs->dir,
+ &notes_attrs->attrs[i]))
+ goto out;
+
+ mod->notes_attrs = notes_attrs;
+ return;
+
+ out:
+ free_notes_attrs(notes_attrs, i);
+}
+
+static void remove_notes_attrs(struct module *mod)
+{
+ if (mod->notes_attrs)
+ free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
+}
+
#else
static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
@@ -1063,6 +1152,15 @@ static inline void add_sect_attrs(struct module *mod, unsigned int nsect,
static inline void remove_sect_attrs(struct module *mod)
{
}
+
+static inline void add_notes_attrs(struct module *mod, unsigned int nsect,
+ char *sectstrings, Elf_Shdr *sechdrs)
+{
+}
+
+static inline void remove_notes_attrs(struct module *mod)
+{
+}
#endif /* CONFIG_KALLSYMS */
#ifdef CONFIG_SYSFS
@@ -1197,6 +1295,7 @@ static void free_module(struct module *mod)
{
/* Delete from various lists */
stop_machine_run(__unlink_module, mod, NR_CPUS);
+ remove_notes_attrs(mod);
remove_sect_attrs(mod);
mod_kobject_remove(mod);
@@ -1249,14 +1348,14 @@ static int verify_export_symbols(struct module *mod)
const unsigned long *crc;
for (i = 0; i < mod->num_syms; i++)
- if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
+ if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
name = mod->syms[i].name;
ret = -ENOEXEC;
goto dup;
}
for (i = 0; i < mod->num_gpl_syms; i++)
- if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
+ if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
name = mod->gpl_syms[i].name;
ret = -ENOEXEC;
goto dup;
@@ -1574,6 +1673,8 @@ static struct module *load_module(void __user *umod,
unsigned int unusedcrcindex;
unsigned int unusedgplindex;
unsigned int unusedgplcrcindex;
+ unsigned int markersindex;
+ unsigned int markersstringsindex;
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -1782,7 +1883,8 @@ static struct module *load_module(void __user *umod,
module_unload_init(mod);
/* Initialize kobject, so we can reference it. */
- if (mod_sysfs_init(mod) != 0)
+ err = mod_sysfs_init(mod);
+ if (err)
goto cleanup;
/* Set up license info based on the info section */
@@ -1829,7 +1931,7 @@ static struct module *load_module(void __user *umod,
mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;
#ifdef CONFIG_MODVERSIONS
- if ((mod->num_syms && !crcindex) ||
+ if ((mod->num_syms && !crcindex) ||
(mod->num_gpl_syms && !gplcrcindex) ||
(mod->num_gpl_future_syms && !gplfuturecrcindex) ||
(mod->num_unused_syms && !unusedcrcindex) ||
@@ -1839,6 +1941,9 @@ static struct module *load_module(void __user *umod,
add_taint_module(mod, TAINT_FORCED_MODULE);
}
#endif
+ markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
+ markersstringsindex = find_sec(hdr, sechdrs, secstrings,
+ "__markers_strings");
/* Now do relocations. */
for (i = 1; i < hdr->e_shnum; i++) {
@@ -1861,6 +1966,11 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto cleanup;
}
+#ifdef CONFIG_MARKERS
+ mod->markers = (void *)sechdrs[markersindex].sh_addr;
+ mod->num_markers =
+ sechdrs[markersindex].sh_size / sizeof(*mod->markers);
+#endif
/* Find duplicate symbols */
err = verify_export_symbols(mod);
@@ -1879,6 +1989,11 @@ static struct module *load_module(void __user *umod,
add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
+#ifdef CONFIG_MARKERS
+ if (!mod->taints)
+ marker_update_probe_range(mod->markers,
+ mod->markers + mod->num_markers, NULL, NULL);
+#endif
err = module_finalize(hdr, sechdrs, mod);
if (err < 0)
goto cleanup;
@@ -1916,7 +2031,7 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto arch_cleanup;
- err = mod_sysfs_setup(mod,
+ err = mod_sysfs_setup(mod,
(struct kernel_param *)
sechdrs[setupindex].sh_addr,
sechdrs[setupindex].sh_size
@@ -1924,11 +2039,12 @@ static struct module *load_module(void __user *umod,
if (err < 0)
goto arch_cleanup;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
+ add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
/* Size of section 0 is 0, so this works well if no unwind info. */
mod->unwind_info = unwind_add_table(mod,
- (void *)sechdrs[unwindex].sh_addr,
- sechdrs[unwindex].sh_size);
+ (void *)sechdrs[unwindex].sh_addr,
+ sechdrs[unwindex].sh_size);
/* Get rid of temporary copy */
vfree(hdr);
@@ -2011,15 +2127,10 @@ sys_init_module(void __user *umod,
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
- if (mod->unsafe)
- printk(KERN_ERR "%s: module is now stuck!\n",
- mod->name);
- else {
- module_put(mod);
- mutex_lock(&module_mutex);
- free_module(mod);
- mutex_unlock(&module_mutex);
- }
+ module_put(mod);
+ mutex_lock(&module_mutex);
+ free_module(mod);
+ mutex_unlock(&module_mutex);
return ret;
}
@@ -2050,7 +2161,7 @@ static inline int within(unsigned long addr, void *start, unsigned long size)
*/
static inline int is_arm_mapping_symbol(const char *str)
{
- return str[0] == '$' && strchr("atd", str[1])
+ return str[0] == '$' && strchr("atd", str[1])
&& (str[2] == '\0' || str[2] == '.');
}
@@ -2065,11 +2176,11 @@ static const char *get_ksymbol(struct module *mod,
/* At worse, next value is at end of module */
if (within(addr, mod->module_init, mod->init_size))
nextval = (unsigned long)mod->module_init+mod->init_text_size;
- else
+ else
nextval = (unsigned long)mod->module_core+mod->core_text_size;
/* Scan for closest preceeding symbol, and next symbol. (ELF
- starts real symbols at 1). */
+ starts real symbols at 1). */
for (i = 1; i < mod->num_symtab; i++) {
if (mod->symtab[i].st_shndx == SHN_UNDEF)
continue;
@@ -2311,7 +2422,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
list_for_each_entry(mod, &modules, list) {
if (mod->num_exentries == 0)
continue;
-
+
e = search_extable(mod->extable,
mod->extable + mod->num_exentries - 1,
addr);
@@ -2321,7 +2432,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
preempt_enable();
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
@@ -2474,3 +2585,18 @@ EXPORT_SYMBOL(module_remove_driver);
void struct_module(struct module *mod) { return; }
EXPORT_SYMBOL(struct_module);
#endif
+
+#ifdef CONFIG_MARKERS
+void module_update_markers(struct module *probe_module, int *refcount)
+{
+ struct module *mod;
+
+ mutex_lock(&module_mutex);
+ list_for_each_entry(mod, &modules, list)
+ if (!mod->taints)
+ marker_update_probe_range(mod->markers,
+ mod->markers + mod->num_markers,
+ probe_module, refcount);
+ mutex_unlock(&module_mutex);
+}
+#endif
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b86564dd9..d7fe50cc556f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
EXPORT_SYMBOL(__mutex_init);
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_lock);
+#endif
static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
* Lock a mutex (possibly interruptible), slowpath:
*/
static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ unsigned long ip)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ mutex_acquire(&lock->dep_map, subclass, 0, ip);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
if (old_val == 1)
goto done;
- lock_contended(&lock->dep_map, _RET_IP_);
+ lock_contended(&lock->dep_map, ip);
for (;;) {
/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
mutex_remove_waiter(lock, &waiter, task_thread_info(task));
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, 1, ip);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
return 0;
}
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
-
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
__mutex_unlock_common_slowpath(lock_count, 1);
}
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock_interruptible);
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
+#endif
/*
* Spinlock based trylock, we take the spinlock and check whether we
diff --git a/kernel/notifier.c b/kernel/notifier.c
new file mode 100644
index 000000000000..4253f472f060
--- /dev/null
+++ b/kernel/notifier.c
@@ -0,0 +1,539 @@
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Notifier list for kernel code which wants to be called
+ * at shutdown. This is used to stop any idling DMA operations
+ * and the like.
+ */
+BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
+/*
+ * Notifier chain core routines. The exported routines below
+ * are layered on top of these, with appropriate locking added.
+ */
+
+static int notifier_chain_register(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if (n->priority > (*nl)->priority)
+ break;
+ nl = &((*nl)->next);
+ }
+ n->next = *nl;
+ rcu_assign_pointer(*nl, n);
+ return 0;
+}
+
+static int notifier_chain_unregister(struct notifier_block **nl,
+ struct notifier_block *n)
+{
+ while ((*nl) != NULL) {
+ if ((*nl) == n) {
+ rcu_assign_pointer(*nl, n->next);
+ return 0;
+ }
+ nl = &((*nl)->next);
+ }
+ return -ENOENT;
+}
+
+/**
+ * notifier_call_chain - Informs the registered notifiers about an event.
+ * @nl: Pointer to head of the notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ * @nr_to_call: Number of notifier functions to be called. Pass -1 to
+ * call all of them.
+ * @nr_calls: Records the number of notifications sent. Pass NULL if
+ * the count is not needed.
+ * @returns: notifier_call_chain returns the value returned by the
+ * last notifier function called.
+ */
+static int __kprobes notifier_call_chain(struct notifier_block **nl,
+ unsigned long val, void *v,
+ int nr_to_call, int *nr_calls)
+{
+ int ret = NOTIFY_DONE;
+ struct notifier_block *nb, *next_nb;
+
+ nb = rcu_dereference(*nl);
+
+ while (nb && nr_to_call) {
+ next_nb = rcu_dereference(nb->next);
+ ret = nb->notifier_call(nb, val, v);
+
+ if (nr_calls)
+ (*nr_calls)++;
+
+ if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
+ break;
+ nb = next_nb;
+ nr_to_call--;
+ }
+ return ret;
+}
+
+/*
+ * Atomic notifier chain routines. Registration and unregistration
+ * use a spinlock, and call_chain is synchronized by RCU (no locks).
+ */
+
+/**
+ * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to an atomic notifier chain.
+ *
+ * Currently always returns zero.
+ */
+int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+
+/**
+ * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an atomic notifier chain.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *n)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_unregister(&nh->head, n);
+ spin_unlock_irqrestore(&nh->lock, flags);
+ synchronize_rcu();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
+
+/**
+ * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
+ * @nh: Pointer to head of the atomic notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ * @nr_to_call: See the comment for notifier_call_chain.
+ * @nr_calls: See the comment for notifier_call_chain.
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an atomic context, so they must not block.
+ * This routine uses RCU to synchronize with changes to the chain.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v,
+ int nr_to_call, int *nr_calls)
+{
+ int ret;
+
+ rcu_read_lock();
+ ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
+
+int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
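
For illustration, a minimal sketch of how a client would use the atomic chain
API above (not part of this patch; the chain name, handler and priority value
below are hypothetical):

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);

/* Called under the chain's rcu_read_lock(): atomic context, must not sleep. */
static int example_event(struct notifier_block *nb, unsigned long val, void *v)
{
	return NOTIFY_OK;	/* or NOTIFY_STOP to end the walk early */
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 10,	/* higher-priority entries are called first */
};

/*
 * Typical life cycle:
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, 0, NULL);
 *	atomic_notifier_chain_unregister(&example_chain, &example_nb);
 */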
+
+/*
+ * Blocking notifier chain routines. All access to the chain is
+ * synchronized by an rwsem.
+ */
+
+/**
+ * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a blocking notifier chain.
+ * Must be called in process context.
+ *
+ * Currently always returns zero.
+ */
+int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_register(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
+
+/**
+ * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a blocking notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call down_write().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ down_write(&nh->rwsem);
+ ret = notifier_chain_unregister(&nh->head, n);
+ up_write(&nh->rwsem);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
+
+/**
+ * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
+ * @nh: Pointer to head of the blocking notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ * @nr_to_call: See comment for notifier_call_chain.
+ * @nr_calls: See comment for notifier_call_chain.
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v,
+ int nr_to_call, int *nr_calls)
+{
+ int ret = NOTIFY_DONE;
+
+ /*
+ * We check the head outside the lock, but if this access is
+ * racy then it does not matter what the result of the test
+ * is; we re-check the list after having taken the lock anyway:
+ */
+ if (rcu_dereference(nh->head)) {
+ down_read(&nh->rwsem);
+ ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
+ nr_calls);
+ up_read(&nh->rwsem);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
+
+int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
+
+/*
+ * Raw notifier chain routines. There is no protection;
+ * the caller must provide it. Use at your own risk!
+ */
+
+/**
+ * raw_notifier_chain_register - Add notifier to a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Currently always returns zero.
+ */
+int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_register(&nh->head, n);
+}
+EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
+
+/**
+ * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from a raw notifier chain.
+ * All locking must be provided by the caller.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *n)
+{
+ return notifier_chain_unregister(&nh->head, n);
+}
+EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
+
+/**
+ * __raw_notifier_call_chain - Call functions in a raw notifier chain
+ * @nh: Pointer to head of the raw notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ * @nr_to_call: See comment for notifier_call_chain.
+ * @nr_calls: See comment for notifier_call_chain
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in an undefined context.
+ * All locking must be provided by the caller.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v,
+ int nr_to_call, int *nr_calls)
+{
+ return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
+}
+EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
+
+int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return __raw_notifier_call_chain(nh, val, v, -1, NULL);
+}
+EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
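
A brief sketch of the caller-provided locking the comment above requires (not
part of this patch; the chain, mutex and wrapper below are hypothetical):

#include <linux/mutex.h>
#include <linux/notifier.h>

static RAW_NOTIFIER_HEAD(example_raw_chain);
static DEFINE_MUTEX(example_raw_lock);

/* All registration and invocation goes through the same subsystem mutex. */
static int example_raw_notify(unsigned long event, void *data)
{
	int ret;

	mutex_lock(&example_raw_lock);
	ret = raw_notifier_call_chain(&example_raw_chain, event, data);
	mutex_unlock(&example_raw_lock);
	return ret;
}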
+
+/*
+ * SRCU notifier chain routines. Registration and unregistration
+ * use a mutex, and call_chain is synchronized by SRCU (no locks).
+ */
+
+/**
+ * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: New entry in notifier chain
+ *
+ * Adds a notifier to an SRCU notifier chain.
+ * Must be called in process context.
+ *
+ * Currently always returns zero.
+ */
+int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_register(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_register(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
+
+/**
+ * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @n: Entry to remove from notifier chain
+ *
+ * Removes a notifier from an SRCU notifier chain.
+ * Must be called from process context.
+ *
+ * Returns zero on success or %-ENOENT on failure.
+ */
+int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *n)
+{
+ int ret;
+
+ /*
+ * This code gets used during boot-up, when task switching is
+ * not yet working and interrupts must remain disabled. At
+ * such times we must not call mutex_lock().
+ */
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ return notifier_chain_unregister(&nh->head, n);
+
+ mutex_lock(&nh->mutex);
+ ret = notifier_chain_unregister(&nh->head, n);
+ mutex_unlock(&nh->mutex);
+ synchronize_srcu(&nh->srcu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
+
+/**
+ * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
+ * @nh: Pointer to head of the SRCU notifier chain
+ * @val: Value passed unmodified to notifier function
+ * @v: Pointer passed unmodified to notifier function
+ * @nr_to_call: See comment for notifier_call_chain.
+ * @nr_calls: See comment for notifier_call_chain
+ *
+ * Calls each function in a notifier chain in turn. The functions
+ * run in a process context, so they are allowed to block.
+ *
+ * If the return value of the notifier can be and'ed
+ * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
+ * will return immediately, with the return value of
+ * the notifier function which halted execution.
+ * Otherwise the return value is the return value
+ * of the last notifier function called.
+ */
+int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v,
+ int nr_to_call, int *nr_calls)
+{
+ int ret;
+ int idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+ ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
+
+int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v)
+{
+ return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
+}
+EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
+
+/**
+ * srcu_init_notifier_head - Initialize an SRCU notifier head
+ * @nh: Pointer to head of the srcu notifier chain
+ *
+ * Unlike other sorts of notifier heads, SRCU notifier heads require
+ * dynamic initialization. Be sure to call this routine before
+ * calling any of the other SRCU notifier routines for this head.
+ *
+ * If an SRCU notifier head is deallocated, it must first be cleaned
+ * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
+ * per-cpu data (used by the SRCU mechanism) will leak.
+ */
+void srcu_init_notifier_head(struct srcu_notifier_head *nh)
+{
+ mutex_init(&nh->mutex);
+ if (init_srcu_struct(&nh->srcu) < 0)
+ BUG();
+ nh->head = NULL;
+}
+EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
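
A short sketch of the dynamic-initialization rule described above (not part of
this patch; the head and the init routine are hypothetical):

#include <linux/notifier.h>

static struct srcu_notifier_head example_srcu_chain;

static int __init example_srcu_setup(void)
{
	/* Must run before any register/call/unregister on this head. */
	srcu_init_notifier_head(&example_srcu_chain);
	return 0;
}

/*
 * Before the head is freed, srcu_cleanup_notifier_head(&example_srcu_chain)
 * releases the per-cpu SRCU state, as the comment above notes.
 */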
+
+/**
+ * register_reboot_notifier - Register function to be called at reboot time
+ * @nb: Info about notifier function to be called
+ *
+ * Registers a function with the list of functions
+ * to be called at reboot time.
+ *
+ * Currently always returns zero, as blocking_notifier_chain_register()
+ * always returns zero.
+ */
+int register_reboot_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&reboot_notifier_list, nb);
+}
+EXPORT_SYMBOL(register_reboot_notifier);
+
+/**
+ * unregister_reboot_notifier - Unregister previously registered reboot notifier
+ * @nb: Hook to be unregistered
+ *
+ * Unregisters a previously registered reboot
+ * notifier function.
+ *
+ * Returns zero on success, or %-ENOENT on failure.
+ */
+int unregister_reboot_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
+}
+EXPORT_SYMBOL(unregister_reboot_notifier);
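
As an illustration of the two wrappers above, a hypothetical driver hook on the
reboot chain (not part of this patch):

#include <linux/notifier.h>

static int example_reboot_event(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* quiesce hardware before the machine goes down */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

/*
 * register_reboot_notifier(&example_reboot_nb) at probe time,
 * unregister_reboot_notifier(&example_reboot_nb) at remove time.
 */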
+
+static ATOMIC_NOTIFIER_HEAD(die_chain);
+
+int notify_die(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+ };
+ return atomic_notifier_call_chain(&die_chain, val, &args);
+}
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ vmalloc_sync_all();
+ return atomic_notifier_chain_register(&die_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&die_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_die_notifier);
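
And a matching sketch for the die chain (not part of this patch; the handler
name is hypothetical, while struct die_args is the structure filled in by
notify_die() above):

#include <linux/kdebug.h>
#include <linux/notifier.h>

static int example_die_event(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	/* inspect args->regs, args->trapnr, args->signr as needed */
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_event,
};

/*
 * register_die_notifier(&example_die_nb) to hook in, and later
 * unregister_die_notifier(&example_die_nb) to detach.
 */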
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
new file mode 100644
index 000000000000..aead4d69f62b
--- /dev/null
+++ b/kernel/ns_cgroup.c
@@ -0,0 +1,100 @@
+/*
+ * ns_cgroup.c - namespace cgroup subsystem
+ *
+ * Copyright 2006, 2007 IBM Corp
+ */
+
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+
+struct ns_cgroup {
+ struct cgroup_subsys_state css;
+ spinlock_t lock;
+};
+
+struct cgroup_subsys ns_subsys;
+
+static inline struct ns_cgroup *cgroup_to_ns(
+ struct cgroup *cgroup)
+{
+ return container_of(cgroup_subsys_state(cgroup, ns_subsys_id),
+ struct ns_cgroup, css);
+}
+
+int ns_cgroup_clone(struct task_struct *task)
+{
+ return cgroup_clone(task, &ns_subsys);
+}
+
+/*
+ * Rules:
+ * 1. you can only enter a cgroup which is a child of your current
+ * cgroup
+ * 2. you can only place another process into a cgroup if
+ * a. you have CAP_SYS_ADMIN
+ * b. your cgroup is an ancestor of the task's destination cgroup
+ * (hence either you are in the same cgroup as the task, or in an
+ * ancestor cgroup thereof)
+ */
+static int ns_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *new_cgroup, struct task_struct *task)
+{
+ struct cgroup *orig;
+
+ if (current != task) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!cgroup_is_descendant(new_cgroup))
+ return -EPERM;
+ }
+
+ if (atomic_read(&new_cgroup->count) != 0)
+ return -EPERM;
+
+ orig = task_cgroup(task, ns_subsys_id);
+ if (orig && orig != new_cgroup->parent)
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Rules: you can only create a cgroup if
+ * 1. you are capable(CAP_SYS_ADMIN)
+ * 2. the target cgroup is a descendant of your own cgroup
+ */
+static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
+ struct cgroup *cgroup)
+{
+ struct ns_cgroup *ns_cgroup;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ if (!cgroup_is_descendant(cgroup))
+ return ERR_PTR(-EPERM);
+
+ ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
+ if (!ns_cgroup)
+ return ERR_PTR(-ENOMEM);
+ spin_lock_init(&ns_cgroup->lock);
+ return &ns_cgroup->css;
+}
+
+static void ns_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cgroup)
+{
+ struct ns_cgroup *ns_cgroup;
+
+ ns_cgroup = cgroup_to_ns(cgroup);
+ kfree(ns_cgroup);
+}
+
+struct cgroup_subsys ns_subsys = {
+ .name = "ns",
+ .can_attach = ns_can_attach,
+ .create = ns_create,
+ .destroy = ns_destroy,
+ .subsys_id = ns_subsys_id,
+};
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index a4fb7d46971f..79f871bc0ef4 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -20,24 +20,12 @@
#include <linux/mnt_namespace.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
+#include <net/net_namespace.h>
static struct kmem_cache *nsproxy_cachep;
struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy);
-static inline void get_nsproxy(struct nsproxy *ns)
-{
- atomic_inc(&ns->count);
-}
-
-void get_task_namespaces(struct task_struct *tsk)
-{
- struct nsproxy *ns = tsk->nsproxy;
- if (ns) {
- get_nsproxy(ns);
- }
-}
-
/*
* creates a copy of "orig" with refcount 1.
*/
@@ -86,7 +74,7 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_ipc;
}
- new_nsp->pid_ns = copy_pid_ns(flags, tsk->nsproxy->pid_ns);
+ new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
if (IS_ERR(new_nsp->pid_ns)) {
err = PTR_ERR(new_nsp->pid_ns);
goto out_pid;
@@ -98,8 +86,17 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_user;
}
+ new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
+ if (IS_ERR(new_nsp->net_ns)) {
+ err = PTR_ERR(new_nsp->net_ns);
+ goto out_net;
+ }
+
return new_nsp;
+out_net:
+ if (new_nsp->user_ns)
+ put_user_ns(new_nsp->user_ns);
out_user:
if (new_nsp->pid_ns)
put_pid_ns(new_nsp->pid_ns);
@@ -132,7 +129,8 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
get_nsproxy(old_ns);
- if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER)))
+ if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
+ CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN)) {
@@ -146,7 +144,14 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
goto out;
}
+ err = ns_cgroup_clone(tsk);
+ if (err) {
+ put_nsproxy(new_ns);
+ goto out;
+ }
+
tsk->nsproxy = new_ns;
+
out:
put_nsproxy(old_ns);
return err;
@@ -164,6 +169,7 @@ void free_nsproxy(struct nsproxy *ns)
put_pid_ns(ns->pid_ns);
if (ns->user_ns)
put_user_ns(ns->user_ns);
+ put_net(ns->net_ns);
kmem_cache_free(nsproxy_cachep, ns);
}
@@ -177,7 +183,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
int err = 0;
if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWUSER)))
+ CLONE_NEWUSER | CLONE_NEWNET)))
return 0;
if (!capable(CAP_SYS_ADMIN))
@@ -185,15 +191,49 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
*new_nsp = create_new_namespaces(unshare_flags, current,
new_fs ? new_fs : current->fs);
- if (IS_ERR(*new_nsp))
+ if (IS_ERR(*new_nsp)) {
err = PTR_ERR(*new_nsp);
+ goto out;
+ }
+
+ err = ns_cgroup_clone(current);
+ if (err)
+ put_nsproxy(*new_nsp);
+
+out:
return err;
}
+void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
+{
+ struct nsproxy *ns;
+
+ might_sleep();
+
+ ns = p->nsproxy;
+
+ rcu_assign_pointer(p->nsproxy, new);
+
+ if (ns && atomic_dec_and_test(&ns->count)) {
+ /*
+ * wait for others to get what they want from this nsproxy.
+ *
+ * cannot release this nsproxy via the call_rcu() since
+ * put_mnt_ns() will want to sleep
+ */
+ synchronize_rcu();
+ free_nsproxy(ns);
+ }
+}
+
+void exit_task_namespaces(struct task_struct *p)
+{
+ switch_task_namespaces(p, NULL);
+}
+
static int __init nsproxy_cache_init(void)
{
- nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
- 0, SLAB_PANIC, NULL);
+ nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC);
return 0;
}
diff --git a/kernel/panic.c b/kernel/panic.c
index f64f4c1ac11f..3886bd8230fe 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -56,14 +56,14 @@ EXPORT_SYMBOL(panic_blink);
*
* This function never returns.
*/
-
+
NORET_TYPE void panic(const char * fmt, ...)
{
long i;
static char buf[1024];
va_list args;
#if defined(CONFIG_S390)
- unsigned long caller = (unsigned long) __builtin_return_address(0);
+ unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif
/*
@@ -128,7 +128,7 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#endif
#if defined(CONFIG_S390)
- disabled_wait(caller);
+ disabled_wait(caller);
#endif
local_irq_enable();
for (i = 0;;) {
@@ -154,7 +154,7 @@ EXPORT_SYMBOL(panic);
*
* The string is overwritten by the next call to print_taint().
*/
-
+
const char *print_tainted(void)
{
static char buf[20];
@@ -164,7 +164,7 @@ const char *print_tainted(void)
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
- tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
+ tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
tainted & TAINT_BAD_PAGE ? 'B' : ' ',
tainted & TAINT_USER ? 'U' : ' ',
tainted & TAINT_DIE ? 'D' : ' ');
diff --git a/kernel/params.c b/kernel/params.c
index 4e57732fcfb4..16f269e9ddc9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -252,8 +252,9 @@ int param_get_bool(char *buffer, struct kernel_param *kp)
int param_set_invbool(const char *val, struct kernel_param *kp)
{
int boolval, ret;
- struct kernel_param dummy = { .arg = &boolval };
+ struct kernel_param dummy;
+ dummy.arg = &boolval;
ret = param_set_bool(val, &dummy);
if (ret == 0)
*(int *)kp->arg = !boolval;
@@ -262,11 +263,7 @@ int param_set_invbool(const char *val, struct kernel_param *kp)
int param_get_invbool(char *buffer, struct kernel_param *kp)
{
- int val;
- struct kernel_param dummy = { .arg = &val };
-
- val = !*(int *)kp->arg;
- return param_get_bool(buffer, &dummy);
+ return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y');
}
/* We break the rule and mangle the string. */
@@ -325,7 +322,7 @@ static int param_array(const char *name,
int param_array_set(const char *val, struct kernel_param *kp)
{
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
unsigned int temp_num;
return param_array(kp->name, val, 1, arr->max, arr->elem,
@@ -335,7 +332,7 @@ int param_array_set(const char *val, struct kernel_param *kp)
int param_array_get(char *buffer, struct kernel_param *kp)
{
int i, off, ret;
- struct kparam_array *arr = kp->arg;
+ const struct kparam_array *arr = kp->arr;
struct kernel_param p;
p = *kp;
@@ -354,7 +351,7 @@ int param_array_get(char *buffer, struct kernel_param *kp)
int param_set_copystring(const char *val, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
if (!val) {
printk(KERN_ERR "%s: missing param set value\n", kp->name);
@@ -371,7 +368,7 @@ int param_set_copystring(const char *val, struct kernel_param *kp)
int param_get_string(char *buffer, struct kernel_param *kp)
{
- struct kparam_string *kps = kp->arg;
+ const struct kparam_string *kps = kp->str;
return strlcpy(buffer, kps->string, kps->maxlen);
}
@@ -595,11 +592,17 @@ static void __init param_sysfs_builtin(void)
for (i=0; i < __stop___param - __start___param; i++) {
char *dot;
+ size_t kplen;
kp = &__start___param[i];
+ kplen = strlen(kp->name);
/* We do not handle args without periods. */
- dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME);
+ if (kplen > MAX_KBUILD_MODNAME) {
+ DEBUGP("kernel parameter name is too long: %s\n", kp->name);
+ continue;
+ }
+ dot = memchr(kp->name, '.', kplen);
if (!dot) {
DEBUGP("couldn't find period in %s\n", kp->name);
continue;
diff --git a/kernel/pid.c b/kernel/pid.c
index c6e3f9ffff87..d1db36b94674 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -18,6 +18,12 @@
* allocation scenario when all but one out of 1 million PIDs possible are
* allocated already: the scanning of 32 list entries and at most PAGE_SIZE
* bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
+ *
+ * Pid namespaces:
+ * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
+ * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
+ * Many thanks to Oleg Nesterov for comments and help
+ *
*/
#include <linux/mm.h>
@@ -28,12 +34,14 @@
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
+#include <linux/syscalls.h>
-#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
+#define pid_hashfn(nr, ns) \
+ hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
-static struct kmem_cache *pid_cachep;
struct pid init_struct_pid = INIT_STRUCT_PID;
+static struct kmem_cache *pid_ns_cachep;
int pid_max = PID_MAX_DEFAULT;
@@ -68,8 +76,25 @@ struct pid_namespace init_pid_ns = {
[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
},
.last_pid = 0,
- .child_reaper = &init_task
+ .level = 0,
+ .child_reaper = &init_task,
};
+EXPORT_SYMBOL_GPL(init_pid_ns);
+
+int is_container_init(struct task_struct *tsk)
+{
+ int ret = 0;
+ struct pid *pid;
+
+ rcu_read_lock();
+ pid = task_pid(tsk);
+ if (pid != NULL && pid->numbers[pid->level].nr == 1)
+ ret = 1;
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(is_container_init);
/*
* Note: disable interrupts while the pidmap_lock is held as an
@@ -176,11 +201,17 @@ static int next_pidmap(struct pid_namespace *pid_ns, int last)
fastcall void put_pid(struct pid *pid)
{
+ struct pid_namespace *ns;
+
if (!pid)
return;
+
+ ns = pid->numbers[pid->level].ns;
if ((atomic_read(&pid->count) == 1) ||
- atomic_dec_and_test(&pid->count))
- kmem_cache_free(pid_cachep, pid);
+ atomic_dec_and_test(&pid->count)) {
+ kmem_cache_free(ns->pid_cachep, pid);
+ put_pid_ns(ns);
+ }
}
EXPORT_SYMBOL_GPL(put_pid);
@@ -193,60 +224,94 @@ static void delayed_put_pid(struct rcu_head *rhp)
fastcall void free_pid(struct pid *pid)
{
/* We can be called with write_lock_irq(&tasklist_lock) held */
+ int i;
unsigned long flags;
spin_lock_irqsave(&pidmap_lock, flags);
- hlist_del_rcu(&pid->pid_chain);
+ for (i = 0; i <= pid->level; i++)
+ hlist_del_rcu(&pid->numbers[i].pid_chain);
spin_unlock_irqrestore(&pidmap_lock, flags);
- free_pidmap(&init_pid_ns, pid->nr);
+ for (i = 0; i <= pid->level; i++)
+ free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+
call_rcu(&pid->rcu, delayed_put_pid);
}
-struct pid *alloc_pid(void)
+struct pid *alloc_pid(struct pid_namespace *ns)
{
struct pid *pid;
enum pid_type type;
- int nr = -1;
+ int i, nr;
+ struct pid_namespace *tmp;
+ struct upid *upid;
- pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
+ pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
if (!pid)
goto out;
- nr = alloc_pidmap(current->nsproxy->pid_ns);
- if (nr < 0)
- goto out_free;
+ tmp = ns;
+ for (i = ns->level; i >= 0; i--) {
+ nr = alloc_pidmap(tmp);
+ if (nr < 0)
+ goto out_free;
+
+ pid->numbers[i].nr = nr;
+ pid->numbers[i].ns = tmp;
+ tmp = tmp->parent;
+ }
+ get_pid_ns(ns);
+ pid->level = ns->level;
atomic_set(&pid->count, 1);
- pid->nr = nr;
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
spin_lock_irq(&pidmap_lock);
- hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
+ for (i = ns->level; i >= 0; i--) {
+ upid = &pid->numbers[i];
+ hlist_add_head_rcu(&upid->pid_chain,
+ &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
+ }
spin_unlock_irq(&pidmap_lock);
out:
return pid;
out_free:
- kmem_cache_free(pid_cachep, pid);
+ for (i++; i <= ns->level; i++)
+ free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+
+ kmem_cache_free(ns->pid_cachep, pid);
pid = NULL;
goto out;
}
-struct pid * fastcall find_pid(int nr)
+struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
struct hlist_node *elem;
- struct pid *pid;
+ struct upid *pnr;
+
+ hlist_for_each_entry_rcu(pnr, elem,
+ &pid_hash[pid_hashfn(nr, ns)], pid_chain)
+ if (pnr->nr == nr && pnr->ns == ns)
+ return container_of(pnr, struct pid,
+ numbers[ns->level]);
- hlist_for_each_entry_rcu(pid, elem,
- &pid_hash[pid_hashfn(nr)], pid_chain) {
- if (pid->nr == nr)
- return pid;
- }
return NULL;
}
+EXPORT_SYMBOL_GPL(find_pid_ns);
+
+struct pid *find_vpid(int nr)
+{
+ return find_pid_ns(nr, current->nsproxy->pid_ns);
+}
+EXPORT_SYMBOL_GPL(find_vpid);
+
+struct pid *find_pid(int nr)
+{
+ return find_pid_ns(nr, &init_pid_ns);
+}
EXPORT_SYMBOL_GPL(find_pid);
/*
@@ -307,12 +372,32 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
-struct task_struct *find_task_by_pid_type(int type, int nr)
+struct task_struct *find_task_by_pid_type_ns(int type, int nr,
+ struct pid_namespace *ns)
{
- return pid_task(find_pid(nr), type);
+ return pid_task(find_pid_ns(nr, ns), type);
}
-EXPORT_SYMBOL(find_task_by_pid_type);
+EXPORT_SYMBOL(find_task_by_pid_type_ns);
+
+struct task_struct *find_task_by_pid(pid_t nr)
+{
+ return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
+}
+EXPORT_SYMBOL(find_task_by_pid);
+
+struct task_struct *find_task_by_vpid(pid_t vnr)
+{
+ return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
+ current->nsproxy->pid_ns);
+}
+EXPORT_SYMBOL(find_task_by_vpid);
+
+struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+{
+ return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
+}
+EXPORT_SYMBOL(find_task_by_pid_ns);
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
@@ -339,45 +424,239 @@ struct pid *find_get_pid(pid_t nr)
struct pid *pid;
rcu_read_lock();
- pid = get_pid(find_pid(nr));
+ pid = get_pid(find_vpid(nr));
rcu_read_unlock();
return pid;
}
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+{
+ struct upid *upid;
+ pid_t nr = 0;
+
+ if (pid && ns->level <= pid->level) {
+ upid = &pid->numbers[ns->level];
+ if (upid->ns == ns)
+ nr = upid->nr;
+ }
+ return nr;
+}
+
+pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return pid_nr_ns(task_pid(tsk), ns);
+}
+EXPORT_SYMBOL(task_pid_nr_ns);
+
+pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return pid_nr_ns(task_tgid(tsk), ns);
+}
+EXPORT_SYMBOL(task_tgid_nr_ns);
+
+pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return pid_nr_ns(task_pgrp(tsk), ns);
+}
+EXPORT_SYMBOL(task_pgrp_nr_ns);
+
+pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return pid_nr_ns(task_session(tsk), ns);
+}
+EXPORT_SYMBOL(task_session_nr_ns);
+
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
*
* If there is a pid at nr this function is exactly the same as find_pid.
*/
-struct pid *find_ge_pid(int nr)
+struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
struct pid *pid;
do {
- pid = find_pid(nr);
+ pid = find_pid_ns(nr, ns);
if (pid)
break;
- nr = next_pidmap(current->nsproxy->pid_ns, nr);
+ nr = next_pidmap(ns, nr);
} while (nr > 0);
return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
+struct pid_cache {
+ int nr_ids;
+ char name[16];
+ struct kmem_cache *cachep;
+ struct list_head list;
+};
+
+static LIST_HEAD(pid_caches_lh);
+static DEFINE_MUTEX(pid_caches_mutex);
+
+/*
+ * creates the kmem cache to allocate pids from.
+ * @nr_ids: the number of numerical ids this pid will have to carry
+ */
+
+static struct kmem_cache *create_pid_cachep(int nr_ids)
+{
+ struct pid_cache *pcache;
+ struct kmem_cache *cachep;
+
+ mutex_lock(&pid_caches_mutex);
+ list_for_each_entry (pcache, &pid_caches_lh, list)
+ if (pcache->nr_ids == nr_ids)
+ goto out;
+
+ pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
+ if (pcache == NULL)
+ goto err_alloc;
+
+ snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
+ cachep = kmem_cache_create(pcache->name,
+ sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (cachep == NULL)
+ goto err_cachep;
+
+ pcache->nr_ids = nr_ids;
+ pcache->cachep = cachep;
+ list_add(&pcache->list, &pid_caches_lh);
+out:
+ mutex_unlock(&pid_caches_mutex);
+ return pcache->cachep;
+
+err_cachep:
+ kfree(pcache);
+err_alloc:
+ mutex_unlock(&pid_caches_mutex);
+ return NULL;
+}
+
+static struct pid_namespace *create_pid_namespace(int level)
+{
+ struct pid_namespace *ns;
+ int i;
+
+ ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
+ if (ns == NULL)
+ goto out;
+
+ ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ns->pidmap[0].page)
+ goto out_free;
+
+ ns->pid_cachep = create_pid_cachep(level + 1);
+ if (ns->pid_cachep == NULL)
+ goto out_free_map;
+
+ kref_init(&ns->kref);
+ ns->last_pid = 0;
+ ns->child_reaper = NULL;
+ ns->level = level;
+
+ set_bit(0, ns->pidmap[0].page);
+ atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
+
+ for (i = 1; i < PIDMAP_ENTRIES; i++) {
+ ns->pidmap[i].page = NULL;
+ atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
+ }
+
+ return ns;
+
+out_free_map:
+ kfree(ns->pidmap[0].page);
+out_free:
+ kmem_cache_free(pid_ns_cachep, ns);
+out:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void destroy_pid_namespace(struct pid_namespace *ns)
+{
+ int i;
+
+ for (i = 0; i < PIDMAP_ENTRIES; i++)
+ kfree(ns->pidmap[i].page);
+ kmem_cache_free(pid_ns_cachep, ns);
+}
+
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
+ struct pid_namespace *new_ns;
+
BUG_ON(!old_ns);
- get_pid_ns(old_ns);
- return old_ns;
+ new_ns = get_pid_ns(old_ns);
+ if (!(flags & CLONE_NEWPID))
+ goto out;
+
+ new_ns = ERR_PTR(-EINVAL);
+ if (flags & CLONE_THREAD)
+ goto out_put;
+
+ new_ns = create_pid_namespace(old_ns->level + 1);
+ if (!IS_ERR(new_ns))
+ new_ns->parent = get_pid_ns(old_ns);
+
+out_put:
+ put_pid_ns(old_ns);
+out:
+ return new_ns;
}
void free_pid_ns(struct kref *kref)
{
- struct pid_namespace *ns;
+ struct pid_namespace *ns, *parent;
ns = container_of(kref, struct pid_namespace, kref);
- kfree(ns);
+
+ parent = ns->parent;
+ destroy_pid_namespace(ns);
+
+ if (parent != NULL)
+ put_pid_ns(parent);
+}
+
+void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+{
+ int nr;
+ int rc;
+
+ /*
+ * The last thread in the cgroup-init thread group is terminating.
+ * Find the remaining pids in the namespace, signal them and wait for
+ * them to exit.
+ *
+ * Note: this signals each thread in the namespace - even those that
+ * belong to the same thread group. To avoid this, we would have
+ * to walk the entire tasklist looking for processes in this
+ * namespace, but that could be unnecessarily expensive if the
+ * pid namespace has just a few processes. Or we would need to
+ * maintain a tasklist for each pid namespace.
+ */
+ read_lock(&tasklist_lock);
+ nr = next_pidmap(pid_ns, 1);
+ while (nr > 0) {
+ kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
+ nr = next_pidmap(pid_ns, nr);
+ }
+ read_unlock(&tasklist_lock);
+
+ do {
+ clear_thread_flag(TIF_SIGPENDING);
+ rc = sys_wait4(-1, NULL, __WALL, NULL);
+ } while (rc != -ECHILD);
+
+
+ /* Child reaper for the pid namespace is going away */
+ pid_ns->child_reaper = NULL;
+ return;
}
/*
@@ -412,5 +691,9 @@ void __init pidmap_init(void)
set_bit(0, init_pid_ns.pidmap[0].page);
atomic_dec(&init_pid_ns.pidmap[0].nr_free);
- pid_cachep = KMEM_CACHE(pid, SLAB_PANIC);
+ init_pid_ns.pid_cachep = create_pid_cachep(1);
+ if (init_pid_ns.pid_cachep == NULL)
+ panic("Can't create pid_1 cachep\n");
+
+ pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b53c8fcd9d82..68c96376e84a 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -21,8 +21,8 @@ static int check_clock(const clockid_t which_clock)
read_lock(&tasklist_lock);
p = find_task_by_pid(pid);
- if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
- p->tgid != current->tgid : p->tgid != pid)) {
+ if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
+ same_thread_group(p, current) : thread_group_leader(p))) {
error = -EINVAL;
}
read_unlock(&tasklist_lock);
@@ -308,13 +308,13 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
p = find_task_by_pid(pid);
if (p) {
if (CPUCLOCK_PERTHREAD(which_clock)) {
- if (p->tgid == current->tgid) {
+ if (same_thread_group(p, current)) {
error = cpu_clock_sample(which_clock,
p, &rtn);
}
} else {
read_lock(&tasklist_lock);
- if (p->tgid == pid && p->signal) {
+ if (thread_group_leader(p) && p->signal) {
error =
cpu_clock_sample_group(which_clock,
p, &rtn);
@@ -355,7 +355,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
p = current;
} else {
p = find_task_by_pid(pid);
- if (p && p->tgid != current->tgid)
+ if (p && !same_thread_group(p, current))
p = NULL;
}
} else {
@@ -363,7 +363,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
p = current->group_leader;
} else {
p = find_task_by_pid(pid);
- if (p && p->tgid != pid)
+ if (p && !thread_group_leader(p))
p = NULL;
}
}
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 7a15afb73ed0..35b4bbfc78ff 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -241,7 +241,8 @@ static __init int init_posix_timers(void)
register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
posix_timers_cache = kmem_cache_create("posix_timers_cache",
- sizeof (struct k_itimer), 0, 0, NULL);
+ sizeof (struct k_itimer), 0, SLAB_PANIC,
+ NULL);
idr_init(&posix_timers_id);
return 0;
}
@@ -403,7 +404,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) ||
- rtn->tgid != current->tgid ||
+ !same_thread_group(rtn, current) ||
(event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
return NULL;
@@ -607,7 +608,7 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
spin_lock(&timr->it_lock);
if ((timr->it_id != timer_id) || !(timr->it_process) ||
- timr->it_process->tgid != current->tgid) {
+ !same_thread_group(timr->it_process, current)) {
spin_unlock(&timr->it_lock);
spin_unlock_irqrestore(&idr_lock, *flags);
timr = NULL;
@@ -712,7 +713,7 @@ sys_timer_getoverrun(timer_t timer_id)
{
struct k_itimer *timr;
int overrun;
- long flags;
+ unsigned long flags;
timr = lock_timer(timer_id, &flags);
if (!timr)
@@ -784,7 +785,7 @@ sys_timer_settime(timer_t timer_id, int flags,
struct k_itimer *timr;
struct itimerspec new_spec, old_spec;
int error = 0;
- long flag;
+ unsigned long flag;
struct itimerspec *rtn = old_setting ? &old_spec : NULL;
if (!new_setting)
@@ -836,7 +837,7 @@ asmlinkage long
sys_timer_delete(timer_t timer_id)
{
struct k_itimer *timer;
- long flags;
+ unsigned long flags;
retry_delete:
timer = lock_timer(timer_id, &flags);
@@ -980,9 +981,20 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
static int common_nsleep(const clockid_t which_clock, int flags,
struct timespec *tsave, struct timespec __user *rmtp)
{
- return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
- HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
- which_clock);
+ struct timespec rmt;
+ int ret;
+
+ ret = hrtimer_nanosleep(tsave, rmtp ? &rmt : NULL,
+ flags & TIMER_ABSTIME ?
+ HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+ which_clock);
+
+ if (ret && rmtp) {
+ if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
+ return -EFAULT;
+ }
+
+ return ret;
}
asmlinkage long
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 14b0e10dc95c..8e186c678149 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,17 +44,6 @@ config PM_VERBOSE
---help---
This option enables verbose messages from the Power Management code.
-config DISABLE_CONSOLE_SUSPEND
- bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
- depends on PM_DEBUG && PM_SLEEP
- default n
- ---help---
- This option turns off the console suspend mechanism that prevents
- debug messages from reaching the console during the suspend/resume
- operations. This may be helpful when debugging device drivers'
- suspend/resume routines, but may itself lead to problems, for example
- if netconsole is used.
-
config PM_TRACE
bool "Suspend/resume event tracing"
depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index eb72255b5c86..8b15f777010a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -45,17 +45,18 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
-static struct hibernation_ops *hibernation_ops;
+static struct platform_hibernation_ops *hibernation_ops;
/**
* hibernation_set_ops - set the global hibernate operations
* @ops: the hibernation operations to use in subsequent hibernation transitions
*/
-void hibernation_set_ops(struct hibernation_ops *ops)
+void hibernation_set_ops(struct platform_hibernation_ops *ops)
{
- if (ops && !(ops->prepare && ops->enter && ops->finish
- && ops->pre_restore && ops->restore_cleanup)) {
+ if (ops && !(ops->start && ops->pre_snapshot && ops->finish
+ && ops->prepare && ops->enter && ops->pre_restore
+ && ops->restore_cleanup)) {
WARN_ON(1);
return;
}
@@ -69,16 +70,37 @@ void hibernation_set_ops(struct hibernation_ops *ops)
mutex_unlock(&pm_mutex);
}
+/**
+ * platform_start - tell the platform driver that we're starting
+ * hibernation
+ */
+
+static int platform_start(int platform_mode)
+{
+ return (platform_mode && hibernation_ops) ?
+ hibernation_ops->start() : 0;
+}
/**
- * platform_prepare - prepare the machine for hibernation using the
+ * platform_pre_snapshot - prepare the machine for hibernation using the
* platform driver if so configured and return an error code if it fails
*/
-static int platform_prepare(int platform_mode)
+static int platform_pre_snapshot(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
- hibernation_ops->prepare() : 0;
+ hibernation_ops->pre_snapshot() : 0;
+}
+
+/**
+ * platform_leave - prepare the machine for switching to the normal mode
+ * of operation using the platform driver (called with interrupts disabled)
+ */
+
+static void platform_leave(int platform_mode)
+{
+ if (platform_mode && hibernation_ops)
+ hibernation_ops->leave();
}
/**
@@ -118,6 +140,51 @@ static void platform_restore_cleanup(int platform_mode)
}
/**
+ * create_image - freeze devices that need to be frozen with interrupts
+ * off, create the hibernation image and thaw those devices. Control
+ * reappears in this routine after a restore.
+ */
+
+int create_image(int platform_mode)
+{
+ int error;
+
+ error = arch_prepare_suspend();
+ if (error)
+ return error;
+
+ local_irq_disable();
+ /* At this point, device_suspend() has been called, but *not*
+ * device_power_down(). We *must* call device_power_down() now.
+ * Otherwise, drivers for some devices (e.g. interrupt controllers)
+ * become desynchronized with the actual state of the hardware
+ * at resume time, and evil weirdness ensues.
+ */
+ error = device_power_down(PMSG_FREEZE);
+ if (error) {
+ printk(KERN_ERR "Some devices failed to power down, "
+ KERN_ERR "aborting suspend\n");
+ goto Enable_irqs;
+ }
+
+ save_processor_state();
+ error = swsusp_arch_suspend();
+ if (error)
+ printk(KERN_ERR "Error %d while creating the image\n", error);
+ /* Restore control flow magically appears here */
+ restore_processor_state();
+ if (!in_suspend)
+ platform_leave(platform_mode);
+ /* NOTE: device_power_up() is just a resume() for devices
+ * that suspended with irqs off ... no overall powerup.
+ */
+ device_power_up();
+ Enable_irqs:
+ local_irq_enable();
+ return error;
+}
+
+/**
* hibernation_snapshot - quiesce devices and create the hibernation
* snapshot image.
* @platform_mode - if set, use the platform driver, if available, to
@@ -135,12 +202,16 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
+ error = platform_start(platform_mode);
+ if (error)
+ return error;
+
suspend_console();
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = platform_prepare(platform_mode);
+ error = platform_pre_snapshot(platform_mode);
if (error)
goto Resume_devices;
@@ -148,7 +219,7 @@ int hibernation_snapshot(int platform_mode)
if (!error) {
if (hibernation_mode != HIBERNATION_TEST) {
in_suspend = 1;
- error = swsusp_suspend();
+ error = create_image(platform_mode);
/* Control returns here after successful restore */
} else {
printk("swsusp debug: Waiting for 5 seconds.\n");
@@ -207,21 +278,50 @@ int hibernation_platform_enter(void)
{
int error;
- if (hibernation_ops) {
- kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
- /*
- * We have cancelled the power transition by running
- * hibernation_ops->finish() before saving the image, so we
- * should let the firmware know that we're going to enter the
- * sleep state after all
- */
- error = hibernation_ops->prepare();
- sysdev_shutdown();
- if (!error)
- error = hibernation_ops->enter();
- } else {
- error = -ENOSYS;
+ if (!hibernation_ops)
+ return -ENOSYS;
+
+ /*
+ * We have cancelled the power transition by running
+ * hibernation_ops->finish() before saving the image, so we should let
+ * the firmware know that we're going to enter the sleep state after all
+ */
+ error = hibernation_ops->start();
+ if (error)
+ return error;
+
+ suspend_console();
+ error = device_suspend(PMSG_SUSPEND);
+ if (error)
+ goto Resume_console;
+
+ error = hibernation_ops->prepare();
+ if (error)
+ goto Resume_devices;
+
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Finish;
+
+ local_irq_disable();
+ error = device_power_down(PMSG_SUSPEND);
+ if (!error) {
+ hibernation_ops->enter();
+ /* We should never get here */
+ while (1);
}
+ local_irq_enable();
+
+ /*
+ * We don't need to reenable the nonboot CPUs or resume consoles, since
+ * the system is going to be halted anyway.
+ */
+ Finish:
+ hibernation_ops->finish();
+ Resume_devices:
+ device_resume();
+ Resume_console:
+ resume_console();
return error;
}
@@ -238,14 +338,14 @@ static void power_down(void)
case HIBERNATION_TEST:
case HIBERNATION_TESTPROC:
break;
- case HIBERNATION_SHUTDOWN:
- kernel_power_off();
- break;
case HIBERNATION_REBOOT:
kernel_restart(NULL);
break;
case HIBERNATION_PLATFORM:
hibernation_platform_enter();
+ case HIBERNATION_SHUTDOWN:
+ kernel_power_off();
+ break;
}
kernel_halt();
/*
@@ -298,6 +398,10 @@ int hibernate(void)
if (error)
goto Exit;
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
error = prepare_processes();
if (error)
goto Finish;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 350b485b3b60..3cdf95b1dc92 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -20,6 +20,7 @@
#include <linux/resume-trace.h>
#include <linux/freezer.h>
#include <linux/vmstat.h>
+#include <linux/syscalls.h>
#include "power.h"
@@ -32,39 +33,32 @@ DEFINE_MUTEX(pm_mutex);
/* This is just an arbitrary number */
#define FREE_PAGE_NUMBER (100)
-struct pm_ops *pm_ops;
+static struct platform_suspend_ops *suspend_ops;
/**
- * pm_set_ops - Set the global power method table.
+ * suspend_set_ops - Set the global suspend method table.
* @ops: Pointer to ops structure.
*/
-void pm_set_ops(struct pm_ops * ops)
+void suspend_set_ops(struct platform_suspend_ops *ops)
{
mutex_lock(&pm_mutex);
- pm_ops = ops;
+ suspend_ops = ops;
mutex_unlock(&pm_mutex);
}
/**
- * pm_valid_only_mem - generic memory-only valid callback
+ * suspend_valid_only_mem - generic memory-only valid callback
*
- * pm_ops drivers that implement mem suspend only and only need
+ * Platform drivers that implement mem suspend only and only need
* to check for that in their .valid callback can use this instead
* of rolling their own .valid callback.
*/
-int pm_valid_only_mem(suspend_state_t state)
+int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
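
For illustration, a sketch of how a platform driver would plug into the renamed
suspend API (not part of this patch; example_suspend_enter, example_suspend_ops
and example_pm_init are hypothetical, while the callback slots,
suspend_valid_only_mem() and suspend_set_ops() come from this file):

#include <linux/suspend.h>

static int example_suspend_enter(suspend_state_t state)
{
	/* program the platform's memory-sleep state here */
	return 0;
}

static struct platform_suspend_ops example_suspend_ops = {
	.valid	= suspend_valid_only_mem,	/* only PM_SUSPEND_MEM */
	.enter	= example_suspend_enter,
};

static int __init example_pm_init(void)
{
	suspend_set_ops(&example_suspend_ops);
	return 0;
}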
-
-static inline void pm_finish(suspend_state_t state)
-{
- if (pm_ops->finish)
- pm_ops->finish(state);
-}
-
/**
* suspend_prepare - Do prep work before entering low-power state.
*
@@ -76,7 +70,7 @@ static int suspend_prepare(void)
int error;
unsigned int free_pages;
- if (!pm_ops || !pm_ops->enter)
+ if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
@@ -128,7 +122,7 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
*
* This function should be called after devices have been suspended.
*/
-int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state)
{
int error = 0;
@@ -139,7 +133,7 @@ int suspend_enter(suspend_state_t state)
printk(KERN_ERR "Some devices failed to power down\n");
goto Done;
}
- error = pm_ops->enter(state);
+ error = suspend_ops->enter(state);
device_power_up();
Done:
arch_suspend_enable_irqs();
@@ -156,11 +150,11 @@ int suspend_devices_and_enter(suspend_state_t state)
{
int error;
- if (!pm_ops)
+ if (!suspend_ops)
return -ENOSYS;
- if (pm_ops->set_target) {
- error = pm_ops->set_target(state);
+ if (suspend_ops->set_target) {
+ error = suspend_ops->set_target(state);
if (error)
return error;
}
@@ -170,8 +164,8 @@ int suspend_devices_and_enter(suspend_state_t state)
printk(KERN_ERR "Some devices failed to suspend\n");
goto Resume_console;
}
- if (pm_ops->prepare) {
- error = pm_ops->prepare(state);
+ if (suspend_ops->prepare) {
+ error = suspend_ops->prepare();
if (error)
goto Resume_devices;
}
@@ -180,7 +174,8 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_enter(state);
enable_nonboot_cpus();
- pm_finish(state);
+ if (suspend_ops->finish)
+ suspend_ops->finish();
Resume_devices:
device_resume();
Resume_console:
@@ -214,7 +209,7 @@ static inline int valid_state(suspend_state_t state)
/* All states need lowlevel support and need to be valid
* to the lowlevel implementation, no valid callback
* implies that none are valid. */
- if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state))
+ if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
return 0;
return 1;
}
@@ -236,9 +231,14 @@ static int enter_state(suspend_state_t state)
if (!valid_state(state))
return -ENODEV;
+
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
if ((error = suspend_prepare()))
goto Unlock;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 95fbf2dd3fe3..195dc4611764 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -11,14 +11,32 @@ struct swsusp_info {
unsigned long size;
} __attribute__((aligned(PAGE_SIZE)));
+#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ARCH_HIBERNATION_HEADER
+/* Maximum size of architecture specific data in a hibernation header */
+#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
+extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
+extern int arch_hibernation_header_restore(void *addr);
+
+static inline int init_header_complete(struct swsusp_info *info)
+{
+ return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
+}
+
+static inline char *check_image_kernel(struct swsusp_info *info)
+{
+ return arch_hibernation_header_restore(info) ?
+ "architecture specific data" : NULL;
+}
+#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
-#ifdef CONFIG_HIBERNATION
/*
* Keep some memory free so that I/O operations can succeed without paging
* [Might this be more than 4 MB?]
*/
#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
+
/*
* Keep 1 MB of memory free so that device drivers can allocate some pages in
* their .suspend() routines without breaking the suspend to disk.
@@ -165,7 +183,6 @@ extern int swsusp_swap_in_use(void);
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
-extern int swsusp_suspend(void);
extern int swsusp_resume(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940a3df1..6533923e711b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -75,21 +75,79 @@ void refrigerator(void)
__set_current_state(save);
}
-static void freeze_task(struct task_struct *p)
+static void fake_signal_wake_up(struct task_struct *p, int resume)
{
unsigned long flags;
- if (!freezing(p)) {
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ signal_wake_up(p, resume);
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+}
+
+static void send_fake_signal(struct task_struct *p)
+{
+ if (p->state == TASK_STOPPED)
+ force_sig_specific(SIGSTOP, p);
+ fake_signal_wake_up(p, p->state == TASK_STOPPED);
+}
+
+static int has_mm(struct task_struct *p)
+{
+ return (p->mm && !(p->flags & PF_BORROWED_MM));
+}
+
+/**
+ * freeze_task - send a freeze request to a given task
+ * @p: task to send the request to
+ * @with_mm_only: if set, the request will only be sent if the task has its
+ * own mm
+ * Return value: 0 if @with_mm_only is set and the task has no mm of its
+ * own, or if the task is already frozen; 1 otherwise
+ *
+ * The freeze request is sent by setting the task's TIF_FREEZE flag and
+ * either sending a fake signal to it or waking it up, depending on whether
+ * or not it has its own mm (i.e. it is a userland task). If @with_mm_only
+ * is set and the task has no mm of its own (i.e. it is a kernel thread),
+ * its TIF_FREEZE flag should not be set.
+ *
+ * The task_lock() is necessary to prevent races with exit_mm() or
+ * use_mm()/unuse_mm() from occurring.
+ */
+static int freeze_task(struct task_struct *p, int with_mm_only)
+{
+ int ret = 1;
+
+ task_lock(p);
+ if (freezing(p)) {
+ if (has_mm(p)) {
+ if (!signal_pending(p))
+ fake_signal_wake_up(p, 0);
+ } else {
+ if (with_mm_only)
+ ret = 0;
+ else
+ wake_up_state(p, TASK_INTERRUPTIBLE);
+ }
+ } else {
rmb();
- if (!frozen(p)) {
- set_freeze_flag(p);
- if (p->state == TASK_STOPPED)
- force_sig_specific(SIGSTOP, p);
- spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, p->state == TASK_STOPPED);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ if (frozen(p)) {
+ ret = 0;
+ } else {
+ if (has_mm(p)) {
+ set_freeze_flag(p);
+ send_fake_signal(p);
+ } else {
+ if (with_mm_only) {
+ ret = 0;
+ } else {
+ set_freeze_flag(p);
+ wake_up_state(p, TASK_INTERRUPTIBLE);
+ }
+ }
}
}
+ task_unlock(p);
+ return ret;
}
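
For context, a minimal sketch of the kthread side of this protocol (not part of
this patch; the thread function is hypothetical and assumes <linux/freezer.h>
and <linux/kthread.h>): a freezable kernel thread marks itself with
set_freezable() and parks itself via try_to_freeze() whenever the freezer sets
TIF_FREEZE for it.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_kthread(void *unused)
{
	set_freezable();	/* kthreads are non-freezable by default */
	while (!kthread_should_stop()) {
		try_to_freeze();	/* enters refrigerator() when requested */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}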
static void cancel_freezing(struct task_struct *p)
@@ -110,6 +168,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
struct task_struct *g, *p;
unsigned long end_time;
unsigned int todo;
+ struct timeval start, end;
+ s64 elapsed_csecs64;
+ unsigned int elapsed_csecs;
+
+ do_gettimeofday(&start);
end_time = jiffies + TIMEOUT;
do {
@@ -119,31 +182,14 @@ static int try_to_freeze_tasks(int freeze_user_space)
if (frozen(p) || !freezeable(p))
continue;
- if (freeze_user_space) {
- if (p->state == TASK_TRACED &&
- frozen(p->parent)) {
- cancel_freezing(p);
- continue;
- }
- /*
- * Kernel threads should not have TIF_FREEZE set
- * at this point, so we must ensure that either
- * p->mm is not NULL *and* PF_BORROWED_MM is
- * unset, or TIF_FRREZE is left unset.
- * The task_lock() is necessary to prevent races
- * with exit_mm() or use_mm()/unuse_mm() from
- * occuring.
- */
- task_lock(p);
- if (!p->mm || (p->flags & PF_BORROWED_MM)) {
- task_unlock(p);
- continue;
- }
- freeze_task(p);
- task_unlock(p);
- } else {
- freeze_task(p);
+ if (p->state == TASK_TRACED && frozen(p->parent)) {
+ cancel_freezing(p);
+ continue;
}
+
+ if (!freeze_task(p, freeze_user_space))
+ continue;
+
if (!freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
@@ -153,6 +199,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
break;
} while (todo);
+ do_gettimeofday(&end);
+ elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
+ do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
+ elapsed_csecs = elapsed_csecs64;
+
if (todo) {
/* This does not unfreeze processes that are already frozen
* (we have slightly ugly calling convention in that respect,
@@ -160,10 +211,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
* but it cleans up leftover PF_FREEZE requests.
*/
printk("\n");
- printk(KERN_ERR "Freezing of %s timed out after %d seconds "
+ printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
"(%d tasks refusing to freeze):\n",
- freeze_user_space ? "user space " : "tasks ",
- TIMEOUT / HZ, todo);
+ elapsed_csecs / 100, elapsed_csecs % 100, todo);
show_state();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
@@ -174,6 +224,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ } else {
+ printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
+ elapsed_csecs % 100);
}
return todo ? -EBUSY : 0;
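
The new elapsed-time report keeps the freeze duration in centiseconds; the conversion can be checked with ordinary C arithmetic. Stand-alone sketch with invented sample values:

#include <stdio.h>
#include <sys/time.h>

#define NSEC_PER_SEC 1000000000LL

/* Mirror of the kernel's timeval_to_ns(): seconds + microseconds to ns. */
static long long timeval_to_ns(const struct timeval *tv)
{
	return (long long)tv->tv_sec * NSEC_PER_SEC + tv->tv_usec * 1000LL;
}

int main(void)
{
	/* Invented sample: freezing took 1.234567 seconds. */
	struct timeval start = { .tv_sec = 10, .tv_usec = 0 };
	struct timeval end   = { .tv_sec = 11, .tv_usec = 234567 };
	long long elapsed_csecs64;
	unsigned int elapsed_csecs;

	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	elapsed_csecs64 /= NSEC_PER_SEC / 100;	/* ns -> centiseconds */
	elapsed_csecs = (unsigned int)elapsed_csecs64;

	/* Prints "elapsed 1.23 seconds", matching the kernel message format. */
	printf("elapsed %u.%02u seconds\n",
	       elapsed_csecs / 100, elapsed_csecs % 100);
	return 0;
}
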
@@ -186,19 +239,21 @@ int freeze_processes(void)
{
int error;
- printk("Stopping tasks ... ");
+ printk("Freezing user space processes ... ");
error = try_to_freeze_tasks(FREEZER_USER_SPACE);
if (error)
- return error;
+ goto Exit;
+ printk("done.\n");
- sys_sync();
+ printk("Freezing remaining freezable tasks ... ");
error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
if (error)
- return error;
-
- printk("done.\n");
+ goto Exit;
+ printk("done.");
+ Exit:
BUG_ON(in_atomic());
- return 0;
+ printk("\n");
+ return error;
}
static void thaw_tasks(int thaw_user_space)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index a686590d88c1..ccc95ac07bed 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1239,17 +1239,39 @@ asmlinkage int swsusp_save(void)
return 0;
}
-static void init_header(struct swsusp_info *info)
+#ifndef CONFIG_ARCH_HIBERNATION_HEADER
+static int init_header_complete(struct swsusp_info *info)
{
- memset(info, 0, sizeof(struct swsusp_info));
+ memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
info->version_code = LINUX_VERSION_CODE;
+ return 0;
+}
+
+static char *check_image_kernel(struct swsusp_info *info)
+{
+ if (info->version_code != LINUX_VERSION_CODE)
+ return "kernel version";
+ if (strcmp(info->uts.sysname,init_utsname()->sysname))
+ return "system type";
+ if (strcmp(info->uts.release,init_utsname()->release))
+ return "kernel release";
+ if (strcmp(info->uts.version,init_utsname()->version))
+ return "version";
+ if (strcmp(info->uts.machine,init_utsname()->machine))
+ return "machine";
+ return NULL;
+}
+#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
+
+static int init_header(struct swsusp_info *info)
+{
+ memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = num_physpages;
- memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
- info->cpus = num_online_cpus();
info->image_pages = nr_copy_pages;
info->pages = nr_copy_pages + nr_meta_pages + 1;
info->size = info->pages;
info->size <<= PAGE_SHIFT;
+ return init_header_complete(info);
}
/**
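
When CONFIG_ARCH_HIBERNATION_HEADER is set, the generic utsname-based pair above is compiled out and architecture-specific header handling (introduced elsewhere in this series, not shown in this hunk) takes its place. Purely as an illustration of the shape such a replacement could take, with an invented layout that reuses the info->uts area as opaque storage:

/* Hypothetical architecture-side code -- not part of this patch. */
struct arch_hib_hdr {				/* invented layout */
	unsigned long magic;
	unsigned long restore_entry_pa;		/* e.g. a resume trampoline */
};

#define ARCH_HIB_MAGIC	0x48494233UL		/* invented value */

static int init_header_complete(struct swsusp_info *info)
{
	struct arch_hib_hdr *hdr = (struct arch_hib_hdr *)&info->uts;

	hdr->magic = ARCH_HIB_MAGIC;
	hdr->restore_entry_pa = __pa(arch_resume_trampoline);	/* invented symbol */
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	struct arch_hib_hdr *hdr = (struct arch_hib_hdr *)&info->uts;

	/* The string becomes the "Resume mismatch: ..." reason in check_header(). */
	return hdr->magic == ARCH_HIB_MAGIC ? NULL : "machine signature";
}
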
@@ -1303,7 +1325,11 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
return -ENOMEM;
}
if (!handle->offset) {
- init_header((struct swsusp_info *)buffer);
+ int error;
+
+ error = init_header((struct swsusp_info *)buffer);
+ if (error)
+ return error;
handle->buffer = buffer;
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
@@ -1394,22 +1420,13 @@ duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
}
}
-static inline int check_header(struct swsusp_info *info)
+static int check_header(struct swsusp_info *info)
{
- char *reason = NULL;
+ char *reason;
- if (info->version_code != LINUX_VERSION_CODE)
- reason = "kernel version";
- if (info->num_physpages != num_physpages)
+ reason = check_image_kernel(info);
+ if (!reason && info->num_physpages != num_physpages)
reason = "memory size";
- if (strcmp(info->uts.sysname,init_utsname()->sysname))
- reason = "system type";
- if (strcmp(info->uts.release,init_utsname()->release))
- reason = "kernel release";
- if (strcmp(info->uts.version,init_utsname()->version))
- reason = "version";
- if (strcmp(info->uts.machine,init_utsname()->machine))
- reason = "machine";
if (reason) {
printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
return -EPERM;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 5da304c8f1f6..e1722d3155f1 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -270,39 +270,6 @@ int swsusp_shrink_memory(void)
return 0;
}
-int swsusp_suspend(void)
-{
- int error;
-
- if ((error = arch_prepare_suspend()))
- return error;
-
- local_irq_disable();
- /* At this point, device_suspend() has been called, but *not*
- * device_power_down(). We *must* device_power_down() now.
- * Otherwise, drivers for some devices (e.g. interrupt controllers)
- * become desynchronized with the actual state of the hardware
- * at resume time, and evil weirdness ensues.
- */
- if ((error = device_power_down(PMSG_FREEZE))) {
- printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
- goto Enable_irqs;
- }
-
- save_processor_state();
- if ((error = swsusp_arch_suspend()))
- printk(KERN_ERR "Error %d suspending\n", error);
- /* Restore control flow magically appears here */
- restore_processor_state();
- /* NOTE: device_power_up() is just a resume() for devices
- * that suspended with irqs off ... no overall powerup.
- */
- device_power_up();
- Enable_irqs:
- local_irq_enable();
- return error;
-}
-
int swsusp_resume(void)
{
int error;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index bd0723a7df3f..5bd321bcbb75 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -153,6 +153,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
mutex_lock(&pm_mutex);
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (!error) {
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
error = freeze_processes();
if (error)
thaw_processes();
diff --git a/kernel/printk.c b/kernel/printk.c
index 8451dfc31d25..a30fe33de395 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -22,6 +22,8 @@
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h> /* For in_interrupt() */
@@ -162,6 +164,113 @@ out:
__setup("log_buf_len=", log_buf_len_setup);
+#ifdef CONFIG_BOOT_PRINTK_DELAY
+
+static unsigned int boot_delay; /* msecs delay after each printk during bootup */
+static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */
+
+static int __init boot_delay_setup(char *str)
+{
+ unsigned long lpj;
+ unsigned long long loops_per_msec;
+
+ lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
+ loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
+
+ get_option(&str, &boot_delay);
+ if (boot_delay > 10 * 1000)
+ boot_delay = 0;
+
+ printk_delay_msec = loops_per_msec;
+ printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
+ "HZ: %d, printk_delay_msec: %llu\n",
+ boot_delay, preset_lpj, lpj, HZ, printk_delay_msec);
+ return 1;
+}
+__setup("boot_delay=", boot_delay_setup);
+
+static void boot_delay_msec(void)
+{
+ unsigned long long k;
+ unsigned long timeout;
+
+ if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
+ return;
+
+ k = (unsigned long long)printk_delay_msec * boot_delay;
+
+ timeout = jiffies + msecs_to_jiffies(boot_delay);
+ while (k) {
+ k--;
+ cpu_relax();
+ /*
+ * use (volatile) jiffies to prevent
+ * compiler reduction; loop termination via jiffies
+ * is secondary and may or may not happen.
+ */
+ if (time_after(jiffies, timeout))
+ break;
+ touch_nmi_watchdog();
+ }
+}
+#else
+static inline void boot_delay_msec(void)
+{
+}
+#endif
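
The boot_delay= handling above converts loops-per-jiffy into a loops-per-millisecond estimate and then busy-waits that many iterations per requested millisecond of delay. The arithmetic can be sanity-checked in isolation (stand-alone C, sample numbers invented):

#include <stdio.h>

int main(void)
{
	/* Invented sample values: lpj as calibrated at boot, HZ = 250. */
	unsigned long lpj = 4000000;
	int hz = 250;
	unsigned int boot_delay = 100;	/* "boot_delay=100" -> ~100 ms per printk */
	unsigned long long loops_per_msec, k;

	/* Same expression as boot_delay_setup(): lpj/1000*HZ is loops per
	 * millisecond, since lpj*HZ is loops per second. */
	loops_per_msec = (unsigned long long)lpj / 1000 * hz;

	/* boot_delay_msec() then spins roughly this many iterations: */
	k = loops_per_msec * boot_delay;

	printf("loops_per_msec=%llu, spin count per printk=%llu\n",
	       loops_per_msec, k);
	return 0;
}
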
+
+/*
+ * Return the number of unread characters in the log buffer.
+ */
+int log_buf_get_len(void)
+{
+ return logged_chars;
+}
+
+/*
+ * Copy a range of characters from the log buffer.
+ */
+int log_buf_copy(char *dest, int idx, int len)
+{
+ int ret, max;
+ bool took_lock = false;
+
+ if (!oops_in_progress) {
+ spin_lock_irq(&logbuf_lock);
+ took_lock = true;
+ }
+
+ max = log_buf_get_len();
+ if (idx < 0 || idx >= max) {
+ ret = -1;
+ } else {
+ if (len > max)
+ len = max;
+ ret = len;
+ idx += (log_end - max);
+ while (len-- > 0)
+ dest[len] = LOG_BUF(idx + len);
+ }
+
+ if (took_lock)
+ spin_unlock_irq(&logbuf_lock);
+
+ return ret;
+}
+
+/*
+ * Extract a single character from the log buffer.
+ */
+int log_buf_read(int idx)
+{
+ char ret;
+
+ if (log_buf_copy(&ret, idx, 1) == 1)
+ return ret;
+ else
+ return -1;
+}
+
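
log_buf_get_len(), log_buf_copy() and log_buf_read() above give other kernel code read-only access to the printk ring buffer. A hypothetical in-kernel consumer that grabs the tail of the log (the function name and size are invented) could look like:

/* Hypothetical consumer -- not part of this patch. */
#define DUMP_TAIL_SIZE	256			/* invented */

static void dump_log_tail(void)
{
	static char tail[DUMP_TAIL_SIZE + 1];	/* static: avoid large stack use */
	int len = log_buf_get_len();
	int start = len > DUMP_TAIL_SIZE ? len - DUMP_TAIL_SIZE : 0;
	int copied = log_buf_copy(tail, start, DUMP_TAIL_SIZE);

	if (copied > 0) {
		tail[copied] = '\0';
		printk(KERN_DEBUG "last %d log chars:\n%s\n", copied, tail);
	}
}
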
/*
* Commands to do_syslog:
*
@@ -527,6 +636,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
static char printk_buf[1024];
static int log_level_unknown = 1;
+ boot_delay_msec();
+
preempt_disable();
if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
/* If a crash is occurring during printk() on this CPU,
@@ -751,7 +862,16 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
return -1;
}
-#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
+int console_suspend_enabled = 1;
+EXPORT_SYMBOL(console_suspend_enabled);
+
+static int __init console_suspend_disable(char *str)
+{
+ console_suspend_enabled = 0;
+ return 1;
+}
+__setup("no_console_suspend", console_suspend_disable);
+
/**
* suspend_console - suspend the console subsystem
*
@@ -759,6 +879,8 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
*/
void suspend_console(void)
{
+ if (!console_suspend_enabled)
+ return;
printk("Suspending console(s)\n");
acquire_console_sem();
console_suspended = 1;
@@ -766,10 +888,11 @@ void suspend_console(void)
void resume_console(void)
{
+ if (!console_suspend_enabled)
+ return;
console_suspended = 0;
release_console_sem();
}
-#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
/**
* acquire_console_sem - lock the console system for exclusive use.
diff --git a/kernel/profile.c b/kernel/profile.c
index cb1e37d2dac3..631b75c25d7e 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -37,7 +37,7 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
/* Oprofile timer tick hook */
-int (*timer_hook)(struct pt_regs *) __read_mostly;
+static int (*timer_hook)(struct pt_regs *) __read_mostly;
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
@@ -346,7 +346,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
per_cpu(cpu_profile_flip, cpu) = 0;
if (!per_cpu(cpu_profile_hits, cpu)[1]) {
page = alloc_pages_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
return NOTIFY_BAD;
@@ -354,7 +354,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
}
if (!per_cpu(cpu_profile_hits, cpu)[0]) {
page = alloc_pages_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
goto out_free;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 3eca7a55f2ee..7c76f2ffaeaa 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -19,6 +19,7 @@
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
+#include <linux/pid_namespace.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -168,7 +169,7 @@ int ptrace_attach(struct task_struct *task)
retval = -EPERM;
if (task->pid <= 1)
goto out;
- if (task->tgid == current->tgid)
+ if (same_thread_group(task, current))
goto out;
repeat:
@@ -386,6 +387,9 @@ int ptrace_request(struct task_struct *child, long request,
case PTRACE_SETSIGINFO:
ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
break;
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
default:
break;
}
@@ -440,7 +444,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
return ERR_PTR(-EPERM);
read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
+ child = find_task_by_vpid(pid);
if (child)
get_task_struct(child);
@@ -450,6 +454,10 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
return child;
}
+#ifndef arch_ptrace_attach
+#define arch_ptrace_attach(child) do { } while (0)
+#endif
+
#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
@@ -473,6 +481,12 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
+ /*
+ * Some architectures need to do book-keeping after
+ * a ptrace attach.
+ */
+ if (!ret)
+ arch_ptrace_attach(child);
goto out_put_task_struct;
}
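
arch_ptrace_attach() above stays a no-op unless the architecture's headers define it; when defined, it runs after a successful PTRACE_ATTACH. A purely hypothetical override (both the helper and the field are invented) might look like:

/* Hypothetical arch code -- not part of this patch. */
static inline void arch_prepare_debug_state(struct task_struct *child)
{
	/* e.g. reset per-task debug state so a later PTRACE_SINGLESTEP
	 * starts from a known baseline; the field is invented. */
	child->thread.debug_state = 0;
}

#define arch_ptrace_attach(child)	arch_prepare_debug_state(child)
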
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c2dd8410dc4..a66d4d1615f7 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,10 +45,17 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
-#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
.cur = -300,
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index ddff33247785..c3e165c2318f 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -35,14 +35,12 @@
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
-#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
-#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
@@ -166,16 +164,14 @@ struct rcu_random_state {
/*
* Crude but fast random-number generator. Uses a linear congruential
- * generator, with occasional help from get_random_bytes().
+ * generator, with occasional help from cpu_clock().
*/
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
- long refresh;
-
if (--rrsp->rrs_count < 0) {
- get_random_bytes(&refresh, sizeof(refresh));
- rrsp->rrs_state += refresh;
+ rrsp->rrs_state +=
+ (unsigned long)cpu_clock(raw_smp_processor_id());
rrsp->rrs_count = RCU_RANDOM_REFRESH;
}
rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
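
The torture test's generator is an ordinary linear congruential generator that is occasionally nudged with a timestamp instead of get_random_bytes(), so it never sleeps or takes locks. A stand-alone model of the same shape (the constants are invented stand-ins for RCU_RANDOM_MULT/ADD/REFRESH):

#include <stdio.h>
#include <time.h>

#define MODEL_MULT	69069UL		/* invented stand-in */
#define MODEL_ADD	1234567UL	/* invented stand-in */
#define MODEL_REFRESH	10000		/* invented stand-in */

struct model_random_state {
	unsigned long state;
	long count;
};

/* Same shape as rcu_random(): reseed from a clock every MODEL_REFRESH calls,
 * then advance the linear congruential generator. */
static unsigned long model_random(struct model_random_state *s)
{
	if (--s->count < 0) {
		s->state += (unsigned long)clock();	/* stands in for cpu_clock() */
		s->count = MODEL_REFRESH;
	}
	s->state = s->state * MODEL_MULT + MODEL_ADD;
	return s->state;
}

int main(void)
{
	struct model_random_state s = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%lu\n", model_random(&s));
	return 0;
}
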
diff --git a/kernel/relay.c b/kernel/relay.c
index ad855017bc59..61134eb7a0c8 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -370,7 +370,7 @@ void relay_reset(struct rchan *chan)
if (!chan)
return;
- if (chan->is_global && chan->buf[0]) {
+ if (chan->is_global && chan->buf[0]) {
__relay_reset(chan->buf[0], 0);
return;
}
@@ -850,13 +850,13 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
buf->subbufs_consumed = consumed;
buf->bytes_consumed = 0;
}
-
+
produced = (produced % n_subbufs) * subbuf_size + buf->offset;
consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;
if (consumed > produced)
produced += n_subbufs * subbuf_size;
-
+
if (consumed == produced)
return 0;
diff --git a/kernel/resource.c b/kernel/resource.c
index 9bd14fd3e6de..a358142ff48f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(release_resource);
* the caller must specify res->start, res->end, res->flags.
* If found, returns 0, res is overwritten, if not found, returns -1.
*/
-int find_next_system_ram(struct resource *res)
+static int find_next_system_ram(struct resource *res)
{
resource_size_t start, end;
struct resource *p;
@@ -267,6 +267,30 @@ int find_next_system_ram(struct resource *res)
res->end = p->end;
return 0;
}
+int
+walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
+ int (*func)(unsigned long, unsigned long, void *))
+{
+ struct resource res;
+ unsigned long pfn, len;
+ u64 orig_end;
+ int ret = -1;
+ res.start = (u64) start_pfn << PAGE_SHIFT;
+ res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+ res.flags = IORESOURCE_MEM;
+ orig_end = res.end;
+ while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
+ pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+ len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
+ ret = (*func)(pfn, len, arg);
+ if (ret)
+ break;
+ res.start = res.end + 1;
+ res.end = orig_end;
+ }
+ return ret;
+}
+
#endif
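
walk_memory_resource() iterates over the "System RAM" resources intersecting the given pfn range and invokes the callback once per sub-range, stopping as soon as the callback returns non-zero. A hypothetical caller that simply counts the RAM pages in a range (names invented):

/* Hypothetical caller -- not part of this patch. */
static int count_ram_pages_cb(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;			/* 0 == keep walking */
}

static unsigned long count_ram_pages(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_memory_resource(start_pfn, nr_pages, &total, count_ram_pages_cb);
	return total;
}
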
/*
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5aedbee014df..56d73cb8826d 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -82,17 +82,12 @@ do { \
* into the tracing code when doing error printk or
* executing a BUG():
*/
-int rt_trace_on = 1;
-
-void deadlock_trace_off(void)
-{
- rt_trace_on = 0;
-}
+static int rt_trace_on = 1;
static void printk_task(struct task_struct *p)
{
if (p)
- printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
else
printk("<none>");
}
@@ -157,22 +152,25 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk( "[ BUG: circular locking deadlock detected! ]\n");
printk( "--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
- task->comm, task->pid, current->comm, current->pid);
+ task->comm, task_pid_nr(task),
+ current->comm, task_pid_nr(current));
printk("\n1) %s/%d is trying to acquire this lock:\n",
- current->comm, current->pid);
+ current->comm, task_pid_nr(current));
printk_lock(waiter->lock, 1);
- printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+ printk("\n2) %s/%d is blocked on this lock:\n",
+ task->comm, task_pid_nr(task));
printk_lock(waiter->deadlock_lock, 1);
debug_show_held_locks(current);
debug_show_held_locks(task);
- printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+ printk("\n%s/%d's [blocked] stackdump:\n\n",
+ task->comm, task_pid_nr(task));
show_stack(task, NULL);
printk("\n%s/%d's [current] stackdump:\n\n",
- current->comm, current->pid);
+ current->comm, task_pid_nr(current));
dump_stack();
debug_show_all_locks();
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 8cd9bd2cdb34..0deef71ff8d2 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -185,7 +185,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
prev_max = max_lock_depth;
printk(KERN_WARNING "Maximum lock depth %d reached "
"task: %s (%d)\n", max_lock_depth,
- top_task->comm, top_task->pid);
+ top_task->comm, task_pid_nr(top_task));
}
put_task_struct(task);
diff --git a/kernel/sched.c b/kernel/sched.c
index 6107a0cd6325..afe76ec2e7fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -44,6 +44,7 @@
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
@@ -51,6 +52,7 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
+#include <linux/cpu_acct.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
@@ -61,6 +63,7 @@
#include <linux/delayacct.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>
+#include <linux/pagemap.h>
#include <asm/tlb.h>
@@ -95,7 +98,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
/*
* Some helpers for converting nanosecond timing to jiffy resolution
*/
-#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
@@ -104,11 +107,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
/*
* These are the 'tuning knobs' of the scheduler:
*
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
* Timeslices get refilled after they expire.
*/
-#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE (100 * HZ / 1000)
#ifdef CONFIG_SMP
@@ -132,24 +133,6 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
}
#endif
-#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-/*
- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- */
-static unsigned int static_prio_timeslice(int static_prio)
-{
- if (static_prio == NICE_TO_PRIO(19))
- return 1;
-
- if (static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
- else
- return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
@@ -170,31 +153,99 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};
-struct load_stat {
- struct load_weight load;
- u64 load_update_start, load_update_last;
- unsigned long delta_fair, delta_exec, delta_stat;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+#include <linux/cgroup.h>
+
+struct cfs_rq;
+
+/* task group related information */
+struct task_group {
+#ifdef CONFIG_FAIR_CGROUP_SCHED
+ struct cgroup_subsys_state css;
+#endif
+ /* schedulable entities of this group on each cpu */
+ struct sched_entity **se;
+ /* runqueue "owned" by this group on each cpu */
+ struct cfs_rq **cfs_rq;
+ unsigned long shares;
+ /* spinlock to serialize modification to shares */
+ spinlock_t lock;
+};
+
+/* Default task group's sched entity on each cpu */
+static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
+/* Default task group's cfs_rq on each cpu */
+static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+
+static struct sched_entity *init_sched_entity_p[NR_CPUS];
+static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
+
+/* Default task group.
+ * Every task in the system belongs to this group at bootup.
+ */
+struct task_group init_task_group = {
+ .se = init_sched_entity_p,
+ .cfs_rq = init_cfs_rq_p,
};
+#ifdef CONFIG_FAIR_USER_SCHED
+# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
+#else
+# define INIT_TASK_GRP_LOAD NICE_0_LOAD
+#endif
+
+static int init_task_group_load = INIT_TASK_GRP_LOAD;
+
+/* return group to which a task belongs */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+ struct task_group *tg;
+
+#ifdef CONFIG_FAIR_USER_SCHED
+ tg = p->user->tg;
+#elif defined(CONFIG_FAIR_CGROUP_SCHED)
+ tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
+ struct task_group, css);
+#else
+ tg = &init_task_group;
+#endif
+
+ return tg;
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_cfs_rq(struct task_struct *p)
+{
+ p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
+ p->se.parent = task_group(p)->se[task_cpu(p)];
+}
+
+#else
+
+static inline void set_task_cfs_rq(struct task_struct *p) { }
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
unsigned long nr_running;
- s64 fair_clock;
u64 exec_clock;
- s64 wait_runtime;
- u64 sleeper_bonus;
- unsigned long wait_runtime_overruns, wait_runtime_underruns;
+ u64 min_vruntime;
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
struct rb_node *rb_load_balance_curr;
-#ifdef CONFIG_FAIR_GROUP_SCHED
/* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr;
+
+ unsigned long nr_spread_over;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
@@ -205,6 +256,8 @@ struct cfs_rq {
* list is used during load balance.
*/
struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+ struct task_group *tg; /* group that "owns" this runqueue */
+ struct rcu_head rcu;
#endif
};
@@ -223,7 +276,8 @@ struct rt_rq {
* acquire operations must be ordered by ascending &runqueue.
*/
struct rq {
- spinlock_t lock; /* runqueue lock */
+ /* runqueue lock: */
+ spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -236,13 +290,15 @@ struct rq {
#ifdef CONFIG_NO_HZ
unsigned char in_nohz_recently;
#endif
- struct load_stat ls; /* capture load from *all* tasks on this cpu */
+ /* capture load from *all* tasks on this cpu: */
+ struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
struct cfs_rq cfs;
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+ /* list of leaf cfs_rq on this cpu: */
+ struct list_head leaf_cfs_rq_list;
#endif
struct rt_rq rt;
@@ -274,7 +330,8 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
- int cpu; /* cpu of this runqueue */
+ /* cpu of this runqueue: */
+ int cpu;
struct task_struct *migration_thread;
struct list_head migration_queue;
@@ -285,19 +342,22 @@ struct rq {
struct sched_info rq_sched_info;
/* sys_sched_yield() stats */
- unsigned long yld_exp_empty;
- unsigned long yld_act_empty;
- unsigned long yld_both_empty;
- unsigned long yld_cnt;
+ unsigned int yld_exp_empty;
+ unsigned int yld_act_empty;
+ unsigned int yld_both_empty;
+ unsigned int yld_count;
/* schedule() stats */
- unsigned long sched_switch;
- unsigned long sched_cnt;
- unsigned long sched_goidle;
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
/* try_to_wake_up() stats */
- unsigned long ttwu_cnt;
- unsigned long ttwu_local;
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
+
+ /* BKL stats */
+ unsigned int bkl_count;
#endif
struct lock_class_key rq_lock_key;
};
@@ -382,6 +442,37 @@ static void update_rq_clock(struct rq *rq)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
/*
+ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
+ */
+#ifdef CONFIG_SCHED_DEBUG
+# define const_debug __read_mostly
+#else
+# define const_debug static const
+#endif
+
+/*
+ * Debugging: various feature bits
+ */
+enum {
+ SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
+ SCHED_FEAT_START_DEBIT = 2,
+ SCHED_FEAT_TREE_AVG = 4,
+ SCHED_FEAT_APPROX_AVG = 8,
+ SCHED_FEAT_WAKEUP_PREEMPT = 16,
+ SCHED_FEAT_PREEMPT_RESTRICT = 32,
+};
+
+const_debug unsigned int sysctl_sched_features =
+ SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+ SCHED_FEAT_START_DEBIT * 1 |
+ SCHED_FEAT_TREE_AVG * 0 |
+ SCHED_FEAT_APPROX_AVG * 0 |
+ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
+ SCHED_FEAT_PREEMPT_RESTRICT * 1;
+
+#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
+
+/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
*/
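
The feature mask above is built by multiplying each bit by 0 or 1, which keeps disabled features visible in the source while producing a plain bitmask. With the defaults shown, it works out as follows (stand-alone check mirroring the enum values):

#include <stdio.h>

/* Mirrors the SCHED_FEAT_* enum above. */
enum {
	FEAT_NEW_FAIR_SLEEPERS	= 1,
	FEAT_START_DEBIT	= 2,
	FEAT_TREE_AVG		= 4,
	FEAT_APPROX_AVG		= 8,
	FEAT_WAKEUP_PREEMPT	= 16,
	FEAT_PREEMPT_RESTRICT	= 32,
};

int main(void)
{
	unsigned int features =
		FEAT_NEW_FAIR_SLEEPERS	* 1 |
		FEAT_START_DEBIT	* 1 |
		FEAT_TREE_AVG		* 0 |
		FEAT_APPROX_AVG		* 0 |
		FEAT_WAKEUP_PREEMPT	* 1 |
		FEAT_PREEMPT_RESTRICT	* 1;

	/* Prints 51 (1 + 2 + 16 + 32); sched_feat(X) simply tests the bit. */
	printf("sysctl_sched_features = %u, WAKEUP_PREEMPT on: %d\n",
	       features, !!(features & FEAT_WAKEUP_PREEMPT));
	return 0;
}
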
@@ -399,18 +490,7 @@ unsigned long long cpu_clock(int cpu)
return now;
}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/* Change a task's ->cfs_rq if it moves across CPUs */
-static inline void set_task_cfs_rq(struct task_struct *p)
-{
- p->se.cfs_rq = &task_rq(p)->cfs;
-}
-#else
-static inline void set_task_cfs_rq(struct task_struct *p)
-{
-}
-#endif
+EXPORT_SYMBOL_GPL(cpu_clock);
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
@@ -496,16 +576,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
- struct rq *rq;
-
-repeat_lock_task:
- rq = task_rq(p);
- spin_lock(&rq->lock);
- if (unlikely(rq != task_rq(p))) {
+ for (;;) {
+ struct rq *rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (likely(rq == task_rq(p)))
+ return rq;
spin_unlock(&rq->lock);
- goto repeat_lock_task;
}
- return rq;
}
/*
@@ -518,18 +595,17 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
{
struct rq *rq;
-repeat_lock_task:
- local_irq_save(*flags);
- rq = task_rq(p);
- spin_lock(&rq->lock);
- if (unlikely(rq != task_rq(p))) {
+ for (;;) {
+ local_irq_save(*flags);
+ rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (likely(rq == task_rq(p)))
+ return rq;
spin_unlock_irqrestore(&rq->lock, *flags);
- goto repeat_lock_task;
}
- return rq;
}
-static inline void __task_rq_unlock(struct rq *rq)
+static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
spin_unlock(&rq->lock);
@@ -544,7 +620,7 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
-static inline struct rq *this_rq_lock(void)
+static struct rq *this_rq_lock(void)
__acquires(rq->lock)
{
struct rq *rq;
@@ -644,19 +720,6 @@ static inline void resched_task(struct task_struct *p)
}
#endif
-static u64 div64_likely32(u64 divident, unsigned long divisor)
-{
-#if BITS_PER_LONG == 32
- if (likely(divident <= 0xffffffffULL))
- return (u32)divident / divisor;
- do_div(divident, divisor);
-
- return divident;
-#else
- return divident / divisor;
-#endif
-}
-
#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
@@ -698,16 +761,14 @@ calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
}
-static void update_load_add(struct load_weight *lw, unsigned long inc)
+static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
- lw->inv_weight = 0;
}
-static void update_load_sub(struct load_weight *lw, unsigned long dec)
+static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
- lw->inv_weight = 0;
}
/*
@@ -783,29 +844,20 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
int *this_best_prio, struct rq_iterator *iterator);
#include "sched_stats.h"
-#include "sched_rt.c"
-#include "sched_fair.c"
#include "sched_idletask.c"
+#include "sched_fair.c"
+#include "sched_rt.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
#define sched_class_highest (&rt_sched_class)
-static void __update_curr_load(struct rq *rq, struct load_stat *ls)
-{
- if (rq->curr != rq->idle && ls->load.weight) {
- ls->delta_exec += ls->delta_stat;
- ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
- ls->delta_stat = 0;
- }
-}
-
/*
* Update delta_exec, delta_fair fields for rq.
*
* delta_fair clock advances at a rate inversely proportional to
- * total load (rq->ls.load.weight) on the runqueue, while
+ * total load (rq->load.weight) on the runqueue, while
* delta_exec advances at the same rate as wall-clock (provided
* cpu is not idle).
*
@@ -813,35 +865,17 @@ static void __update_curr_load(struct rq *rq, struct load_stat *ls)
* runqueue over any given interval. This (smoothened) load is used
* during load balance.
*
- * This function is called /before/ updating rq->ls.load
+ * This function is called /before/ updating rq->load
* and when switching tasks.
*/
-static void update_curr_load(struct rq *rq)
-{
- struct load_stat *ls = &rq->ls;
- u64 start;
-
- start = ls->load_update_start;
- ls->load_update_start = rq->clock;
- ls->delta_stat += rq->clock - start;
- /*
- * Stagger updates to ls->delta_fair. Very frequent updates
- * can be expensive.
- */
- if (ls->delta_stat >= sysctl_sched_stat_granularity)
- __update_curr_load(rq, ls);
-}
-
static inline void inc_load(struct rq *rq, const struct task_struct *p)
{
- update_curr_load(rq);
- update_load_add(&rq->ls.load, p->se.load.weight);
+ update_load_add(&rq->load, p->se.load.weight);
}
static inline void dec_load(struct rq *rq, const struct task_struct *p)
{
- update_curr_load(rq);
- update_load_sub(&rq->ls.load, p->se.load.weight);
+ update_load_sub(&rq->load, p->se.load.weight);
}
static void inc_nr_running(struct task_struct *p, struct rq *rq)
@@ -858,8 +892,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
static void set_load_weight(struct task_struct *p)
{
- p->se.wait_runtime = 0;
-
if (task_has_rt_policy(p)) {
p->se.load.weight = prio_to_weight[0] * 2;
p->se.load.inv_weight = prio_to_wmult[0] >> 1;
@@ -951,20 +983,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
}
/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
- update_rq_clock(rq);
-
- if (p->state == TASK_UNINTERRUPTIBLE)
- rq->nr_uninterruptible--;
-
- enqueue_task(rq, p, 0);
- inc_nr_running(p, rq);
-}
-
-/*
* deactivate_task - remove a task from the runqueue.
*/
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
@@ -988,32 +1006,50 @@ inline int task_curr(const struct task_struct *p)
/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
- return cpu_rq(cpu)->ls.load.weight;
+ return cpu_rq(cpu)->load.weight;
}
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_SMP
task_thread_info(p)->cpu = cpu;
- set_task_cfs_rq(p);
#endif
+ set_task_cfs_rq(p);
}
#ifdef CONFIG_SMP
+/*
+ * Is this task likely cache-hot:
+ */
+static inline int
+task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+{
+ s64 delta;
+
+ if (p->sched_class != &fair_sched_class)
+ return 0;
+
+ if (sysctl_sched_migration_cost == -1)
+ return 1;
+ if (sysctl_sched_migration_cost == 0)
+ return 0;
+
+ delta = now - p->se.exec_start;
+
+ return delta < (s64)sysctl_sched_migration_cost;
+}
+
+
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
int old_cpu = task_cpu(p);
struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
- u64 clock_offset, fair_clock_offset;
+ struct cfs_rq *old_cfsrq = task_cfs_rq(p),
+ *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
+ u64 clock_offset;
clock_offset = old_rq->clock - new_rq->clock;
- fair_clock_offset = old_rq->cfs.fair_clock - new_rq->cfs.fair_clock;
-
- if (p->se.wait_start_fair)
- p->se.wait_start_fair -= fair_clock_offset;
- if (p->se.sleep_start_fair)
- p->se.sleep_start_fair -= fair_clock_offset;
#ifdef CONFIG_SCHEDSTATS
if (p->se.wait_start)
@@ -1022,7 +1058,14 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.sleep_start -= clock_offset;
if (p->se.block_start)
p->se.block_start -= clock_offset;
+ if (old_cpu != new_cpu) {
+ schedstat_inc(p, se.nr_migrations);
+ if (task_hot(p, old_rq->clock, NULL))
+ schedstat_inc(p, se.nr_forced2_migrations);
+ }
#endif
+ p->se.vruntime -= old_cfsrq->min_vruntime -
+ new_cfsrq->min_vruntime;
__set_task_cpu(p, new_cpu);
}
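
task_hot() treats a task as cache-hot when it last ran within sysctl_sched_migration_cost nanoseconds of "now", with -1 meaning "always hot" and 0 "never hot". A stand-alone model, assuming a 0.5 ms threshold purely for illustration:

#include <stdio.h>

typedef long long s64;

/* Model of task_hot(): hot if it ran less than migration_cost ns ago. */
static int task_hot_model(s64 now_ns, s64 exec_start_ns, s64 migration_cost_ns)
{
	if (migration_cost_ns == -1)
		return 1;
	if (migration_cost_ns == 0)
		return 0;
	return (now_ns - exec_start_ns) < migration_cost_ns;
}

int main(void)
{
	s64 cost = 500000;	/* assumed 0.5 ms threshold, for illustration */

	/* Ran 200 us ago: still hot, so load balancing avoids migrating it. */
	printf("%d\n", task_hot_model(1000000, 800000, cost));		/* 1 */
	/* Ran 2 ms ago: cold, fair game for migration. */
	printf("%d\n", task_hot_model(3000000, 1000000, cost));		/* 0 */
	return 0;
}
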
@@ -1077,69 +1120,71 @@ void wait_task_inactive(struct task_struct *p)
int running, on_rq;
struct rq *rq;
-repeat:
- /*
- * We do the initial early heuristics without holding
- * any task-queue locks at all. We'll only try to get
- * the runqueue lock when things look like they will
- * work out!
- */
- rq = task_rq(p);
+ for (;;) {
+ /*
+ * We do the initial early heuristics without holding
+ * any task-queue locks at all. We'll only try to get
+ * the runqueue lock when things look like they will
+ * work out!
+ */
+ rq = task_rq(p);
- /*
- * If the task is actively running on another CPU
- * still, just relax and busy-wait without holding
- * any locks.
- *
- * NOTE! Since we don't hold any locks, it's not
- * even sure that "rq" stays as the right runqueue!
- * But we don't care, since "task_running()" will
- * return false if the runqueue has changed and p
- * is actually now running somewhere else!
- */
- while (task_running(rq, p))
- cpu_relax();
+ /*
+ * If the task is actively running on another CPU
+ * still, just relax and busy-wait without holding
+ * any locks.
+ *
+ * NOTE! Since we don't hold any locks, it's not
+ * even sure that "rq" stays as the right runqueue!
+ * But we don't care, since "task_running()" will
+ * return false if the runqueue has changed and p
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p))
+ cpu_relax();
- /*
- * Ok, time to look more closely! We need the rq
- * lock now, to be *sure*. If we're wrong, we'll
- * just go back and repeat.
- */
- rq = task_rq_lock(p, &flags);
- running = task_running(rq, p);
- on_rq = p->se.on_rq;
- task_rq_unlock(rq, &flags);
+ /*
+ * Ok, time to look more closely! We need the rq
+ * lock now, to be *sure*. If we're wrong, we'll
+ * just go back and repeat.
+ */
+ rq = task_rq_lock(p, &flags);
+ running = task_running(rq, p);
+ on_rq = p->se.on_rq;
+ task_rq_unlock(rq, &flags);
- /*
- * Was it really running after all now that we
- * checked with the proper locks actually held?
- *
- * Oops. Go back and try again..
- */
- if (unlikely(running)) {
- cpu_relax();
- goto repeat;
- }
+ /*
+ * Was it really running after all now that we
+ * checked with the proper locks actually held?
+ *
+ * Oops. Go back and try again..
+ */
+ if (unlikely(running)) {
+ cpu_relax();
+ continue;
+ }
- /*
- * It's not enough that it's not actively running,
- * it must be off the runqueue _entirely_, and not
- * preempted!
- *
- * So if it wa still runnable (but just not actively
- * running right now), it's preempted, and we should
- * yield - it could be a while.
- */
- if (unlikely(on_rq)) {
- yield();
- goto repeat;
- }
+ /*
+ * It's not enough that it's not actively running,
+ * it must be off the runqueue _entirely_, and not
+ * preempted!
+ *
+	 * So if it was still runnable (but just not actively
+ * running right now), it's preempted, and we should
+ * yield - it could be a while.
+ */
+ if (unlikely(on_rq)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
- /*
- * Ahh, all good. It wasn't running, and it wasn't
- * runnable, which means that it will never become
- * running in the future either. We're all done!
- */
+ /*
+ * Ahh, all good. It wasn't running, and it wasn't
+ * runnable, which means that it will never become
+ * running in the future either. We're all done!
+ */
+ break;
+ }
}
/***
@@ -1173,7 +1218,7 @@ void kick_process(struct task_struct *p)
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
-static inline unsigned long source_load(int cpu, int type)
+static unsigned long source_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
@@ -1188,7 +1233,7 @@ static inline unsigned long source_load(int cpu, int type)
* Return a high guess at the load of a migration-target cpu weighted
* according to the scheduling class and "nice" value.
*/
-static inline unsigned long target_load(int cpu, int type)
+static unsigned long target_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
@@ -1230,7 +1275,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
/* Skip over this group if it has no CPUs allowed */
if (!cpus_intersects(group->cpumask, p->cpus_allowed))
- goto nextgroup;
+ continue;
local_group = cpu_isset(this_cpu, group->cpumask);
@@ -1258,9 +1303,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
min_load = avg_load;
idlest = group;
}
-nextgroup:
- group = group->next;
- } while (group != sd->groups);
+ } while (group = group->next, group != sd->groups);
if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
@@ -1392,8 +1435,13 @@ static int wake_idle(int cpu, struct task_struct *p)
if (sd->flags & SD_WAKE_IDLE) {
cpus_and(tmp, sd->span, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
- if (idle_cpu(i))
+ if (idle_cpu(i)) {
+ if (i != task_cpu(p)) {
+ schedstat_inc(p,
+ se.nr_wakeups_idle);
+ }
return i;
+ }
}
} else {
break;
@@ -1424,7 +1472,7 @@ static inline int wake_idle(int cpu, struct task_struct *p)
*/
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
- int cpu, this_cpu, success = 0;
+ int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
long old_state;
struct rq *rq;
@@ -1443,6 +1491,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
goto out_running;
cpu = task_cpu(p);
+ orig_cpu = cpu;
this_cpu = smp_processor_id();
#ifdef CONFIG_SMP
@@ -1451,7 +1500,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
new_cpu = cpu;
- schedstat_inc(rq, ttwu_cnt);
+ schedstat_inc(rq, ttwu_count);
if (cpu == this_cpu) {
schedstat_inc(rq, ttwu_local);
goto out_set_cpu;
@@ -1486,6 +1535,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
unsigned long tl = this_load;
unsigned long tl_per_task;
+ /*
+ * Attract cache-cold tasks on sync wakeups:
+ */
+ if (sync && !task_hot(p, rq->clock, this_sd))
+ goto out_set_cpu;
+
+ schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);
/*
@@ -1505,6 +1561,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
* there is no bad imbalance.
*/
schedstat_inc(this_sd, ttwu_move_affine);
+ schedstat_inc(p, se.nr_wakeups_affine);
goto out_set_cpu;
}
}
@@ -1516,6 +1573,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
if (this_sd->flags & SD_WAKE_BALANCE) {
if (imbalance*this_load <= 100*load) {
schedstat_inc(this_sd, ttwu_move_balance);
+ schedstat_inc(p, se.nr_wakeups_passive);
goto out_set_cpu;
}
}
@@ -1541,18 +1599,18 @@ out_set_cpu:
out_activate:
#endif /* CONFIG_SMP */
+ schedstat_inc(p, se.nr_wakeups);
+ if (sync)
+ schedstat_inc(p, se.nr_wakeups_sync);
+ if (orig_cpu != cpu)
+ schedstat_inc(p, se.nr_wakeups_migrate);
+ if (cpu == this_cpu)
+ schedstat_inc(p, se.nr_wakeups_local);
+ else
+ schedstat_inc(p, se.nr_wakeups_remote);
update_rq_clock(rq);
activate_task(rq, p, 1);
- /*
- * Sync wakeups (i.e. those types of wakeups where the waker
- * has indicated that it will leave the CPU in short order)
- * don't trigger a preemption, if the woken up task will run on
- * this cpu. (in this case the 'I will reschedule' promise of
- * the waker guarantees that the freshly woken up task is going
- * to be considered on this CPU.)
- */
- if (!sync || cpu != this_cpu)
- check_preempt_curr(rq, p);
+ check_preempt_curr(rq, p);
success = 1;
out_running:
@@ -1583,28 +1641,20 @@ int fastcall wake_up_state(struct task_struct *p, unsigned int state)
*/
static void __sched_fork(struct task_struct *p)
{
- p->se.wait_start_fair = 0;
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
- p->se.delta_exec = 0;
- p->se.delta_fair_run = 0;
- p->se.delta_fair_sleep = 0;
- p->se.wait_runtime = 0;
- p->se.sleep_start_fair = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
- p->se.sum_wait_runtime = 0;
p->se.sum_sleep_runtime = 0;
p->se.sleep_start = 0;
p->se.block_start = 0;
p->se.sleep_max = 0;
p->se.block_max = 0;
p->se.exec_max = 0;
+ p->se.slice_max = 0;
p->se.wait_max = 0;
- p->se.wait_runtime_overruns = 0;
- p->se.wait_runtime_underruns = 0;
#endif
INIT_LIST_HEAD(&p->run_list);
@@ -1635,12 +1685,14 @@ void sched_fork(struct task_struct *p, int clone_flags)
#ifdef CONFIG_SMP
cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif
- __set_task_cpu(p, cpu);
+ set_task_cpu(p, cpu);
/*
* Make sure we do not leak PI boosting priority to the child:
*/
p->prio = current->normal_prio;
+ if (!rt_prio(p->prio))
+ p->sched_class = &fair_sched_class;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
@@ -1657,12 +1709,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
}
/*
- * After fork, child runs first. (default) If set to 0 then
- * parent will (try to) run first.
- */
-unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
-
-/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
@@ -1673,24 +1719,14 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
struct rq *rq;
- int this_cpu;
rq = task_rq_lock(p, &flags);
BUG_ON(p->state != TASK_RUNNING);
- this_cpu = smp_processor_id(); /* parent's CPU */
update_rq_clock(rq);
p->prio = effective_prio(p);
- if (rt_prio(p->prio))
- p->sched_class = &rt_sched_class;
- else
- p->sched_class = &fair_sched_class;
-
- if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
- (clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
- !current->se.on_rq) {
-
+ if (!p->sched_class->task_new || !current->se.on_rq) {
activate_task(rq, p, 0);
} else {
/*
@@ -1799,7 +1835,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
-static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static void finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
@@ -1849,7 +1885,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
preempt_enable();
#endif
if (current->set_child_tid)
- put_user(current->pid, current->set_child_tid);
+ put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
@@ -1981,42 +2017,10 @@ unsigned long nr_active(void)
*/
static void update_cpu_load(struct rq *this_rq)
{
- u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
- unsigned long total_load = this_rq->ls.load.weight;
- unsigned long this_load = total_load;
- struct load_stat *ls = &this_rq->ls;
+ unsigned long this_load = this_rq->load.weight;
int i, scale;
this_rq->nr_load_updates++;
- if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
- goto do_avg;
-
- /* Update delta_fair/delta_exec fields first */
- update_curr_load(this_rq);
-
- fair_delta64 = ls->delta_fair + 1;
- ls->delta_fair = 0;
-
- exec_delta64 = ls->delta_exec + 1;
- ls->delta_exec = 0;
-
- sample_interval64 = this_rq->clock - ls->load_update_last;
- ls->load_update_last = this_rq->clock;
-
- if ((s64)sample_interval64 < (s64)TICK_NSEC)
- sample_interval64 = TICK_NSEC;
-
- if (exec_delta64 > sample_interval64)
- exec_delta64 = sample_interval64;
-
- idle_delta64 = sample_interval64 - exec_delta64;
-
- tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
- tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
-
- this_load = (unsigned long)tmp64;
-
-do_avg:
/* Update our load: */
for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -2026,7 +2030,13 @@ do_avg:
old_load = this_rq->cpu_load[i];
new_load = this_load;
-
+ /*
+ * Round up the averaging division if load is increasing. This
+ * prevents us from getting stuck on 9 if the load is 10, for
+ * example.
+ */
+ if (new_load > old_load)
+ new_load += scale-1;
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
}
}
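
The round-up above matters because the exponential average is computed in integer arithmetic. Stand-alone check of the comment's "stuck on 9 when the load is 10" case, using i = 1 (scale = 2):

#include <stdio.h>

int main(void)
{
	unsigned long old_load = 9, this_load = 10;
	int i = 1, scale = 1 << i;
	unsigned long without, with, new_load;

	/* Old formula: the truncating average never reaches the real load. */
	without = (old_load * (scale - 1) + this_load) >> i;	/* (9 + 10) / 2 = 9 */

	/* New formula: round the increasing case up so it converges to 10. */
	new_load = this_load;
	if (new_load > old_load)
		new_load += scale - 1;				/* 10 + 1 = 11 */
	with = (old_load * (scale - 1) + new_load) >> i;	/* (9 + 11) / 2 = 10 */

	printf("without rounding: %lu, with rounding: %lu\n", without, with);
	return 0;
}
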
@@ -2178,13 +2188,38 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
* 2) cannot be migrated to this CPU due to cpus_allowed, or
* 3) are cache-hot on their current CPU.
*/
- if (!cpu_isset(this_cpu, p->cpus_allowed))
+ if (!cpu_isset(this_cpu, p->cpus_allowed)) {
+ schedstat_inc(p, se.nr_failed_migrations_affine);
return 0;
+ }
*all_pinned = 0;
- if (task_running(rq, p))
+ if (task_running(rq, p)) {
+ schedstat_inc(p, se.nr_failed_migrations_running);
return 0;
+ }
+
+ /*
+ * Aggressive migration if:
+ * 1) task is cache cold, or
+ * 2) too many balance attempts have failed.
+ */
+
+ if (!task_hot(p, rq->clock, sd) ||
+ sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+ if (task_hot(p, rq->clock, sd)) {
+ schedstat_inc(sd, lb_hot_gained[idle]);
+ schedstat_inc(p, se.nr_forced_migrations);
+ }
+#endif
+ return 1;
+ }
+ if (task_hot(p, rq->clock, sd)) {
+ schedstat_inc(p, se.nr_failed_migrations_hot);
+ return 0;
+ }
return 1;
}
@@ -2263,7 +2298,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle,
int *all_pinned)
{
- struct sched_class *class = sched_class_highest;
+ const struct sched_class *class = sched_class_highest;
unsigned long total_load_moved = 0;
int this_best_prio = this_rq->curr->prio;
@@ -2288,7 +2323,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct sched_domain *sd, enum cpu_idle_type idle)
{
- struct sched_class *class;
+ const struct sched_class *class;
int this_best_prio = MAX_PRIO;
for (class = sched_class_highest; class; class = class->next)
@@ -2315,7 +2350,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long max_pull;
unsigned long busiest_load_per_task, busiest_nr_running;
unsigned long this_load_per_task, this_nr_running;
- int load_idx;
+ int load_idx, group_imb = 0;
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
int power_savings_balance = 1;
unsigned long leader_nr_running = 0, min_load_per_task = 0;
@@ -2334,9 +2369,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = sd->idle_idx;
do {
- unsigned long load, group_capacity;
+ unsigned long load, group_capacity, max_cpu_load, min_cpu_load;
int local_group;
int i;
+ int __group_imb = 0;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long sum_nr_running, sum_weighted_load;
@@ -2347,6 +2383,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
+ max_cpu_load = 0;
+ min_cpu_load = ~0UL;
for_each_cpu_mask(i, group->cpumask) {
struct rq *rq;
@@ -2367,8 +2405,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
}
load = target_load(i, load_idx);
- } else
+ } else {
load = source_load(i, load_idx);
+ if (load > max_cpu_load)
+ max_cpu_load = load;
+ if (min_cpu_load > load)
+ min_cpu_load = load;
+ }
avg_load += load;
sum_nr_running += rq->nr_running;
@@ -2394,6 +2437,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
avg_load = sg_div_cpu_power(group,
avg_load * SCHED_LOAD_SCALE);
+ if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE)
+ __group_imb = 1;
+
group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
if (local_group) {
@@ -2402,11 +2448,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
this_nr_running = sum_nr_running;
this_load_per_task = sum_weighted_load;
} else if (avg_load > max_load &&
- sum_nr_running > group_capacity) {
+ (sum_nr_running > group_capacity || __group_imb)) {
max_load = avg_load;
busiest = group;
busiest_nr_running = sum_nr_running;
busiest_load_per_task = sum_weighted_load;
+ group_imb = __group_imb;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2478,6 +2525,9 @@ group_next:
goto out_balanced;
busiest_load_per_task /= busiest_nr_running;
+ if (group_imb)
+ busiest_load_per_task = min(busiest_load_per_task, avg_load);
+
/*
* We're trying to get all the cpus to the average_load, so we don't
* want to push ourselves above the average load, nor do we wish to
@@ -2652,7 +2702,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
sd_idle = 1;
- schedstat_inc(sd, lb_cnt[idle]);
+ schedstat_inc(sd, lb_count[idle]);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2805,7 +2855,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
sd_idle = 1;
- schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+ schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
&sd_idle, &cpus, NULL);
@@ -2939,7 +2989,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
}
if (likely(sd)) {
- schedstat_inc(sd, alb_cnt);
+ schedstat_inc(sd, alb_count);
if (move_one_task(target_rq, target_cpu, busiest_rq,
sd, CPU_IDLE))
@@ -3032,7 +3082,7 @@ static DEFINE_SPINLOCK(balancing);
*
* Balancing parameters are set up in arch_init_sched_domains.
*/
-static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
int balance = 1;
struct rq *rq = cpu_rq(cpu);
@@ -3267,9 +3317,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
+ struct rq *rq = this_rq();
p->utime = cputime_add(p->utime, cputime);
+ if (p != rq->idle)
+ cpuacct_charge(p, cputime);
+
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
@@ -3279,6 +3333,35 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
}
/*
+ * Account guest cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in virtual machine since the last update
+ */
+void account_guest_time(struct task_struct *p, cputime_t cputime)
+{
+ cputime64_t tmp;
+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+
+ tmp = cputime_to_cputime64(cputime);
+
+ p->utime = cputime_add(p->utime, cputime);
+ p->gtime = cputime_add(p->gtime, cputime);
+
+ cpustat->user = cputime64_add(cpustat->user, tmp);
+ cpustat->guest = cputime64_add(cpustat->guest, tmp);
+}
+
+/*
+ * Account scaled user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+ p->utimescaled = cputime_add(p->utimescaled, cputime);
+}
+
+/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3291,6 +3374,12 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
struct rq *rq = this_rq();
cputime64_t tmp;
+ if (p->flags & PF_VCPU) {
+ account_guest_time(p, cputime);
+ p->flags &= ~PF_VCPU;
+ return;
+ }
+
p->stime = cputime_add(p->stime, cputime);
/* Add system time to cpustat. */
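
The PF_VCPU branch above lets a virtualization driver steer timer ticks that land while guest code is executing into guest accounting: the driver sets the flag on the vcpu task, and the next account_system_time() call books the tick via account_guest_time() and clears the flag again. A heavily simplified, hypothetical driver-side sketch (the type and helpers are invented; real hypervisors wrap this in their own enter/exit helpers):

/* Hypothetical vcpu run loop -- not part of this patch. */
struct my_vcpu;						/* invented, opaque here */
extern int my_vcpu_should_stop(struct my_vcpu *vcpu);	/* invented */
extern void my_enter_guest_mode(struct my_vcpu *vcpu);	/* invented */

static int my_vcpu_run(struct my_vcpu *vcpu)
{
	while (!my_vcpu_should_stop(vcpu)) {
		/* A tick landing from here on is charged as guest time. */
		current->flags |= PF_VCPU;

		my_enter_guest_mode(vcpu);

		/* No explicit clearing needed: account_system_time() clears
		 * PF_VCPU when it consumes the flag. */
		cond_resched();
	}
	return 0;
}
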
@@ -3299,9 +3388,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
- else if (p != rq->idle)
+ else if (p != rq->idle) {
cpustat->system = cputime64_add(cpustat->system, tmp);
- else if (atomic_read(&rq->nr_iowait) > 0)
+ cpuacct_charge(p, cputime);
+ } else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3310,6 +3400,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
}
/*
+ * Account scaled system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+ p->stimescaled = cputime_add(p->stimescaled, cputime);
+}
+
+/*
* Account for involuntary wait time.
* @p: the process from which the cpu time has been stolen
* @steal: the cpu time spent in involuntary wait
@@ -3326,8 +3427,10 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
- } else
+ } else {
cpustat->steal = cputime64_add(cpustat->steal, tmp);
+ cpuacct_charge(p, -tmp);
+ }
}
/*
@@ -3407,7 +3510,7 @@ EXPORT_SYMBOL(sub_preempt_count);
static noinline void __schedule_bug(struct task_struct *prev)
{
printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
- prev->comm, preempt_count(), prev->pid);
+ prev->comm, preempt_count(), task_pid_nr(prev));
debug_show_held_locks(prev);
if (irqs_disabled())
print_irqtrace_events(prev);
@@ -3429,7 +3532,13 @@ static inline void schedule_debug(struct task_struct *prev)
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
- schedstat_inc(this_rq(), sched_cnt);
+ schedstat_inc(this_rq(), sched_count);
+#ifdef CONFIG_SCHEDSTATS
+ if (unlikely(prev->lock_depth >= 0)) {
+ schedstat_inc(this_rq(), bkl_count);
+ schedstat_inc(prev, sched_info.bkl_count);
+ }
+#endif
}
/*
@@ -3438,7 +3547,7 @@ static inline void schedule_debug(struct task_struct *prev)
static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
- struct sched_class *class;
+ const struct sched_class *class;
struct task_struct *p;
/*
@@ -3487,9 +3596,13 @@ need_resched_nonpreemptible:
schedule_debug(prev);
- spin_lock_irq(&rq->lock);
- clear_tsk_need_resched(prev);
+ /*
+ * Do the rq-clock update outside the rq lock:
+ */
+ local_irq_disable();
__update_rq_clock(rq);
+ spin_lock(&rq->lock);
+ clear_tsk_need_resched(prev);
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -3549,27 +3662,30 @@ asmlinkage void __sched preempt_schedule(void)
if (likely(ti->preempt_count || irqs_disabled()))
return;
-need_resched:
- add_preempt_count(PREEMPT_ACTIVE);
- /*
- * We keep the big kernel semaphore locked, but we
- * clear ->lock_depth so that schedule() doesnt
- * auto-release the semaphore:
- */
+ do {
+ add_preempt_count(PREEMPT_ACTIVE);
+
+ /*
+ * We keep the big kernel semaphore locked, but we
+	 * clear ->lock_depth so that schedule() doesn't
+ * auto-release the semaphore:
+ */
#ifdef CONFIG_PREEMPT_BKL
- saved_lock_depth = task->lock_depth;
- task->lock_depth = -1;
+ saved_lock_depth = task->lock_depth;
+ task->lock_depth = -1;
#endif
- schedule();
+ schedule();
#ifdef CONFIG_PREEMPT_BKL
- task->lock_depth = saved_lock_depth;
+ task->lock_depth = saved_lock_depth;
#endif
- sub_preempt_count(PREEMPT_ACTIVE);
+ sub_preempt_count(PREEMPT_ACTIVE);
- /* we could miss a preemption opportunity between schedule and now */
- barrier();
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- goto need_resched;
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ barrier();
+ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
EXPORT_SYMBOL(preempt_schedule);
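
The rewrite above replaces the backwards goto with a do/while loop; the barrier() forces TIF_NEED_RESCHED to be re-read after every pass through schedule(). A small userspace sketch of the same retry shape; pretend_schedule() and need_resched_flag are stand-ins, and the inline asm is a GCC-style compiler barrier, not the kernel's barrier():

#include <stdio.h>

static int need_resched_flag;		/* stands in for TIF_NEED_RESCHED */
static int passes;

static void pretend_schedule(void)
{
	/* clear the flag after the second pass so the loop terminates */
	if (++passes >= 2)
		need_resched_flag = 0;
}

int main(void)
{
	need_resched_flag = 1;
	do {
		pretend_schedule();
		__asm__ __volatile__("" ::: "memory");	/* barrier() */
	} while (need_resched_flag);

	printf("rescheduled %d times\n", passes);
	return 0;
}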
@@ -3589,29 +3705,32 @@ asmlinkage void __sched preempt_schedule_irq(void)
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
-need_resched:
- add_preempt_count(PREEMPT_ACTIVE);
- /*
- * We keep the big kernel semaphore locked, but we
- * clear ->lock_depth so that schedule() doesnt
- * auto-release the semaphore:
- */
+ do {
+ add_preempt_count(PREEMPT_ACTIVE);
+
+ /*
+ * We keep the big kernel semaphore locked, but we
+	 * clear ->lock_depth so that schedule() doesn't
+ * auto-release the semaphore:
+ */
#ifdef CONFIG_PREEMPT_BKL
- saved_lock_depth = task->lock_depth;
- task->lock_depth = -1;
+ saved_lock_depth = task->lock_depth;
+ task->lock_depth = -1;
#endif
- local_irq_enable();
- schedule();
- local_irq_disable();
+ local_irq_enable();
+ schedule();
+ local_irq_disable();
#ifdef CONFIG_PREEMPT_BKL
- task->lock_depth = saved_lock_depth;
+ task->lock_depth = saved_lock_depth;
#endif
- sub_preempt_count(PREEMPT_ACTIVE);
+ sub_preempt_count(PREEMPT_ACTIVE);
- /* we could miss a preemption opportunity between schedule and now */
- barrier();
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
- goto need_resched;
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ barrier();
+ } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
#endif /* CONFIG_PREEMPT */
@@ -3635,10 +3754,9 @@ EXPORT_SYMBOL(default_wake_function);
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, int sync, void *key)
{
- struct list_head *tmp, *next;
+ wait_queue_t *curr, *next;
- list_for_each_safe(tmp, next, &q->task_list) {
- wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
+ list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
if (curr->func(curr, mode, sync, key) &&
@@ -3728,206 +3846,119 @@ void fastcall complete_all(struct completion *x)
}
EXPORT_SYMBOL(complete_all);
-void fastcall __sched wait_for_completion(struct completion *x)
+static inline long __sched
+do_wait_for_common(struct completion *x, long timeout, int state)
{
- might_sleep();
-
- spin_lock_irq(&x->wait.lock);
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(&x->wait, &wait);
do {
- __set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&x->wait.lock);
- schedule();
- spin_lock_irq(&x->wait.lock);
- } while (!x->done);
- __remove_wait_queue(&x->wait, &wait);
- }
- x->done--;
- spin_unlock_irq(&x->wait.lock);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-unsigned long fastcall __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
- might_sleep();
-
- spin_lock_irq(&x->wait.lock);
- if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
-
- wait.flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(&x->wait, &wait);
- do {
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ if (state == TASK_INTERRUPTIBLE &&
+ signal_pending(current)) {
+ __remove_wait_queue(&x->wait, &wait);
+ return -ERESTARTSYS;
+ }
+ __set_current_state(state);
spin_unlock_irq(&x->wait.lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&x->wait.lock);
if (!timeout) {
__remove_wait_queue(&x->wait, &wait);
- goto out;
+ return timeout;
}
} while (!x->done);
__remove_wait_queue(&x->wait, &wait);
}
x->done--;
-out:
- spin_unlock_irq(&x->wait.lock);
return timeout;
}
-EXPORT_SYMBOL(wait_for_completion_timeout);
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
{
- int ret = 0;
-
might_sleep();
spin_lock_irq(&x->wait.lock);
- if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
-
- wait.flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(&x->wait, &wait);
- do {
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- __remove_wait_queue(&x->wait, &wait);
- goto out;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&x->wait.lock);
- schedule();
- spin_lock_irq(&x->wait.lock);
- } while (!x->done);
- __remove_wait_queue(&x->wait, &wait);
- }
- x->done--;
-out:
+ timeout = do_wait_for_common(x, timeout, state);
spin_unlock_irq(&x->wait.lock);
+ return timeout;
+}
- return ret;
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+EXPORT_SYMBOL(wait_for_completion);
unsigned long fastcall __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
- unsigned long timeout)
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
- might_sleep();
-
- spin_lock_irq(&x->wait.lock);
- if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
-
- wait.flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(&x->wait, &wait);
- do {
- if (signal_pending(current)) {
- timeout = -ERESTARTSYS;
- __remove_wait_queue(&x->wait, &wait);
- goto out;
- }
- __set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&x->wait.lock);
- timeout = schedule_timeout(timeout);
- spin_lock_irq(&x->wait.lock);
- if (!timeout) {
- __remove_wait_queue(&x->wait, &wait);
- goto out;
- }
- } while (!x->done);
- __remove_wait_queue(&x->wait, &wait);
- }
- x->done--;
-out:
- spin_unlock_irq(&x->wait.lock);
- return timeout;
+ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+EXPORT_SYMBOL(wait_for_completion_timeout);
-static inline void
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+int __sched wait_for_completion_interruptible(struct completion *x)
{
- spin_lock_irqsave(&q->lock, *flags);
- __add_wait_queue(q, wait);
- spin_unlock(&q->lock);
+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+ if (t == -ERESTARTSYS)
+ return t;
+ return 0;
}
+EXPORT_SYMBOL(wait_for_completion_interruptible);
-static inline void
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+ unsigned long timeout)
{
- spin_lock_irq(&q->lock);
- __remove_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, *flags);
+ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
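
After this refactor the four public completion waiters differ only in the timeout and the task state, so they become thin wrappers around one common helper. A userspace sketch of the same consolidation pattern; wait_common(), fake_signal_pending() and the countdown loop are stand-ins for the real wait-queue machinery:

#include <stdio.h>

enum wait_state { WAIT_UNINTERRUPTIBLE, WAIT_INTERRUPTIBLE };

static int fake_signal_pending(void) { return 0; }	/* stand-in */

/* One helper carries the shared loop (a countdown stands in for
 * schedule_timeout() and the real wait queue)... */
static long wait_common(volatile int *done, long timeout, enum wait_state state)
{
	while (!*done && timeout > 0) {
		if (state == WAIT_INTERRUPTIBLE && fake_signal_pending())
			return -1;
		timeout--;
	}
	return timeout;
}

/* ...and the public entry points collapse into one-liners. */
static void wait_done(volatile int *done)
{
	wait_common(done, 1000000L, WAIT_UNINTERRUPTIBLE);
}

static long wait_done_timeout(volatile int *done, long timeout)
{
	return wait_common(done, timeout, WAIT_UNINTERRUPTIBLE);
}

static long wait_done_interruptible(volatile int *done)
{
	return wait_common(done, 1000000L, WAIT_INTERRUPTIBLE);
}

int main(void)
{
	volatile int done = 1;

	wait_done(&done);
	printf("remaining=%ld\n", wait_done_timeout(&done, 10));
	printf("interruptible=%ld\n", wait_done_interruptible(&done));
	return 0;
}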
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
+static long __sched
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
unsigned long flags;
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(state);
- sleep_on_head(q, &wait, &flags);
- schedule();
- sleep_on_tail(q, &wait, &flags);
+ spin_lock_irqsave(&q->lock, flags);
+ __add_wait_queue(q, &wait);
+ spin_unlock(&q->lock);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irq(&q->lock);
+ __remove_wait_queue(q, &wait);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return timeout;
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);
long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
- unsigned long flags;
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, current);
-
- current->state = TASK_INTERRUPTIBLE;
-
- sleep_on_head(q, &wait, &flags);
- timeout = schedule_timeout(timeout);
- sleep_on_tail(q, &wait, &flags);
-
- return timeout;
+ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
void __sched sleep_on(wait_queue_head_t *q)
{
- unsigned long flags;
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, current);
-
- current->state = TASK_UNINTERRUPTIBLE;
-
- sleep_on_head(q, &wait, &flags);
- schedule();
- sleep_on_tail(q, &wait, &flags);
+ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);
long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
- unsigned long flags;
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, current);
-
- current->state = TASK_UNINTERRUPTIBLE;
-
- sleep_on_head(q, &wait, &flags);
- timeout = schedule_timeout(timeout);
- sleep_on_tail(q, &wait, &flags);
-
- return timeout;
+ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
@@ -3946,7 +3977,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
void rt_mutex_setprio(struct task_struct *p, int prio)
{
unsigned long flags;
- int oldprio, on_rq;
+ int oldprio, on_rq, running;
struct rq *rq;
BUG_ON(prio < 0 || prio > MAX_PRIO);
@@ -3956,8 +3987,12 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
oldprio = p->prio;
on_rq = p->se.on_rq;
- if (on_rq)
+ running = task_running(rq, p);
+ if (on_rq) {
dequeue_task(rq, p, 0);
+ if (running)
+ p->sched_class->put_prev_task(rq, p);
+ }
if (rt_prio(prio))
p->sched_class = &rt_sched_class;
@@ -3967,13 +4002,15 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
p->prio = prio;
if (on_rq) {
+ if (running)
+ p->sched_class->set_curr_task(rq);
enqueue_task(rq, p, 0);
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
- if (task_running(rq, p)) {
+ if (running) {
if (p->prio > oldprio)
resched_task(rq->curr);
} else {
@@ -4137,9 +4174,9 @@ struct task_struct *idle_task(int cpu)
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static struct task_struct *find_process_by_pid(pid_t pid)
{
- return pid ? find_task_by_pid(pid) : current;
+ return pid ? find_task_by_vpid(pid) : current;
}
/* Actually do priority change: must hold rq lock. */
@@ -4179,7 +4216,7 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
int sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param)
{
- int retval, oldprio, oldpolicy = -1, on_rq;
+ int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
struct rq *rq;
@@ -4261,18 +4298,26 @@ recheck:
}
update_rq_clock(rq);
on_rq = p->se.on_rq;
- if (on_rq)
+ running = task_running(rq, p);
+ if (on_rq) {
deactivate_task(rq, p, 0);
+ if (running)
+ p->sched_class->put_prev_task(rq, p);
+ }
+
oldprio = p->prio;
__setscheduler(rq, p, policy, param->sched_priority);
+
if (on_rq) {
+ if (running)
+ p->sched_class->set_curr_task(rq);
activate_task(rq, p, 0);
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
- if (task_running(rq, p)) {
+ if (running) {
if (p->prio > oldprio)
resched_task(rq->curr);
} else {
@@ -4343,10 +4388,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
struct task_struct *p;
- int retval = -EINVAL;
+ int retval;
if (pid < 0)
- goto out_nounlock;
+ return -EINVAL;
retval = -ESRCH;
read_lock(&tasklist_lock);
@@ -4357,8 +4402,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
retval = p->policy;
}
read_unlock(&tasklist_lock);
-
-out_nounlock:
return retval;
}
@@ -4371,10 +4414,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
{
struct sched_param lp;
struct task_struct *p;
- int retval = -EINVAL;
+ int retval;
if (!param || pid < 0)
- goto out_nounlock;
+ return -EINVAL;
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
@@ -4394,7 +4437,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
*/
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
-out_nounlock:
return retval;
out_unlock:
@@ -4437,8 +4479,21 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
cpus_allowed = cpuset_cpus_allowed(p);
cpus_and(new_mask, new_mask, cpus_allowed);
+ again:
retval = set_cpus_allowed(p, new_mask);
+ if (!retval) {
+ cpus_allowed = cpuset_cpus_allowed(p);
+ if (!cpus_subset(new_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ new_mask = cpus_allowed;
+ goto again;
+ }
+ }
out_unlock:
put_task_struct(p);
mutex_unlock(&sched_hotcpu_mutex);
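
The retry loop added above handles a race with a concurrent cpuset update: after set_cpus_allowed() succeeds, the cpuset's mask is re-read, and if the new affinity is no longer a subset the request is clamped to the cpuset and retried. A userspace sketch of that shape with plain bitmasks (apply_mask() and current_cpuset are hypothetical stand-ins):

#include <stdio.h>

typedef unsigned long cpumask;			/* one bit per CPU (hypothetical) */

static cpumask current_cpuset = 0x0f;		/* may shrink "concurrently" */

/* Stand-in for set_cpus_allowed(): records the new mask and simulates a
 * racing cpuset update by shrinking current_cpuset. */
static int apply_mask(cpumask *effective, cpumask requested)
{
	*effective = requested;
	current_cpuset = 0x03;
	return 0;
}

int main(void)
{
	cpumask new_mask = 0x0c & current_cpuset;
	cpumask effective = 0, allowed;
	int ret;

again:
	ret = apply_mask(&effective, new_mask);
	if (!ret) {
		allowed = current_cpuset;
		if (effective & ~allowed) {	/* not a subset: we raced */
			new_mask = allowed;	/* clamp and retry */
			goto again;
		}
	}
	printf("effective mask = %#lx\n", (unsigned long)effective);
	return 0;
}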
@@ -4554,8 +4609,8 @@ asmlinkage long sys_sched_yield(void)
{
struct rq *rq = this_rq_lock();
- schedstat_inc(rq, yld_cnt);
- current->sched_class->yield_task(rq, current);
+ schedstat_inc(rq, yld_count);
+ current->sched_class->yield_task(rq);
/*
* Since we are going to call schedule() anyway, there's
@@ -4749,11 +4804,12 @@ asmlinkage
long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
{
struct task_struct *p;
- int retval = -EINVAL;
+ unsigned int time_slice;
+ int retval;
struct timespec t;
if (pid < 0)
- goto out_nounlock;
+ return -EINVAL;
retval = -ESRCH;
read_lock(&tasklist_lock);
@@ -4765,12 +4821,24 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- jiffies_to_timespec(p->policy == SCHED_FIFO ?
- 0 : static_prio_timeslice(p->static_prio), &t);
+ if (p->policy == SCHED_FIFO)
+ time_slice = 0;
+ else if (p->policy == SCHED_RR)
+ time_slice = DEF_TIMESLICE;
+ else {
+ struct sched_entity *se = &p->se;
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(p, &flags);
+ time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+ task_rq_unlock(rq, &flags);
+ }
read_unlock(&tasklist_lock);
+ jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-out_nounlock:
return retval;
+
out_unlock:
read_unlock(&tasklist_lock);
return retval;
@@ -4784,18 +4852,18 @@ static void show_task(struct task_struct *p)
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
- printk("%-13.13s %c", p->comm,
+ printk(KERN_INFO "%-13.13s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
- printk(" running ");
+ printk(KERN_CONT " running ");
else
- printk(" %08lx ", thread_saved_pc(p));
+ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
if (state == TASK_RUNNING)
- printk(" running task ");
+ printk(KERN_CONT " running task ");
else
- printk(" %016lx ", thread_saved_pc(p));
+ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
{
@@ -4805,7 +4873,8 @@ static void show_task(struct task_struct *p)
free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
- printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+ printk(KERN_CONT "%5lu %5d %6d\n", free,
+ task_pid_nr(p), task_pid_nr(p->parent));
if (state != TASK_RUNNING)
show_stack(p, NULL);
@@ -4899,32 +4968,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
-/*
- * Increase the granularity value when there are more CPUs,
- * because with more CPUs the 'effective latency' as visible
- * to users decreases. But the relationship is not linear,
- * so pick a second-best guess by going with the log2 of the
- * number of CPUs.
- *
- * This idea comes from the SD scheduler of Con Kolivas:
- */
-static inline void sched_init_granularity(void)
-{
- unsigned int factor = 1 + ilog2(num_online_cpus());
- const unsigned long limit = 100000000;
-
- sysctl_sched_min_granularity *= factor;
- if (sysctl_sched_min_granularity > limit)
- sysctl_sched_min_granularity = limit;
-
- sysctl_sched_latency *= factor;
- if (sysctl_sched_latency > limit)
- sysctl_sched_latency = limit;
-
- sysctl_sched_runtime_limit = sysctl_sched_latency;
- sysctl_sched_wakeup_granularity = sysctl_sched_min_granularity / 2;
-}
-
#ifdef CONFIG_SMP
/*
* This is how migration works:
@@ -5091,6 +5134,17 @@ wait_to_die:
}
#ifdef CONFIG_HOTPLUG_CPU
+
+static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
+{
+ int ret;
+
+ local_irq_disable();
+ ret = __migrate_task(p, src_cpu, dest_cpu);
+ local_irq_enable();
+ return ret;
+}
+
/*
 * Figure out where task on dead CPU should go, use force if necessary.
* NOTE: interrupts should be disabled by the caller
@@ -5102,35 +5156,42 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
struct rq *rq;
int dest_cpu;
-restart:
- /* On same node? */
- mask = node_to_cpumask(cpu_to_node(dead_cpu));
- cpus_and(mask, mask, p->cpus_allowed);
- dest_cpu = any_online_cpu(mask);
-
- /* On any allowed CPU? */
- if (dest_cpu == NR_CPUS)
- dest_cpu = any_online_cpu(p->cpus_allowed);
-
- /* No more Mr. Nice Guy. */
- if (dest_cpu == NR_CPUS) {
- rq = task_rq_lock(p, &flags);
- cpus_setall(p->cpus_allowed);
- dest_cpu = any_online_cpu(p->cpus_allowed);
- task_rq_unlock(rq, &flags);
+ do {
+ /* On same node? */
+ mask = node_to_cpumask(cpu_to_node(dead_cpu));
+ cpus_and(mask, mask, p->cpus_allowed);
+ dest_cpu = any_online_cpu(mask);
+
+ /* On any allowed CPU? */
+ if (dest_cpu == NR_CPUS)
+ dest_cpu = any_online_cpu(p->cpus_allowed);
+
+ /* No more Mr. Nice Guy. */
+ if (dest_cpu == NR_CPUS) {
+ cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+ /*
+ * Try to stay on the same cpuset, where the
+ * current cpuset may be a subset of all cpus.
+ * The cpuset_cpus_allowed_locked() variant of
+ * cpuset_cpus_allowed() will not block. It must be
+ * called within calls to cpuset_lock/cpuset_unlock.
+ */
+ rq = task_rq_lock(p, &flags);
+ p->cpus_allowed = cpus_allowed;
+ dest_cpu = any_online_cpu(p->cpus_allowed);
+ task_rq_unlock(rq, &flags);
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit())
- printk(KERN_INFO "process %d (%s) no "
- "longer affine to cpu%d\n",
- p->pid, p->comm, dead_cpu);
- }
- if (!__migrate_task(p, dead_cpu, dest_cpu))
- goto restart;
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit())
+ printk(KERN_INFO "process %d (%s) no "
+ "longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, dead_cpu);
+ }
+ } while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
/*
@@ -5158,7 +5219,7 @@ static void migrate_live_tasks(int src_cpu)
{
struct task_struct *p, *t;
- write_lock_irq(&tasklist_lock);
+ read_lock(&tasklist_lock);
do_each_thread(t, p) {
if (p == current)
@@ -5168,7 +5229,21 @@ static void migrate_live_tasks(int src_cpu)
move_task_off_dead_cpu(src_cpu, p);
} while_each_thread(t, p);
- write_unlock_irq(&tasklist_lock);
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static void activate_idle_task(struct task_struct *p, struct rq *rq)
+{
+ update_rq_clock(rq);
+
+ if (p->state == TASK_UNINTERRUPTIBLE)
+ rq->nr_uninterruptible--;
+
+ enqueue_task(rq, p, 0);
+ inc_nr_running(p, rq);
}
/*
@@ -5221,7 +5296,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
struct rq *rq = cpu_rq(dead_cpu);
/* Must be exiting, otherwise would be on tasklist. */
- BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
+ BUG_ON(!p->exit_state);
/* Cannot have done final schedule yet: would have vanished. */
BUG_ON(p->state == TASK_DEAD);
@@ -5232,11 +5307,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is
* fine.
- * NOTE: interrupts should be left disabled --dev@
*/
- spin_unlock(&rq->lock);
+ spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -5283,14 +5357,32 @@ static struct ctl_table sd_ctl_root[] = {
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
struct ctl_table *entry =
- kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL);
-
- BUG_ON(!entry);
- memset(entry, 0, n * sizeof(struct ctl_table));
+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
return entry;
}
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+ struct ctl_table *entry;
+
+ /*
+ * In the intermediate directories, both the child directory and
+ * procname are dynamically allocated and could fail but the mode
+ * will always be set. In the lowest directory the names are
+ * static strings and all have proc handlers.
+ */
+ for (entry = *tablep; entry->mode; entry++) {
+ if (entry->child)
+ sd_free_ctl_entry(&entry->child);
+ if (entry->proc_handler == NULL)
+ kfree(entry->procname);
+ }
+
+ kfree(*tablep);
+ *tablep = NULL;
+}
+
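
sd_free_ctl_entry() walks a dynamically built ctl_table tree: mode == 0 terminates an array, child directories are freed recursively, and procname is freed only for the intermediate entries whose names were kstrdup'd (those without a proc handler). A cut-down, runnable userspace version of the same walk; struct tbl and free_tbl() are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Cut-down ctl_table: mode == 0 terminates an array, child points to a
 * nested, separately allocated array. */
struct tbl {
	char *name;
	int mode;
	struct tbl *child;
	void *handler;		/* non-NULL => name is a static string */
};

static void free_tbl(struct tbl **tablep)
{
	struct tbl *entry;

	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			free_tbl(&entry->child);
		if (entry->handler == NULL)
			free(entry->name);	/* dynamically allocated name */
	}
	free(*tablep);
	*tablep = NULL;
}

int main(void)
{
	struct tbl *leaf = calloc(2, sizeof(*leaf));	/* entry + terminator */
	struct tbl *dir = calloc(2, sizeof(*dir));

	leaf[0] = (struct tbl){ .name = "min_interval", .mode = 0644,
				.handler = (void *)1 };
	dir[0]  = (struct tbl){ .name = strdup("cpu0"), .mode = 0555,
				.child = leaf };
	free_tbl(&dir);
	printf("freed: %p\n", (void *)dir);	/* prints (nil) */
	return 0;
}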
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
@@ -5306,7 +5398,10 @@ set_table_entry(struct ctl_table *entry,
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
- struct ctl_table *table = sd_alloc_ctl_entry(14);
+ struct ctl_table *table = sd_alloc_ctl_entry(12);
+
+ if (table == NULL)
+ return NULL;
set_table_entry(&table[0], "min_interval", &sd->min_interval,
sizeof(long), 0644, proc_doulongvec_minmax);
@@ -5326,16 +5421,17 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[10], "cache_nice_tries",
+ set_table_entry(&table[9], "cache_nice_tries",
&sd->cache_nice_tries,
sizeof(int), 0644, proc_dointvec_minmax);
- set_table_entry(&table[12], "flags", &sd->flags,
+ set_table_entry(&table[10], "flags", &sd->flags,
sizeof(int), 0644, proc_dointvec_minmax);
+ /* &table[11] is terminator */
return table;
}
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
@@ -5345,6 +5441,8 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
for_each_domain(cpu, sd)
domain_num++;
entry = table = sd_alloc_ctl_entry(domain_num + 1);
+ if (table == NULL)
+ return NULL;
i = 0;
for_each_domain(cpu, sd) {
@@ -5359,24 +5457,38 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
}
static struct ctl_table_header *sd_sysctl_header;
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_online_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
char buf[32];
+ if (entry == NULL)
+ return;
+
sd_ctl_dir[0].child = entry;
- for (i = 0; i < cpu_num; i++, entry++) {
+ for_each_online_cpu(i) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
entry->child = sd_alloc_ctl_cpu_table(i);
+ entry++;
}
sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
+
+static void unregister_sched_domain_sysctl(void)
+{
+ unregister_sysctl_table(sd_sysctl_header);
+ sd_sysctl_header = NULL;
+ sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
#else
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
+{
+}
+static void unregister_sched_domain_sysctl(void)
{
}
#endif
@@ -5431,19 +5543,21 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
migrate_live_tasks(cpu);
rq = cpu_rq(cpu);
kthread_stop(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- rq = task_rq_lock(rq->idle, &flags);
+ spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- task_rq_unlock(rq, &flags);
+ spin_unlock_irq(&rq->lock);
+ cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -5498,8 +5612,7 @@ int __init migration_init(void)
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
-#undef SCHED_DOMAIN_DEBUG
-#ifdef SCHED_DOMAIN_DEBUG
+#ifdef CONFIG_SCHED_DEBUG
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
int level = 0;
@@ -5554,29 +5667,32 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
}
if (!group->__cpu_power) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
+ break;
}
if (!cpus_weight(group->cpumask)) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
+ break;
}
if (cpus_intersects(groupmask, group->cpumask)) {
- printk("\n");
+ printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
+ break;
}
cpus_or(groupmask, groupmask, group->cpumask);
cpumask_scnprintf(str, NR_CPUS, group->cpumask);
- printk(" %s", str);
+ printk(KERN_CONT " %s", str);
group = group->next;
} while (group != sd->groups);
- printk("\n");
+ printk(KERN_CONT "\n");
if (!cpus_equal(sd->span, groupmask))
printk(KERN_ERR "ERROR: groups don't span "
@@ -5700,7 +5816,7 @@ static int __init isolated_cpu_setup(char *str)
return 1;
}
-__setup ("isolcpus=", isolated_cpu_setup);
+__setup("isolcpus=", isolated_cpu_setup);
/*
* init_sched_build_groups takes the cpumask we wish to span, and a pointer
@@ -5856,7 +5972,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
struct sched_group **sg)
{
int group;
- cpumask_t mask = cpu_sibling_map[cpu];
+ cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
if (sg)
@@ -5885,7 +6001,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_t mask = cpu_sibling_map[cpu];
+ cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
cpus_and(mask, mask, *cpu_map);
group = first_cpu(mask);
#else
@@ -5929,24 +6045,23 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
if (!sg)
return;
-next_sg:
- for_each_cpu_mask(j, sg->cpumask) {
- struct sched_domain *sd;
+ do {
+ for_each_cpu_mask(j, sg->cpumask) {
+ struct sched_domain *sd;
- sd = &per_cpu(phys_domains, j);
- if (j != first_cpu(sd->groups->cpumask)) {
- /*
- * Only add "power" once for each
- * physical package.
- */
- continue;
- }
+ sd = &per_cpu(phys_domains, j);
+ if (j != first_cpu(sd->groups->cpumask)) {
+ /*
+ * Only add "power" once for each
+ * physical package.
+ */
+ continue;
+ }
- sg_inc_cpu_power(sg, sd->groups->__cpu_power);
- }
- sg = sg->next;
- if (sg != group_head)
- goto next_sg;
+ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+ }
+ sg = sg->next;
+ } while (sg != group_head);
}
#endif
@@ -6057,7 +6172,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
+ sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -6120,7 +6235,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
p = sd;
sd = &per_cpu(cpu_domains, i);
*sd = SD_SIBLING_INIT;
- sd->span = cpu_sibling_map[i];
+ sd->span = per_cpu(cpu_sibling_map, i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
@@ -6131,7 +6246,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_sibling_map = cpu_sibling_map[i];
+ cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
if (i != first_cpu(this_sibling_map))
continue;
@@ -6293,24 +6408,31 @@ error:
return -ENOMEM;
#endif
}
+
+static cpumask_t *doms_cur; /* current sched domains */
+static int ndoms_cur; /* number of sched domains in 'doms_cur' */
+
+/*
+ * Special case: If a kmalloc of a doms_cur partition (array of
+ * cpumask_t) fails, then fallback to a single sched domain,
+ * as determined by the single cpumask_t fallback_doms.
+ */
+static cpumask_t fallback_doms;
+
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * For now this just excludes isolated cpus, but could be used to
+ * exclude other special cases in the future.
*/
static int arch_init_sched_domains(const cpumask_t *cpu_map)
{
- cpumask_t cpu_default_map;
- int err;
-
- /*
- * Setup mask for cpus without special case scheduling requirements.
- * For now this just excludes isolated cpus, but could be used to
- * exclude other special cases in the future.
- */
- cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
-
- err = build_sched_domains(&cpu_default_map);
-
- return err;
+ ndoms_cur = 1;
+ doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!doms_cur)
+ doms_cur = &fallback_doms;
+ cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+ register_sched_domain_sysctl();
+ return build_sched_domains(doms_cur);
}
static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
@@ -6326,6 +6448,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
{
int i;
+ unregister_sched_domain_sysctl();
+
for_each_cpu_mask(i, *cpu_map)
cpu_attach_domain(NULL, i);
synchronize_sched();
@@ -6333,30 +6457,65 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
}
/*
- * Partition sched domains as specified by the cpumasks below.
- * This attaches all cpus from the cpumasks to the NULL domain,
- * waits for a RCU quiescent period, recalculates sched
- * domain information and then attaches them back to the
- * correct sched domains
+ * Partition sched domains as specified by the 'ndoms_new'
+ * cpumasks in the array doms_new[] of cpumasks. This compares
+ * doms_new[] to the current sched domain partitioning, doms_cur[].
+ * It destroys each deleted domain and builds each new domain.
+ *
+ * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
+ * The masks don't intersect (don't overlap.) We should setup one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
+ * current 'doms_cur' domains and in the new 'doms_new', we can leave
+ * it as it is.
+ *
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
+ * failed the kmalloc call, then it can pass in doms_new == NULL,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms'.
+ *
* Call with hotplug lock held
*/
-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
{
- cpumask_t change_map;
- int err = 0;
+ int i, j;
- cpus_and(*partition1, *partition1, cpu_online_map);
- cpus_and(*partition2, *partition2, cpu_online_map);
- cpus_or(change_map, *partition1, *partition2);
+ if (doms_new == NULL) {
+ ndoms_new = 1;
+ doms_new = &fallback_doms;
+ cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+ }
- /* Detach sched domains from all of the affected cpus */
- detach_destroy_domains(&change_map);
- if (!cpus_empty(*partition1))
- err = build_sched_domains(partition1);
- if (!err && !cpus_empty(*partition2))
- err = build_sched_domains(partition2);
+ /* Destroy deleted domains */
+ for (i = 0; i < ndoms_cur; i++) {
+ for (j = 0; j < ndoms_new; j++) {
+ if (cpus_equal(doms_cur[i], doms_new[j]))
+ goto match1;
+ }
+ /* no match - a current sched domain not in new doms_new[] */
+ detach_destroy_domains(doms_cur + i);
+match1:
+ ;
+ }
- return err;
+ /* Build new domains */
+ for (i = 0; i < ndoms_new; i++) {
+ for (j = 0; j < ndoms_cur; j++) {
+ if (cpus_equal(doms_new[i], doms_cur[j]))
+ goto match2;
+ }
+ /* no match - add a new doms_new */
+ build_sched_domains(doms_new + i);
+match2:
+ ;
+ }
+
+ /* Remember the new sched domains */
+ if (doms_cur != &fallback_doms)
+ kfree(doms_cur);
+ doms_cur = doms_new;
+ ndoms_cur = ndoms_new;
}
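
The new partition_sched_domains() computes a two-way difference between doms_cur[] and doms_new[]: domains present only in the old set are destroyed, domains present only in the new set are built, and exact matches are left untouched. A small userspace sketch of that comparison, with one unsigned long standing in for a cpumask_t and printf standing in for detach_destroy_domains()/build_sched_domains():

#include <stdio.h>

typedef unsigned long mask_t;

static void repartition(const mask_t *cur, int ncur,
			const mask_t *want, int nwant)
{
	int i, j;

	/* destroy old domains with no match in the new set */
	for (i = 0; i < ncur; i++) {
		for (j = 0; j < nwant; j++)
			if (cur[i] == want[j])
				goto keep1;
		printf("destroy %#lx\n", cur[i]);
keep1:		;
	}
	/* build new domains with no match in the old set */
	for (i = 0; i < nwant; i++) {
		for (j = 0; j < ncur; j++)
			if (want[i] == cur[j])
				goto keep2;
		printf("build   %#lx\n", want[i]);
keep2:		;
	}
}

int main(void)
{
	mask_t cur[]  = { 0x0f, 0xf0 };
	mask_t want[] = { 0x0f, 0xff00 };

	repartition(cur, 2, want, 2);	/* keeps 0x0f, destroys 0xf0, builds 0xff00 */
	return 0;
}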
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -6487,17 +6646,13 @@ void __init sched_init_smp(void)
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
- init_sched_domain_sysctl();
-
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
- sched_init_granularity();
}
#else
void __init sched_init_smp(void)
{
- sched_init_granularity();
}
#endif /* CONFIG_SMP */
@@ -6511,28 +6666,20 @@ int in_sched_functions(unsigned long addr)
&& addr < (unsigned long)__sched_text_end);
}
-static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
- cfs_rq->fair_clock = 1;
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
#endif
+ cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}
void __init sched_init(void)
{
- u64 now = sched_clock();
int highest_cpu = 0;
int i, j;
- /*
- * Link up the scheduling class hierarchy:
- */
- rt_sched_class.next = &fair_sched_class;
- fair_sched_class.next = &idle_sched_class;
- idle_sched_class.next = NULL;
-
for_each_possible_cpu(i) {
struct rt_prio_array *array;
struct rq *rq;
@@ -6545,10 +6692,28 @@ void __init sched_init(void)
init_cfs_rq(&rq->cfs, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
- list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+ {
+ struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i);
+ struct sched_entity *se =
+ &per_cpu(init_sched_entity, i);
+
+ init_cfs_rq_p[i] = cfs_rq;
+ init_cfs_rq(cfs_rq, rq);
+ cfs_rq->tg = &init_task_group;
+ list_add(&cfs_rq->leaf_cfs_rq_list,
+ &rq->leaf_cfs_rq_list);
+
+ init_sched_entity_p[i] = se;
+ se->cfs_rq = &rq->cfs;
+ se->my_q = cfs_rq;
+ se->load.weight = init_task_group_load;
+ se->load.inv_weight =
+ div64_64(1ULL<<32, init_task_group_load);
+ se->parent = NULL;
+ }
+ init_task_group.shares = init_task_group_load;
+ spin_lock_init(&init_task_group.lock);
#endif
- rq->ls.load_update_last = now;
- rq->ls.load_update_start = now;
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
@@ -6633,26 +6798,40 @@ EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
+static void normalize_task(struct rq *rq, struct task_struct *p)
+{
+ int on_rq;
+ update_rq_clock(rq);
+ on_rq = p->se.on_rq;
+ if (on_rq)
+ deactivate_task(rq, p, 0);
+ __setscheduler(rq, p, SCHED_NORMAL, 0);
+ if (on_rq) {
+ activate_task(rq, p, 0);
+ resched_task(rq->curr);
+ }
+}
+
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
unsigned long flags;
struct rq *rq;
- int on_rq;
read_lock_irq(&tasklist_lock);
do_each_thread(g, p) {
- p->se.fair_key = 0;
- p->se.wait_runtime = 0;
+ /*
+ * Only normalize user tasks:
+ */
+ if (!p->mm)
+ continue;
+
p->se.exec_start = 0;
- p->se.wait_start_fair = 0;
- p->se.sleep_start_fair = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
p->se.sleep_start = 0;
p->se.block_start = 0;
#endif
- task_rq(p)->cfs.fair_clock = 0;
task_rq(p)->clock = 0;
if (!rt_task(p)) {
@@ -6667,26 +6846,9 @@ void normalize_rt_tasks(void)
spin_lock_irqsave(&p->pi_lock, flags);
rq = __task_rq_lock(p);
-#ifdef CONFIG_SMP
- /*
- * Do not touch the migration thread:
- */
- if (p == rq->migration_thread)
- goto out_unlock;
-#endif
- update_rq_clock(rq);
- on_rq = p->se.on_rq;
- if (on_rq)
- deactivate_task(rq, p, 0);
- __setscheduler(rq, p, SCHED_NORMAL, 0);
- if (on_rq) {
- activate_task(rq, p, 0);
- resched_task(rq->curr);
- }
-#ifdef CONFIG_SMP
- out_unlock:
-#endif
+ normalize_task(rq, p);
+
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
} while_each_thread(g, p);
@@ -6739,3 +6901,314 @@ void set_curr_task(int cpu, struct task_struct *p)
}
#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(void)
+{
+ struct task_group *tg;
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se;
+ struct rq *rq;
+ int i;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL);
+ if (!tg->cfs_rq)
+ goto err;
+ tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL);
+ if (!tg->se)
+ goto err;
+
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
+ cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL,
+ cpu_to_node(i));
+ if (!cfs_rq)
+ goto err;
+
+ se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL,
+ cpu_to_node(i));
+ if (!se)
+ goto err;
+
+ memset(cfs_rq, 0, sizeof(struct cfs_rq));
+ memset(se, 0, sizeof(struct sched_entity));
+
+ tg->cfs_rq[i] = cfs_rq;
+ init_cfs_rq(cfs_rq, rq);
+ cfs_rq->tg = tg;
+
+ tg->se[i] = se;
+ se->cfs_rq = &rq->cfs;
+ se->my_q = cfs_rq;
+ se->load.weight = NICE_0_LOAD;
+ se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD);
+ se->parent = NULL;
+ }
+
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+ cfs_rq = tg->cfs_rq[i];
+ list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+ }
+
+ tg->shares = NICE_0_LOAD;
+ spin_lock_init(&tg->lock);
+
+ return tg;
+
+err:
+ for_each_possible_cpu(i) {
+ if (tg->cfs_rq)
+ kfree(tg->cfs_rq[i]);
+ if (tg->se)
+ kfree(tg->se[i]);
+ }
+ kfree(tg->cfs_rq);
+ kfree(tg->se);
+ kfree(tg);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/* rcu callback to free various structures associated with a task group */
+static void free_sched_group(struct rcu_head *rhp)
+{
+ struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
+ struct task_group *tg = cfs_rq->tg;
+ struct sched_entity *se;
+ int i;
+
+ /* now it should be safe to free those cfs_rqs */
+ for_each_possible_cpu(i) {
+ cfs_rq = tg->cfs_rq[i];
+ kfree(cfs_rq);
+
+ se = tg->se[i];
+ kfree(se);
+ }
+
+ kfree(tg->cfs_rq);
+ kfree(tg->se);
+ kfree(tg);
+}
+
+/* Destroy runqueue etc associated with a task group */
+void sched_destroy_group(struct task_group *tg)
+{
+ struct cfs_rq *cfs_rq;
+ int i;
+
+ for_each_possible_cpu(i) {
+ cfs_rq = tg->cfs_rq[i];
+ list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+ }
+
+ cfs_rq = tg->cfs_rq[0];
+
+	/* wait for possible concurrent references to cfs_rqs to complete */
+ call_rcu(&cfs_rq->rcu, free_sched_group);
+}
+
+/* change task's runqueue when it moves between groups.
+ * The caller of this function should have put the task in its new group
+ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
+ * reflect its new group.
+ */
+void sched_move_task(struct task_struct *tsk)
+{
+ int on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+
+ rq = task_rq_lock(tsk, &flags);
+
+ if (tsk->sched_class != &fair_sched_class)
+ goto done;
+
+ update_rq_clock(rq);
+
+ running = task_running(rq, tsk);
+ on_rq = tsk->se.on_rq;
+
+ if (on_rq) {
+ dequeue_task(rq, tsk, 0);
+ if (unlikely(running))
+ tsk->sched_class->put_prev_task(rq, tsk);
+ }
+
+ set_task_cfs_rq(tsk);
+
+ if (on_rq) {
+ if (unlikely(running))
+ tsk->sched_class->set_curr_task(rq);
+ enqueue_task(rq, tsk, 0);
+ }
+
+done:
+ task_rq_unlock(rq, &flags);
+}
+
+static void set_se_shares(struct sched_entity *se, unsigned long shares)
+{
+ struct cfs_rq *cfs_rq = se->cfs_rq;
+ struct rq *rq = cfs_rq->rq;
+ int on_rq;
+
+ spin_lock_irq(&rq->lock);
+
+ on_rq = se->on_rq;
+ if (on_rq)
+ dequeue_entity(cfs_rq, se, 0);
+
+ se->load.weight = shares;
+ se->load.inv_weight = div64_64((1ULL<<32), shares);
+
+ if (on_rq)
+ enqueue_entity(cfs_rq, se, 0);
+
+ spin_unlock_irq(&rq->lock);
+}
+
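
set_se_shares() stores both the weight and its 32.32 fixed-point reciprocal (1<<32 divided by the weight), so hot paths can divide by the weight with a multiply and a shift instead of a 64-bit division. A quick userspace check of that identity (the variable names here are local to the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t weight = 1024;				/* NICE_0_LOAD-sized weight */
	uint64_t inv_weight = (1ULL << 32) / weight;	/* as div64_64(1<<32, w) */
	uint64_t delta = 3000000;			/* some nanosecond delta */

	uint64_t exact  = delta / weight;
	uint64_t approx = (delta * inv_weight) >> 32;	/* multiply + shift */

	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}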
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+ int i;
+
+ spin_lock(&tg->lock);
+ if (tg->shares == shares)
+ goto done;
+
+ tg->shares = shares;
+ for_each_possible_cpu(i)
+ set_se_shares(tg->se[i], shares);
+
+done:
+ spin_unlock(&tg->lock);
+ return 0;
+}
+
+unsigned long sched_group_shares(struct task_group *tg)
+{
+ return tg->shares;
+}
+
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+#ifdef CONFIG_FAIR_CGROUP_SCHED
+
+/* return corresponding task_group object of a cgroup */
+static inline struct task_group *cgroup_tg(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id),
+ struct task_group, css);
+}
+
+static struct cgroup_subsys_state *
+cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct task_group *tg;
+
+ if (!cont->parent) {
+ /* This is early initialization for the top cgroup */
+ init_task_group.css.cgroup = cont;
+ return &init_task_group.css;
+ }
+
+ /* we support only 1-level deep hierarchical scheduler atm */
+ if (cont->parent->parent)
+ return ERR_PTR(-EINVAL);
+
+ tg = sched_create_group();
+ if (IS_ERR(tg))
+ return ERR_PTR(-ENOMEM);
+
+ /* Bind the cgroup to task_group object we just created */
+ tg->css.cgroup = cont;
+
+ return &tg->css;
+}
+
+static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct task_group *tg = cgroup_tg(cont);
+
+ sched_destroy_group(tg);
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
+ struct cgroup *cont, struct task_struct *tsk)
+{
+ /* We don't support RT-tasks being in separate groups */
+ if (tsk->sched_class != &fair_sched_class)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+ struct cgroup *old_cont, struct task_struct *tsk)
+{
+ sched_move_task(tsk);
+}
+
+static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype,
+ struct file *file, const char __user *userbuf,
+ size_t nbytes, loff_t *ppos)
+{
+ unsigned long shareval;
+ struct task_group *tg = cgroup_tg(cont);
+ char buffer[2*sizeof(unsigned long) + 1];
+ int rc;
+
+ if (nbytes > 2*sizeof(unsigned long)) /* safety check */
+ return -E2BIG;
+
+ if (copy_from_user(buffer, userbuf, nbytes))
+ return -EFAULT;
+
+ buffer[nbytes] = 0; /* nul-terminate */
+ shareval = simple_strtoul(buffer, NULL, 10);
+
+ rc = sched_group_set_shares(tg, shareval);
+
+ return (rc < 0 ? rc : nbytes);
+}
+
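
cpu_shares_write() copies at most 2*sizeof(unsigned long) bytes from userspace, nul-terminates the buffer, and parses a decimal with simple_strtoul(). A userspace sketch of the same bounded parse, with memcpy/strtoul standing in for copy_from_user()/simple_strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long parse_shares(const char *userbuf, size_t nbytes)
{
	char buffer[2 * sizeof(unsigned long) + 1];

	if (nbytes > 2 * sizeof(unsigned long))	/* reject oversized writes */
		return -1;
	memcpy(buffer, userbuf, nbytes);
	buffer[nbytes] = '\0';			/* nul-terminate before parsing */
	return (long)strtoul(buffer, NULL, 10);
}

int main(void)
{
	printf("%ld\n", parse_shares("2048", 4));			/* 2048 */
	printf("%ld\n", parse_shares("123456789012345678901", 21));	/* -1: too long */
	return 0;
}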
+static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft)
+{
+ struct task_group *tg = cgroup_tg(cont);
+
+ return (u64) tg->shares;
+}
+
+static struct cftype cpu_shares = {
+ .name = "shares",
+ .read_uint = cpu_shares_read_uint,
+ .write = cpu_shares_write,
+};
+
+static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_file(cont, ss, &cpu_shares);
+}
+
+struct cgroup_subsys cpu_cgroup_subsys = {
+ .name = "cpu",
+ .create = cpu_cgroup_create,
+ .destroy = cpu_cgroup_destroy,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .populate = cpu_cgroup_populate,
+ .subsys_id = cpu_cgroup_subsys_id,
+ .early_init = 1,
+};
+
+#endif /* CONFIG_FAIR_CGROUP_SCHED */
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index c3ee38bd3426..e6fb392e5164 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -28,6 +28,31 @@
printk(x); \
} while (0)
+/*
+ * Ease the printing of nsec fields:
+ */
+static long long nsec_high(long long nsec)
+{
+ if (nsec < 0) {
+ nsec = -nsec;
+ do_div(nsec, 1000000);
+ return -nsec;
+ }
+ do_div(nsec, 1000000);
+
+ return nsec;
+}
+
+static unsigned long nsec_low(long long nsec)
+{
+ if (nsec < 0)
+ nsec = -nsec;
+
+ return do_div(nsec, 1000000);
+}
+
+#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
+
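
SPLIT_NS() prints a signed nanosecond count as a millisecond part plus a six-digit remainder, handling negative values by dividing the magnitude and restoring the sign. A userspace equivalent using plain 64-bit division instead of do_div() (ns_high/ns_low are local names for the example):

#include <stdio.h>

static long long ns_high(long long nsec)
{
	return nsec < 0 ? -(-nsec / 1000000) : nsec / 1000000;
}

static unsigned long ns_low(long long nsec)
{
	if (nsec < 0)
		nsec = -nsec;
	return (unsigned long)(nsec % 1000000);
}

int main(void)
{
	long long v = -1234567890LL;			/* about -1.234 s */

	printf("%lld.%06lu\n", ns_high(v), ns_low(v));	/* -1234.567890 */
	return 0;
}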
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
@@ -36,23 +61,19 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
else
SEQ_printf(m, " ");
- SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
+ SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
p->comm, p->pid,
- (long long)p->se.fair_key,
- (long long)(p->se.fair_key - rq->cfs.fair_clock),
- (long long)p->se.wait_runtime,
+ SPLIT_NS(p->se.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
#ifdef CONFIG_SCHEDSTATS
- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
- (long long)p->se.sum_exec_runtime,
- (long long)p->se.sum_wait_runtime,
- (long long)p->se.sum_sleep_runtime,
- (long long)p->se.wait_runtime_overruns,
- (long long)p->se.wait_runtime_underruns);
+ SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
+ SPLIT_NS(p->se.vruntime),
+ SPLIT_NS(p->se.sum_exec_runtime),
+ SPLIT_NS(p->se.sum_sleep_runtime));
#else
- SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
- 0LL, 0LL, 0LL, 0LL, 0LL);
+ SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld\n",
+ 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
}
@@ -62,14 +83,10 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
SEQ_printf(m,
"\nrunnable tasks:\n"
- " task PID tree-key delta waiting"
- " switches prio"
- " sum-exec sum-wait sum-sleep"
- " wait-overrun wait-underrun\n"
- "------------------------------------------------------------------"
- "----------------"
- "------------------------------------------------"
- "--------------------------------\n");
+ " task PID tree-key switches prio"
+ " exec-runtime sum-exec sum-sleep\n"
+ "------------------------------------------------------"
+ "----------------------------------------------------\n");
read_lock_irq(&tasklist_lock);
@@ -83,45 +100,48 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
read_unlock_irq(&tasklist_lock);
}
-static void
-print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 wait_runtime_rq_sum = 0;
- struct task_struct *p;
- struct rb_node *curr;
- unsigned long flags;
+ s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
+ spread, rq0_min_vruntime, spread0;
struct rq *rq = &per_cpu(runqueues, cpu);
+ struct sched_entity *last;
+ unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
- curr = first_fair(cfs_rq);
- while (curr) {
- p = rb_entry(curr, struct task_struct, se.run_node);
- wait_runtime_rq_sum += p->se.wait_runtime;
-
- curr = rb_next(curr);
- }
- spin_unlock_irqrestore(&rq->lock, flags);
-
- SEQ_printf(m, " .%-30s: %Ld\n", "wait_runtime_rq_sum",
- (long long)wait_runtime_rq_sum);
-}
-
-void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
-{
SEQ_printf(m, "\ncfs_rq\n");
-#define P(x) \
- SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
-
- P(fair_clock);
- P(exec_clock);
- P(wait_runtime);
- P(wait_runtime_overruns);
- P(wait_runtime_underruns);
- P(sleeper_bonus);
-#undef P
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
+ SPLIT_NS(cfs_rq->exec_clock));
- print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
+ spin_lock_irqsave(&rq->lock, flags);
+ if (cfs_rq->rb_leftmost)
+ MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
+ last = __pick_last_entity(cfs_rq);
+ if (last)
+ max_vruntime = last->vruntime;
+ min_vruntime = rq->cfs.min_vruntime;
+ rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
+ spin_unlock_irqrestore(&rq->lock, flags);
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
+ SPLIT_NS(MIN_vruntime));
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
+ SPLIT_NS(min_vruntime));
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
+ SPLIT_NS(max_vruntime));
+ spread = max_vruntime - MIN_vruntime;
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
+ SPLIT_NS(spread));
+ spread0 = min_vruntime - rq0_min_vruntime;
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
+ SPLIT_NS(spread0));
+ SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
+ SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
+#ifdef CONFIG_SCHEDSTATS
+ SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
+ rq->bkl_count);
+#endif
+ SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
+ cfs_rq->nr_spread_over);
}
static void print_cpu(struct seq_file *m, int cpu)
@@ -141,31 +161,32 @@ static void print_cpu(struct seq_file *m, int cpu)
#define P(x) \
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))
+#define PN(x) \
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
P(nr_running);
SEQ_printf(m, " .%-30s: %lu\n", "load",
- rq->ls.load.weight);
- P(ls.delta_fair);
- P(ls.delta_exec);
+ rq->load.weight);
P(nr_switches);
P(nr_load_updates);
P(nr_uninterruptible);
SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
- P(next_balance);
+ PN(next_balance);
P(curr->pid);
- P(clock);
- P(idle_clock);
- P(prev_clock_raw);
+ PN(clock);
+ PN(idle_clock);
+ PN(prev_clock_raw);
P(clock_warps);
P(clock_overflows);
P(clock_deep_idle_events);
- P(clock_max_delta);
+ PN(clock_max_delta);
P(cpu_load[0]);
P(cpu_load[1]);
P(cpu_load[2]);
P(cpu_load[3]);
P(cpu_load[4]);
#undef P
+#undef PN
print_cfs_stats(m, cpu);
@@ -177,12 +198,25 @@ static int sched_debug_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
- SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);
+ SEQ_printf(m, "now at %Lu.%06ld msecs\n", SPLIT_NS(now));
+
+#define P(x) \
+ SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
+#define PN(x) \
+ SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+ PN(sysctl_sched_latency);
+ PN(sysctl_sched_nr_latency);
+ PN(sysctl_sched_wakeup_granularity);
+ PN(sysctl_sched_batch_wakeup_granularity);
+ PN(sysctl_sched_child_runs_first);
+ P(sysctl_sched_features);
+#undef PN
+#undef P
for_each_online_cpu(cpu)
print_cpu(m, cpu);
@@ -202,7 +236,7 @@ static int sched_debug_open(struct inode *inode, struct file *filp)
return single_open(filp, sched_debug_show, NULL);
}
-static struct file_operations sched_debug_fops = {
+static const struct file_operations sched_debug_fops = {
.open = sched_debug_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -226,6 +260,7 @@ __initcall(init_sched_debug_procfs);
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
+ unsigned long nr_switches;
unsigned long flags;
int num_threads = 1;
@@ -237,41 +272,89 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
rcu_read_unlock();
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
- SEQ_printf(m, "----------------------------------------------\n");
+ SEQ_printf(m,
+ "---------------------------------------------------------\n");
+#define __P(F) \
+ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
- SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)
+ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
+#define __PN(F) \
+ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN(F) \
+ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
- P(se.wait_runtime);
- P(se.wait_start_fair);
- P(se.exec_start);
- P(se.sleep_start_fair);
- P(se.sum_exec_runtime);
+ PN(se.exec_start);
+ PN(se.vruntime);
+ PN(se.sum_exec_runtime);
+
+ nr_switches = p->nvcsw + p->nivcsw;
#ifdef CONFIG_SCHEDSTATS
- P(se.wait_start);
- P(se.sleep_start);
- P(se.block_start);
- P(se.sleep_max);
- P(se.block_max);
- P(se.exec_max);
- P(se.wait_max);
- P(se.wait_runtime_overruns);
- P(se.wait_runtime_underruns);
- P(se.sum_wait_runtime);
+ PN(se.wait_start);
+ PN(se.sleep_start);
+ PN(se.block_start);
+ PN(se.sleep_max);
+ PN(se.block_max);
+ PN(se.exec_max);
+ PN(se.slice_max);
+ PN(se.wait_max);
+ P(sched_info.bkl_count);
+ P(se.nr_migrations);
+ P(se.nr_migrations_cold);
+ P(se.nr_failed_migrations_affine);
+ P(se.nr_failed_migrations_running);
+ P(se.nr_failed_migrations_hot);
+ P(se.nr_forced_migrations);
+ P(se.nr_forced2_migrations);
+ P(se.nr_wakeups);
+ P(se.nr_wakeups_sync);
+ P(se.nr_wakeups_migrate);
+ P(se.nr_wakeups_local);
+ P(se.nr_wakeups_remote);
+ P(se.nr_wakeups_affine);
+ P(se.nr_wakeups_affine_attempts);
+ P(se.nr_wakeups_passive);
+ P(se.nr_wakeups_idle);
+
+ {
+ u64 avg_atom, avg_per_cpu;
+
+ avg_atom = p->se.sum_exec_runtime;
+ if (nr_switches)
+ do_div(avg_atom, nr_switches);
+ else
+ avg_atom = -1LL;
+
+ avg_per_cpu = p->se.sum_exec_runtime;
+ if (p->se.nr_migrations)
+ avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
+ else
+ avg_per_cpu = -1LL;
+
+ __PN(avg_atom);
+ __PN(avg_per_cpu);
+ }
#endif
- SEQ_printf(m, "%-25s:%20Ld\n",
- "nr_switches", (long long)(p->nvcsw + p->nivcsw));
+ __P(nr_switches);
+ SEQ_printf(m, "%-35s:%21Ld\n",
+ "nr_voluntary_switches", (long long)p->nvcsw);
+ SEQ_printf(m, "%-35s:%21Ld\n",
+ "nr_involuntary_switches", (long long)p->nivcsw);
+
P(se.load.weight);
P(policy);
P(prio);
+#undef PN
+#undef __PN
#undef P
+#undef __P
{
u64 t0, t1;
t0 = sched_clock();
t1 = sched_clock();
- SEQ_printf(m, "%-25s:%20Ld\n",
+ SEQ_printf(m, "%-35s:%21Ld\n",
"clock-delta", (long long)(t1-t0));
}
}
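
The clock-delta line above times two back-to-back sched_clock() reads to expose the overhead of the clock itself. The same measurement in userspace, with clock_gettime(CLOCK_MONOTONIC) as an assumed stand-in for the kernel's sched_clock():

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec t0, t1;
	long long delta;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	delta = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
		(t1.tv_nsec - t0.tv_nsec);
	printf("clock-delta: %lld ns\n", delta);
	return 0;
}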
@@ -279,9 +362,32 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
- p->se.sleep_max = p->se.block_max = p->se.exec_max = p->se.wait_max = 0;
- p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
+ p->se.wait_max = 0;
+ p->se.sleep_max = 0;
+ p->se.sum_sleep_runtime = 0;
+ p->se.block_max = 0;
+ p->se.exec_max = 0;
+ p->se.slice_max = 0;
+ p->se.nr_migrations = 0;
+ p->se.nr_migrations_cold = 0;
+ p->se.nr_failed_migrations_affine = 0;
+ p->se.nr_failed_migrations_running = 0;
+ p->se.nr_failed_migrations_hot = 0;
+ p->se.nr_forced_migrations = 0;
+ p->se.nr_forced2_migrations = 0;
+ p->se.nr_wakeups = 0;
+ p->se.nr_wakeups_sync = 0;
+ p->se.nr_wakeups_migrate = 0;
+ p->se.nr_wakeups_local = 0;
+ p->se.nr_wakeups_remote = 0;
+ p->se.nr_wakeups_affine = 0;
+ p->se.nr_wakeups_affine_attempts = 0;
+ p->se.nr_wakeups_passive = 0;
+ p->se.nr_wakeups_idle = 0;
+ p->sched_info.bkl_count = 0;
#endif
- p->se.sum_exec_runtime = 0;
- p->se.prev_sum_exec_runtime = 0;
+ p->se.sum_exec_runtime = 0;
+ p->se.prev_sum_exec_runtime = 0;
+ p->nvcsw = 0;
+ p->nivcsw = 0;
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 67c67a87146e..166ed6db600b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -25,22 +25,26 @@
* (default: 20ms, units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
- * 'timeslice length' - timeslices in CFS are of variable length.
- * (to see the precise effective timeslice length of your workload,
- * run vmstat and monitor the context-switches field)
+ * 'timeslice length' - timeslices in CFS are of variable length
+ * and have no persistent notion like in traditional, time-slice
+ * based scheduling concepts.
*
- * On SMP systems the value of this is multiplied by the log2 of the
- * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
- * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
- * Targeted preemption latency for CPU-bound tasks:
+ * (to see the precise effective timeslice length of your workload,
+ * run vmstat and monitor the context-switches (cs) field)
*/
-unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
+const_debug unsigned int sysctl_sched_latency = 20000000ULL;
+
+/*
+ * After fork, child runs first. (default) If set to 0 then
+ * parent will (try to) run first.
+ */
+const_debug unsigned int sysctl_sched_child_runs_first = 1;
/*
* Minimal preemption granularity for CPU-bound tasks:
* (default: 2 msec, units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+const_debug unsigned int sysctl_sched_nr_latency = 20;
/*
* sys_sched_yield() compat mode
@@ -52,52 +56,25 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_BATCH wake-up granularity.
- * (default: 25 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 1 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
-
-unsigned int sysctl_sched_stat_granularity __read_mostly;
-
-/*
- * Initialized in sched_init_granularity() [to 5 times the base granularity]:
- */
-unsigned int sysctl_sched_runtime_limit __read_mostly;
-
-/*
- * Debugging: various feature bits
- */
-enum {
- SCHED_FEAT_FAIR_SLEEPERS = 1,
- SCHED_FEAT_SLEEPER_AVG = 2,
- SCHED_FEAT_SLEEPER_LOAD_AVG = 4,
- SCHED_FEAT_PRECISE_CPU_LOAD = 8,
- SCHED_FEAT_START_DEBIT = 16,
- SCHED_FEAT_SKIP_INITIAL = 32,
-};
+const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
-unsigned int sysctl_sched_features __read_mostly =
- SCHED_FEAT_FAIR_SLEEPERS *1 |
- SCHED_FEAT_SLEEPER_AVG *0 |
- SCHED_FEAT_SLEEPER_LOAD_AVG *1 |
- SCHED_FEAT_PRECISE_CPU_LOAD *1 |
- SCHED_FEAT_START_DEBIT *1 |
- SCHED_FEAT_SKIP_INITIAL *0;
-
-extern struct sched_class fair_sched_class;
+const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
/**************************************************************
* CFS operations on generic schedulable entities:
@@ -111,21 +88,9 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return cfs_rq->rq;
}
-/* currently running entity (if any) on this cfs_rq */
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
- return cfs_rq->curr;
-}
-
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->curr = se;
-}
-
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -133,21 +98,8 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
return container_of(cfs_rq, struct rq, cfs);
}
-static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
-{
- struct rq *rq = rq_of(cfs_rq);
-
- if (unlikely(rq->curr->sched_class != &fair_sched_class))
- return NULL;
-
- return &rq->curr->se;
-}
-
#define entity_is_task(se) 1
-static inline void
-set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline struct task_struct *task_of(struct sched_entity *se)
@@ -160,16 +112,38 @@ static inline struct task_struct *task_of(struct sched_entity *se)
* Scheduling class tree data structure manipulation methods:
*/
+static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
+{
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta > 0)
+ min_vruntime = vruntime;
+
+ return min_vruntime;
+}
+
+static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
+{
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta < 0)
+ min_vruntime = vruntime;
+
+ return min_vruntime;
+}
+
+static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ return se->vruntime - cfs_rq->min_vruntime;
+}
+
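A note on the two helpers above: they compare virtual runtimes through a signed delta so the result stays correct even if the unsigned counters wrap. A minimal, self-contained userspace sketch (not the kernel code; u64/s64 approximated with long long types):

/*
 * Standalone illustration of the wrap-safe comparison used by
 * max_vruntime()/min_vruntime() above. This is a sketch, not the
 * kernel implementation.
 */
#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

static u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	/* the signed delta keeps the test valid across u64 wrap-around */
	s64 delta = (s64)(vruntime - min_vruntime);

	if (delta > 0)
		min_vruntime = vruntime;
	return min_vruntime;
}

int main(void)
{
	u64 near_wrap = ~0ULL - 10;	/* just below the u64 limit */
	u64 wrapped = 5;		/* logically later, numerically smaller */

	/* a plain 'vruntime > min_vruntime' check would keep near_wrap here */
	printf("max_vruntime() picks %llu\n", max_vruntime(near_wrap, wrapped));
	return 0;
}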
/*
* Enqueue an entity into the rb-tree:
*/
-static inline void
-__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct rb_node *parent = NULL;
struct sched_entity *entry;
- s64 key = se->fair_key;
+ s64 key = entity_key(cfs_rq, se);
int leftmost = 1;
/*
@@ -182,7 +156,7 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* We don't care about collisions. Nodes with
* the same key stay together.
*/
- if (key - entry->fair_key < 0) {
+ if (key < entity_key(cfs_rq, entry)) {
link = &parent->rb_left;
} else {
link = &parent->rb_right;
@@ -199,24 +173,14 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
- update_load_add(&cfs_rq->load, se->load.weight);
- cfs_rq->nr_running++;
- se->on_rq = 1;
-
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
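As a worked example of the key used above: with cfs_rq->min_vruntime at 100 ms, entities whose vruntime is 99.5 ms, 101 ms and 130 ms get keys of -0.5 ms, +1 ms and +30 ms, so the 99.5 ms entity ends up as the cached leftmost node and is what __pick_next_entity() will return.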
-static inline void
-__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node)
cfs_rq->rb_leftmost = rb_next(&se->run_node);
- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
- update_load_sub(&cfs_rq->load, se->load.weight);
- cfs_rq->nr_running--;
- se->on_rq = 0;
- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
+ rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -229,118 +193,86 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}
+static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+{
+ struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+ struct sched_entity *se = NULL;
+ struct rb_node *parent;
+
+ while (*link) {
+ parent = *link;
+ se = rb_entry(parent, struct sched_entity, run_node);
+ link = &parent->rb_right;
+ }
+
+ return se;
+}
+
/**************************************************************
* Scheduling class statistics methods:
*/
+
/*
- * Calculate the preemption granularity needed to schedule every
- * runnable task once per sysctl_sched_latency amount of time.
- * (down to a sensible low limit on granularity)
- *
- * For example, if there are 2 tasks running and latency is 10 msecs,
- * we switch tasks every 5 msecs. If we have 3 tasks running, we have
- * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
- * for each task. We do finer and finer scheduling up to until we
- * reach the minimum granularity value.
+ * The idea is to set a period in which each task runs once.
*
- * To achieve this we use the following dynamic-granularity rule:
+ * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
+ * this period because otherwise the slices get too small.
*
- * gran = lat/nr - lat/nr/nr
- *
- * This comes out of the following equations:
- *
- * kA1 + gran = kB1
- * kB2 + gran = kA2
- * kA2 = kA1
- * kB2 = kB1 - d + d/nr
- * lat = d * nr
- *
- * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
- * '1' is start of time, '2' is end of time, 'd' is delay between
- * 1 and 2 (during which task B was running), 'nr' is number of tasks
- * running, 'lat' is the the period of each task. ('lat' is the
- * sched_latency that we aim for.)
+ * p = (nr <= nl) ? l : l*nr/nl
*/
-static long
-sched_granularity(struct cfs_rq *cfs_rq)
+static u64 __sched_period(unsigned long nr_running)
{
- unsigned int gran = sysctl_sched_latency;
- unsigned int nr = cfs_rq->nr_running;
+ u64 period = sysctl_sched_latency;
+ unsigned long nr_latency = sysctl_sched_nr_latency;
- if (nr > 1) {
- gran = gran/nr - gran/nr/nr;
- gran = max(gran, sysctl_sched_min_granularity);
+ if (unlikely(nr_running > nr_latency)) {
+ period *= nr_running;
+ do_div(period, nr_latency);
}
- return gran;
+ return period;
}
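To make the p = (nr <= nl) ? l : l*nr/nl rule above concrete, here is a self-contained userspace sketch using the defaults from this patch (20 ms latency, nr_latency of 20); it is an illustration, not the kernel function:

#include <stdio.h>

/* sketch of the period rule: stretch the period once there are more
 * runnable tasks than sysctl_sched_nr_latency, so slices stay usable */
static unsigned long long sched_period(unsigned long nr_running)
{
	unsigned long long latency = 20000000ULL;	/* 20 ms in ns */
	unsigned long nr_latency = 20;

	if (nr_running > nr_latency)
		return latency * nr_running / nr_latency;
	return latency;
}

int main(void)
{
	unsigned long nr;

	for (nr = 1; nr <= 40; nr += 13)
		printf("nr_running=%2lu period=%llu ns\n", nr, sched_period(nr));
	return 0;
}

With 40 runnable tasks the period doubles to 40 ms, keeping each slice at roughly 1 ms instead of shrinking it further.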
/*
- * We rescale the rescheduling granularity of tasks according to their
- * nice level, but only linearly, not exponentially:
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
*/
-static long
-niced_granularity(struct sched_entity *curr, unsigned long granularity)
+static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- u64 tmp;
+ u64 slice = __sched_period(cfs_rq->nr_running);
- if (likely(curr->load.weight == NICE_0_LOAD))
- return granularity;
- /*
- * Positive nice levels get the same granularity as nice-0:
- */
- if (likely(curr->load.weight < NICE_0_LOAD)) {
- tmp = curr->load.weight * (u64)granularity;
- return (long) (tmp >> NICE_0_SHIFT);
- }
- /*
- * Negative nice level tasks get linearly finer
- * granularity:
- */
- tmp = curr->load.inv_weight * (u64)granularity;
+ slice *= se->load.weight;
+ do_div(slice, cfs_rq->load.weight);
- /*
- * It will always fit into 'long':
- */
- return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
+ return slice;
}
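The s = p*w/rw rule can be seen with two runnable entities; the weights below are purely illustrative numbers (a hypothetical "light" and "heavy" task), not entries from the kernel's weight table:

#include <stdio.h>

int main(void)
{
	unsigned long long period = 20000000ULL;	/* 20 ms, as above */
	unsigned long w_light = 1024, w_heavy = 2048;	/* illustrative weights */
	unsigned long rq_weight = w_light + w_heavy;

	/* each entity's wall-time slice is its share of the period */
	printf("light slice: %llu ns\n", period * w_light / rq_weight);
	printf("heavy slice: %llu ns\n", period * w_heavy / rq_weight);
	return 0;
}

The heavier entity gets about 13.3 ms of the 20 ms period, the lighter one about 6.7 ms.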
-static inline void
-limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
- long limit = sysctl_sched_runtime_limit;
+ u64 vslice = __sched_period(nr_running);
- /*
- * Niced tasks have the same history dynamic range as
- * non-niced tasks:
- */
- if (unlikely(se->wait_runtime > limit)) {
- se->wait_runtime = limit;
- schedstat_inc(se, wait_runtime_overruns);
- schedstat_inc(cfs_rq, wait_runtime_overruns);
- }
- if (unlikely(se->wait_runtime < -limit)) {
- se->wait_runtime = -limit;
- schedstat_inc(se, wait_runtime_underruns);
- schedstat_inc(cfs_rq, wait_runtime_underruns);
- }
+ do_div(vslice, rq_weight);
+
+ return vslice;
}
-static inline void
-__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
- se->wait_runtime += delta;
- schedstat_add(se, sum_wait_runtime, delta);
- limit_wait_runtime(cfs_rq, se);
+ return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}
-static void
-add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
- __add_wait_runtime(cfs_rq, se, delta);
- schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+ return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+ cfs_rq->nr_running + 1);
}
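As a worked example of vs = p/rw with the same illustrative numbers: __sched_vslice(3072, 2) returns 20000000/3072 ≈ 6510, and sched_vslice_add() computes the same quantity as if the new entity's weight were already part of the runqueue load, which is what the START_DEBIT placement in place_entity() below adds to a forked child's vruntime.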
/*
@@ -348,46 +280,41 @@ add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
* are not in our scheduling class.
*/
static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned long delta_exec)
{
- unsigned long delta, delta_exec, delta_fair, delta_mine;
- struct load_weight *lw = &cfs_rq->load;
- unsigned long load = lw->weight;
+ unsigned long delta_exec_weighted;
+ u64 vruntime;
- delta_exec = curr->delta_exec;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
curr->sum_exec_runtime += delta_exec;
- cfs_rq->exec_clock += delta_exec;
-
- if (unlikely(!load))
- return;
-
- delta_fair = calc_delta_fair(delta_exec, lw);
- delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
-
- if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
- delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
- delta = min(delta, (unsigned long)(
- (long)sysctl_sched_runtime_limit - curr->wait_runtime));
- cfs_rq->sleeper_bonus -= delta;
- delta_mine -= delta;
+ schedstat_add(cfs_rq, exec_clock, delta_exec);
+ delta_exec_weighted = delta_exec;
+ if (unlikely(curr->load.weight != NICE_0_LOAD)) {
+ delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
+ &curr->load);
}
+ curr->vruntime += delta_exec_weighted;
- cfs_rq->fair_clock += delta_fair;
/*
- * We executed delta_exec amount of time on the CPU,
- * but we were only entitled to delta_mine amount of
- * time during that period (if nr_running == 1 then
- * the two values are equal)
- * [Note: delta_mine - delta_exec is negative]:
+ * maintain cfs_rq->min_vruntime to be a monotonically increasing
+ * value tracking the leftmost vruntime in the tree.
*/
- add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
+ if (first_fair(cfs_rq)) {
+ vruntime = min_vruntime(curr->vruntime,
+ __pick_next_entity(cfs_rq)->vruntime);
+ } else
+ vruntime = curr->vruntime;
+
+ cfs_rq->min_vruntime =
+ max_vruntime(cfs_rq->min_vruntime, vruntime);
}
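A rough userspace model of how __update_curr() advances vruntime: heavier entities accrue virtual time more slowly, so equal vruntime progress corresponds to CPU time proportional to weight. The sketch approximates calc_delta_fair() as delta * NICE_0_LOAD / weight and ignores its fixed-point inverse-weight arithmetic:

#include <stdio.h>

#define NICE_0_LOAD 1024ULL

int main(void)
{
	unsigned long long delta_exec = 1000000ULL;	/* 1 ms of real runtime */
	unsigned long weights[] = { 512, 1024, 2048 };	/* illustrative weights */
	int i;

	for (i = 0; i < 3; i++)
		printf("weight %4lu -> vruntime += %llu ns\n",
		       weights[i], delta_exec * NICE_0_LOAD / weights[i]);
	return 0;
}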
static void update_curr(struct cfs_rq *cfs_rq)
{
- struct sched_entity *curr = cfs_rq_curr(cfs_rq);
+ struct sched_entity *curr = cfs_rq->curr;
+ u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
@@ -398,135 +325,47 @@ static void update_curr(struct cfs_rq *cfs_rq)
* since the last time we changed load (this cannot
* overflow on 32 bits):
*/
- delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);
-
- curr->delta_exec += delta_exec;
+ delta_exec = (unsigned long)(now - curr->exec_start);
- if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
- __update_curr(cfs_rq, curr);
- curr->delta_exec = 0;
- }
- curr->exec_start = rq_of(cfs_rq)->clock;
+ __update_curr(cfs_rq, curr, delta_exec);
+ curr->exec_start = now;
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- se->wait_start_fair = cfs_rq->fair_clock;
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
/*
- * We calculate fair deltas here, so protect against the random effects
- * of a multiplication overflow by capping it to the runtime limit:
- */
-#if BITS_PER_LONG == 32
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
- u64 tmp = (u64)delta * weight >> shift;
-
- if (unlikely(tmp > sysctl_sched_runtime_limit*2))
- return sysctl_sched_runtime_limit*2;
- return tmp;
-}
-#else
-static inline unsigned long
-calc_weighted(unsigned long delta, unsigned long weight, int shift)
-{
- return delta * weight >> shift;
-}
-#endif
-
-/*
* Task is being enqueued - update stats:
*/
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- s64 key;
-
/*
* Are we enqueueing a waiting task? (for current tasks
* a dequeue/enqueue event is a NOP)
*/
- if (se != cfs_rq_curr(cfs_rq))
+ if (se != cfs_rq->curr)
update_stats_wait_start(cfs_rq, se);
- /*
- * Update the key:
- */
- key = cfs_rq->fair_clock;
-
- /*
- * Optimize the common nice 0 case:
- */
- if (likely(se->load.weight == NICE_0_LOAD)) {
- key -= se->wait_runtime;
- } else {
- u64 tmp;
-
- if (se->wait_runtime < 0) {
- tmp = -se->wait_runtime;
- key += (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- } else {
- tmp = se->wait_runtime;
- key -= (tmp * se->load.inv_weight) >>
- (WMULT_SHIFT - NICE_0_SHIFT);
- }
- }
-
- se->fair_key = key;
-}
-
-/*
- * Note: must be called with a freshly updated rq->fair_clock.
- */
-static inline void
-__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- unsigned long delta_fair = se->delta_fair_run;
-
- schedstat_set(se->wait_max, max(se->wait_max,
- rq_of(cfs_rq)->clock - se->wait_start));
-
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
-
- add_wait_runtime(cfs_rq, se, delta_fair);
}
static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned long delta_fair;
-
- if (unlikely(!se->wait_start_fair))
- return;
-
- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
- (u64)(cfs_rq->fair_clock - se->wait_start_fair));
-
- se->delta_fair_run += delta_fair;
- if (unlikely(abs(se->delta_fair_run) >=
- sysctl_sched_stat_granularity)) {
- __update_stats_wait_end(cfs_rq, se);
- se->delta_fair_run = 0;
- }
-
- se->wait_start_fair = 0;
+ schedstat_set(se->wait_max, max(se->wait_max,
+ rq_of(cfs_rq)->clock - se->wait_start));
schedstat_set(se->wait_start, 0);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- update_curr(cfs_rq);
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
*/
- if (se != cfs_rq_curr(cfs_rq))
+ if (se != cfs_rq->curr)
update_stats_wait_end(cfs_rq, se);
}
@@ -542,79 +381,28 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->exec_start = rq_of(cfs_rq)->clock;
}
-/*
- * We are descheduling a task - update its stats:
- */
-static inline void
-update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- se->exec_start = 0;
-}
-
/**************************************************
* Scheduling class queueing methods:
*/
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void
+account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned long load = cfs_rq->load.weight, delta_fair;
- long prev_runtime;
-
- /*
- * Do not boost sleepers if there's too much bonus 'in flight'
- * already:
- */
- if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
- return;
-
- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
- load = rq_of(cfs_rq)->cpu_load[2];
-
- delta_fair = se->delta_fair_sleep;
-
- /*
- * Fix up delta_fair with the effect of us running
- * during the whole sleep period:
- */
- if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
- delta_fair = div64_likely32((u64)delta_fair * load,
- load + se->load.weight);
-
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta_fair = calc_weighted(delta_fair, se->load.weight,
- NICE_0_SHIFT);
-
- prev_runtime = se->wait_runtime;
- __add_wait_runtime(cfs_rq, se, delta_fair);
- delta_fair = se->wait_runtime - prev_runtime;
+ update_load_add(&cfs_rq->load, se->load.weight);
+ cfs_rq->nr_running++;
+ se->on_rq = 1;
+}
- /*
- * Track the amount of bonus we've given to sleepers:
- */
- cfs_rq->sleeper_bonus += delta_fair;
+static void
+account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ update_load_sub(&cfs_rq->load, se->load.weight);
+ cfs_rq->nr_running--;
+ se->on_rq = 0;
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- struct task_struct *tsk = task_of(se);
- unsigned long delta_fair;
-
- if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
- !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
- return;
-
- delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
- (u64)(cfs_rq->fair_clock - se->sleep_start_fair));
-
- se->delta_fair_sleep += delta_fair;
- if (unlikely(abs(se->delta_fair_sleep) >=
- sysctl_sched_stat_granularity)) {
- __enqueue_sleeper(cfs_rq, se);
- se->delta_fair_sleep = 0;
- }
-
- se->sleep_start_fair = 0;
-
#ifdef CONFIG_SCHEDSTATS
if (se->sleep_start) {
u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
@@ -646,6 +434,8 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
* time that the task spent sleeping:
*/
if (unlikely(prof_on == SLEEP_PROFILING)) {
+ struct task_struct *tsk = task_of(se);
+
profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
delta >> 20);
}
@@ -653,27 +443,81 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+ if (d < 0)
+ d = -d;
+
+ if (d > 3*sysctl_sched_latency)
+ schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
+static void
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+{
+ u64 vruntime;
+
+ vruntime = cfs_rq->min_vruntime;
+
+ if (sched_feat(TREE_AVG)) {
+ struct sched_entity *last = __pick_last_entity(cfs_rq);
+ if (last) {
+ vruntime += last->vruntime;
+ vruntime >>= 1;
+ }
+ } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+ vruntime += sched_vslice(cfs_rq)/2;
+
+ if (initial && sched_feat(START_DEBIT))
+ vruntime += sched_vslice_add(cfs_rq, se);
+
+ if (!initial) {
+ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
+ task_of(se)->policy != SCHED_BATCH)
+ vruntime -= sysctl_sched_latency;
+
+ vruntime = max_t(s64, vruntime, se->vruntime);
+ }
+
+ se->vruntime = vruntime;
+}
+
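place_entity() above is the heart of the new placement policy. The following self-contained sketch models only the branch taken when START_DEBIT and NEW_FAIR_SLEEPERS apply and TREE_AVG/APPROX_AVG do not (an assumption chosen for illustration, not a statement about the feature defaults): a forked child is pushed one vslice past min_vruntime, while a waking sleeper gets at most one latency period of credit and is never moved backwards from where it already was:

#include <stdio.h>

typedef unsigned long long u64;

/* condensed model of place_entity(); not the kernel function itself */
static u64 place(u64 min_vruntime, u64 old_vruntime, u64 vslice, int initial)
{
	u64 latency = 20000000ULL;	/* 20 ms, as above */
	u64 vruntime = min_vruntime;

	if (initial) {
		vruntime += vslice;			/* START_DEBIT */
	} else {
		vruntime -= latency;			/* sleeper credit */
		if ((long long)(vruntime - old_vruntime) < 0)
			vruntime = old_vruntime;	/* don't go backwards */
	}
	return vruntime;
}

int main(void)
{
	/* forked child: pushed one (hypothetical 5 ms) vslice ahead */
	printf("forked child : %llu\n", place(100000000ULL, 0, 5000000ULL, 1));
	/* waking sleeper that last ran at 95 ms: credit is clamped */
	printf("woken sleeper: %llu\n", place(100000000ULL, 95000000ULL, 0, 0));
	return 0;
}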
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
/*
- * Update the fair clock.
+ * Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- if (wakeup)
+ if (wakeup) {
+ place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
+ }
update_stats_enqueue(cfs_rq, se);
- __enqueue_entity(cfs_rq, se);
+ check_spread(cfs_rq, se);
+ if (se != cfs_rq->curr)
+ __enqueue_entity(cfs_rq, se);
+ account_entity_enqueue(cfs_rq, se);
}
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+
update_stats_dequeue(cfs_rq, se);
if (sleep) {
- se->sleep_start_fair = cfs_rq->fair_clock;
+ se->peer_preempt = 0;
#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
@@ -685,68 +529,66 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
}
#endif
}
- __dequeue_entity(cfs_rq, se);
+
+ if (se != cfs_rq->curr)
+ __dequeue_entity(cfs_rq, se);
+ account_entity_dequeue(cfs_rq, se);
}
/*
* Preempt the current task with a newly woken task if needed:
*/
static void
-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
- struct sched_entity *curr, unsigned long granularity)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- s64 __delta = curr->fair_key - se->fair_key;
unsigned long ideal_runtime, delta_exec;
- /*
- * ideal_runtime is compared against sum_exec_runtime, which is
- * walltime, hence do not scale.
- */
- ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
- (unsigned long)sysctl_sched_min_granularity);
-
- /*
- * If we executed more than what the latency constraint suggests,
- * reduce the rescheduling granularity. This way the total latency
- * of how much a task is not scheduled converges to
- * sysctl_sched_latency:
- */
+ ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime)
- granularity = 0;
-
- /*
- * Take scheduling granularity into account - do not
- * preempt the current task unless the best task has
- * a larger than sched_granularity fairness advantage:
- *
- * scale granularity as key space is in fair_clock.
- */
- if (__delta > niced_granularity(curr, granularity))
+ if (delta_exec > ideal_runtime ||
+ (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
resched_task(rq_of(cfs_rq)->curr);
+ curr->peer_preempt = 0;
}
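Continuing the earlier worked numbers: sched_slice() grants the hypothetical heavy entity about 13.3 ms and the light one about 6.7 ms of the 20 ms period, so check_preempt_tick() reschedules each of them once sum_exec_runtime has advanced that far past prev_sum_exec_runtime (which set_next_entity() records when the entity is picked).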
-static inline void
+static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ /* 'current' is not kept within the tree. */
+ if (se->on_rq) {
+ /*
+ * Any task has to be enqueued before it gets to execute on
+ * a CPU. So account for the time it spent waiting on the
+ * runqueue.
+ */
+ update_stats_wait_end(cfs_rq, se);
+ __dequeue_entity(cfs_rq, se);
+ }
+
+ update_stats_curr_start(cfs_rq, se);
+ cfs_rq->curr = se;
+#ifdef CONFIG_SCHEDSTATS
/*
- * Any task has to be enqueued before it get to execute on
- * a CPU. So account for the time it spent waiting on the
- * runqueue. (note, here we rely on pick_next_task() having
- * done a put_prev_task_fair() shortly before this, which
- * updated rq->fair_clock - used by update_stats_wait_end())
+ * Track our maximum slice length, if the CPU's load is at
+ * least twice that of our own weight (i.e. don't track it
+ * when there are only lesser-weight tasks around):
*/
- update_stats_wait_end(cfs_rq, se);
- update_stats_curr_start(cfs_rq, se);
- set_cfs_rq_curr(cfs_rq, se);
+ if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+ se->slice_max = max(se->slice_max,
+ se->sum_exec_runtime - se->prev_sum_exec_runtime);
+ }
+#endif
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *se = NULL;
- set_next_entity(cfs_rq, se);
+ if (first_fair(cfs_rq)) {
+ se = __pick_next_entity(cfs_rq);
+ set_next_entity(cfs_rq, se);
+ }
return se;
}
@@ -760,33 +602,24 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
if (prev->on_rq)
update_curr(cfs_rq);
- update_stats_curr_end(cfs_rq, prev);
-
- if (prev->on_rq)
+ check_spread(cfs_rq, prev);
+ if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
- set_cfs_rq_curr(cfs_rq, NULL);
+ /* Put 'current' back into the tree. */
+ __enqueue_entity(cfs_rq, prev);
+ }
+ cfs_rq->curr = NULL;
}
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- struct sched_entity *next;
-
/*
- * Dequeue and enqueue the task to update its
- * position within the tree:
+ * Update run-time statistics of the 'current'.
*/
- dequeue_entity(cfs_rq, curr, 0);
- enqueue_entity(cfs_rq, curr, 0);
-
- /*
- * Reschedule if another task tops the current one.
- */
- next = __pick_next_entity(cfs_rq);
- if (next == curr)
- return;
+ update_curr(cfs_rq);
- __check_preempt_curr_fair(cfs_rq, next, curr,
- sched_granularity(cfs_rq));
+ if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
+ check_preempt_tick(cfs_rq, curr);
}
/**************************************************
@@ -821,23 +654,28 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
*/
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
- /* A later patch will take group into account */
- return &cpu_rq(this_cpu)->cfs;
+ return cfs_rq->tg->cfs_rq[this_cpu];
}
/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
- if (curr->se.cfs_rq == p->se.cfs_rq)
+ if (se->cfs_rq == pse->cfs_rq)
return 1;
return 0;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return se->parent;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
@@ -870,11 +708,17 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
return 1;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return NULL;
+}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
@@ -892,6 +736,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
+ wakeup = 1;
}
}
@@ -911,6 +756,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
break;
+ sleep = 1;
}
}
@@ -919,12 +765,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
*
* If compat_yield is turned on then we requeue to the end of the tree.
*/
-static void yield_task_fair(struct rq *rq, struct task_struct *p)
+static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
- struct sched_entity *rightmost, *se = &p->se;
- struct rb_node *parent;
+ struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
+ struct sched_entity *rightmost, *se = &rq->curr->se;
/*
* Are we the only task in the tree?
@@ -935,52 +779,39 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
if (likely(!sysctl_sched_compat_yield)) {
__update_rq_clock(rq);
/*
- * Dequeue and enqueue the task to update its
- * position within the tree:
+ * Update run-time statistics of the 'current'.
*/
- dequeue_entity(cfs_rq, &p->se, 0);
- enqueue_entity(cfs_rq, &p->se, 0);
+ update_curr(cfs_rq);
return;
}
/*
* Find the rightmost entry in the rbtree:
*/
- do {
- parent = *link;
- link = &parent->rb_right;
- } while (*link);
-
- rightmost = rb_entry(parent, struct sched_entity, run_node);
+ rightmost = __pick_last_entity(cfs_rq);
/*
* Already in the rightmost position?
*/
- if (unlikely(rightmost == se))
+ if (unlikely(rightmost->vruntime < se->vruntime))
return;
/*
* Minimally necessary key value to be last in the tree:
+ * Upon rescheduling, sched_class::put_prev_task() will place
+ * 'current' within the tree based on its new key value.
*/
- se->fair_key = rightmost->fair_key + 1;
-
- if (cfs_rq->rb_leftmost == &se->run_node)
- cfs_rq->rb_leftmost = rb_next(&se->run_node);
- /*
- * Relink the task to the rightmost position:
- */
- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
- rb_link_node(&se->run_node, parent, link);
- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
+ se->vruntime = rightmost->vruntime + 1;
}
/*
* Preempt the current task with a newly woken task if needed:
*/
-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- unsigned long gran;
+ struct sched_entity *se = &curr->se, *pse = &p->se;
+ s64 delta, gran;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
@@ -988,16 +819,31 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
resched_task(curr);
return;
}
-
- gran = sysctl_sched_wakeup_granularity;
/*
- * Batch tasks prefer throughput over latency:
+ * Batch tasks do not preempt (their preemption is driven by
+ * the tick):
*/
if (unlikely(p->policy == SCHED_BATCH))
- gran = sysctl_sched_batch_wakeup_granularity;
+ return;
+
+ if (sched_feat(WAKEUP_PREEMPT)) {
+ while (!is_same_group(se, pse)) {
+ se = parent_entity(se);
+ pse = parent_entity(pse);
+ }
+
+ delta = se->vruntime - pse->vruntime;
+ gran = sysctl_sched_wakeup_granularity;
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, &se->load);
+
+ if (delta > gran) {
+ int now = !sched_feat(PREEMPT_RESTRICT);
- if (is_same_group(curr, p))
- __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
+ if (now || p->prio < curr->prio || !se->peer_preempt++)
+ resched_task(curr);
+ }
+ }
}
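A self-contained sketch of the wakeup test above, ignoring group scheduling and the PREEMPT_RESTRICT/peer_preempt bookkeeping; the granularity scaling approximates calc_delta_fair() as gran * NICE_0_LOAD / weight:

#include <stdio.h>

static int should_preempt(long long curr_vruntime, long long woken_vruntime,
			  unsigned long curr_weight)
{
	long long gran = 10000000LL;	/* 10 ms wakeup granularity, as above */

	if (curr_weight != 1024)	/* 1024 standing in for NICE_0_LOAD */
		gran = gran * 1024 / (long long)curr_weight;

	/* preempt only when current's vruntime leads by more than gran */
	return (curr_vruntime - woken_vruntime) > gran;
}

int main(void)
{
	/* current leads the woken task by 15 ms of vruntime: preempt */
	printf("%d\n", should_preempt(115000000LL, 100000000LL, 1024));
	/* current leads by only 5 ms: let it keep running */
	printf("%d\n", should_preempt(105000000LL, 100000000LL, 1024));
	return 0;
}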
static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1041,7 +887,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
* achieve that by always pre-iterating before returning
* the current task:
*/
-static inline struct task_struct *
+static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
struct task_struct *p;
@@ -1078,7 +924,10 @@ static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
if (!cfs_rq->nr_running)
return MAX_PRIO;
- curr = __pick_next_entity(cfs_rq);
+ curr = cfs_rq->curr;
+ if (!curr)
+ curr = __pick_next_entity(cfs_rq);
+
p = task_of(curr);
return p->prio;
@@ -1153,6 +1002,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
}
}
+#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+
/*
* Share the fairness runtime between parent and child, thus the
* total amount of pressure for CPU stays equal - new tasks
@@ -1163,37 +1014,28 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
+ struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+ int this_cpu = smp_processor_id();
sched_info_queued(p);
update_curr(cfs_rq);
- update_stats_enqueue(cfs_rq, se);
- /*
- * Child runs first: we let it run before the parent
- * until it reschedules once. We set up the key so that
- * it will preempt the parent:
- */
- se->fair_key = curr->fair_key -
- niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
- /*
- * The first wait is dominated by the child-runs-first logic,
- * so do not credit it with that waiting time yet:
- */
- if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
- se->wait_start_fair = 0;
+ place_entity(cfs_rq, se, 1);
- /*
- * The statistical average of wait_runtime is about
- * -granularity/2, so initialize the task with that:
- */
- if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
- se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
+ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
+ curr->vruntime < se->vruntime) {
+ /*
+ * Upon rescheduling, sched_class::put_prev_task() will place
+ * 'current' within the tree based on its new key value.
+ */
+ swap(curr->vruntime, se->vruntime);
+ }
- __enqueue_entity(cfs_rq, se);
+ se->peer_preempt = 0;
+ enqueue_task_fair(rq, p, 0);
+ resched_task(rq->curr);
}
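Worked example of the swap above: if the parent's vruntime is 100 ms and place_entity() gives the freshly forked child, say, 105 ms (one vslice of START_DEBIT), the swap leaves the child at 100 ms and the parent at 105 ms, so the leftmost pick from the rbtree runs the child first, as sysctl_sched_child_runs_first intends.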
-#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
*
* This routine is mostly called to set cfs_rq->curr field when a task
@@ -1206,21 +1048,17 @@ static void set_curr_task_fair(struct rq *rq)
for_each_sched_entity(se)
set_next_entity(cfs_rq_of(se), se);
}
-#else
-static void set_curr_task_fair(struct rq *rq)
-{
-}
-#endif
/*
* All the scheduling class methods:
*/
-struct sched_class fair_sched_class __read_mostly = {
+static const struct sched_class fair_sched_class = {
+ .next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,
- .check_preempt_curr = check_preempt_curr_fair,
+ .check_preempt_curr = check_preempt_wakeup,
.pick_next_task = pick_next_task_fair,
.put_prev_task = put_prev_task_fair,
@@ -1237,6 +1075,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
{
struct cfs_rq *cfs_rq;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
+#endif
for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
print_cfs_rq(m, cpu, cfs_rq);
}
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 3503fb2d9f96..6e2ead41516e 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -50,10 +50,15 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr)
{
}
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
-static struct sched_class idle_sched_class __read_mostly = {
+const struct sched_class idle_sched_class = {
+ /* .next is NULL */
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
@@ -66,6 +71,7 @@ static struct sched_class idle_sched_class __read_mostly = {
.load_balance = load_balance_idle,
+ .set_curr_task = set_curr_task_idle,
.task_tick = task_tick_idle,
/* no .task_new for idle tasks */
};
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 4b87476a02d0..d0097a0634e5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -7,7 +7,7 @@
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
-static inline void update_curr_rt(struct rq *rq)
+static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
u64 delta_exec;
@@ -59,9 +59,9 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
}
static void
-yield_task_rt(struct rq *rq, struct task_struct *p)
+yield_task_rt(struct rq *rq)
{
- requeue_task_rt(rq, p);
+ requeue_task_rt(rq, rq->curr);
}
/*
@@ -206,7 +206,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
if (--p->time_slice)
return;
- p->time_slice = static_prio_timeslice(p->static_prio);
+ p->time_slice = DEF_TIMESLICE;
/*
* Requeue to the end of queue if we are not the only element
@@ -218,7 +218,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
}
}
-static struct sched_class rt_sched_class __read_mostly = {
+static void set_curr_task_rt(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ p->se.exec_start = rq->clock;
+}
+
+const struct sched_class rt_sched_class = {
+ .next = &fair_sched_class,
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
@@ -230,5 +238,6 @@ static struct sched_class rt_sched_class __read_mostly = {
.load_balance = load_balance_rt,
+ .set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt,
};
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index c20a94dda61e..ef1a7df80ea2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -16,18 +16,18 @@ static int show_schedstat(struct seq_file *seq, void *v)
struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
struct sched_domain *sd;
- int dcnt = 0;
+ int dcount = 0;
#endif
/* runqueue-specific stats */
seq_printf(seq,
- "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
+ "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
cpu, rq->yld_both_empty,
- rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
- rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
- rq->ttwu_cnt, rq->ttwu_local,
+ rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+ rq->sched_switch, rq->sched_count, rq->sched_goidle,
+ rq->ttwu_count, rq->ttwu_local,
rq->rq_sched_info.cpu_time,
- rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+ rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
seq_printf(seq, "\n");
@@ -39,12 +39,11 @@ static int show_schedstat(struct seq_file *seq, void *v)
char mask_str[NR_CPUS];
cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
- seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+ seq_printf(seq, "domain%d %s", dcount++, mask_str);
for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
itype++) {
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
- "%lu",
- sd->lb_cnt[itype],
+ seq_printf(seq, " %u %u %u %u %u %u %u %u",
+ sd->lb_count[itype],
sd->lb_balanced[itype],
sd->lb_failed[itype],
sd->lb_imbalance[itype],
@@ -53,11 +52,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]);
}
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
- " %lu %lu %lu\n",
- sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
- sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
- sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+ seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ sd->alb_count, sd->alb_failed, sd->alb_pushed,
+ sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+ sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
sd->ttwu_wake_remote, sd->ttwu_move_affine,
sd->ttwu_move_balance);
}
@@ -101,7 +99,7 @@ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
if (rq) {
rq->rq_sched_info.run_delay += delta;
- rq->rq_sched_info.pcnt++;
+ rq->rq_sched_info.pcount++;
}
}
@@ -129,7 +127,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
# define schedstat_set(var, val) do { } while (0)
#endif
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHEDSTATS
/*
* Called when a process is dequeued from the active array and given
* the cpu. We should note that with the exception of interactive
@@ -164,7 +162,7 @@ static void sched_info_arrive(struct task_struct *t)
sched_info_dequeued(t);
t->sched_info.run_delay += delta;
t->sched_info.last_arrival = now;
- t->sched_info.pcnt++;
+ t->sched_info.pcount++;
rq_sched_info_arrive(task_rq(t), delta);
}
@@ -233,5 +231,5 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
#else
#define sched_info_queued(t) do { } while (0)
#define sched_info_switch(t, next) do { } while (0)
-#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
+#endif /* CONFIG_SCHEDSTATS */
diff --git a/kernel/signal.c b/kernel/signal.c
index 792952381092..12006308c7eb 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -99,7 +99,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
static int recalc_sigpending_tsk(struct task_struct *t)
{
if (t->signal->group_stop_count > 0 ||
- (freezing(t)) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -257,7 +256,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
int unhandled_signal(struct task_struct *tsk, int sig)
{
- if (is_init(tsk))
+ if (is_global_init(tsk))
return 1;
if (tsk->ptrace & PT_PTRACED)
return 0;
@@ -537,7 +536,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
return error;
error = -EPERM;
if (((sig != SIGCONT) ||
- (process_session(current) != process_session(t)))
+ (task_session_nr(current) != task_session_nr(t)))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
&& !capable(CAP_KILL))
@@ -695,7 +694,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
- q->info.si_pid = current->pid;
+ q->info.si_pid = task_pid_vnr(current);
q->info.si_uid = current->uid;
break;
case (unsigned long) SEND_SIG_PRIV:
@@ -731,7 +730,7 @@ int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
printk("%s/%d: potentially unexpected fatal signal %d.\n",
- current->comm, current->pid, signr);
+ current->comm, task_pid_nr(current), signr);
#ifdef __i386__
printk("code at %08lx: ", regs->eip);
@@ -909,8 +908,7 @@ __group_complete_signal(int sig, struct task_struct *p)
do {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
- t = next_thread(t);
- } while (t != p);
+ } while_each_thread(p, t);
return;
}
@@ -928,13 +926,11 @@ __group_complete_signal(int sig, struct task_struct *p)
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
p->signal->group_stop_count = 0;
p->signal->group_exit_task = t;
- t = p;
+ p = t;
do {
p->signal->group_stop_count++;
- signal_wake_up(t, 0);
- t = next_thread(t);
- } while (t != p);
- wake_up_process(p->signal->group_exit_task);
+ signal_wake_up(t, t == p);
+ } while_each_thread(p, t);
return;
}
@@ -985,9 +981,6 @@ void zap_other_threads(struct task_struct *p)
p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
- if (thread_group_empty(p))
- return;
-
for (t = next_thread(p); t != p; t = next_thread(t)) {
/*
* Don't bother with already dead threads
@@ -1096,7 +1089,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
int error;
rcu_read_lock();
- error = kill_pid_info(sig, info, find_pid(pid));
+ error = kill_pid_info(sig, info, find_vpid(pid));
rcu_read_unlock();
return error;
}
@@ -1157,7 +1150,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
read_lock(&tasklist_lock);
for_each_process(p) {
- if (p->pid > 1 && p->tgid != current->tgid) {
+ if (p->pid > 1 && !same_thread_group(p, current)) {
int err = group_send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
@@ -1167,9 +1160,9 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
read_unlock(&tasklist_lock);
ret = count ? retval : -ESRCH;
} else if (pid < 0) {
- ret = kill_pgrp_info(sig, info, find_pid(-pid));
+ ret = kill_pgrp_info(sig, info, find_vpid(-pid));
} else {
- ret = kill_pid_info(sig, info, find_pid(pid));
+ ret = kill_pid_info(sig, info, find_vpid(pid));
}
rcu_read_unlock();
return ret;
@@ -1273,7 +1266,12 @@ EXPORT_SYMBOL(kill_pid);
int
kill_proc(pid_t pid, int sig, int priv)
{
- return kill_proc_info(sig, __si_special(priv), pid);
+ int ret;
+
+ rcu_read_lock();
+ ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
+ rcu_read_unlock();
+ return ret;
}
/*
@@ -1450,7 +1448,22 @@ void do_notify_parent(struct task_struct *tsk, int sig)
info.si_signo = sig;
info.si_errno = 0;
- info.si_pid = tsk->pid;
+ /*
+ * we are under tasklist_lock here so our parent is tied to
+ * us and cannot exit and release its namespace.
+ *
+ * the only thing it can do is to switch its nsproxy with sys_unshare,
+ * but unsharing pid namespaces is not allowed, so we will always
+ * see the relevant namespace
+ *
+ * write_lock() currently calls preempt_disable(), which is the
+ * same as rcu_read_lock(), but according to Oleg it is not
+ * correct to rely on this
+ */
+ rcu_read_lock();
+ info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+ rcu_read_unlock();
+
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
@@ -1515,7 +1528,13 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
info.si_signo = SIGCHLD;
info.si_errno = 0;
- info.si_pid = tsk->pid;
+ /*
+ * see comment in do_notify_parent() about the following 3 lines
+ */
+ rcu_read_lock();
+ info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
+ rcu_read_unlock();
+
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
@@ -1641,7 +1660,7 @@ void ptrace_notify(int exit_code)
memset(&info, 0, sizeof info);
info.si_signo = SIGTRAP;
info.si_code = exit_code;
- info.si_pid = current->pid;
+ info.si_pid = task_pid_vnr(current);
info.si_uid = current->uid;
/* Let the debugger run. */
@@ -1811,7 +1830,7 @@ relock:
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
- info->si_pid = current->parent->pid;
+ info->si_pid = task_pid_vnr(current->parent);
info->si_uid = current->parent->uid;
}
@@ -1842,11 +1861,9 @@ relock:
continue;
/*
- * Init of a pid space gets no signals it doesn't want from
- * within that pid space. It can of course get signals from
- * its parent pid space.
+ * Global init gets no signals it doesn't want.
*/
- if (current == child_reaper(current))
+ if (is_global_init(current))
continue;
if (sig_kernel_stop(signr)) {
@@ -2200,7 +2217,7 @@ sys_kill(int pid, int sig)
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
- info.si_pid = current->tgid;
+ info.si_pid = task_tgid_vnr(current);
info.si_uid = current->uid;
return kill_something_info(sig, &info, pid);
@@ -2216,12 +2233,12 @@ static int do_tkill(int tgid, int pid, int sig)
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
- info.si_pid = current->tgid;
+ info.si_pid = task_tgid_vnr(current);
info.si_uid = current->uid;
read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
- if (p && (tgid <= 0 || p->tgid == tgid)) {
+ p = find_task_by_vpid(pid);
+ if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, &info, p);
/*
* The null signal is a permissions and process existence
@@ -2300,15 +2317,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
k = &current->sighand->action[sig-1];
spin_lock_irq(&current->sighand->siglock);
- if (signal_pending(current)) {
- /*
- * If there might be a fatal signal pending on multiple
- * threads, make sure we take it before changing the action.
- */
- spin_unlock_irq(&current->sighand->siglock);
- return -ERESTARTNOINTR;
- }
-
if (oact)
*oact = *k;
@@ -2335,7 +2343,6 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
rm_from_queue_full(&mask, &t->pending);
- recalc_sigpending_and_wake(t);
t = next_thread(t);
} while (t != current);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0f546ddea43d..bd89bc4eb0b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -271,8 +271,6 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-EXPORT_SYMBOL(do_softirq);
-
#endif
/*
@@ -332,8 +330,6 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr)
wakeup_softirqd();
}
-EXPORT_SYMBOL(raise_softirq_irqoff);
-
void fastcall raise_softirq(unsigned int nr)
{
unsigned long flags;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 708d4882c0c3..11df812263c8 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -15,13 +15,16 @@
#include <linux/notifier.h>
#include <linux/module.h>
+#include <asm/irq_regs.h>
+
static DEFINE_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
-static int did_panic = 0;
+static int did_panic;
+int softlockup_thresh = 10;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -40,14 +43,16 @@ static struct notifier_block panic_block = {
* resolution, and we don't need to waste time with a big divide when
* 2^30ns == 1.074s.
*/
-static unsigned long get_timestamp(void)
+static unsigned long get_timestamp(int this_cpu)
{
- return sched_clock() >> 30; /* 2^30 ~= 10^9 */
+ return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */
}
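Worked numbers for the shift above: 2^30 ns ≈ 1.074 s, so the timestamps are in units of roughly 1.07 s rather than exact seconds; 10 s of cpu_clock() time is 10,000,000,000 >> 30 = 9, which is why the softlockup_thresh of 10 introduced above corresponds to a little over ten real seconds.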
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = get_timestamp();
+ int this_cpu = raw_smp_processor_id();
+
+ __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -70,6 +75,7 @@ void softlockup_tick(void)
int this_cpu = smp_processor_id();
unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
unsigned long print_timestamp;
+ struct pt_regs *regs = get_irq_regs();
unsigned long now;
if (touch_timestamp == 0) {
@@ -80,10 +86,11 @@ void softlockup_tick(void)
print_timestamp = per_cpu(print_timestamp, this_cpu);
/* report at most once a second */
- if (print_timestamp < (touch_timestamp + 1) ||
- did_panic ||
- !per_cpu(watchdog_task, this_cpu))
+ if ((print_timestamp >= touch_timestamp &&
+ print_timestamp < (touch_timestamp + 1)) ||
+ did_panic || !per_cpu(watchdog_task, this_cpu)) {
return;
+ }
/* do not print during early bootup: */
if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -91,28 +98,33 @@ void softlockup_tick(void)
return;
}
- now = get_timestamp();
+ now = get_timestamp(this_cpu);
/* Wake up the high-prio watchdog task every second: */
if (now > (touch_timestamp + 1))
wake_up_process(per_cpu(watchdog_task, this_cpu));
/* Warn about unreasonable 10+ seconds delays: */
- if (now > (touch_timestamp + 10)) {
- per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+ if (now <= (touch_timestamp + softlockup_thresh))
+ return;
- spin_lock(&print_lock);
- printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
- this_cpu);
+ per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+
+ spin_lock(&print_lock);
+ printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
+ this_cpu, now - touch_timestamp,
+ current->comm, task_pid_nr(current));
+ if (regs)
+ show_regs(regs);
+ else
dump_stack();
- spin_unlock(&print_lock);
- }
+ spin_unlock(&print_lock);
}
/*
* The watchdog thread - runs every second and touches the timestamp.
*/
-static int watchdog(void * __bind_cpu)
+static int watchdog(void *__bind_cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -150,13 +162,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
BUG_ON(per_cpu(watchdog_task, hotcpu));
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
if (IS_ERR(p)) {
- printk("watchdog for %i failed\n", hotcpu);
+ printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = 0;
- per_cpu(watchdog_task, hotcpu) = p;
+ per_cpu(touch_timestamp, hotcpu) = 0;
+ per_cpu(watchdog_task, hotcpu) = p;
kthread_bind(p, hotcpu);
- break;
+ break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
wake_up_process(per_cpu(watchdog_task, hotcpu));
@@ -176,7 +188,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
kthread_stop(p);
break;
#endif /* CONFIG_HOTPLUG_CPU */
- }
+ }
return NOTIFY_OK;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 8ae2e636eb1b..304b5410d746 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -105,538 +105,6 @@ EXPORT_SYMBOL(cad_pid);
*/
void (*pm_power_off_prepare)(void);
-EXPORT_SYMBOL(pm_power_off_prepare);
-
-/*
- * Notifier list for kernel code which wants to be called
- * at shutdown. This is used to stop any idling DMA operations
- * and the like.
- */
-
-static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
-
-/*
- * Notifier chain core routines. The exported routines below
- * are layered on top of these, with appropriate locking added.
- */
-
-static int notifier_chain_register(struct notifier_block **nl,
- struct notifier_block *n)
-{
- while ((*nl) != NULL) {
- if (n->priority > (*nl)->priority)
- break;
- nl = &((*nl)->next);
- }
- n->next = *nl;
- rcu_assign_pointer(*nl, n);
- return 0;
-}
-
-static int notifier_chain_unregister(struct notifier_block **nl,
- struct notifier_block *n)
-{
- while ((*nl) != NULL) {
- if ((*nl) == n) {
- rcu_assign_pointer(*nl, n->next);
- return 0;
- }
- nl = &((*nl)->next);
- }
- return -ENOENT;
-}
-
-/**
- * notifier_call_chain - Informs the registered notifiers about an event.
- * @nl: Pointer to head of the blocking notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- * @nr_to_call: Number of notifier functions to be called. Don't care
- * value of this parameter is -1.
- * @nr_calls: Records the number of notifications sent. Don't care
- * value of this field is NULL.
- * @returns: notifier_call_chain returns the value returned by the
- * last notifier function called.
- */
-
-static int __kprobes notifier_call_chain(struct notifier_block **nl,
- unsigned long val, void *v,
- int nr_to_call, int *nr_calls)
-{
- int ret = NOTIFY_DONE;
- struct notifier_block *nb, *next_nb;
-
- nb = rcu_dereference(*nl);
-
- while (nb && nr_to_call) {
- next_nb = rcu_dereference(nb->next);
- ret = nb->notifier_call(nb, val, v);
-
- if (nr_calls)
- (*nr_calls)++;
-
- if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
- break;
- nb = next_nb;
- nr_to_call--;
- }
- return ret;
-}
-
-/*
- * Atomic notifier chain routines. Registration and unregistration
- * use a spinlock, and call_chain is synchronized by RCU (no locks).
- */
-
-/**
- * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to an atomic notifier chain.
- *
- * Currently always returns zero.
- */
-
-int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
- struct notifier_block *n)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&nh->lock, flags);
- ret = notifier_chain_register(&nh->head, n);
- spin_unlock_irqrestore(&nh->lock, flags);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
-
-/**
- * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from an atomic notifier chain.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
- struct notifier_block *n)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&nh->lock, flags);
- ret = notifier_chain_unregister(&nh->head, n);
- spin_unlock_irqrestore(&nh->lock, flags);
- synchronize_rcu();
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
-
-/**
- * __atomic_notifier_call_chain - Call functions in an atomic notifier chain
- * @nh: Pointer to head of the atomic notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- * @nr_to_call: See the comment for notifier_call_chain.
- * @nr_calls: See the comment for notifier_call_chain.
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in an atomic context, so they must not block.
- * This routine uses RCU to synchronize with changes to the chain.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-
-int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v,
- int nr_to_call, int *nr_calls)
-{
- int ret;
-
- rcu_read_lock();
- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
- rcu_read_unlock();
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
-
-int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
- unsigned long val, void *v)
-{
- return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
-}
-
-EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
-/*
- * Blocking notifier chain routines. All access to the chain is
- * synchronized by an rwsem.
- */
-
-/**
- * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to a blocking notifier chain.
- * Must be called in process context.
- *
- * Currently always returns zero.
- */
-
-int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
- struct notifier_block *n)
-{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call down_write().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_register(&nh->head, n);
-
- down_write(&nh->rwsem);
- ret = notifier_chain_register(&nh->head, n);
- up_write(&nh->rwsem);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
-
-/**
- * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from a blocking notifier chain.
- * Must be called from process context.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
- struct notifier_block *n)
-{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call down_write().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_unregister(&nh->head, n);
-
- down_write(&nh->rwsem);
- ret = notifier_chain_unregister(&nh->head, n);
- up_write(&nh->rwsem);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
-
-/**
- * __blocking_notifier_call_chain - Call functions in a blocking notifier chain
- * @nh: Pointer to head of the blocking notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- * @nr_to_call: See comment for notifier_call_chain.
- * @nr_calls: See comment for notifier_call_chain.
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in a process context, so they are allowed to block.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-
-int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v,
- int nr_to_call, int *nr_calls)
-{
- int ret = NOTIFY_DONE;
-
- /*
- * We check the head outside the lock, but if this access is
- * racy then it does not matter what the result of the test
- * is, we re-check the list after having taken the lock anyway:
- */
- if (rcu_dereference(nh->head)) {
- down_read(&nh->rwsem);
- ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
- nr_calls);
- up_read(&nh->rwsem);
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
-
-int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
- unsigned long val, void *v)
-{
- return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
-}
-EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
-
-/*
- * Raw notifier chain routines. There is no protection;
- * the caller must provide it. Use at your own risk!
- */
-
-/**
- * raw_notifier_chain_register - Add notifier to a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to a raw notifier chain.
- * All locking must be provided by the caller.
- *
- * Currently always returns zero.
- */
-
-int raw_notifier_chain_register(struct raw_notifier_head *nh,
- struct notifier_block *n)
-{
- return notifier_chain_register(&nh->head, n);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
-
-/**
- * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from a raw notifier chain.
- * All locking must be provided by the caller.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
- struct notifier_block *n)
-{
- return notifier_chain_unregister(&nh->head, n);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
-
-/**
- * __raw_notifier_call_chain - Call functions in a raw notifier chain
- * @nh: Pointer to head of the raw notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- * @nr_to_call: See comment for notifier_call_chain.
- * @nr_calls: See comment for notifier_call_chain
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in an undefined context.
- * All locking must be provided by the caller.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-
-int __raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v,
- int nr_to_call, int *nr_calls)
-{
- return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
-}
-
-EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
-
-int raw_notifier_call_chain(struct raw_notifier_head *nh,
- unsigned long val, void *v)
-{
- return __raw_notifier_call_chain(nh, val, v, -1, NULL);
-}
-
-EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
-
-/*
- * SRCU notifier chain routines. Registration and unregistration
- * use a mutex, and call_chain is synchronized by SRCU (no locks).
- */
-
-/**
- * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
- * @nh: Pointer to head of the SRCU notifier chain
- * @n: New entry in notifier chain
- *
- * Adds a notifier to an SRCU notifier chain.
- * Must be called in process context.
- *
- * Currently always returns zero.
- */
-
-int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
- struct notifier_block *n)
-{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call mutex_lock().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_register(&nh->head, n);
-
- mutex_lock(&nh->mutex);
- ret = notifier_chain_register(&nh->head, n);
- mutex_unlock(&nh->mutex);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
-
-/**
- * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
- * @nh: Pointer to head of the SRCU notifier chain
- * @n: Entry to remove from notifier chain
- *
- * Removes a notifier from an SRCU notifier chain.
- * Must be called from process context.
- *
- * Returns zero on success or %-ENOENT on failure.
- */
-int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
- struct notifier_block *n)
-{
- int ret;
-
- /*
- * This code gets used during boot-up, when task switching is
- * not yet working and interrupts must remain disabled. At
- * such times we must not call mutex_lock().
- */
- if (unlikely(system_state == SYSTEM_BOOTING))
- return notifier_chain_unregister(&nh->head, n);
-
- mutex_lock(&nh->mutex);
- ret = notifier_chain_unregister(&nh->head, n);
- mutex_unlock(&nh->mutex);
- synchronize_srcu(&nh->srcu);
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
-
-/**
- * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
- * @nh: Pointer to head of the SRCU notifier chain
- * @val: Value passed unmodified to notifier function
- * @v: Pointer passed unmodified to notifier function
- * @nr_to_call: See comment for notifier_call_chain.
- * @nr_calls: See comment for notifier_call_chain
- *
- * Calls each function in a notifier chain in turn. The functions
- * run in a process context, so they are allowed to block.
- *
- * If the return value of the notifier can be and'ed
- * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
- * will return immediately, with the return value of
- * the notifier function which halted execution.
- * Otherwise the return value is the return value
- * of the last notifier function called.
- */
-
-int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
- unsigned long val, void *v,
- int nr_to_call, int *nr_calls)
-{
- int ret;
- int idx;
-
- idx = srcu_read_lock(&nh->srcu);
- ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
- srcu_read_unlock(&nh->srcu, idx);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
-
-int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
- unsigned long val, void *v)
-{
- return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
-}
-EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
-
-/**
- * srcu_init_notifier_head - Initialize an SRCU notifier head
- * @nh: Pointer to head of the srcu notifier chain
- *
- * Unlike other sorts of notifier heads, SRCU notifier heads require
- * dynamic initialization. Be sure to call this routine before
- * calling any of the other SRCU notifier routines for this head.
- *
- * If an SRCU notifier head is deallocated, it must first be cleaned
- * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
- * per-cpu data (used by the SRCU mechanism) will leak.
- */
-
-void srcu_init_notifier_head(struct srcu_notifier_head *nh)
-{
- mutex_init(&nh->mutex);
- if (init_srcu_struct(&nh->srcu) < 0)
- BUG();
- nh->head = NULL;
-}
-
-EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
-
-/**
- * register_reboot_notifier - Register function to be called at reboot time
- * @nb: Info about notifier function to be called
- *
- * Registers a function with the list of functions
- * to be called at reboot time.
- *
- * Currently always returns zero, as blocking_notifier_chain_register()
- * always returns zero.
- */
-
-int register_reboot_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_register(&reboot_notifier_list, nb);
-}
-
-EXPORT_SYMBOL(register_reboot_notifier);
-
-/**
- * unregister_reboot_notifier - Unregister previously registered reboot notifier
- * @nb: Hook to be unregistered
- *
- * Unregisters a previously registered reboot
- * notifier function.
- *
- * Returns zero on success, or %-ENOENT on failure.
- */
-
-int unregister_reboot_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
-}
-
-EXPORT_SYMBOL(unregister_reboot_notifier);
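
The hunk above lifts the entire notifier-chain API (the atomic, blocking, raw and SRCU chain variants plus the reboot-notifier wrappers) out of kernel/sys.c; per the diffstat it lands in the new kernel/notifier.c. As a reminder of how the interface is consumed, here is a minimal sketch of a client of a blocking chain; the chain head, callback and module names are invented for illustration, and the SRCU lines only show the extra run-time initialisation that variant needs.

#include <linux/module.h>
#include <linux/notifier.h>

/* Hypothetical chain head owned by some subsystem. */
static BLOCKING_NOTIFIER_HEAD(example_chain);

/* Callback: blocking chains call it in process context, so it may sleep. */
static int example_event(struct notifier_block *nb, unsigned long action,
                         void *data)
{
        return NOTIFY_OK;               /* let the rest of the chain run */
}

static struct notifier_block example_nb = {
        .notifier_call = example_event,
};

/* SRCU heads, unlike the statically initialised kinds, need run-time setup
 * before first use, exactly as the srcu_init_notifier_head() comment says. */
static struct srcu_notifier_head example_srcu_chain;

static int __init example_init(void)
{
        srcu_init_notifier_head(&example_srcu_chain);
        /* Registration and unregistration are serialised by the head's rwsem. */
        return blocking_notifier_chain_register(&example_chain, &example_nb);
}

static void __exit example_exit(void)
{
        blocking_notifier_chain_unregister(&example_chain, &example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
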
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
@@ -684,7 +152,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
switch (which) {
case PRIO_PROCESS:
if (who)
- p = find_task_by_pid(who);
+ p = find_task_by_vpid(who);
else
p = current;
if (p)
@@ -692,7 +160,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
break;
case PRIO_PGRP:
if (who)
- pgrp = find_pid(who);
+ pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
@@ -741,7 +209,7 @@ asmlinkage long sys_getpriority(int which, int who)
switch (which) {
case PRIO_PROCESS:
if (who)
- p = find_task_by_pid(who);
+ p = find_task_by_vpid(who);
else
p = current;
if (p) {
@@ -752,7 +220,7 @@ asmlinkage long sys_getpriority(int which, int who)
break;
case PRIO_PGRP:
if (who)
- pgrp = find_pid(who);
+ pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
@@ -1449,9 +917,10 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
int err = -EINVAL;
+ struct pid_namespace *ns;
if (!pid)
- pid = group_leader->pid;
+ pid = task_pid_vnr(group_leader);
if (!pgid)
pgid = pid;
if (pgid < 0)
@@ -1460,10 +929,12 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
*/
+ ns = current->nsproxy->pid_ns;
+
write_lock_irq(&tasklist_lock);
err = -ESRCH;
- p = find_task_by_pid(pid);
+ p = find_task_by_pid_ns(pid, ns);
if (!p)
goto out;
@@ -1489,9 +960,9 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
goto out;
if (pgid != pid) {
- struct task_struct *g =
- find_task_by_pid_type(PIDTYPE_PGID, pgid);
+ struct task_struct *g;
+ g = find_task_by_pid_type_ns(PIDTYPE_PGID, pgid, ns);
if (!g || task_session(g) != task_session(group_leader))
goto out;
}
@@ -1500,10 +971,13 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
if (err)
goto out;
- if (process_group(p) != pgid) {
+ if (task_pgrp_nr_ns(p, ns) != pgid) {
+ struct pid *pid;
+
detach_pid(p, PIDTYPE_PGID);
- p->signal->pgrp = pgid;
- attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
+ pid = find_vpid(pgid);
+ attach_pid(p, PIDTYPE_PGID, pid);
+ set_task_pgrp(p, pid_nr(pid));
}
err = 0;
@@ -1516,19 +990,21 @@ out:
asmlinkage long sys_getpgid(pid_t pid)
{
if (!pid)
- return process_group(current);
+ return task_pgrp_vnr(current);
else {
int retval;
struct task_struct *p;
+ struct pid_namespace *ns;
- read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
+ ns = current->nsproxy->pid_ns;
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid_ns(pid, ns);
retval = -ESRCH;
if (p) {
retval = security_task_getpgid(p);
if (!retval)
- retval = process_group(p);
+ retval = task_pgrp_nr_ns(p, ns);
}
read_unlock(&tasklist_lock);
return retval;
@@ -1540,7 +1016,7 @@ asmlinkage long sys_getpgid(pid_t pid)
asmlinkage long sys_getpgrp(void)
{
/* SMP - assuming writes are word atomic this is fine */
- return process_group(current);
+ return task_pgrp_vnr(current);
}
#endif
@@ -1548,19 +1024,21 @@ asmlinkage long sys_getpgrp(void)
asmlinkage long sys_getsid(pid_t pid)
{
if (!pid)
- return process_session(current);
+ return task_session_vnr(current);
else {
int retval;
struct task_struct *p;
+ struct pid_namespace *ns;
- read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
+ ns = current->nsproxy->pid_ns;
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid_ns(pid, ns);
retval = -ESRCH;
if (p) {
retval = security_task_getsid(p);
if (!retval)
- retval = process_session(p);
+ retval = task_session_nr_ns(p, ns);
}
read_unlock(&tasklist_lock);
return retval;
@@ -1587,7 +1065,8 @@ asmlinkage long sys_setsid(void)
* session id and so the check will always fail and make it so
* init cannot successfully call setsid.
*/
- if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
+ if (session > 1 && find_task_by_pid_type_ns(PIDTYPE_PGID,
+ session, &init_pid_ns))
goto out;
group_leader->signal->leader = 1;
@@ -1597,7 +1076,7 @@ asmlinkage long sys_setsid(void)
group_leader->signal->tty = NULL;
spin_unlock(&group_leader->sighand->siglock);
- err = process_group(group_leader);
+ err = task_pgrp_vnr(group_leader);
out:
write_unlock_irq(&tasklist_lock);
return err;
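
The kernel/sys.c hunks above all follow one pattern: the global find_task_by_pid()/find_pid() lookups and the raw process_group()/process_session() accessors are replaced by pid-namespace-aware helpers, so a pid handed in by userspace is interpreted in the caller's namespace. A minimal sketch of that pattern (the helper name is invented; the calls are the ones used in the hunks):

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Illustrative helper: resolve a pid as seen by the calling process. */
static struct task_struct *example_lookup(pid_t vpid)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(vpid);    /* looked up in current's pid namespace */
        if (p)
                get_task_struct(p);     /* pin it past rcu_read_unlock() */
        rcu_read_unlock();

        /* The reverse direction is task_pid_vnr(p). */
        return p;                       /* caller drops it with put_task_struct() */
}
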
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index b0ec498a18d9..52c7a151e298 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -4,6 +4,10 @@
#include <asm/unistd.h>
+/* We can't #include <linux/syscalls.h> here,
+ but tell gcc not to warn with -Wmissing-prototypes */

+asmlinkage long sys_ni_syscall(void);
+
/*
* Non-implemented system calls get redirected here.
*/
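
The prototype added above only exists to silence -Wmissing-prototypes; the stub it declares is, in shape, nothing more than the following, with the rest of this file wiring optional entry points to it via cond_syscall():

/* Shape of the stub declared above: every unimplemented syscall fails. */
asmlinkage long sys_ni_syscall(void)
{
        return -ENOSYS;
}

/* Elsewhere in this file, optional syscalls are aliased to it, e.g.: */
cond_syscall(sys_swapon);
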
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 53a456ebf6d5..3b4efbe26445 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
-#include <linux/capability.h>
+#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
@@ -55,6 +55,8 @@
#include <asm/stacktrace.h>
#endif
+static int deprecated_sysctl_warning(struct __sysctl_args *args);
+
#if defined(CONFIG_SYSCTL)
/* External variables not in a header file. */
@@ -63,6 +65,7 @@ extern int print_fatal_signals;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
+extern int sysctl_oom_kill_allocating_task;
extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
@@ -79,6 +82,19 @@ extern int maps_protect;
extern int sysctl_stat_interval;
extern int audit_argv_kb;
+/* Constants used for minimum and maximum */
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+static int one = 1;
+static int sixty = 60;
+#endif
+
+#ifdef CONFIG_MMU
+static int two = 2;
+#endif
+
+static int zero;
+static int one_hundred = 100;
+
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
@@ -128,32 +144,29 @@ extern int max_lock_depth;
#ifdef CONFIG_SYSCTL_SYSCALL
static int parse_table(int __user *, int, void __user *, size_t __user *,
- void __user *, size_t, ctl_table *);
+ void __user *, size_t, struct ctl_table *);
#endif
#ifdef CONFIG_PROC_SYSCTL
-static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
+static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
-static ctl_table root_table[];
+static struct ctl_table root_table[];
static struct ctl_table_header root_table_header =
{ root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) };
-static ctl_table kern_table[];
-static ctl_table vm_table[];
-static ctl_table fs_table[];
-static ctl_table debug_table[];
-static ctl_table dev_table[];
-extern ctl_table random_table[];
-#ifdef CONFIG_UNIX98_PTYS
-extern ctl_table pty_table[];
-#endif
+static struct ctl_table kern_table[];
+static struct ctl_table vm_table[];
+static struct ctl_table fs_table[];
+static struct ctl_table debug_table[];
+static struct ctl_table dev_table[];
+extern struct ctl_table random_table[];
#ifdef CONFIG_INOTIFY_USER
-extern ctl_table inotify_table[];
+extern struct ctl_table inotify_table[];
#endif
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -165,7 +178,7 @@ extern int lock_stat;
/* The default sysctl tables: */
-static ctl_table root_table[] = {
+static struct ctl_table root_table[] = {
{
.ctl_name = CTL_KERN,
.procname = "kernel",
@@ -218,18 +231,15 @@ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
#endif
-static ctl_table kern_table[] = {
+static struct ctl_table kern_table[] = {
#ifdef CONFIG_SCHED_DEBUG
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_min_granularity_ns",
- .data = &sysctl_sched_min_granularity,
+ .procname = "sched_nr_latency",
+ .data = &sysctl_sched_nr_latency,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &min_sched_granularity_ns,
- .extra2 = &max_sched_granularity_ns,
+ .proc_handler = &proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
@@ -266,38 +276,24 @@ static ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_stat_granularity_ns",
- .data = &sysctl_sched_stat_granularity,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &min_wakeup_granularity_ns,
- .extra2 = &max_wakeup_granularity_ns,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sched_runtime_limit_ns",
- .data = &sysctl_sched_runtime_limit,
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &min_sched_granularity_ns,
- .extra2 = &max_sched_granularity_ns,
+ .proc_handler = &proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
+ .procname = "sched_features",
+ .data = &sysctl_sched_features,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_features",
- .data = &sysctl_sched_features,
+ .procname = "sched_migration_cost",
+ .data = &sysctl_sched_migration_cost,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = &proc_dointvec,
@@ -368,7 +364,6 @@ static ctl_table kern_table[] = {
},
#ifdef CONFIG_PROC_SYSCTL
{
- .ctl_name = KERN_TAINTED,
.procname = "tainted",
.data = &tainted,
.maxlen = sizeof(int),
@@ -376,14 +371,15 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec_taint,
},
#endif
+#ifdef CONFIG_SECURITY_CAPABILITIES
{
- .ctl_name = KERN_CAP_BSET,
.procname = "cap-bound",
.data = &cap_bset,
.maxlen = sizeof(kernel_cap_t),
.mode = 0600,
.proc_handler = &proc_dointvec_bset,
},
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -517,7 +513,6 @@ static ctl_table kern_table[] = {
#endif
#ifdef CONFIG_PROC_SYSCTL
{
- .ctl_name = KERN_CADPID,
.procname = "cad_pid",
.data = NULL,
.maxlen = sizeof (int),
@@ -539,14 +534,6 @@ static ctl_table kern_table[] = {
.mode = 0555,
.child = random_table,
},
-#ifdef CONFIG_UNIX98_PTYS
- {
- .ctl_name = KERN_PTY,
- .procname = "pty",
- .mode = 0555,
- .child = pty_table,
- },
-#endif
{
.ctl_name = KERN_OVERFLOWUID,
.procname = "overflowuid",
@@ -653,7 +640,6 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
- .ctl_name = KERN_NMI_WATCHDOG,
.procname = "nmi_watchdog",
.data = &nmi_watchdog_enabled,
.maxlen = sizeof (int),
@@ -709,7 +695,6 @@ static ctl_table kern_table[] = {
#endif
#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
{
- .ctl_name = KERN_ACPI_VIDEO_FLAGS,
.procname = "acpi_video_flags",
.data = &acpi_realmode_flags,
.maxlen = sizeof (unsigned long),
@@ -727,6 +712,19 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "softlockup_thresh",
+ .data = &softlockup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &sixty,
+ },
+#endif
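
The softlockup_thresh entry just added is the shape this patch pushes across the whole file: CTL_UNNUMBERED instead of a fixed binary number, proc_dointvec_minmax as the /proc handler, and extra1/extra2 pointing at one-element min/max vectors like the ones declared near the top. A sketch of an equivalent bounded entry for a hypothetical knob (all names invented):

#include <linux/sysctl.h>

/* Hypothetical bounded knob, mirroring the softlockup_thresh entry above. */
static int example_min = 1;
static int example_max = 60;
static int example_knob = 10;

static struct ctl_table example_table[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,       /* no binary sysctl(2) number */
                .procname       = "example_knob",       /* /proc/sys/.../example_knob */
                .data           = &example_knob,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
                .strategy       = &sysctl_intvec,
                .extra1         = &example_min,         /* clamp writes to [1, 60] */
                .extra2         = &example_max,
        },
        { .ctl_name = 0 }
};
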
#ifdef CONFIG_COMPAT
{
.ctl_name = KERN_COMPAT_LOG,
@@ -773,14 +771,7 @@ static ctl_table kern_table[] = {
{ .ctl_name = 0 }
};
-/* Constants for minimum and maximum testing in vm_table.
- We use these as one-element integer vectors. */
-static int zero;
-static int two = 2;
-static int one_hundred = 100;
-
-
-static ctl_table vm_table[] = {
+static struct ctl_table vm_table[] = {
{
.ctl_name = VM_OVERCOMMIT_MEMORY,
.procname = "overcommit_memory",
@@ -798,6 +789,14 @@ static ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "oom_kill_allocating_task",
+ .data = &sysctl_oom_kill_allocating_task,
+ .maxlen = sizeof(sysctl_oom_kill_allocating_task),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
@@ -830,13 +829,12 @@ static ctl_table vm_table[] = {
.data = &vm_dirty_ratio,
.maxlen = sizeof(vm_dirty_ratio),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = &dirty_ratio_handler,
.strategy = &sysctl_intvec,
.extra1 = &zero,
.extra2 = &one_hundred,
},
{
- .ctl_name = VM_DIRTY_WB_CS,
.procname = "dirty_writeback_centisecs",
.data = &dirty_writeback_interval,
.maxlen = sizeof(dirty_writeback_interval),
@@ -844,7 +842,6 @@ static ctl_table vm_table[] = {
.proc_handler = &dirty_writeback_centisecs_handler,
},
{
- .ctl_name = VM_DIRTY_EXPIRE_CS,
.procname = "dirty_expire_centisecs",
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
@@ -872,7 +869,6 @@ static ctl_table vm_table[] = {
},
#ifdef CONFIG_HUGETLB_PAGE
{
- .ctl_name = VM_HUGETLB_PAGES,
.procname = "nr_hugepages",
.data = &max_huge_pages,
.maxlen = sizeof(unsigned long),
@@ -897,6 +893,14 @@ static ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &hugetlb_treat_movable_handler,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hugetlb_dynamic_pool",
+ .data = &hugetlb_dynamic_pool,
+ .maxlen = sizeof(hugetlb_dynamic_pool),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
{
.ctl_name = VM_LOWMEM_RESERVE_RATIO,
@@ -1053,7 +1057,7 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_string,
},
#endif
-#if defined(CONFIG_X86_32) || \
+#if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \
(defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
{
.ctl_name = VM_VDSO_ENABLED,
@@ -1074,12 +1078,12 @@ static ctl_table vm_table[] = {
};
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
-static ctl_table binfmt_misc_table[] = {
+static struct ctl_table binfmt_misc_table[] = {
{ .ctl_name = 0 }
};
#endif
-static ctl_table fs_table[] = {
+static struct ctl_table fs_table[] = {
{
.ctl_name = FS_NRINODE,
.procname = "inode-nr",
@@ -1097,7 +1101,6 @@ static ctl_table fs_table[] = {
.proc_handler = &proc_dointvec,
},
{
- .ctl_name = FS_NRFILE,
.procname = "file-nr",
.data = &files_stat,
.maxlen = 3*sizeof(int),
@@ -1173,7 +1176,6 @@ static ctl_table fs_table[] = {
.extra2 = &two,
},
{
- .ctl_name = FS_AIO_NR,
.procname = "aio-nr",
.data = &aio_nr,
.maxlen = sizeof(aio_nr),
@@ -1181,7 +1183,6 @@ static ctl_table fs_table[] = {
.proc_handler = &proc_doulongvec_minmax,
},
{
- .ctl_name = FS_AIO_MAX_NR,
.procname = "aio-max-nr",
.data = &aio_max_nr,
.maxlen = sizeof(aio_max_nr),
@@ -1220,8 +1221,8 @@ static ctl_table fs_table[] = {
{ .ctl_name = 0 }
};
-static ctl_table debug_table[] = {
-#ifdef CONFIG_X86
+static struct ctl_table debug_table[] = {
+#if defined(CONFIG_X86) || defined(CONFIG_PPC)
{
.ctl_name = CTL_UNNUMBERED,
.procname = "exception-trace",
@@ -1234,7 +1235,7 @@ static ctl_table debug_table[] = {
{ .ctl_name = 0 }
};
-static ctl_table dev_table[] = {
+static struct ctl_table dev_table[] = {
{ .ctl_name = 0 }
};
@@ -1350,10 +1351,15 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
+ error = deprecated_sysctl_warning(&tmp);
+ if (error)
+ goto out;
+
lock_kernel();
error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp,
tmp.newval, tmp.newlen);
unlock_kernel();
+out:
return error;
}
#endif /* CONFIG_SYSCTL_SYSCALL */
@@ -1374,7 +1380,7 @@ static int test_perm(int mode, int op)
return -EACCES;
}
-int sysctl_perm(ctl_table *table, int op)
+int sysctl_perm(struct ctl_table *table, int op)
{
int error;
error = security_sysctl(table, op);
@@ -1387,7 +1393,7 @@ int sysctl_perm(ctl_table *table, int op)
static int parse_table(int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
- ctl_table *table)
+ struct ctl_table *table)
{
int n;
repeat:
@@ -1418,13 +1424,12 @@ repeat:
}
/* Perform the actual read/write of a sysctl table entry. */
-int do_sysctl_strategy (ctl_table *table,
+int do_sysctl_strategy (struct ctl_table *table,
int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
int op = 0, rc;
- size_t len;
if (oldval)
op |= 004;
@@ -1445,25 +1450,10 @@ int do_sysctl_strategy (ctl_table *table,
/* If there is no strategy routine, or if the strategy returns
* zero, proceed with automatic r/w */
if (table->data && table->maxlen) {
- if (oldval && oldlenp) {
- if (get_user(len, oldlenp))
- return -EFAULT;
- if (len) {
- if (len > table->maxlen)
- len = table->maxlen;
- if(copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if(put_user(len, oldlenp))
- return -EFAULT;
- }
- }
- if (newval && newlen) {
- len = newlen;
- if (len > table->maxlen)
- len = table->maxlen;
- if(copy_from_user(table->data, newval, len))
- return -EFAULT;
- }
+ rc = sysctl_data(table, name, nlen, oldval, oldlenp,
+ newval, newlen);
+ if (rc < 0)
+ return rc;
}
return 0;
}
@@ -1480,7 +1470,9 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
static __init int sysctl_init(void)
{
+ int err;
sysctl_set_parent(NULL, root_table);
+ err = sysctl_check_table(root_table);
return 0;
}
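
With sysctl_check_table() in place, both the boot-time pass above and register_sysctl_table() (see the hunk below) validate a hierarchy before accepting it, and a malformed table now makes registration return NULL. A minimal registration sketch under those rules; every identifier here is hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_vars[] = {
        {
                .ctl_name     = CTL_UNNUMBERED,
                .procname     = "value",
                .data         = &example_value,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static struct ctl_table example_root[] = {
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "example",                  /* /proc/sys/example/value */
                .mode     = 0555,
                .child    = example_vars,
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
        example_header = register_sysctl_table(example_root);
        /* NULL covers both allocation failure and a rejected table. */
        return example_header ? 0 : -EINVAL;
}

static void __exit example_sysctl_exit(void)
{
        unregister_sysctl_table(example_header);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");
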
@@ -1493,7 +1485,7 @@ core_initcall(sysctl_init);
* Register a sysctl table hierarchy. @table should be a filled in ctl_table
* array. An entry with a ctl_name of 0 terminates the table.
*
- * The members of the &ctl_table structure are used as follows:
+ * The members of the &struct ctl_table structure are used as follows:
*
* ctl_name - This is the numeric sysctl value used by sysctl(2). The number
* must be unique within that level of sysctl
@@ -1554,7 +1546,7 @@ core_initcall(sysctl_init);
* This routine returns %NULL on a failure to register, and a pointer
* to the table header on success.
*/
-struct ctl_table_header *register_sysctl_table(ctl_table * table)
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
struct ctl_table_header *tmp;
tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL);
@@ -1565,6 +1557,10 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table)
tmp->used = 0;
tmp->unregistering = NULL;
sysctl_set_parent(NULL, table);
+ if (sysctl_check_table(tmp->ctl_table)) {
+ kfree(tmp);
+ return NULL;
+ }
spin_lock(&sysctl_lock);
list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
spin_unlock(&sysctl_lock);
@@ -1588,7 +1584,7 @@ void unregister_sysctl_table(struct ctl_table_header * header)
}
#else /* !CONFIG_SYSCTL */
-struct ctl_table_header *register_sysctl_table(ctl_table * table)
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
return NULL;
}
@@ -1681,7 +1677,7 @@ static int _proc_do_string(void* data, int maxlen, int write,
*
* Returns 0 on success.
*/
-int proc_dostring(ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return _proc_do_string(table->data, table->maxlen, write, filp,
@@ -1708,7 +1704,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
return 0;
}
-static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
+static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
int write, struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
@@ -1818,7 +1814,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
#undef TMPBUFLEN
}
-static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
+static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(int *negp, unsigned long *lvalp, int *valp,
int write, void *data),
@@ -1842,7 +1838,7 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec(ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -1878,11 +1874,12 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
+#ifdef CONFIG_SECURITY_CAPABILITIES
/*
* init may raise the set.
*/
-
-int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
+
+int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
@@ -1891,15 +1888,16 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
return -EPERM;
}
- op = is_init(current) ? OP_SET : OP_AND;
+ op = is_global_init(current) ? OP_SET : OP_AND;
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
do_proc_dointvec_bset_conv,&op);
}
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
/*
* Taint values can only be increased
*/
-static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
+static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int op;
@@ -1958,7 +1956,7 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct do_proc_dointvec_minmax_conv_param param = {
@@ -1969,7 +1967,7 @@ int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
do_proc_dointvec_minmax_conv, &param);
}
-static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
+static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
@@ -2074,7 +2072,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
#undef TMPBUFLEN
}
-static int do_proc_doulongvec_minmax(ctl_table *table, int write,
+static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
@@ -2102,7 +2100,7 @@ static int do_proc_doulongvec_minmax(ctl_table *table, int write,
*
* Returns 0 on success.
*/
-int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
@@ -2126,7 +2124,7 @@ int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -2219,7 +2217,7 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
*
* Returns 0 on success.
*/
-int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2242,7 +2240,7 @@ int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2266,21 +2264,21 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
*
* Returns 0 on success.
*/
-int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
do_proc_dointvec_ms_jiffies_conv, NULL);
}
-static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
+static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct pid *new_pid;
pid_t tmp;
int r;
- tmp = pid_nr(cad_pid);
+ tmp = pid_nr_ns(cad_pid, current->nsproxy->pid_ns);
r = __do_proc_dointvec(&tmp, table, write, filp, buffer,
lenp, ppos, NULL, NULL);
@@ -2297,55 +2295,55 @@ static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
#else /* CONFIG_PROC_FS */
-int proc_dostring(ctl_table *table, int write, struct file *filp,
+int proc_dostring(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec(ctl_table *table, int write, struct file *filp,
+int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp,
+int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
+int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
-int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos)
@@ -2362,8 +2360,42 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
* General sysctl support routines
*/
+/* The generic sysctl data routine (used if no strategy routine supplied) */
+int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen)
+{
+ size_t len;
+
+ /* Bail out if there is no variable to read or write */
+ if (!table->data || !table->maxlen)
+ return -ENOTDIR;
+
+ if (oldval && oldlenp) {
+ if (get_user(len, oldlenp))
+ return -EFAULT;
+ if (len) {
+ if (len > table->maxlen)
+ len = table->maxlen;
+ if (copy_to_user(oldval, table->data, len))
+ return -EFAULT;
+ if (put_user(len, oldlenp))
+ return -EFAULT;
+ }
+ }
+
+ if (newval && newlen) {
+ if (newlen > table->maxlen)
+ newlen = table->maxlen;
+
+ if (copy_from_user(table->data, newval, newlen))
+ return -EFAULT;
+ }
+ return 1;
+}
+
/* The generic string strategy routine: */
-int sysctl_string(ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2409,7 +2441,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
* are between the minimum and maximum values given in the arrays
* table->extra1 and table->extra2, respectively.
*/
-int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2445,7 +2477,7 @@ int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2479,7 +2511,7 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
}
/* Strategy function to convert jiffies to seconds */
-int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2519,59 +2551,50 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
{
- static int msg_count;
struct __sysctl_args tmp;
- int name[CTL_MAXNAME];
- int i;
+ int error;
- /* Read in the sysctl name for better debug message logging */
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
- if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME)
- return -ENOTDIR;
- for (i = 0; i < tmp.nlen; i++)
- if (get_user(name[i], tmp.name + i))
- return -EFAULT;
- /* Ignore accesses to kernel.version */
- if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
- goto out;
+ error = deprecated_sysctl_warning(&tmp);
- if (msg_count < 5) {
- msg_count++;
- printk(KERN_INFO
- "warning: process `%s' used the removed sysctl "
- "system call with ", current->comm);
- for (i = 0; i < tmp.nlen; i++)
- printk("%d.", name[i]);
- printk("\n");
- }
-out:
+ /* If no error reading the parameters then just -ENOSYS ... */
+ if (!error)
+ error = -ENOSYS;
+
+ return error;
+}
+
+int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen)
+{
return -ENOSYS;
}
-int sysctl_string(ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen)
{
@@ -2580,6 +2603,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
#endif /* CONFIG_SYSCTL_SYSCALL */
+static int deprecated_sysctl_warning(struct __sysctl_args *args)
+{
+ static int msg_count;
+ int name[CTL_MAXNAME];
+ int i;
+
+ /* Read in the sysctl name for better debug message logging */
+ for (i = 0; i < args->nlen; i++)
+ if (get_user(name[i], args->name + i))
+ return -EFAULT;
+
+ /* Ignore accesses to kernel.version */
+ if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
+ return 0;
+
+ if (msg_count < 5) {
+ msg_count++;
+ printk(KERN_INFO
+ "warning: process `%s' used the deprecated sysctl "
+ "system call with ", current->comm);
+ for (i = 0; i < args->nlen; i++)
+ printk("%d.", name[i]);
+ printk("\n");
+ }
+ return 0;
+}
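
deprecated_sysctl_warning() fires for the old numeric sysctl(2) interface (kernel.version lookups excepted). A userspace sketch of the kind of call that now logs the warning, and that simply fails with ENOSYS once CONFIG_SYSCTL_SYSCALL is off; this assumes the legacy glibc wrapper and headers of that era, and the exact header contents vary by libc:

#include <stdio.h>
#include <sys/sysctl.h>         /* legacy glibc wrapper; pulls in the CTL_* names */

int main(void)
{
        int name[] = { CTL_KERN, KERN_OSTYPE }; /* "kernel.ostype" by number */
        char buf[64];
        size_t len = sizeof(buf);

        /* Triggers the "used the deprecated sysctl system call" printk. */
        if (sysctl(name, 2, buf, &len, NULL, 0) < 0) {
                perror("sysctl");
                return 1;
        }
        printf("%.*s\n", (int)len, buf);
        return 0;
}
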
+
/*
* No sense putting this after each symbol definition, twice,
* exception granted :-)
@@ -2597,4 +2647,5 @@ EXPORT_SYMBOL(sysctl_intvec);
EXPORT_SYMBOL(sysctl_jiffies);
EXPORT_SYMBOL(sysctl_ms_jiffies);
EXPORT_SYMBOL(sysctl_string);
+EXPORT_SYMBOL(sysctl_data);
EXPORT_SYMBOL(unregister_sysctl_table);
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
new file mode 100644
index 000000000000..3c9ef5a7d575
--- /dev/null
+++ b/kernel/sysctl_check.c
@@ -0,0 +1,1588 @@
+#include <linux/stat.h>
+#include <linux/sysctl.h>
+#include "../arch/s390/appldata/appldata.h"
+#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+#include <linux/sunrpc/debug.h>
+#include <linux/string.h>
+#include <net/ip_vs.h>
+
+struct trans_ctl_table {
+ int ctl_name;
+ const char *procname;
+ struct trans_ctl_table *child;
+};
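
Each trans_ctl_table row pairs a legacy binary ctl_name with the procname it is expected to carry, and child points at the table for the next level of the hierarchy (rows with a zero ctl_name but a non-NULL child act as per-instance catch-alls). A deliberately simplified lookup, purely to illustrate how such a table is walked; it is not the checking code this file actually adds:

/* Illustrative only: find the row for a binary ctl_name at one level.
 * Relies on struct trans_ctl_table as defined just above. */
static struct trans_ctl_table *trans_lookup(struct trans_ctl_table *table,
                                            int ctl_name)
{
        struct trans_ctl_table *t;

        /* The terminator is an all-zero entry. */
        for (t = table; t->ctl_name || t->procname || t->child; t++)
                if (t->ctl_name == ctl_name)
                        return t;       /* t->child, if set, is the next level */

        return NULL;                    /* unknown or deliberately unused number */
}
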
+
+static struct trans_ctl_table trans_random_table[] = {
+ { RANDOM_POOLSIZE, "poolsize" },
+ { RANDOM_ENTROPY_COUNT, "entropy_avail" },
+ { RANDOM_READ_THRESH, "read_wakeup_threshold" },
+ { RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
+ { RANDOM_BOOT_ID, "boot_id" },
+ { RANDOM_UUID, "uuid" },
+ {}
+};
+
+static struct trans_ctl_table trans_pty_table[] = {
+ { PTY_MAX, "max" },
+ { PTY_NR, "nr" },
+ {}
+};
+
+static struct trans_ctl_table trans_kern_table[] = {
+ { KERN_OSTYPE, "ostype" },
+ { KERN_OSRELEASE, "osrelease" },
+ /* KERN_OSREV not used */
+ { KERN_VERSION, "version" },
+ /* KERN_SECUREMASK not used */
+ /* KERN_PROF not used */
+ { KERN_NODENAME, "hostname" },
+ { KERN_DOMAINNAME, "domainname" },
+
+#ifdef CONFIG_SECURITY_CAPABILITIES
+ { KERN_CAP_BSET, "cap-bound" },
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
+
+ { KERN_PANIC, "panic" },
+ { KERN_REALROOTDEV, "real-root-dev" },
+
+ { KERN_SPARC_REBOOT, "reboot-cmd" },
+ { KERN_CTLALTDEL, "ctrl-alt-del" },
+ { KERN_PRINTK, "printk" },
+
+ /* KERN_NAMETRANS not used */
+ /* KERN_PPC_HTABRECLAIM not used */
+ /* KERN_PPC_ZEROPAGED not used */
+ { KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
+
+ { KERN_MODPROBE, "modprobe" },
+ { KERN_SG_BIG_BUFF, "sg-big-buff" },
+ { KERN_ACCT, "acct" },
+ { KERN_PPC_L2CR, "l2cr" },
+
+ /* KERN_RTSIGNR not used */
+ /* KERN_RTSIGMAX not used */
+
+ { KERN_SHMMAX, "shmmax" },
+ { KERN_MSGMAX, "msgmax" },
+ { KERN_MSGMNB, "msgmnb" },
+ /* KERN_MSGPOOL not used*/
+ { KERN_SYSRQ, "sysrq" },
+ { KERN_MAX_THREADS, "threads-max" },
+ { KERN_RANDOM, "random", trans_random_table },
+ { KERN_SHMALL, "shmall" },
+ { KERN_MSGMNI, "msgmni" },
+ { KERN_SEM, "sem" },
+ { KERN_SPARC_STOP_A, "stop-a" },
+ { KERN_SHMMNI, "shmmni" },
+
+ { KERN_OVERFLOWUID, "overflowuid" },
+ { KERN_OVERFLOWGID, "overflowgid" },
+
+ { KERN_HOTPLUG, "hotplug", },
+ { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
+
+ { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
+ { KERN_CORE_USES_PID, "core_uses_pid" },
+ { KERN_TAINTED, "tainted" },
+ { KERN_CADPID, "cad_pid" },
+ { KERN_PIDMAX, "pid_max" },
+ { KERN_CORE_PATTERN, "core_pattern" },
+ { KERN_PANIC_ON_OOPS, "panic_on_oops" },
+ { KERN_HPPA_PWRSW, "soft-power" },
+ { KERN_HPPA_UNALIGNED, "unaligned-trap" },
+
+ { KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
+ { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
+
+ { KERN_PTY, "pty", trans_pty_table },
+ { KERN_NGROUPS_MAX, "ngroups_max" },
+ { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+ { KERN_HZ_TIMER, "hz_timer" },
+ { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
+ { KERN_BOOTLOADER_TYPE, "bootloader_type" },
+ { KERN_RANDOMIZE, "randomize_va_space" },
+
+ { KERN_SPIN_RETRY, "spin_retry" },
+ { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" },
+ { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
+ { KERN_COMPAT_LOG, "compat-log" },
+ { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
+ { KERN_NMI_WATCHDOG, "nmi_watchdog" },
+ { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
+ {}
+};
+
+static struct trans_ctl_table trans_vm_table[] = {
+ { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
+ { VM_PAGE_CLUSTER, "page-cluster" },
+ { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
+ { VM_DIRTY_RATIO, "dirty_ratio" },
+ { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" },
+ { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" },
+ { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
+ { VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
+ /* VM_PAGEBUF unused */
+ { VM_HUGETLB_PAGES, "nr_hugepages" },
+ { VM_SWAPPINESS, "swappiness" },
+ { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
+ { VM_MIN_FREE_KBYTES, "min_free_kbytes" },
+ { VM_MAX_MAP_COUNT, "max_map_count" },
+ { VM_LAPTOP_MODE, "laptop_mode" },
+ { VM_BLOCK_DUMP, "block_dump" },
+ { VM_HUGETLB_GROUP, "hugetlb_shm_group" },
+ { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
+ { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
+ /* VM_SWAP_TOKEN_TIMEOUT unused */
+ { VM_DROP_PAGECACHE, "drop_caches" },
+ { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
+ { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
+ { VM_MIN_UNMAPPED, "min_unmapped_ratio" },
+ { VM_PANIC_ON_OOM, "panic_on_oom" },
+ { VM_VDSO_ENABLED, "vdso_enabled" },
+ { VM_MIN_SLAB, "min_slab_ratio" },
+ { VM_CMM_PAGES, "cmm_pages" },
+ { VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
+ { VM_CMM_TIMEOUT, "cmm_timeout" },
+
+ {}
+};
+
+static struct trans_ctl_table trans_net_core_table[] = {
+ { NET_CORE_WMEM_MAX, "wmem_max" },
+ { NET_CORE_RMEM_MAX, "rmem_max" },
+ { NET_CORE_WMEM_DEFAULT, "wmem_default" },
+ { NET_CORE_RMEM_DEFAULT, "rmem_default" },
+ /* NET_CORE_DESTROY_DELAY unused */
+ { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
+ /* NET_CORE_FASTROUTE unused */
+ { NET_CORE_MSG_COST, "message_cost" },
+ { NET_CORE_MSG_BURST, "message_burst" },
+ { NET_CORE_OPTMEM_MAX, "optmem_max" },
+ /* NET_CORE_HOT_LIST_LENGTH unused */
+ /* NET_CORE_DIVERT_VERSION unused */
+ /* NET_CORE_NO_CONG_THRESH unused */
+ /* NET_CORE_NO_CONG unused */
+ /* NET_CORE_LO_CONG unused */
+ /* NET_CORE_MOD_CONG unused */
+ { NET_CORE_DEV_WEIGHT, "dev_weight" },
+ { NET_CORE_SOMAXCONN, "somaxconn" },
+ { NET_CORE_BUDGET, "netdev_budget" },
+ { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
+ { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
+ { NET_CORE_WARNINGS, "warnings" },
+ {},
+};
+
+static struct trans_ctl_table trans_net_unix_table[] = {
+ /* NET_UNIX_DESTROY_DELAY unused */
+ /* NET_UNIX_DELETE_DELAY unused */
+ { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_route_table[] = {
+ { NET_IPV4_ROUTE_FLUSH, "flush" },
+ { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
+ { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
+ { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
+ { NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
+ { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
+ { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
+ { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
+ { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
+ { NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
+ { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
+ { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
+ { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
+ { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
+ { NET_IPV4_CONF_FORWARDING, "forwarding" },
+ { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
+
+ { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
+ { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
+ { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
+ { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
+ { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
+ { NET_IPV4_CONF_RP_FILTER, "rp_filter" },
+ { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
+ { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
+ { NET_IPV4_CONF_TAG, "tag" },
+ { NET_IPV4_CONF_ARPFILTER, "arp_filter" },
+ { NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
+ { NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
+ { NET_IPV4_CONF_NOPOLICY, "disable_policy" },
+ { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
+
+ { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
+ { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
+ { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
+ { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
+ { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
+ { 0, NULL, trans_net_ipv4_conf_vars_table },
+ {}
+};
+
+
+static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
+ { NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
+ { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
+ { NET_IPV4_VS_AMDROPRATE, "am_droprate" },
+ { NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
+ { NET_IPV4_VS_DROP_PACKET, "drop_packet" },
+ { NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
+ { NET_IPV4_VS_TO_ES, "timeout_established" },
+ { NET_IPV4_VS_TO_SS, "timeout_synsent" },
+ { NET_IPV4_VS_TO_SR, "timeout_synrecv" },
+ { NET_IPV4_VS_TO_FW, "timeout_finwait" },
+ { NET_IPV4_VS_TO_TW, "timeout_timewait" },
+ { NET_IPV4_VS_TO_CL, "timeout_close" },
+ { NET_IPV4_VS_TO_CW, "timeout_closewait" },
+ { NET_IPV4_VS_TO_LA, "timeout_lastack" },
+ { NET_IPV4_VS_TO_LI, "timeout_listen" },
+ { NET_IPV4_VS_TO_SA, "timeout_synack" },
+ { NET_IPV4_VS_TO_UDP, "timeout_udp" },
+ { NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
+ { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
+ { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
+ { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
+ { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
+ { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
+ { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
+ { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_neigh_vars_table[] = {
+ { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
+ { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
+ { NET_NEIGH_APP_SOLICIT, "app_solicit" },
+ { NET_NEIGH_RETRANS_TIME, "retrans_time" },
+ { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
+ { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
+ { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
+ { NET_NEIGH_UNRES_QLEN, "unres_qlen" },
+ { NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
+ { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" },
+ { NET_NEIGH_PROXY_DELAY, "proxy_delay" },
+ { NET_NEIGH_LOCKTIME, "locktime" },
+ { NET_NEIGH_GC_INTERVAL, "gc_interval" },
+ { NET_NEIGH_GC_THRESH1, "gc_thresh1" },
+ { NET_NEIGH_GC_THRESH2, "gc_thresh2" },
+ { NET_NEIGH_GC_THRESH3, "gc_thresh3" },
+ { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
+ { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_neigh_table[] = {
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
+ { 0, NULL, trans_net_neigh_vars_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
+ { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
+
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" },
+
+ { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" },
+ { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" },
+ { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" },
+ { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout" },
+
+ { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
+ { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
+ { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" },
+ { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
+ { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
+ { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
+
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" },
+ { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" },
+
+ { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
+ { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv4_table[] = {
+ { NET_IPV4_FORWARD, "ip_forward" },
+ { NET_IPV4_DYNADDR, "ip_dynaddr" },
+
+ { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table },
+ { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table },
+ { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
+ /* NET_IPV4_FIB_HASH unused */
+ { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
+ { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
+
+ { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
+ { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
+ { NET_IPV4_TCP_SACK, "tcp_sack" },
+ { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
+ { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
+ /* NET_IPV4_AUTOCONFIG unused */
+ { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
+ { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
+ { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
+ { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
+ { NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
+ /* NET_IPV4_TCP_MAX_KA_PROBES unused */
+ { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
+ { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
+ { NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
+ { NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
+ { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
+ /* NET_IPV4_IP_MASQ_DEBUG unused */
+ { NET_TCP_SYNCOOKIES, "tcp_syncookies" },
+ { NET_TCP_STDURG, "tcp_stdurg" },
+ { NET_TCP_RFC1337, "tcp_rfc1337" },
+ /* NET_TCP_SYN_TAILDROP unused */
+ { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
+ { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
+ { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
+ { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
+ /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
+ /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
+ /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
+ /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
+ /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
+ { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
+ { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
+ { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
+ /* NET_IPV4_ALWAYS_DEFRAG unused */
+ { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
+ { NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
+ { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
+ { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
+ { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
+ { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
+ { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
+ { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
+ { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
+ { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
+ { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
+ { NET_TCP_FACK, "tcp_fack" },
+ { NET_TCP_REORDERING, "tcp_reordering" },
+ { NET_TCP_ECN, "tcp_ecn" },
+ { NET_TCP_DSACK, "tcp_dsack" },
+ { NET_TCP_MEM, "tcp_mem" },
+ { NET_TCP_WMEM, "tcp_wmem" },
+ { NET_TCP_RMEM, "tcp_rmem" },
+ { NET_TCP_APP_WIN, "tcp_app_win" },
+ { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
+ { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
+ { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
+ { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
+ { NET_TCP_TW_REUSE, "tcp_tw_reuse" },
+ { NET_TCP_FRTO, "tcp_frto" },
+ { NET_TCP_LOW_LATENCY, "tcp_low_latency" },
+ { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
+ { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
+ { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
+ /* NET_TCP_DEFAULT_WIN_SCALE unused */
+ { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
+ { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
+ /* NET_TCP_BIC_BETA unused */
+ { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
+ { NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
+ { NET_TCP_ABC, "tcp_abc" },
+ { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" },
+ { NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
+ { NET_TCP_BASE_MSS, "tcp_base_mss" },
+ { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
+ { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
+ { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
+ { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
+ { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
+ { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
+ { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
+ { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" },
+ { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
+ { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
+ { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
+ { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipx_table[] = {
+ { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
+ /* NET_IPX_FORWARDING unused */
+ {}
+};
+
+static struct trans_ctl_table trans_net_atalk_table[] = {
+ { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
+ { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
+ { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
+ { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
+ {},
+};
+
+static struct trans_ctl_table trans_net_netrom_table[] = {
+ { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
+ { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
+ { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
+ { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
+ { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
+ { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
+ { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
+ { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
+ { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
+ { NET_NETROM_ROUTING_CONTROL, "routing_control" },
+ { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
+ { NET_NETROM_RESET, "reset" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ax25_table[] = {
+ { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
+ { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
+ { NET_AX25_BACKOFF_TYPE, "backoff_type" },
+ { NET_AX25_CONNECT_MODE, "connect_mode" },
+ { NET_AX25_STANDARD_WINDOW, "standard_window_size" },
+ { NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
+ { NET_AX25_T1_TIMEOUT, "t1_timeout" },
+ { NET_AX25_T2_TIMEOUT, "t2_timeout" },
+ { NET_AX25_T3_TIMEOUT, "t3_timeout" },
+ { NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
+ { NET_AX25_N2, "maximum_retry_count" },
+ { NET_AX25_PACLEN, "maximum_packet_length" },
+ { NET_AX25_PROTOCOL, "protocol" },
+ { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_bridge_table[] = {
+ { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
+ { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
+ { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
+ { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" },
+ { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_rose_table[] = {
+ { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
+ { NET_ROSE_ROUTING_CONTROL, "routing_control" },
+ { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
+ { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
+ { NET_ROSE_WINDOW_SIZE, "window_size" },
+ { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
+ { NET_IPV6_FORWARDING, "forwarding" },
+ { NET_IPV6_HOP_LIMIT, "hop_limit" },
+ { NET_IPV6_MTU, "mtu" },
+ { NET_IPV6_ACCEPT_RA, "accept_ra" },
+ { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
+ { NET_IPV6_AUTOCONF, "autoconf" },
+ { NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
+ { NET_IPV6_RTR_SOLICITS, "router_solicitations" },
+ { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
+ { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
+ { NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
+ { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
+ { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
+ { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
+ { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
+ { NET_IPV6_MAX_ADDRESSES, "max_addresses" },
+ { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
+ { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
+ { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
+ { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
+ { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
+ { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
+ { NET_IPV6_PROXY_NDP, "proxy_ndp" },
+ { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_conf_table[] = {
+ { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
+ { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
+ { 0, NULL, trans_net_ipv6_conf_var_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_route_table[] = {
+ { NET_IPV6_ROUTE_FLUSH, "flush" },
+ { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
+ { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
+ { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
+ { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
+ { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
+ { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
+ { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
+ { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
+ { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
+ { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_ipv6_table[] = {
+ { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
+ { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
+ { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
+ { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table },
+ { NET_IPV6_BINDV6ONLY, "bindv6only" },
+ { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
+ { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
+ { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
+ { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
+ { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
+ { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_x25_table[] = {
+ { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
+ { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
+ { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
+ { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
+ { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
+ { NET_X25_FORWARD, "x25_forward" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_tr_table[] = {
+ { NET_TR_RIF_TIMEOUT, "rif_timeout" },
+ {}
+};
+
+
+static struct trans_ctl_table trans_net_decnet_conf_vars[] = {
+ { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
+ { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
+ { NET_DECNET_CONF_DEV_T2, "t2" },
+ { NET_DECNET_CONF_DEV_T3, "t3" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_decnet_conf[] = {
+ { 0, NULL, trans_net_decnet_conf_vars },
+ {}
+};
+
+static struct trans_ctl_table trans_net_decnet_table[] = {
+ { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
+ { NET_DECNET_NODE_ADDRESS, "node_address" },
+ { NET_DECNET_NODE_NAME, "node_name" },
+ { NET_DECNET_DEFAULT_DEVICE, "default_device" },
+ { NET_DECNET_TIME_WAIT, "time_wait" },
+ { NET_DECNET_DN_COUNT, "dn_count" },
+ { NET_DECNET_DI_COUNT, "di_count" },
+ { NET_DECNET_DR_COUNT, "dr_count" },
+ { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
+ { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
+ { NET_DECNET_MEM, "decnet_mem" },
+ { NET_DECNET_RMEM, "decnet_rmem" },
+ { NET_DECNET_WMEM, "decnet_wmem" },
+ { NET_DECNET_DEBUG_LEVEL, "debug" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_sctp_table[] = {
+ { NET_SCTP_RTO_INITIAL, "rto_initial" },
+ { NET_SCTP_RTO_MIN, "rto_min" },
+ { NET_SCTP_RTO_MAX, "rto_max" },
+ { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
+ { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
+ { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
+ { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
+ { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
+ { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
+ { NET_SCTP_HB_INTERVAL, "hb_interval" },
+ { NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
+ { NET_SCTP_MAX_BURST, "max_burst" },
+ { NET_SCTP_ADDIP_ENABLE, "addip_enable" },
+ { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
+ { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
+ { NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
+ { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
+ { NET_LLC2_ACK_TIMEOUT, "ack" },
+ { NET_LLC2_P_TIMEOUT, "p" },
+ { NET_LLC2_REJ_TIMEOUT, "rej" },
+ { NET_LLC2_BUSY_TIMEOUT, "busy" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_station_table[] = {
+ { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_llc2_table[] = {
+ { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_llc_table[] = {
+ { NET_LLC2, "llc2", trans_net_llc_llc2_table },
+ { NET_LLC_STATION, "station", trans_net_llc_station_table },
+ {}
+};
+
+static struct trans_ctl_table trans_net_netfilter_table[] = {
+ { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" },
+ { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" },
+ { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" },
+ { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" },
+ { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" },
+ { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
+ { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
+ { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" },
+ { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
+ { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
+ { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" },
+ { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" },
+ { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
+ { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" },
+ { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" },
+ { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
+ { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
+ { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
+
+ {}
+};
+
+static struct trans_ctl_table trans_net_dccp_table[] = {
+ { NET_DCCP_DEFAULT, "default" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_irda_table[] = {
+ { NET_IRDA_DISCOVERY, "discovery" },
+ { NET_IRDA_DEVNAME, "devname" },
+ { NET_IRDA_DEBUG, "debug" },
+ { NET_IRDA_FAST_POLL, "fast_poll_increase" },
+ { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
+ { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
+ { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
+ { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
+ { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
+ { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
+ { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
+ { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
+ { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
+ { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
+ {}
+};
+
+static struct trans_ctl_table trans_net_table[] = {
+ { NET_CORE, "core", trans_net_core_table },
+ /* NET_ETHER not used */
+ /* NET_802 not used */
+ { NET_UNIX, "unix", trans_net_unix_table },
+ { NET_IPV4, "ipv4", trans_net_ipv4_table },
+ { NET_IPX, "ipx", trans_net_ipx_table },
+ { NET_ATALK, "atalk", trans_net_atalk_table },
+ { NET_NETROM, "netrom", trans_net_netrom_table },
+ { NET_AX25, "ax25", trans_net_ax25_table },
+ { NET_BRIDGE, "bridge", trans_net_bridge_table },
+ { NET_ROSE, "rose", trans_net_rose_table },
+ { NET_IPV6, "ipv6", trans_net_ipv6_table },
+ { NET_X25, "x25", trans_net_x25_table },
+ { NET_TR, "tr", trans_net_tr_table },
+ { NET_DECNET, "decnet", trans_net_decnet_table },
+ /* NET_ECONET not used */
+ { NET_SCTP, "sctp", trans_net_sctp_table },
+ { NET_LLC, "llc", trans_net_llc_table },
+ { NET_NETFILTER, "netfilter", trans_net_netfilter_table },
+ { NET_DCCP, "dccp", trans_net_dccp_table },
+ { NET_IRDA, "irda", trans_net_irda_table },
+ { 2089, "nf_conntrack_max" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_quota_table[] = {
+ { FS_DQ_LOOKUPS, "lookups" },
+ { FS_DQ_DROPS, "drops" },
+ { FS_DQ_READS, "reads" },
+ { FS_DQ_WRITES, "writes" },
+ { FS_DQ_CACHE_HITS, "cache_hits" },
+ { FS_DQ_ALLOCATED, "allocated_dquots" },
+ { FS_DQ_FREE, "free_dquots" },
+ { FS_DQ_SYNCS, "syncs" },
+ { FS_DQ_WARNINGS, "warnings" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_xfs_table[] = {
+ { XFS_RESTRICT_CHOWN, "restrict_chown" },
+ { XFS_SGID_INHERIT, "irix_sgid_inherit" },
+ { XFS_SYMLINK_MODE, "irix_symlink_mode" },
+ { XFS_PANIC_MASK, "panic_mask" },
+
+ { XFS_ERRLEVEL, "error_level" },
+ { XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
+ { XFS_INHERIT_SYNC, "inherit_sync" },
+ { XFS_INHERIT_NODUMP, "inherit_nodump" },
+ { XFS_INHERIT_NOATIME, "inherit_noatime" },
+ { XFS_BUF_TIMER, "xfsbufd_centisecs" },
+ { XFS_BUF_AGE, "age_buffer_centisecs" },
+ { XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
+ { XFS_ROTORSTEP, "rotorstep" },
+ { XFS_INHERIT_NODFRG, "inherit_nodefrag" },
+ { XFS_FILESTREAM_TIMER, "filestream_centisecs" },
+ { XFS_STATS_CLEAR, "stats_clear" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
+ { 1, "hb_ctl_path" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_ocfs2_table[] = {
+ { 1, "nm", trans_fs_ocfs2_nm_table },
+ {}
+};
+
+static struct trans_ctl_table trans_inotify_table[] = {
+ { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
+ { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
+ { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
+ {}
+};
+
+static struct trans_ctl_table trans_fs_table[] = {
+ { FS_NRINODE, "inode-nr" },
+ { FS_STATINODE, "inode-state" },
+ /* FS_MAXINODE unused */
+ /* FS_NRDQUOT unused */
+ /* FS_MAXDQUOT unused */
+ { FS_NRFILE, "file-nr" },
+ { FS_MAXFILE, "file-max" },
+ { FS_DENTRY, "dentry-state" },
+ /* FS_NRSUPER unused */
+ /* FS_MAXUPSER unused */
+ { FS_OVERFLOWUID, "overflowuid" },
+ { FS_OVERFLOWGID, "overflowgid" },
+ { FS_LEASES, "leases-enable" },
+ { FS_DIR_NOTIFY, "dir-notify-enable" },
+ { FS_LEASE_TIME, "lease-break-time" },
+ { FS_DQSTATS, "quota", trans_fs_quota_table },
+ { FS_XFS, "xfs", trans_fs_xfs_table },
+ { FS_AIO_NR, "aio-nr" },
+ { FS_AIO_MAX_NR, "aio-max-nr" },
+ { FS_INOTIFY, "inotify", trans_inotify_table },
+ { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table },
+ { KERN_SETUID_DUMPABLE, "suid_dumpable" },
+ {}
+};
+
+static struct trans_ctl_table trans_debug_table[] = {
+ {}
+};
+
+static struct trans_ctl_table trans_cdrom_table[] = {
+ { DEV_CDROM_INFO, "info" },
+ { DEV_CDROM_AUTOCLOSE, "autoclose" },
+ { DEV_CDROM_AUTOEJECT, "autoeject" },
+ { DEV_CDROM_DEBUG, "debug" },
+ { DEV_CDROM_LOCK, "lock" },
+ { DEV_CDROM_CHECK_MEDIA, "check_media" },
+ {}
+};
+
+static struct trans_ctl_table trans_ipmi_table[] = {
+ { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
+ {}
+};
+
+static struct trans_ctl_table trans_mac_hid_files[] = {
+ /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
+ /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
+ { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
+ { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
+ { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
+ /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
+ {}
+};
+
+static struct trans_ctl_table trans_raid_table[] = {
+ { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
+ { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
+ {}
+};
+
+static struct trans_ctl_table trans_scsi_table[] = {
+ { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_default_table[] = {
+ { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
+ { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_device_table[] = {
+ { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_devices_table[] = {
+ { DEV_PARPORT_DEVICES_ACTIVE, "active" },
+ { 0, NULL, trans_parport_device_table },
+ {}
+};
+
+static struct trans_ctl_table trans_parport_parport_table[] = {
+ { DEV_PARPORT_SPINTIME, "spintime" },
+ { DEV_PARPORT_BASE_ADDR, "base-addr" },
+ { DEV_PARPORT_IRQ, "irq" },
+ { DEV_PARPORT_DMA, "dma" },
+ { DEV_PARPORT_MODES, "modes" },
+ { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table },
+ { DEV_PARPORT_AUTOPROBE, "autoprobe" },
+ { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" },
+ { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" },
+ { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" },
+ { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
+ {}
+};
+static struct trans_ctl_table trans_parport_table[] = {
+ { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
+ { 0, NULL, trans_parport_parport_table },
+ {}
+};
+
+static struct trans_ctl_table trans_dev_table[] = {
+ { DEV_CDROM, "cdrom", trans_cdrom_table },
+ /* DEV_HWMON unused */
+ { DEV_PARPORT, "parport", trans_parport_table },
+ { DEV_RAID, "raid", trans_raid_table },
+ { DEV_MAC_HID, "mac_hid", trans_mac_hid_files },
+ { DEV_SCSI, "scsi", trans_scsi_table },
+ { DEV_IPMI, "ipmi", trans_ipmi_table },
+ {}
+};
+
+static struct trans_ctl_table trans_bus_isa_table[] = {
+ { BUS_ISA_MEM_BASE, "membase" },
+ { BUS_ISA_PORT_BASE, "portbase" },
+ { BUS_ISA_PORT_SHIFT, "portshift" },
+ {}
+};
+
+static struct trans_ctl_table trans_bus_table[] = {
+ { CTL_BUS_ISA, "isa", trans_bus_isa_table },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table0[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan0-txRing" },
+ { 151, "arlan0-rxRing" },
+ { 152, "arlan0-18" },
+ { 153, "arlan0-ring" },
+ { 154, "arlan0-shm-cpy" },
+ { 155, "config0" },
+ { 156, "reset0" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table1[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan1-txRing" },
+ { 151, "arlan1-rxRing" },
+ { 152, "arlan1-18" },
+ { 153, "arlan1-ring" },
+ { 154, "arlan1-shm-cpy" },
+ { 155, "config1" },
+ { 156, "reset1" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table2[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan2-txRing" },
+ { 151, "arlan2-rxRing" },
+ { 152, "arlan2-18" },
+ { 153, "arlan2-ring" },
+ { 154, "arlan2-shm-cpy" },
+ { 155, "config2" },
+ { 156, "reset2" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_conf_table3[] = {
+ { 1, "spreadingCode" },
+ { 2, "channelNumber" },
+ { 3, "scramblingDisable" },
+ { 4, "txAttenuation" },
+ { 5, "systemId" },
+ { 6, "maxDatagramSize" },
+ { 7, "maxFrameSize" },
+ { 8, "maxRetries" },
+ { 9, "receiveMode" },
+ { 10, "priority" },
+ { 11, "rootOrRepeater" },
+ { 12, "SID" },
+ { 13, "registrationMode" },
+ { 14, "registrationFill" },
+ { 15, "localTalkAddress" },
+ { 16, "codeFormat" },
+ { 17, "numChannels" },
+ { 18, "channel1" },
+ { 19, "channel2" },
+ { 20, "channel3" },
+ { 21, "channel4" },
+ { 22, "txClear" },
+ { 23, "txRetries" },
+ { 24, "txRouting" },
+ { 25, "txScrambled" },
+ { 26, "rxParameter" },
+ { 27, "txTimeoutMs" },
+ { 28, "waitCardTimeout" },
+ { 29, "channelSet" },
+ { 30, "name" },
+ { 31, "waitTime" },
+ { 32, "lParameter" },
+ { 33, "_15" },
+ { 34, "headerSize" },
+ { 36, "tx_delay_ms" },
+ { 37, "retries" },
+ { 38, "ReTransmitPacketMaxSize" },
+ { 39, "waitReTransmitPacketMaxSize" },
+ { 40, "fastReTransCount" },
+ { 41, "driverRetransmissions" },
+ { 42, "txAckTimeoutMs" },
+ { 43, "registrationInterrupts" },
+ { 44, "hardwareType" },
+ { 45, "radioType" },
+ { 46, "writeEEPROM" },
+ { 47, "writeRadioType" },
+ { 48, "entry_exit_debug" },
+ { 49, "debug" },
+ { 50, "in_speed" },
+ { 51, "out_speed" },
+ { 52, "in_speed10" },
+ { 53, "out_speed10" },
+ { 54, "in_speed_max" },
+ { 55, "out_speed_max" },
+ { 56, "measure_rate" },
+ { 57, "pre_Command_Wait" },
+ { 58, "rx_tweak1" },
+ { 59, "rx_tweak2" },
+ { 60, "tx_queue_len" },
+
+ { 150, "arlan3-txRing" },
+ { 151, "arlan3-rxRing" },
+ { 152, "arlan3-18" },
+ { 153, "arlan3-ring" },
+ { 154, "arlan3-shm-cpy" },
+ { 155, "config3" },
+ { 156, "reset3" },
+ {}
+};
+
+static struct trans_ctl_table trans_arlan_table[] = {
+ { 1, "arlan0", trans_arlan_conf_table0 },
+ { 2, "arlan1", trans_arlan_conf_table1 },
+ { 3, "arlan2", trans_arlan_conf_table2 },
+ { 4, "arlan3", trans_arlan_conf_table3 },
+ {}
+};
+
+static struct trans_ctl_table trans_appldata_table[] = {
+ { CTL_APPLDATA_TIMER, "timer" },
+ { CTL_APPLDATA_INTERVAL, "interval" },
+ { CTL_APPLDATA_OS, "os" },
+ { CTL_APPLDATA_NET_SUM, "net_sum" },
+ { CTL_APPLDATA_MEM, "mem" },
+ {}
+
+};
+
+static struct trans_ctl_table trans_s390dbf_table[] = {
+ { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
+ { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
+ {}
+};
+
+static struct trans_ctl_table trans_sunrpc_table[] = {
+ { CTL_RPCDEBUG, "rpc_debug" },
+ { CTL_NFSDEBUG, "nfs_debug" },
+ { CTL_NFSDDEBUG, "nfsd_debug" },
+ { CTL_NLMDEBUG, "nlm_debug" },
+ { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
+ { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
+ { CTL_MIN_RESVPORT, "min_resvport" },
+ { CTL_MAX_RESVPORT, "max_resvport" },
+ {}
+};
+
+static struct trans_ctl_table trans_pm_table[] = {
+ { 1 /* CTL_PM_SUSPEND */, "suspend" },
+ { 2 /* CTL_PM_CMODE */, "cmode" },
+ { 3 /* CTL_PM_P0 */, "p0" },
+ { 4 /* CTL_PM_CM */, "cm" },
+ {}
+};
+
+static struct trans_ctl_table trans_frv_table[] = {
+ { 1, "cache-mode" },
+ { 2, "pin-cxnr" },
+ {}
+};
+
+static struct trans_ctl_table trans_root_table[] = {
+ { CTL_KERN, "kernel", trans_kern_table },
+ { CTL_VM, "vm", trans_vm_table },
+ { CTL_NET, "net", trans_net_table },
+ /* CTL_PROC not used */
+ { CTL_FS, "fs", trans_fs_table },
+ { CTL_DEBUG, "debug", trans_debug_table },
+ { CTL_DEV, "dev", trans_dev_table },
+ { CTL_BUS, "bus", trans_bus_table },
+ { CTL_ABI, "abi" },
+ /* CTL_CPU not used */
+ { CTL_ARLAN, "arlan", trans_arlan_table },
+ { CTL_APPLDATA, "appldata", trans_appldata_table },
+ { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
+ { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
+ { CTL_PM, "pm", trans_pm_table },
+ { CTL_FRV, "frv", trans_frv_table },
+ {}
+};
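
The tables above mirror the legacy binary sysctl number space onto /proc/sys path components: each entry pairs a binary ctl_name with the procname it should appear under, directory entries carry a child table, an all-zero entry terminates a table, and a { 0, NULL, child } entry acts as a wildcard for per-device subdirectories such as the neigh and parport ones. A minimal stand-alone sketch of that mapping, with invented constant values and simplified structures (not the kernel's own lookup code), might look like this:

    /*
     * User-space sketch: resolve a binary sysctl path such as
     * { CTL_NET, NET_IPV4, NET_IPV4_TCP_SACK } to its /proc/sys pathname
     * using tables shaped like the trans_ctl_table arrays above.
     * The enum values are illustrative stand-ins, not the kernel's numbers.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct trans_tbl {
        int ctl_name;                  /* binary sysctl number, 0 in the terminator */
        const char *procname;          /* /proc/sys component */
        const struct trans_tbl *child; /* set for directory entries */
    };

    enum { CTL_NET = 3, NET_IPV4 = 5, NET_IPV4_TCP_SACK = 6 }; /* made up */

    static const struct trans_tbl ipv4_tbl[] = {
        { NET_IPV4_TCP_SACK, "tcp_sack" },
        {}
    };
    static const struct trans_tbl net_tbl[] = {
        { NET_IPV4, "ipv4", ipv4_tbl },
        {}
    };
    static const struct trans_tbl root_tbl[] = {
        { CTL_NET, "net", net_tbl },
        {}
    };

    /* Descend one table level per path component; a { 0, NULL, child }
     * entry matches any number, mirroring the wildcard rows above. */
    static int resolve(const struct trans_tbl *tbl, const int *path, int len,
                       char *buf, size_t sz)
    {
        size_t used = 0;

        for (int i = 0; i < len; i++) {
            const struct trans_tbl *e;

            if (!tbl)
                return -1;             /* path longer than the table tree */
            for (e = tbl; e->ctl_name || e->procname || e->child; e++)
                if (e->ctl_name == path[i] ||
                    (!e->ctl_name && !e->procname))
                    break;
            if (!e->ctl_name && !e->procname && !e->child)
                return -1;             /* hit the terminator: no match */
            used += snprintf(buf + used, sz - used, "/%s",
                             e->procname ? e->procname : "*");
            tbl = e->child;
        }
        return 0;
    }

    int main(void)
    {
        int path[] = { CTL_NET, NET_IPV4, NET_IPV4_TCP_SACK };
        char buf[128];

        if (resolve(root_tbl, path, 3, buf, sizeof(buf)) == 0)
            printf("/proc/sys%s\n", buf);  /* prints /proc/sys/net/ipv4/tcp_sack */
        return 0;
    }
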
+
+
+
+
+static int sysctl_depth(struct ctl_table *table)
+{
+ struct ctl_table *tmp;
+ int depth;
+
+ depth = 0;
+ for (tmp = table; tmp->parent; tmp = tmp->parent)
+ depth++;
+
+ return depth;
+}
+
+static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
+{
+ int i;
+
+ for (i = 0; table && i < n; i++)
+ table = table->parent;
+
+ return table;
+}
+
+static struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
+{
+ struct ctl_table *test;
+ struct trans_ctl_table *ref;
+ int depth, cur_depth;
+
+ depth = sysctl_depth(table);
+
+ cur_depth = depth;
+ ref = trans_root_table;
+repeat:
+ test = sysctl_parent(table, cur_depth);
+ for (; ref->ctl_name || ref->procname || ref->child; ref++) {
+ int match = 0;
+
+ if (cur_depth && !ref->child)
+ continue;
+
+ if (test->procname && ref->procname &&
+ (strcmp(test->procname, ref->procname) == 0))
+ match++;
+
+ if (test->ctl_name && ref->ctl_name &&
+ (test->ctl_name == ref->ctl_name))
+ match++;
+
+ if (!ref->ctl_name && !ref->procname)
+ match++;
+
+ if (match) {
+ if (cur_depth != 0) {
+ cur_depth--;
+ ref = ref->child;
+ goto repeat;
+ }
+ goto out;
+ }
+ }
+ ref = NULL;
+out:
+ return ref;
+}
+
+static void sysctl_print_path(struct ctl_table *table)
+{
+ struct ctl_table *tmp;
+ int depth, i;
+ depth = sysctl_depth(table);
+ if (table->procname) {
+ for (i = depth; i >= 0; i--) {
+ tmp = sysctl_parent(table, i);
+ printk("/%s", tmp->procname?tmp->procname:"");
+ }
+ }
+ printk(" ");
+ if (table->ctl_name) {
+ for (i = depth; i >= 0; i--) {
+ tmp = sysctl_parent(table, i);
+ printk(".%d", tmp->ctl_name);
+ }
+ }
+}
+
+static void sysctl_repair_table(struct ctl_table *table)
+{
+ /* Don't complain about the classic default
+ * sysctl strategy routine. Maybe later we
+ * can get the tables fixed and complain about
+ * this.
+ */
+ if (table->ctl_name && table->procname &&
+ (table->proc_handler == proc_dointvec) &&
+ (!table->strategy)) {
+ table->strategy = sysctl_data;
+ }
+}
+
+static struct ctl_table *sysctl_check_lookup(struct ctl_table *table)
+{
+ struct ctl_table_header *head;
+ struct ctl_table *ref, *test;
+ int depth, cur_depth;
+
+ depth = sysctl_depth(table);
+
+ for (head = sysctl_head_next(NULL); head;
+ head = sysctl_head_next(head)) {
+ cur_depth = depth;
+ ref = head->ctl_table;
+repeat:
+ test = sysctl_parent(table, cur_depth);
+ for (; ref->ctl_name || ref->procname; ref++) {
+ int match = 0;
+ if (cur_depth && !ref->child)
+ continue;
+
+ if (test->procname && ref->procname &&
+ (strcmp(test->procname, ref->procname) == 0))
+ match++;
+
+ if (test->ctl_name && ref->ctl_name &&
+ (test->ctl_name == ref->ctl_name))
+ match++;
+
+ if (match) {
+ if (cur_depth != 0) {
+ cur_depth--;
+ ref = ref->child;
+ goto repeat;
+ }
+ goto out;
+ }
+ }
+ }
+ ref = NULL;
+out:
+ sysctl_head_finish(head);
+ return ref;
+}
+
+static void set_fail(const char **fail, struct ctl_table *table, const char *str)
+{
+ if (*fail) {
+ printk(KERN_ERR "sysctl table check failed: ");
+ sysctl_print_path(table);
+ printk(" %s\n", *fail);
+ }
+ *fail = str;
+}
+
+static int sysctl_check_dir(struct ctl_table *table)
+{
+ struct ctl_table *ref;
+ int error;
+
+ error = 0;
+ ref = sysctl_check_lookup(table);
+ if (ref) {
+ int match = 0;
+ if ((!table->procname && !ref->procname) ||
+ (table->procname && ref->procname &&
+ (strcmp(table->procname, ref->procname) == 0)))
+ match++;
+
+ if ((!table->ctl_name && !ref->ctl_name) ||
+ (table->ctl_name && ref->ctl_name &&
+ (table->ctl_name == ref->ctl_name)))
+ match++;
+
+ if (match != 2) {
+ printk(KERN_ERR "%s: failed: ", __func__);
+ sysctl_print_path(table);
+ printk(" ref: ");
+ sysctl_print_path(ref);
+ printk("\n");
+ error = -EINVAL;
+ }
+ }
+ return error;
+}
+
+static void sysctl_check_leaf(struct ctl_table *table, const char **fail)
+{
+ struct ctl_table *ref;
+
+ ref = sysctl_check_lookup(table);
+ if (ref && (ref != table))
+ set_fail(fail, table, "Sysctl already exists");
+}
+
+static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
+{
+ struct trans_ctl_table *ref;
+
+ ref = sysctl_binary_lookup(table);
+ if (table->ctl_name && !ref)
+ set_fail(fail, table, "Unknown sysctl binary path");
+ if (ref) {
+ if (ref->procname &&
+ (!table->procname ||
+ (strcmp(table->procname, ref->procname) != 0)))
+ set_fail(fail, table, "procname does not match binary path procname");
+
+ if (ref->ctl_name && table->ctl_name &&
+ (table->ctl_name != ref->ctl_name))
+ set_fail(fail, table, "ctl_name does not match binary path ctl_name");
+ }
+}
+
+int sysctl_check_table(struct ctl_table *table)
+{
+ int error = 0;
+ for (; table->ctl_name || table->procname; table++) {
+ const char *fail = NULL;
+
+ sysctl_repair_table(table);
+ if (table->parent) {
+ if (table->procname && !table->parent->procname)
+ set_fail(&fail, table, "Parent without procname");
+ if (table->ctl_name && !table->parent->ctl_name)
+ set_fail(&fail, table, "Parent without ctl_name");
+ }
+ if (!table->procname)
+ set_fail(&fail, table, "No procname");
+ if (table->child) {
+ if (table->data)
+ set_fail(&fail, table, "Directory with data?");
+ if (table->maxlen)
+ set_fail(&fail, table, "Directory with maxlen?");
+ if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode)
+ set_fail(&fail, table, "Writable sysctl directory");
+ if (table->proc_handler)
+ set_fail(&fail, table, "Directory with proc_handler");
+ if (table->strategy)
+ set_fail(&fail, table, "Directory with strategy");
+ if (table->extra1)
+ set_fail(&fail, table, "Directory with extra1");
+ if (table->extra2)
+ set_fail(&fail, table, "Directory with extra2");
+ if (sysctl_check_dir(table))
+ set_fail(&fail, table, "Inconsistent directory names");
+ } else {
+ if ((table->strategy == sysctl_data) ||
+ (table->strategy == sysctl_string) ||
+ (table->strategy == sysctl_intvec) ||
+ (table->strategy == sysctl_jiffies) ||
+ (table->strategy == sysctl_ms_jiffies) ||
+ (table->proc_handler == proc_dostring) ||
+ (table->proc_handler == proc_dointvec) ||
+#ifdef CONFIG_SECURITY_CAPABILITIES
+ (table->proc_handler == proc_dointvec_bset) ||
+#endif /* def CONFIG_SECURITY_CAPABILITIES */
+ (table->proc_handler == proc_dointvec_minmax) ||
+ (table->proc_handler == proc_dointvec_jiffies) ||
+ (table->proc_handler == proc_dointvec_userhz_jiffies) ||
+ (table->proc_handler == proc_dointvec_ms_jiffies) ||
+ (table->proc_handler == proc_doulongvec_minmax) ||
+ (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
+ if (!table->data)
+ set_fail(&fail, table, "No data");
+ if (!table->maxlen)
+ set_fail(&fail, table, "No maxlen");
+ }
+ if ((table->proc_handler == proc_doulongvec_minmax) ||
+ (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
+ if (table->maxlen > sizeof (unsigned long)) {
+ if (!table->extra1)
+ set_fail(&fail, table, "No min");
+ if (!table->extra2)
+ set_fail(&fail, table, "No max");
+ }
+ }
+#ifdef CONFIG_SYSCTL_SYSCALL
+ if (table->ctl_name && !table->strategy)
+ set_fail(&fail, table, "Missing strategy");
+#endif
+#if 0
+ if (!table->ctl_name && table->strategy)
+ set_fail(&fail, table, "Strategy without ctl_name");
+#endif
+#ifdef CONFIG_PROC_FS
+ if (table->procname && !table->proc_handler)
+ set_fail(&fail, table, "No proc_handler");
+#endif
+#if 0
+ if (!table->procname && table->proc_handler)
+ set_fail(&fail, table, "proc_handler without procname");
+#endif
+ sysctl_check_leaf(table, &fail);
+ }
+ sysctl_check_bin_path(table, &fail);
+ if (fail) {
+ set_fail(&fail, table, NULL);
+ error = -EINVAL;
+ }
+ if (table->child)
+ error |= sysctl_check_table(table->child);
+ }
+ return error;
+}
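
sysctl_check_table() walks each registered tree and flags directories that carry data, handlers, strategies or write permission bits, leaves that use the stock handlers without data/maxlen, and ctl_name values that have no counterpart in the translation tables above. A hedged sketch of a table layout that should pass those checks; the names "example"/"example_value" and the variable are invented, the field layout follows struct ctl_table of this kernel generation, and this is kernel code, not a user-space program:

    #include <linux/sysctl.h>

    static int example_value;

    /* Leaf: the stock proc_dointvec handler, so .data and .maxlen are
     * required by the checks above.  No binary ctl_name is assigned,
     * which also skips the binary-path cross-check. */
    static struct ctl_table example_vars[] = {
        {
            .procname     = "example_value",
            .data         = &example_value,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        {}
    };

    /* Directory: only a procname, a read/search mode and a child table;
     * data, maxlen, handlers or write bits would all be rejected. */
    static struct ctl_table example_dir[] = {
        {
            .procname = "example",
            .mode     = 0555,
            .child    = example_vars,
        },
        {}
    };

    /* Registration (e.g. register_sysctl_table(example_dir)) is expected
     * to run sysctl_check_table() over this tree when the sysctl checking
     * option is built in. */
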
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 059431ed67db..9f360f68aad6 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -20,9 +20,12 @@
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
-#include <linux/tsacct_kern.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
+#include <linux/cgroupstats.h>
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
@@ -50,6 +53,11 @@ __read_mostly = {
[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
+static struct nla_policy
+cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] __read_mostly = {
+ [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
+};
+
struct listener {
struct list_head list;
pid_t pid;
@@ -373,6 +381,51 @@ err:
return NULL;
}
+static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ int rc = 0;
+ struct sk_buff *rep_skb;
+ struct cgroupstats *stats;
+ struct nlattr *na;
+ size_t size;
+ u32 fd;
+ struct file *file;
+ int fput_needed;
+
+ na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
+ if (!na)
+ return -EINVAL;
+
+ fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
+ file = fget_light(fd, &fput_needed);
+ if (file) {
+ size = nla_total_size(sizeof(struct cgroupstats));
+
+ rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
+ size);
+ if (rc < 0)
+ goto err;
+
+ na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
+ sizeof(struct cgroupstats));
+ stats = nla_data(na);
+ memset(stats, 0, sizeof(*stats));
+
+ rc = cgroupstats_build(stats, file->f_dentry);
+ if (rc < 0)
+ goto err;
+
+ fput_light(file, fput_needed);
+ return send_reply(rep_skb, info->snd_pid);
+ }
+
+err:
+ if (file)
+ fput_light(file, fput_needed);
+ nlmsg_free(rep_skb);
+ return rc;
+}
+
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
int rc = 0;
@@ -523,6 +576,12 @@ static struct genl_ops taskstats_ops = {
.policy = taskstats_cmd_get_policy,
};
+static struct genl_ops cgroupstats_ops = {
+ .cmd = CGROUPSTATS_CMD_GET,
+ .doit = cgroupstats_user_cmd,
+ .policy = cgroupstats_cmd_get_policy,
+};
+
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
@@ -547,8 +606,15 @@ static int __init taskstats_init(void)
if (rc < 0)
goto err;
+ rc = genl_register_ops(&family, &cgroupstats_ops);
+ if (rc < 0)
+ goto err_cgroup_ops;
+
family_registered = 1;
+ printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
return 0;
+err_cgroup_ops:
+ genl_unregister_ops(&family, &taskstats_ops);
err:
genl_unregister_family(&family);
return rc;
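
The taskstats changes register a second generic netlink operation, CGROUPSTATS_CMD_GET, next to the existing taskstats one, and the init path therefore gains a second failure label so the first registration is undone in reverse order. The unwind idiom itself, detached from the netlink specifics, is the usual goto ladder; a small stand-alone illustration with fake register/unregister functions and invented names:

    #include <stdio.h>

    /* Stand-ins for genl_register_ops()/genl_unregister_ops(); force_fail
     * lets the demo exercise the error path. */
    static int register_thing(const char *name, int force_fail)
    {
        printf("register %s\n", name);
        return force_fail ? -1 : 0;
    }

    static void unregister_thing(const char *name)
    {
        printf("unregister %s\n", name);
    }

    static int init_both(int fail_second)
    {
        int rc;

        rc = register_thing("taskstats_ops", 0);
        if (rc < 0)
            goto err;

        rc = register_thing("cgroupstats_ops", fail_second);
        if (rc < 0)
            goto err_cgroup_ops;    /* undo only what already succeeded */

        return 0;

    err_cgroup_ops:
        unregister_thing("taskstats_ops");
    err:
        return rc;
    }

    int main(void)
    {
        printf("rc = %d\n", init_both(1));
        return 0;
    }
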
diff --git a/kernel/time.c b/kernel/time.c
index 2289a8d68314..09d3c45c4da7 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -9,9 +9,9 @@
*/
/*
* Modification history kernel/time.c
- *
+ *
* 1993-09-02 Philip Gladstone
- * Created file with time related functions from sched.c and adjtimex()
+ * Created file with time related functions from sched.c and adjtimex()
* 1993-10-08 Torsten Duwe
* adjtime interface update and CMOS clock write code
* 1995-08-13 Torsten Duwe
@@ -30,16 +30,16 @@
#include <linux/module.h>
#include <linux/timex.h>
#include <linux/capability.h>
+#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
-#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
-/*
+/*
* The timezone where the local system is located. Used as a default by some
* programs who obtain this value by using gettimeofday.
*/
@@ -57,11 +57,7 @@ EXPORT_SYMBOL(sys_tz);
*/
asmlinkage long sys_time(time_t __user * tloc)
{
- time_t i;
- struct timespec tv;
-
- getnstimeofday(&tv);
- i = tv.tv_sec;
+ time_t i = get_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -76,7 +72,7 @@ asmlinkage long sys_time(time_t __user * tloc)
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
-
+
asmlinkage long sys_stime(time_t __user *tptr)
{
struct timespec tv;
@@ -115,10 +111,10 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
/*
* Adjust the time obtained from the CMOS to be UTC time instead of
* local time.
- *
+ *
* This is ugly, but preferable to the alternatives. Otherwise we
* would either need to write a program to do it in /etc/rc (and risk
- * confusion if the program gets run more than once; it would also be
+ * confusion if the program gets run more than once; it would also be
* hard to make the program warp the clock precisely n hours) or
* compile in the timezone information into the kernel. Bad, bad....
*
@@ -163,6 +159,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
if (tz) {
/* SMP safe, global irq locking makes it work. */
sys_tz = *tz;
+ update_vsyscall_tz();
if (firsttime) {
firsttime = 0;
if (!tv)
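
In the time.c hunks, sys_time() now calls get_seconds() instead of assembling a full timespec with getnstimeofday() and truncating it, and do_sys_settimeofday() additionally refreshes the vsyscall copy of the timezone. From user space the visible contract is unchanged: time() reports whole seconds while gettimeofday() keeps microsecond resolution, as this small demo shows:

    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>

    int main(void)
    {
        time_t t = time(NULL);          /* sys_time(): whole seconds only */
        struct timeval tv;

        gettimeofday(&tv, NULL);        /* sys_gettimeofday(): usec resolution */
        printf("time()       = %ld\n", (long)t);
        printf("gettimeofday = %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
    }
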
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f66351126544..8d53106a0a92 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -23,3 +23,8 @@ config HIGH_RES_TIMERS
hardware is not capable then this option only increases
the size of the kernel image.
+config GENERIC_CLOCKEVENTS_BUILD
+ bool
+ default y
+ depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 99b6034fc86b..905b0b50792d 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,6 +1,6 @@
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += tick-broadcast.o
obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 41dd3105ce7f..822beebe664a 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -194,6 +194,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
local_irq_restore(flags);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
* clockevents_notify - notification about relevant events
*/
@@ -222,4 +223,4 @@ void clockevents_notify(unsigned long reason, void *arg)
spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
-
+#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 51b6a6a6158c..c8a9d13874df 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -207,15 +207,12 @@ static inline void clocksource_resume_watchdog(void) { }
*/
void clocksource_resume(void)
{
- struct list_head *tmp;
+ struct clocksource *cs;
unsigned long flags;
spin_lock_irqsave(&clocksource_lock, flags);
- list_for_each(tmp, &clocksource_list) {
- struct clocksource *cs;
-
- cs = list_entry(tmp, struct clocksource, list);
+ list_for_each_entry(cs, &clocksource_list, list) {
if (cs->resume)
cs->resume();
}
@@ -369,7 +366,6 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
const char *buf, size_t count)
{
struct clocksource *ovr = NULL;
- struct list_head *tmp;
size_t ret = count;
int len;
@@ -389,12 +385,11 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
len = strlen(override_name);
if (len) {
+ struct clocksource *cs;
+
ovr = clocksource_override;
/* try to select it: */
- list_for_each(tmp, &clocksource_list) {
- struct clocksource *cs;
-
- cs = list_entry(tmp, struct clocksource, list);
+ list_for_each_entry(cs, &clocksource_list, list) {
if (strlen(cs->name) == len &&
!strcmp(cs->name, override_name))
ovr = cs;
@@ -422,14 +417,11 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
{
- struct list_head *tmp;
+ struct clocksource *src;
char *curr = buf;
spin_lock_irq(&clocksource_lock);
- list_for_each(tmp, &clocksource_list) {
- struct clocksource *src;
-
- src = list_entry(tmp, struct clocksource, list);
+ list_for_each_entry(src, &clocksource_list, list) {
curr += sprintf(curr, "%s ", src->name);
}
spin_unlock_irq(&clocksource_lock);
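
The clocksource.c hunks are a mechanical conversion from open-coded list_for_each() + list_entry() loops to list_for_each_entry(), which hides the container_of() step inside the iterator. A stand-alone user-space re-implementation of the pattern (GNU C for typeof, as the kernel headers use; the structures and names here are simplified stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Same shape as the kernel macro: the cursor is the containing struct. */
    #define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);        \
             &pos->member != (head);                                        \
             pos = container_of(pos->member.next, typeof(*pos), member))

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    struct clocksource {
        const char *name;
        int rating;
        struct list_head list;
    };

    int main(void)
    {
        struct list_head clocksource_list = { &clocksource_list, &clocksource_list };
        struct clocksource jiffies_cs = { .name = "jiffies", .rating = 1 };
        struct clocksource tsc_cs     = { .name = "tsc",     .rating = 300 };
        struct clocksource *cs;

        list_add_tail(&jiffies_cs.list, &clocksource_list);
        list_add_tail(&tsc_cs.list, &clocksource_list);

        /* No list_entry() in the body, unlike the removed loops. */
        list_for_each_entry(cs, &clocksource_list, list)
            printf("%s (rating %d)\n", cs->name, cs->rating);
        return 0;
    }
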
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0962e0577660..8cfb8b2ce773 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -64,8 +64,9 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
- if (tick_broadcast_device.evtdev ||
- (dev->features & CLOCK_EVT_FEAT_C3STOP))
+ if ((tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+ (dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
clockevents_exchange_device(NULL, dev);
@@ -176,8 +177,6 @@ static void tick_do_periodic_broadcast(void)
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
- dev->next_event.tv64 = KTIME_MAX;
-
tick_do_periodic_broadcast();
/*
@@ -218,26 +217,33 @@ static void tick_do_broadcast_on_off(void *why)
bc = tick_broadcast_device.evtdev;
/*
- * Is the device in broadcast mode forever or is it not
- * affected by the powerstate ?
+ * Is the device not affected by the powerstate ?
*/
- if (!dev || !tick_device_is_functional(dev) ||
- !(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
+ goto out;
+
+ if (!tick_device_is_functional(dev))
goto out;
- if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
+ switch (*reason) {
+ case CLOCK_EVT_NOTIFY_BROADCAST_ON:
+ case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
if (!cpu_isset(cpu, tick_broadcast_mask)) {
cpu_set(cpu, tick_broadcast_mask);
if (td->mode == TICKDEV_MODE_PERIODIC)
clockevents_set_mode(dev,
CLOCK_EVT_MODE_SHUTDOWN);
}
- } else {
+ if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
+ dev->features |= CLOCK_EVT_FEAT_DUMMY;
+ break;
+ case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
if (cpu_isset(cpu, tick_broadcast_mask)) {
cpu_clear(cpu, tick_broadcast_mask);
if (td->mode == TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
}
+ break;
}
if (cpus_empty(tick_broadcast_mask))
@@ -258,21 +264,12 @@ out:
*/
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
- int cpu = get_cpu();
-
- if (!cpu_isset(*oncpu, cpu_online_map)) {
+ if (!cpu_isset(*oncpu, cpu_online_map))
printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
- } else {
-
- if (cpu == *oncpu)
- tick_do_broadcast_on_off(&reason);
- else
- smp_call_function_single(*oncpu,
- tick_do_broadcast_on_off,
- &reason, 1, 1);
- }
- put_cpu();
+ else
+ smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
+ &reason, 1, 1);
}
/*
@@ -515,11 +512,9 @@ static void tick_broadcast_clear_oneshot(int cpu)
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
- if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
- bc->event_handler = tick_handle_oneshot_broadcast;
- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- bc->next_event.tv64 = KTIME_MAX;
- }
+ bc->event_handler = tick_handle_oneshot_broadcast;
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+ bc->next_event.tv64 = KTIME_MAX;
}
/*
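
The tick-broadcast changes let a later, better clock event device take over broadcast duty: instead of refusing whenever any broadcast device is already installed, tick_check_broadcast_device() now keeps the incumbent only if its rating is at least as high, and devices that stop in C3 are still excluded. A small stand-alone sketch of that selection rule, with the struct and feature flag reduced to the two fields the test uses and the device names chosen only for illustration:

    #include <stdio.h>
    #include <stdbool.h>

    #define FEAT_C3STOP 0x1   /* stand-in for CLOCK_EVT_FEAT_C3STOP */

    struct ce_dev {
        const char *name;
        int rating;
        unsigned int features;
    };

    /* Mirrors the new condition: reject devices that stop in deep idle,
     * and keep the current one unless the newcomer is rated higher. */
    static bool should_take_over(const struct ce_dev *cur, const struct ce_dev *new)
    {
        if (new->features & FEAT_C3STOP)
            return false;
        if (cur && cur->rating >= new->rating)
            return false;
        return true;
    }

    int main(void)
    {
        struct ce_dev pit   = { "pit",   100, 0 };
        struct ce_dev hpet  = { "hpet",  150, 0 };
        struct ce_dev lapic = { "lapic", 400, FEAT_C3STOP };

        printf("hpet replaces pit:   %d\n", should_take_over(&pit, &hpet));   /* 1 */
        printf("lapic replaces hpet: %d\n", should_take_over(&hpet, &lapic)); /* 0 */
        return 0;
    }
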
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 77a21abc8716..1bea399a9ef0 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -200,7 +200,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
cpu = smp_processor_id();
if (!cpu_isset(cpu, newdev->cpumask))
- goto out;
+ goto out_bc;
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
@@ -265,7 +265,7 @@ out_bc:
*/
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
-out:
+
spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
@@ -345,6 +345,7 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
+ case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
tick_broadcast_on_off(reason, dev);
break;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 637519af6151..10a1347597fd 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -586,7 +586,7 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
ts->sched_timer.expires = tick_init_jiffy_update();
offset = ktime_to_ns(tick_period) >> 1;
- do_div(offset, NR_CPUS);
+ do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
ts->sched_timer.expires = ktime_add_ns(ts->sched_timer.expires, offset);
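
The tick-sched hunk staggers each CPU's periodic sched_timer by half a tick period divided by num_possible_cpus() rather than the compile-time NR_CPUS, so the per-CPU offsets actually spread across the available half period on machines with far fewer CPUs than NR_CPUS. The arithmetic, with an assumed HZ=1000 tick and four possible CPUs:

    #include <stdio.h>

    int main(void)
    {
        const long long tick_period_ns = 1000000;  /* HZ=1000 -> 1 ms tick (assumed) */
        const int possible_cpus = 4;               /* assumed machine size */
        long long step = (tick_period_ns >> 1) / possible_cpus;

        /* offset = (tick_period / 2) / num_possible_cpus() * cpu, as above */
        for (int cpu = 0; cpu < possible_cpus; cpu++)
            printf("cpu%d: expires += %lld ns\n", cpu, step * cpu);
        return 0;
    }
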
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4ad79f6bdec6..e5e466b27598 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,9 +24,7 @@
* This read-write spinlock protects us from races in SMP while
* playing with xtime and avenrun.
*/
-__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-EXPORT_SYMBOL(xtime_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
/*
@@ -47,21 +45,13 @@ EXPORT_SYMBOL(xtime_lock);
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time; /* seconds */
-EXPORT_SYMBOL(xtime);
-
-#ifdef CONFIG_NO_HZ
static struct timespec xtime_cache __attribute__ ((aligned (16)));
static inline void update_xtime_cache(u64 nsec)
{
xtime_cache = xtime;
timespec_add_ns(&xtime_cache, nsec);
}
-#else
-#define xtime_cache xtime
-/* We do *not* want to evaluate the argument for this case */
-#define update_xtime_cache(n) do { } while (0)
-#endif
static struct clocksource *clock; /* pointer to current clocksource */
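
The timekeeping change drops the exports of xtime and xtime_lock and makes xtime_cache unconditional instead of NO_HZ-only: update_xtime_cache() keeps a timespec equal to xtime plus the nanoseconds accumulated since the last update, normalized the way timespec_add_ns() does. A user-space sketch of that normalization, with the constant, struct and helper re-declared locally and arbitrary example values:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    struct ts { long long sec; long long nsec; };

    /* add nanoseconds and carry into the seconds field, like timespec_add_ns() */
    static void ts_add_ns(struct ts *t, unsigned long long ns)
    {
        ns += t->nsec;
        while (ns >= NSEC_PER_SEC) {
            ns -= NSEC_PER_SEC;
            t->sec++;
        }
        t->nsec = ns;
    }

    int main(void)
    {
        struct ts xtime = { 1000, 900000000 };  /* arbitrary example values */
        struct ts cache = xtime;                /* xtime_cache = xtime */

        ts_add_ns(&cache, 250000000);           /* plus 250 ms not yet in xtime */
        printf("cache = %lld.%09lld\n", cache.sec, cache.nsec);  /* 1001.150000000 */
        return 0;
    }
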
diff --git a/kernel/timer.c b/kernel/timer.c
index 6ce1952eea7d..fb4e67d5dd60 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
@@ -817,7 +818,7 @@ unsigned long next_timer_interrupt(void)
#endif
/*
- * Called from the timer interrupt handler to charge one tick to the current
+ * Called from the timer interrupt handler to charge one tick to the current
* process. user_tick is 1 if the tick is user time, 0 for system.
*/
void update_process_times(int user_tick)
@@ -826,10 +827,13 @@ void update_process_times(int user_tick)
int cpu = smp_processor_id();
/* Note: this timer irq context must be accounted for as well. */
- if (user_tick)
+ if (user_tick) {
account_user_time(p, jiffies_to_cputime(1));
- else
+ account_user_time_scaled(p, jiffies_to_cputime(1));
+ } else {
account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
+ account_system_time_scaled(p, jiffies_to_cputime(1));
+ }
run_local_timers();
if (rcu_pending(cpu))
rcu_check_callbacks(cpu, user_tick);
@@ -953,7 +957,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
*/
asmlinkage long sys_getpid(void)
{
- return current->tgid;
+ return task_tgid_vnr(current);
}
/*
@@ -967,7 +971,7 @@ asmlinkage long sys_getppid(void)
int pid;
rcu_read_lock();
- pid = rcu_dereference(current->real_parent)->tgid;
+ pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns);
rcu_read_unlock();
return pid;
@@ -1099,7 +1103,7 @@ EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
- return current->pid;
+ return task_pid_vnr(current);
}
/**
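
The timer.c hunks route sys_getpid(), sys_getppid() and sys_gettid() through the pid-namespace helpers, so each caller sees IDs relative to its own namespace, and update_process_times() now also feeds the new scaled-time accounting. The tgid-versus-tid distinction those syscalls expose is easy to see from user space (Linux-specific gettid via syscall(2); link with -lpthread):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <pthread.h>

    static void *worker(void *arg)
    {
        (void)arg;
        /* same thread group id as main(), different thread id */
        printf("thread: getpid()=%d gettid()=%ld\n",
               getpid(), (long)syscall(SYS_gettid));
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        printf("main:   getpid()=%d gettid()=%ld\n",
               getpid(), (long)syscall(SYS_gettid));
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }
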
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index c122131a122f..4ab1b584961b 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
rcu_read_unlock();
stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
+ stats->ac_utimescaled =
+ cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
+ stats->ac_stimescaled =
+ cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
stats->ac_minflt = tsk->min_flt;
stats->ac_majflt = tsk->maj_flt;
diff --git a/kernel/user.c b/kernel/user.c
index 9ca2848fc356..e91331c457e2 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -44,34 +44,36 @@ struct user_struct root_user = {
.processes = ATOMIC_INIT(1),
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
- .mq_bytes = 0,
.locked_shm = 0,
#ifdef CONFIG_KEYS
.uid_keyring = &root_user_keyring,
.session_keyring = &root_session_keyring,
#endif
+#ifdef CONFIG_FAIR_USER_SCHED
+ .tg = &init_task_group,
+#endif
};
/*
* These routines must be called with the uidhash spinlock held!
*/
-static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
hlist_add_head(&up->uidhash_node, hashent);
}
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
}
-static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if(user->uid == uid) {
+ if (user->uid == uid) {
atomic_inc(&user->__count);
return user;
}
@@ -80,6 +82,210 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *ha
return NULL;
}
+#ifdef CONFIG_FAIR_USER_SCHED
+
+static void sched_destroy_user(struct user_struct *up)
+{
+ sched_destroy_group(up->tg);
+}
+
+static int sched_create_user(struct user_struct *up)
+{
+ int rc = 0;
+
+ up->tg = sched_create_group();
+ if (IS_ERR(up->tg))
+ rc = -ENOMEM;
+
+ return rc;
+}
+
+static void sched_switch_user(struct task_struct *p)
+{
+ sched_move_task(p);
+}
+
+#else /* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+
+#endif /* CONFIG_FAIR_USER_SCHED */
+
+#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
+
+static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static DEFINE_MUTEX(uids_mutex);
+
+static inline void uids_mutex_lock(void)
+{
+ mutex_lock(&uids_mutex);
+}
+
+static inline void uids_mutex_unlock(void)
+{
+ mutex_unlock(&uids_mutex);
+}
+
+/* return cpu shares held by the user */
+ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+{
+ struct user_struct *up = container_of(kset, struct user_struct, kset);
+
+ return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+}
+
+/* modify cpu shares held by the user */
+ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
+{
+ struct user_struct *up = container_of(kset, struct user_struct, kset);
+ unsigned long shares;
+ int rc;
+
+ sscanf(buffer, "%lu", &shares);
+
+ rc = sched_group_set_shares(up->tg, shares);
+
+ return (rc ? rc : size);
+}
+
+static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+{
+ sa->attr.name = name;
+ sa->attr.mode = mode;
+ sa->show = cpu_shares_show;
+ sa->store = cpu_shares_store;
+}
+
+/* Create "/sys/kernel/uids/<uid>" directory and
+ * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
+ */
+static int user_kobject_create(struct user_struct *up)
+{
+ struct kset *kset = &up->kset;
+ struct kobject *kobj = &kset->kobj;
+ int error;
+
+ memset(kset, 0, sizeof(struct kset));
+ kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
+ kobject_set_name(kobj, "%d", up->uid);
+ kset_init(kset);
+ user_attr_init(&up->user_attr, "cpu_share", 0644);
+
+ error = kobject_add(kobj);
+ if (error)
+ goto done;
+
+ error = sysfs_create_file(kobj, &up->user_attr.attr);
+ if (error)
+ kobject_del(kobj);
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+done:
+ return error;
+}
+
+/* create these in sysfs filesystem:
+ * "/sys/kernel/uids" directory
+ * "/sys/kernel/uids/0" directory (for root user)
+ * "/sys/kernel/uids/0/cpu_share" file (for root user)
+ */
+int __init uids_kobject_init(void)
+{
+ int error;
+
+ /* create under /sys/kernel dir */
+ uids_kobject.parent = &kernel_subsys.kobj;
+ uids_kobject.kset = &kernel_subsys;
+ kobject_set_name(&uids_kobject, "uids");
+ kobject_init(&uids_kobject);
+
+ error = kobject_add(&uids_kobject);
+ if (!error)
+ error = user_kobject_create(&root_user);
+
+ return error;
+}
+
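[Editorial note] With CONFIG_FAIR_USER_SCHED and sysfs enabled, every active user ends up with a /sys/kernel/uids/<uid>/cpu_share file backed by the show/store handlers above. A hedged usage sketch from user space (the value 2048 is only an example; the scheduler default is typically 1024):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/uids/0/cpu_share", "r+");
        unsigned long shares;

        if (!f) {
                perror("open cpu_share");
                return 1;
        }
        if (fscanf(f, "%lu", &shares) == 1)
                printf("root currently holds %lu shares\n", shares);

        rewind(f);
        fprintf(f, "2048\n");   /* give root twice the default weight */
        fclose(f);
        return 0;
}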
+/* work function to remove sysfs directory for a user and free up
+ * corresponding structures.
+ */
+static void remove_user_sysfs_dir(struct work_struct *w)
+{
+ struct user_struct *up = container_of(w, struct user_struct, work);
+ struct kobject *kobj = &up->kset.kobj;
+ unsigned long flags;
+ int remove_user = 0;
+
+ /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
+ * atomic.
+ */
+ uids_mutex_lock();
+
+ local_irq_save(flags);
+
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+ uid_hash_remove(up);
+ remove_user = 1;
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+ } else {
+ local_irq_restore(flags);
+ }
+
+ if (!remove_user)
+ goto done;
+
+ sysfs_remove_file(kobj, &up->user_attr.attr);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ kobject_del(kobj);
+
+ sched_destroy_user(up);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
+ kmem_cache_free(uid_cachep, up);
+
+done:
+ uids_mutex_unlock();
+}
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+ /* restore back the count */
+ atomic_inc(&up->__count);
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+
+ INIT_WORK(&up->work, remove_user_sysfs_dir);
+ schedule_work(&up->work);
+}
+
+#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
+
+static inline int user_kobject_create(struct user_struct *up) { return 0; }
+static inline void uids_mutex_lock(void) { }
+static inline void uids_mutex_unlock(void) { }
+
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static inline void free_user(struct user_struct *up, unsigned long flags)
+{
+ uid_hash_remove(up);
+ spin_unlock_irqrestore(&uidhash_lock, flags);
+ sched_destroy_user(up);
+ key_put(up->uid_keyring);
+ key_put(up->session_keyring);
+ kmem_cache_free(uid_cachep, up);
+}
+
+#endif
+
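[Editorial note] The split above exists because free_uid() can be called with interrupts disabled and takes uidhash_lock, while tearing down a kobject and its sysfs file may sleep; the sysfs-enabled variant therefore restores the reference count and punts the sleeping cleanup to a work item. A hedged, generic sketch of that "last put schedules work" pattern (struct foo and its helpers are illustrative names, not kernel APIs):

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo {
        atomic_t refcnt;
        struct work_struct release_work;
};

static DEFINE_SPINLOCK(foo_lock);

static void foo_release_workfn(struct work_struct *w)
{
        struct foo *f = container_of(w, struct foo, release_work);

        /* process context: safe to remove sysfs files, delete kobjects, ... */
        kfree(f);
}

static void foo_put(struct foo *f)
{
        unsigned long flags;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&f->refcnt, &foo_lock)) {
                /* unhash f here so no new lookups can find it */
                spin_unlock_irqrestore(&foo_lock, flags);
                INIT_WORK(&f->release_work, foo_release_workfn);
                schedule_work(&f->release_work);
        } else {
                local_irq_restore(flags);
        }
}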
/*
* Locate the user_struct for the passed UID. If found, take a ref on it. The
* caller must undo that ref with free_uid().
@@ -106,15 +312,10 @@ void free_uid(struct user_struct *up)
return;
local_irq_save(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
- uid_hash_remove(up);
- spin_unlock_irqrestore(&uidhash_lock, flags);
- key_put(up->uid_keyring);
- key_put(up->session_keyring);
- kmem_cache_free(uid_cachep, up);
- } else {
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
local_irq_restore(flags);
- }
}
struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
@@ -122,6 +323,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up;
+ /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+ * atomic.
+ */
+ uids_mutex_lock();
+
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
spin_unlock_irq(&uidhash_lock);
@@ -141,8 +347,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
atomic_set(&new->inotify_watches, 0);
atomic_set(&new->inotify_devs, 0);
#endif
-
+#ifdef CONFIG_POSIX_MQUEUE
new->mq_bytes = 0;
+#endif
new->locked_shm = 0;
if (alloc_uid_keyring(new, current) < 0) {
@@ -150,6 +357,22 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
return NULL;
}
+ if (sched_create_user(new) < 0) {
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+ kmem_cache_free(uid_cachep, new);
+ return NULL;
+ }
+
+ if (user_kobject_create(new)) {
+ sched_destroy_user(new);
+ key_put(new->uid_keyring);
+ key_put(new->session_keyring);
+ kmem_cache_free(uid_cachep, new);
+ uids_mutex_unlock();
+ return NULL;
+ }
+
/*
* Before adding this, check whether we raced
* on adding the same user already..
@@ -157,6 +380,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
+ /* This case is not possible when CONFIG_FAIR_USER_SCHED
+ * is defined, since we serialize alloc_uid() using
+ * uids_mutex. Hence no need to call
+ * sched_destroy_user() or remove_user_sysfs_dir().
+ */
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
@@ -167,6 +395,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
spin_unlock_irq(&uidhash_lock);
}
+
+ uids_mutex_unlock();
+
return up;
}
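[Editorial note] alloc_uid() keeps its optimistic shape: look up under the spinlock, build the new user_struct outside of it (keyring, scheduler group and sysfs setup may all sleep), then re-check for a racing insert. The new uids_mutex wraps the whole sequence so two concurrent callers cannot both create a sysfs directory for the same uid, which is why the race branch above needs no sched/sysfs teardown. A condensed, hedged sketch of that shape (build_new_user()/discard_new_user() stand in for the allocation and cleanup steps and are not real functions; error handling omitted):

static struct user_struct *get_or_create_uid(uid_t uid, struct hlist_head *head)
{
        struct user_struct *up, *new;

        uids_mutex_lock();                      /* serialize find + create + insert */

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, head);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = build_new_user(uid);      /* may sleep */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, head);
                if (up) {
                        discard_new_user(new);  /* lost the race, drop our copy */
                } else {
                        uid_hash_insert(new, head);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();
        return up;
}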
@@ -184,6 +415,7 @@ void switch_uid(struct user_struct *new_user)
atomic_dec(&old_user->processes);
switch_uid_keyring(new_user);
current->user = new_user;
+ sched_switch_user(current);
/*
* We need to synchronize with __sigqueue_alloc()
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e080d1d744cc..52d5e7c9a8e6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,6 +32,7 @@
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
+#include <linux/lockdep.h>
/*
* The per-CPU workqueue (if single thread, we always use the first
@@ -61,6 +62,9 @@ struct workqueue_struct {
const char *name;
int singlethread;
int freezeable; /* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -250,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+ /*
+ * It is permissible to free the struct work_struct
+ * from inside the function that is called from it;
+ * lockdep has to take this into account as well.
+ * To avoid bogus "held lock freed" warnings as well
+ * as problems when looking into work->lockdep_map,
+ * make a copy and use that here.
+ */
+ struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
cwq->current_work = work;
list_del_init(cwq->worklist.next);
@@ -257,13 +272,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);
+ lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
f(work);
+ lock_release(&lockdep_map, 1, _THIS_IP_);
+ lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
"%s/0x%08x/%d\n",
current->comm, preempt_count(),
- current->pid);
+ task_pid_nr(current));
printk(KERN_ERR " last function: ");
print_symbol("%s\n", (unsigned long)f);
debug_show_held_locks(current);
@@ -376,6 +395,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
int cpu;
might_sleep();
+ lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&wq->lockdep_map, 1, _THIS_IP_);
for_each_cpu_mask(cpu, *cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
@@ -446,6 +467,9 @@ static void wait_on_work(struct work_struct *work)
might_sleep();
+ lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
cwq = get_wq_data(work);
if (!cwq)
return;
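[Editorial note] Taken together, the fake lock_acquire()/lock_release() pairs around each work function and around flush_workqueue()/wait_on_work() teach lockdep that flushing waits for every pending work item, so a flush (or cancel_work_sync()) performed while holding a lock that some work item also takes is reported as an inversion instead of deadlocking at some later time. A hedged, illustrative module that would now trigger such a report (not part of the patch):

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_lock);
static struct workqueue_struct *demo_wq;

static void demo_workfn(struct work_struct *w)
{
        mutex_lock(&demo_lock);         /* the work item takes demo_lock ... */
        mutex_unlock(&demo_lock);
}
static DECLARE_WORK(demo_work, demo_workfn);

static int __init demo_init(void)
{
        demo_wq = create_workqueue("lockdep_demo");
        if (!demo_wq)
                return -ENOMEM;
        queue_work(demo_wq, &demo_work);

        mutex_lock(&demo_lock);
        flush_workqueue(demo_wq);       /* ... and we flush while holding it */
        mutex_unlock(&demo_lock);
        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");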
@@ -695,8 +719,10 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
}
}
-struct workqueue_struct *__create_workqueue(const char *name,
- int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+ int singlethread,
+ int freezeable,
+ struct lock_class_key *key)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
@@ -713,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
}
wq->name = name;
+ lockdep_init_map(&wq->lockdep_map, name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
INIT_LIST_HEAD(&wq->list);
@@ -741,7 +768,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
}
return wq;
}
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
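[Editorial note] Renaming the entry point to __create_workqueue_key() keeps the create_workqueue() family source-compatible while giving lockdep a distinct lock_class_key per call site, so unrelated workqueues created from different places are not folded into one lock class. A hedged sketch of what the wrapper in include/linux/workqueue.h looks like (details may differ from the actual header):

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable)             \
({                                                                      \
        static struct lock_class_key __key;                            \
                                                                        \
        __create_workqueue_key((name), (singlethread),                  \
                               (freezeable), &__key);                   \
})
#else
#define __create_workqueue(name, singlethread, freezeable)             \
        __create_workqueue_key((name), (singlethread), (freezeable), NULL)
#endif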
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
@@ -752,6 +779,9 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
if (cwq->thread == NULL)
return;
+ lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
flush_cpu_workqueue(cwq);
/*
* If the caller is CPU_DEAD and cwq->worklist was not empty,