author     James Morris <jmorris@namei.org>  2009-05-08 17:56:47 +1000
committer  James Morris <jmorris@namei.org>  2009-05-08 17:56:47 +1000
commit     d254117099d711f215e62427f55dfb8ebd5ad011 (patch)
tree       0848ff8dd74314fec14a86497f8d288c86ba7c65 /kernel
parent     07ff7a0b187f3951788f64ae1f30e8109bc8e9eb (diff)
parent     8c9ed899b44c19e81859fbb0e9d659fe2f8630fc (diff)
download   linux-d254117099d711f215e62427f55dfb8ebd5ad011.tar.bz2
Merge branch 'master' into next
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/audit.c | 9
-rw-r--r--  kernel/audit_tree.c | 5
-rw-r--r--  kernel/auditfilter.c | 20
-rw-r--r--  kernel/auditsc.c | 34
-rw-r--r--  kernel/cgroup.c | 430
-rw-r--r--  kernel/cgroup_debug.c | 2
-rw-r--r--  kernel/cpu.c | 6
-rw-r--r--  kernel/cpuset.c | 254
-rw-r--r--  kernel/exec_domain.c | 23
-rw-r--r--  kernel/exit.c | 250
-rw-r--r--  kernel/extable.c | 48
-rw-r--r--  kernel/fork.c | 93
-rw-r--r--  kernel/futex.c | 7
-rw-r--r--  kernel/hrtimer.c | 55
-rw-r--r--  kernel/hung_task.c | 217
-rw-r--r--  kernel/irq/Makefile | 1
-rw-r--r--  kernel/irq/devres.c | 16
-rw-r--r--  kernel/irq/handle.c | 58
-rw-r--r--  kernel/irq/internals.h | 2
-rw-r--r--  kernel/irq/manage.c | 225
-rw-r--r--  kernel/irq/numa_migrate.c | 1
-rw-r--r--  kernel/irq/pm.c | 79
-rw-r--r--  kernel/kallsyms.c | 19
-rw-r--r--  kernel/kexec.c | 22
-rw-r--r--  kernel/kmod.c | 12
-rw-r--r--  kernel/kprobes.c | 298
-rw-r--r--  kernel/kthread.c | 28
-rw-r--r--  kernel/lockdep.c | 582
-rw-r--r--  kernel/lockdep_internals.h | 45
-rw-r--r--  kernel/lockdep_proc.c | 22
-rw-r--r--  kernel/lockdep_states.h | 9
-rw-r--r--  kernel/module.c | 274
-rw-r--r--  kernel/mutex-debug.c | 9
-rw-r--r--  kernel/mutex-debug.h | 18
-rw-r--r--  kernel/mutex.c | 122
-rw-r--r--  kernel/mutex.h | 22
-rw-r--r--  kernel/ns_cgroup.c | 14
-rw-r--r--  kernel/panic.c | 136
-rw-r--r--  kernel/params.c | 26
-rw-r--r--  kernel/pid.c | 33
-rw-r--r--  kernel/pid_namespace.c | 15
-rw-r--r--  kernel/posix-cpu-timers.c | 17
-rw-r--r--  kernel/power/disk.c | 188
-rw-r--r--  kernel/power/main.c | 67
-rw-r--r--  kernel/power/snapshot.c | 9
-rw-r--r--  kernel/power/swap.c | 2
-rw-r--r--  kernel/power/swsusp.c | 18
-rw-r--r--  kernel/power/user.c | 9
-rw-r--r--  kernel/printk.c | 26
-rw-r--r--  kernel/ptrace.c | 130
-rw-r--r--  kernel/rcuclassic.c | 23
-rw-r--r--  kernel/rcupdate.c | 42
-rw-r--r--  kernel/rcupreempt.c | 48
-rw-r--r--  kernel/rcutorture.c | 25
-rw-r--r--  kernel/rcutree.c | 39
-rw-r--r--  kernel/rcutree.h | 10
-rw-r--r--  kernel/rcutree_trace.c | 16
-rw-r--r--  kernel/relay.c | 12
-rw-r--r--  kernel/resource.c | 46
-rw-r--r--  kernel/sched.c | 340
-rw-r--r--  kernel/sched_clock.c | 12
-rw-r--r--  kernel/sched_cpupri.c | 5
-rw-r--r--  kernel/sched_cpupri.h | 2
-rw-r--r--  kernel/sched_features.h | 1
-rw-r--r--  kernel/sched_rt.c | 15
-rw-r--r--  kernel/signal.c | 63
-rw-r--r--  kernel/slow-work.c | 640
-rw-r--r--  kernel/smp.c | 432
-rw-r--r--  kernel/softirq.c | 40
-rw-r--r--  kernel/softlockup.c | 100
-rw-r--r--  kernel/spinlock.c | 18
-rw-r--r--  kernel/stop_machine.c | 2
-rw-r--r--  kernel/sys.c | 29
-rw-r--r--  kernel/sysctl.c | 83
-rw-r--r--  kernel/time/clocksource.c | 8
-rw-r--r--  kernel/time/jiffies.c | 2
-rw-r--r--  kernel/time/tick-common.c | 12
-rw-r--r--  kernel/time/timekeeping.c | 12
-rw-r--r--  kernel/timer.c | 75
-rw-r--r--  kernel/trace/Kconfig | 134
-rw-r--r--  kernel/trace/Makefile | 13
-rw-r--r--  kernel/trace/blktrace.c | 1550
-rw-r--r--  kernel/trace/events.c | 14
-rw-r--r--  kernel/trace/ftrace.c | 1133
-rw-r--r--  kernel/trace/kmemtrace.c | 464
-rw-r--r--  kernel/trace/ring_buffer.c | 693
-rw-r--r--  kernel/trace/trace.c | 3060
-rw-r--r--  kernel/trace/trace.h | 321
-rw-r--r--  kernel/trace/trace_boot.c | 36
-rw-r--r--  kernel/trace/trace_branch.c | 286
-rw-r--r--  kernel/trace/trace_clock.c | 109
-rw-r--r--  kernel/trace/trace_event_profile.c | 31
-rw-r--r--  kernel/trace/trace_event_types.h | 173
-rw-r--r--  kernel/trace/trace_events.c | 828
-rw-r--r--  kernel/trace/trace_events_filter.c | 433
-rw-r--r--  kernel/trace/trace_events_stage_1.h | 39
-rw-r--r--  kernel/trace/trace_events_stage_2.h | 176
-rw-r--r--  kernel/trace/trace_events_stage_3.h | 281
-rw-r--r--  kernel/trace/trace_export.c | 102
-rw-r--r--  kernel/trace/trace_functions.c | 369
-rw-r--r--  kernel/trace/trace_functions_graph.c | 635
-rw-r--r--  kernel/trace/trace_hw_branches.c | 185
-rw-r--r--  kernel/trace/trace_irqsoff.c | 54
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 45
-rw-r--r--  kernel/trace/trace_nop.c | 6
-rw-r--r--  kernel/trace/trace_output.c | 1017
-rw-r--r--  kernel/trace/trace_output.h | 71
-rw-r--r--  kernel/trace/trace_power.c | 201
-rw-r--r--  kernel/trace/trace_printk.c | 270
-rw-r--r--  kernel/trace/trace_sched_switch.c | 27
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 102
-rw-r--r--  kernel/trace/trace_selftest.c | 169
-rw-r--r--  kernel/trace/trace_stack.c | 19
-rw-r--r--  kernel/trace/trace_stat.c | 326
-rw-r--r--  kernel/trace/trace_stat.h | 31
-rw-r--r--  kernel/trace/trace_syscalls.c | 250
-rw-r--r--  kernel/trace/trace_sysprof.c | 23
-rw-r--r--  kernel/trace/trace_workqueue.c | 288
-rw-r--r--  kernel/tracepoint.c | 7
-rw-r--r--  kernel/utsname_sysctl.c | 2
-rw-r--r--  kernel/workqueue.c | 99
122 files changed, 15841 insertions, 4392 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index e4791b3ba55d..42423665660a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
+obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_SLOW_WORK) += slow-work.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/audit.c b/kernel/audit.c
index ce6d8ea3131e..9442c3533ba9 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -766,6 +766,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
audit_log_format(ab, " msg=");
size = nlmsg_len(nlh);
+ if (size > 0 &&
+ ((unsigned char *)data)[size - 1] == '\0')
+ size--;
audit_log_n_untrustedstring(ab, data, size);
}
audit_set_pid(ab, pid);
@@ -1382,7 +1385,7 @@ void audit_log_n_string(struct audit_buffer *ab, const char *string,
int audit_string_contains_control(const char *string, size_t len)
{
const unsigned char *p;
- for (p = string; p < (const unsigned char *)string + len && *p; p++) {
+ for (p = string; p < (const unsigned char *)string + len; p++) {
if (*p == '"' || *p < 0x21 || *p > 0x7e)
return 1;
}
@@ -1437,13 +1440,13 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
- audit_log_format(ab, "<no memory>");
+ audit_log_string(ab, "<no_memory>");
return;
}
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
- audit_log_format(ab, "<too long>");
+ audit_log_string(ab, "<too_long>");
} else
audit_log_untrustedstring(ab, p);
kfree(pathname);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8ad9545b8db9..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -385,6 +385,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
mutex_lock(&inode->inotify_mutex);
if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
mutex_unlock(&inode->inotify_mutex);
+ put_inotify_watch(&old->watch);
free_chunk(chunk);
return -ENOSPC;
}
@@ -394,6 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk->dead = 1;
inotify_evict_watch(&chunk->watch);
mutex_unlock(&inode->inotify_mutex);
+ put_inotify_watch(&old->watch);
put_inotify_watch(&chunk->watch);
return 0;
}
@@ -732,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
dentry = dget(path.dentry);
path_put(&path);
- if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
- follow_up(&mnt, &dentry);
-
list_add_tail(&list, &tagged->mnt_list);
mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index fbf24d121d97..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -135,18 +135,18 @@ static void audit_remove_watch(struct audit_watch *watch)
static inline void audit_free_rule(struct audit_entry *e)
{
int i;
-
+ struct audit_krule *erule = &e->rule;
/* some rules don't have associated watches */
- if (e->rule.watch)
- audit_put_watch(e->rule.watch);
- if (e->rule.fields)
- for (i = 0; i < e->rule.field_count; i++) {
- struct audit_field *f = &e->rule.fields[i];
+ if (erule->watch)
+ audit_put_watch(erule->watch);
+ if (erule->fields)
+ for (i = 0; i < erule->field_count; i++) {
+ struct audit_field *f = &erule->fields[i];
kfree(f->lsm_str);
security_audit_rule_free(f->lsm_rule);
}
- kfree(e->rule.fields);
- kfree(e->rule.filterkey);
+ kfree(erule->fields);
+ kfree(erule->filterkey);
kfree(e);
}
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
if (audit_enabled) {
struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_KERNEL,
+ ab = audit_log_start(NULL, GFP_NOFS,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, "auid=%u ses=%u",
audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
e = container_of(r, struct audit_entry, rule);
if (audit_enabled) {
struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_KERNEL,
+ ab = audit_log_start(NULL, GFP_NOFS,
AUDIT_CONFIG_CHANGE);
audit_log_format(ab, "auid=%u ses=%u",
audit_get_loginuid(current),
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8cbddff6c283..7d6ac7c1f414 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -66,6 +66,7 @@
#include <linux/syscalls.h>
#include <linux/inotify.h>
#include <linux/capability.h>
+#include <linux/fs_struct.h>
#include "audit.h"
@@ -328,6 +329,14 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
*/
#ifdef CONFIG_AUDIT_TREE
+static void audit_set_auditable(struct audit_context *ctx)
+{
+ if (!ctx->prio) {
+ ctx->prio = 1;
+ ctx->current_state = AUDIT_RECORD_CONTEXT;
+ }
+}
+
static int put_tree_ref(struct audit_context *ctx, struct audit_chunk *chunk)
{
struct audit_tree_refs *p = ctx->trees;
@@ -741,17 +750,9 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
rcu_read_unlock();
}
-static void audit_set_auditable(struct audit_context *ctx)
-{
- if (!ctx->prio) {
- ctx->prio = 1;
- ctx->current_state = AUDIT_RECORD_CONTEXT;
- }
-}
-
static inline struct audit_context *audit_get_context(struct task_struct *tsk,
int return_valid,
- int return_code)
+ long return_code)
{
struct audit_context *context = tsk->audit_context;
@@ -1023,7 +1024,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
{
char arg_num_len_buf[12];
const char __user *tmp_p = p;
- /* how many digits are in arg_num? 3 is the length of a=\n */
+ /* how many digits are in arg_num? 3 is the length of " a=" */
size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 3;
size_t len, len_left, to_send;
size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
@@ -1109,7 +1110,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
* so we can be sure nothing was lost.
*/
if ((i == 0) && (too_long))
- audit_log_format(*ab, "a%d_len=%zu ", arg_num,
+ audit_log_format(*ab, " a%d_len=%zu", arg_num,
has_cntl ? 2*len : len);
/*
@@ -1129,7 +1130,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
buf[to_send] = '\0';
/* actually log it */
- audit_log_format(*ab, "a%d", arg_num);
+ audit_log_format(*ab, " a%d", arg_num);
if (too_long)
audit_log_format(*ab, "[%d]", i);
audit_log_format(*ab, "=");
@@ -1137,7 +1138,6 @@ static int audit_log_single_execve_arg(struct audit_context *context,
audit_log_n_hex(*ab, buf, to_send);
else
audit_log_format(*ab, "\"%s\"", buf);
- audit_log_format(*ab, "\n");
p += to_send;
len_left -= to_send;
@@ -1165,7 +1165,7 @@ static void audit_log_execve_info(struct audit_context *context,
p = (const char __user *)axi->mm->arg_start;
- audit_log_format(*ab, "argc=%d ", axi->argc);
+ audit_log_format(*ab, "argc=%d", axi->argc);
/*
* we need some kernel buffer to hold the userspace args. Just
@@ -1478,7 +1478,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
case 0:
/* name was specified as a relative path and the
* directory component is the cwd */
- audit_log_d_path(ab, " name=", &context->pwd);
+ audit_log_d_path(ab, "name=", &context->pwd);
break;
default:
/* log the name's directory component */
@@ -2149,7 +2149,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
* __audit_mq_open - record audit data for a POSIX MQ open
* @oflag: open flag
* @mode: mode bits
- * @u_attr: queue attributes
+ * @attr: queue attributes
*
*/
void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
@@ -2196,7 +2196,7 @@ void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
/**
* __audit_mq_notify - record audit data for a POSIX MQ notify
* @mqdes: MQ descriptor
- * @u_notification: Notification event
+ * @notification: Notification event
*
*/
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c500ca7239b2..382109b5baeb 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -94,7 +94,6 @@ struct cgroupfs_root {
char release_agent_path[PATH_MAX];
};
-
/*
* The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
* subsystems that are otherwise unattached - it never has more than a
@@ -102,6 +101,39 @@ struct cgroupfs_root {
*/
static struct cgroupfs_root rootnode;
+/*
+ * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when
+ * cgroup_subsys->use_id != 0.
+ */
+#define CSS_ID_MAX (65535)
+struct css_id {
+ /*
+ * The css to which this ID points. This pointer is set to valid value
+ * after cgroup is populated. If cgroup is removed, this will be NULL.
+ * This pointer is expected to be RCU-safe because destroy()
+ * is called after synchronize_rcu(). But for safe use, css_is_removed()
+ * css_tryget() should be used for avoiding race.
+ */
+ struct cgroup_subsys_state *css;
+ /*
+ * ID of this css.
+ */
+ unsigned short id;
+ /*
+ * Depth in hierarchy which this ID belongs to.
+ */
+ unsigned short depth;
+ /*
+ * ID is freed by RCU. (and lookup routine is RCU safe.)
+ */
+ struct rcu_head rcu_head;
+ /*
+ * Hierarchy of CSS ID belongs to.
+ */
+ unsigned short stack[0]; /* Array of Length (depth+1) */
+};
+
+
/* The list of hierarchy roots */
static LIST_HEAD(roots);
@@ -185,6 +217,8 @@ struct cg_cgroup_link {
static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;
+static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
+
/* css_set_lock protects the list of css_set objects, and the
* chain of tasks off each css_set. Nests outside task->alloc_lock
* due to cgroup_iter_start() */
@@ -567,6 +601,9 @@ static struct backing_dev_info cgroup_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
+static int alloc_css_id(struct cgroup_subsys *ss,
+ struct cgroup *parent, struct cgroup *child);
+
static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
struct inode *inode = new_inode(sb);
@@ -585,13 +622,18 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
* Call subsys's pre_destroy handler.
* This is called before css refcnt check.
*/
-static void cgroup_call_pre_destroy(struct cgroup *cgrp)
+static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
struct cgroup_subsys *ss;
+ int ret = 0;
+
for_each_subsys(cgrp->root, ss)
- if (ss->pre_destroy)
- ss->pre_destroy(ss, cgrp);
- return;
+ if (ss->pre_destroy) {
+ ret = ss->pre_destroy(ss, cgrp);
+ if (ret)
+ break;
+ }
+ return ret;
}
static void free_cgroup_rcu(struct rcu_head *obj)
@@ -685,6 +727,22 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
remove_dir(dentry);
}
+/*
+ * A queue for waiters to do rmdir() cgroup. A tasks will sleep when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
+ * reference to css->refcnt. In general, this refcnt is expected to goes down
+ * to zero, soon.
+ *
+ * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex;
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+{
+ if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+ wake_up_all(&cgroup_rmdir_waitq);
+}
+
static int rebind_subsystems(struct cgroupfs_root *root,
unsigned long final_bits)
{
@@ -857,16 +915,16 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
}
ret = rebind_subsystems(root, opts.subsys_bits);
+ if (ret)
+ goto out_unlock;
/* (re)populate subsystem files */
- if (!ret)
- cgroup_populate_dir(cgrp);
+ cgroup_populate_dir(cgrp);
if (opts.release_agent)
strcpy(root->release_agent_path, opts.release_agent);
out_unlock:
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
return ret;
@@ -969,15 +1027,13 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
/* First find the desired set of subsystems */
ret = parse_cgroupfs_options(data, &opts);
if (ret) {
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
return ret;
}
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root) {
- if (opts.release_agent)
- kfree(opts.release_agent);
+ kfree(opts.release_agent);
return -ENOMEM;
}
@@ -1280,6 +1336,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
synchronize_rcu();
put_css_set(cg);
+
+ /*
+ * wake up rmdir() waiter. the rmdir should fail since the cgroup
+ * is no longer empty.
+ */
+ cgroup_wakeup_rmdir_waiters(cgrp);
return 0;
}
@@ -1625,7 +1687,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
.rename = cgroup_rename,
};
-static int cgroup_create_file(struct dentry *dentry, int mode,
+static int cgroup_create_file(struct dentry *dentry, mode_t mode,
struct super_block *sb)
{
static const struct dentry_operations cgroup_dops = {
@@ -1671,7 +1733,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
* @mode: mode to set on new directory.
*/
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
- int mode)
+ mode_t mode)
{
struct dentry *parent;
int error = 0;
@@ -1689,6 +1751,33 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
return error;
}
+/**
+ * cgroup_file_mode - deduce file mode of a control file
+ * @cft: the control file in question
+ *
+ * returns cft->mode if ->mode is not 0
+ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
+ * returns S_IRUGO if it has only a read handler
+ * returns S_IWUSR if it has only a write hander
+ */
+static mode_t cgroup_file_mode(const struct cftype *cft)
+{
+ mode_t mode = 0;
+
+ if (cft->mode)
+ return cft->mode;
+
+ if (cft->read || cft->read_u64 || cft->read_s64 ||
+ cft->read_map || cft->read_seq_string)
+ mode |= S_IRUGO;
+
+ if (cft->write || cft->write_u64 || cft->write_s64 ||
+ cft->write_string || cft->trigger)
+ mode |= S_IWUSR;
+
+ return mode;
+}
+
int cgroup_add_file(struct cgroup *cgrp,
struct cgroup_subsys *subsys,
const struct cftype *cft)
@@ -1696,6 +1785,7 @@ int cgroup_add_file(struct cgroup *cgrp,
struct dentry *dir = cgrp->dentry;
struct dentry *dentry;
int error;
+ mode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -1706,7 +1796,8 @@ int cgroup_add_file(struct cgroup *cgrp,
BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
dentry = lookup_one_len(name, dir, strlen(name));
if (!IS_ERR(dentry)) {
- error = cgroup_create_file(dentry, 0644 | S_IFREG,
+ mode = cgroup_file_mode(cft);
+ error = cgroup_create_file(dentry, mode | S_IFREG,
cgrp->root->sb);
if (!error)
dentry->d_fsdata = (void *)cft;
@@ -2288,6 +2379,7 @@ static struct cftype files[] = {
.write_u64 = cgroup_tasks_write,
.release = cgroup_tasks_release,
.private = FILE_TASKLIST,
+ .mode = S_IRUGO | S_IWUSR,
},
{
@@ -2327,6 +2419,17 @@ static int cgroup_populate_dir(struct cgroup *cgrp)
if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
return err;
}
+ /* This cgroup is ready now */
+ for_each_subsys(cgrp->root, ss) {
+ struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ /*
+ * Update id->css pointer and make this css visible from
+ * CSS ID functions. This pointer will be dereferened
+ * from RCU-read-side without locks.
+ */
+ if (css->id)
+ rcu_assign_pointer(css->id->css, css);
+ }
return 0;
}
@@ -2338,6 +2441,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
css->cgroup = cgrp;
atomic_set(&css->refcnt, 1);
css->flags = 0;
+ css->id = NULL;
if (cgrp == dummytop)
set_bit(CSS_ROOT, &css->flags);
BUG_ON(cgrp->subsys[ss->subsys_id]);
@@ -2376,7 +2480,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
* Must be called with the mutex on the parent inode held
*/
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
- int mode)
+ mode_t mode)
{
struct cgroup *cgrp;
struct cgroupfs_root *root = parent->root;
@@ -2413,6 +2517,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_destroy;
}
init_cgroup_css(css, ss, cgrp);
+ if (ss->use_id)
+ if (alloc_css_id(ss, parent, cgrp))
+ goto err_destroy;
+ /* At error, ->destroy() callback has to free assigned ID. */
}
cgroup_lock_hierarchy(root);
@@ -2555,9 +2663,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cgroup *cgrp = dentry->d_fsdata;
struct dentry *d;
struct cgroup *parent;
+ DEFINE_WAIT(wait);
+ int ret;
/* the vfs holds both inode->i_mutex already */
-
+again:
mutex_lock(&cgroup_mutex);
if (atomic_read(&cgrp->count) != 0) {
mutex_unlock(&cgroup_mutex);
@@ -2573,17 +2683,39 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
* Call pre_destroy handlers of subsys. Notify subsystems
* that rmdir() request comes.
*/
- cgroup_call_pre_destroy(cgrp);
+ ret = cgroup_call_pre_destroy(cgrp);
+ if (ret)
+ return ret;
mutex_lock(&cgroup_mutex);
parent = cgrp->parent;
-
- if (atomic_read(&cgrp->count)
- || !list_empty(&cgrp->children)
- || !cgroup_clear_css_refs(cgrp)) {
+ if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
mutex_unlock(&cgroup_mutex);
return -EBUSY;
}
+ /*
+ * css_put/get is provided for subsys to grab refcnt to css. In typical
+ * case, subsystem has no reference after pre_destroy(). But, under
+ * hierarchy management, some *temporal* refcnt can be hold.
+ * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
+ * is really busy, it should return -EBUSY at pre_destroy(). wake_up
+ * is called when css_put() is called and refcnt goes down to 0.
+ */
+ set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+ prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ if (!cgroup_clear_css_refs(cgrp)) {
+ mutex_unlock(&cgroup_mutex);
+ schedule();
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+ if (signal_pending(current))
+ return -EINTR;
+ goto again;
+ }
+ /* NO css_tryget() can success after here. */
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
@@ -2708,6 +2840,8 @@ int __init cgroup_init(void)
struct cgroup_subsys *ss = subsys[i];
if (!ss->early_init)
cgroup_init_subsys(ss);
+ if (ss->use_id)
+ cgroup_subsys_init_idr(ss);
}
/* Add init_css_set to the hash table */
@@ -3084,18 +3218,19 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
}
/**
- * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
+ * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
* @cgrp: the cgroup in question
+ * @task: the task in question
*
- * See if @cgrp is a descendant of the current task's cgroup in
- * the appropriate hierarchy.
+ * See if @cgrp is a descendant of @task's cgroup in the appropriate
+ * hierarchy.
*
* If we are sending in dummytop, then presumably we are creating
* the top cgroup in the subsystem.
*
* Called only by the ns (nsproxy) cgroup.
*/
-int cgroup_is_descendant(const struct cgroup *cgrp)
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
int ret;
struct cgroup *target;
@@ -3105,7 +3240,7 @@ int cgroup_is_descendant(const struct cgroup *cgrp)
return 1;
get_first_subsys(cgrp, NULL, &subsys_id);
- target = task_cgroup(current, subsys_id);
+ target = task_cgroup(task, subsys_id);
while (cgrp != target && cgrp!= cgrp->top_cgroup)
cgrp = cgrp->parent;
ret = (cgrp == target);
@@ -3138,10 +3273,12 @@ void __css_put(struct cgroup_subsys_state *css)
{
struct cgroup *cgrp = css->cgroup;
rcu_read_lock();
- if ((atomic_dec_return(&css->refcnt) == 1) &&
- notify_on_release(cgrp)) {
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- check_for_release(cgrp);
+ if (atomic_dec_return(&css->refcnt) == 1) {
+ if (notify_on_release(cgrp)) {
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
+ cgroup_wakeup_rmdir_waiters(cgrp);
}
rcu_read_unlock();
}
@@ -3241,3 +3378,232 @@ static int __init cgroup_disable(char *str)
return 1;
}
__setup("cgroup_disable=", cgroup_disable);
+
+/*
+ * Functons for CSS ID.
+ */
+
+/*
+ *To get ID other than 0, this should be called when !cgroup_is_removed().
+ */
+unsigned short css_id(struct cgroup_subsys_state *css)
+{
+ struct css_id *cssid = rcu_dereference(css->id);
+
+ if (cssid)
+ return cssid->id;
+ return 0;
+}
+
+unsigned short css_depth(struct cgroup_subsys_state *css)
+{
+ struct css_id *cssid = rcu_dereference(css->id);
+
+ if (cssid)
+ return cssid->depth;
+ return 0;
+}
+
+bool css_is_ancestor(struct cgroup_subsys_state *child,
+ const struct cgroup_subsys_state *root)
+{
+ struct css_id *child_id = rcu_dereference(child->id);
+ struct css_id *root_id = rcu_dereference(root->id);
+
+ if (!child_id || !root_id || (child_id->depth < root_id->depth))
+ return false;
+ return child_id->stack[root_id->depth] == root_id->id;
+}
+
+static void __free_css_id_cb(struct rcu_head *head)
+{
+ struct css_id *id;
+
+ id = container_of(head, struct css_id, rcu_head);
+ kfree(id);
+}
+
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
+{
+ struct css_id *id = css->id;
+ /* When this is called before css_id initialization, id can be NULL */
+ if (!id)
+ return;
+
+ BUG_ON(!ss->use_id);
+
+ rcu_assign_pointer(id->css, NULL);
+ rcu_assign_pointer(css->id, NULL);
+ spin_lock(&ss->id_lock);
+ idr_remove(&ss->idr, id->id);
+ spin_unlock(&ss->id_lock);
+ call_rcu(&id->rcu_head, __free_css_id_cb);
+}
+
+/*
+ * This is called by init or create(). Then, calls to this function are
+ * always serialized (By cgroup_mutex() at create()).
+ */
+
+static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
+{
+ struct css_id *newid;
+ int myid, error, size;
+
+ BUG_ON(!ss->use_id);
+
+ size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
+ newid = kzalloc(size, GFP_KERNEL);
+ if (!newid)
+ return ERR_PTR(-ENOMEM);
+ /* get id */
+ if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+ spin_lock(&ss->id_lock);
+ /* Don't use 0. allocates an ID of 1-65535 */
+ error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+ spin_unlock(&ss->id_lock);
+
+ /* Returns error when there are no free spaces for new ID.*/
+ if (error) {
+ error = -ENOSPC;
+ goto err_out;
+ }
+ if (myid > CSS_ID_MAX)
+ goto remove_idr;
+
+ newid->id = myid;
+ newid->depth = depth;
+ return newid;
+remove_idr:
+ error = -ENOSPC;
+ spin_lock(&ss->id_lock);
+ idr_remove(&ss->idr, myid);
+ spin_unlock(&ss->id_lock);
+err_out:
+ kfree(newid);
+ return ERR_PTR(error);
+
+}
+
+static int __init cgroup_subsys_init_idr(struct cgroup_subsys *ss)
+{
+ struct css_id *newid;
+ struct cgroup_subsys_state *rootcss;
+
+ spin_lock_init(&ss->id_lock);
+ idr_init(&ss->idr);
+
+ rootcss = init_css_set.subsys[ss->subsys_id];
+ newid = get_new_cssid(ss, 0);
+ if (IS_ERR(newid))
+ return PTR_ERR(newid);
+
+ newid->stack[0] = newid->id;
+ newid->css = rootcss;
+ rootcss->id = newid;
+ return 0;
+}
+
+static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
+ struct cgroup *child)
+{
+ int subsys_id, i, depth = 0;
+ struct cgroup_subsys_state *parent_css, *child_css;
+ struct css_id *child_id, *parent_id = NULL;
+
+ subsys_id = ss->subsys_id;
+ parent_css = parent->subsys[subsys_id];
+ child_css = child->subsys[subsys_id];
+ depth = css_depth(parent_css) + 1;
+ parent_id = parent_css->id;
+
+ child_id = get_new_cssid(ss, depth);
+ if (IS_ERR(child_id))
+ return PTR_ERR(child_id);
+
+ for (i = 0; i < depth; i++)
+ child_id->stack[i] = parent_id->stack[i];
+ child_id->stack[depth] = child_id->id;
+ /*
+ * child_id->css pointer will be set after this cgroup is available
+ * see cgroup_populate_dir()
+ */
+ rcu_assign_pointer(child_css->id, child_id);
+
+ return 0;
+}
+
+/**
+ * css_lookup - lookup css by id
+ * @ss: cgroup subsys to be looked into.
+ * @id: the id
+ *
+ * Returns pointer to cgroup_subsys_state if there is valid one with id.
+ * NULL if not. Should be called under rcu_read_lock()
+ */
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
+{
+ struct css_id *cssid = NULL;
+
+ BUG_ON(!ss->use_id);
+ cssid = idr_find(&ss->idr, id);
+
+ if (unlikely(!cssid))
+ return NULL;
+
+ return rcu_dereference(cssid->css);
+}
+
+/**
+ * css_get_next - lookup next cgroup under specified hierarchy.
+ * @ss: pointer to subsystem
+ * @id: current position of iteration.
+ * @root: pointer to css. search tree under this.
+ * @foundid: position of found object.
+ *
+ * Search next css under the specified hierarchy of rootid. Calling under
+ * rcu_read_lock() is necessary. Returns NULL if it reaches the end.
+ */
+struct cgroup_subsys_state *
+css_get_next(struct cgroup_subsys *ss, int id,
+ struct cgroup_subsys_state *root, int *foundid)
+{
+ struct cgroup_subsys_state *ret = NULL;
+ struct css_id *tmp;
+ int tmpid;
+ int rootid = css_id(root);
+ int depth = css_depth(root);
+
+ if (!rootid)
+ return NULL;
+
+ BUG_ON(!ss->use_id);
+ /* fill start point for scan */
+ tmpid = id;
+ while (1) {
+ /*
+ * scan next entry from bitmap(tree), tmpid is updated after
+ * idr_get_next().
+ */
+ spin_lock(&ss->id_lock);
+ tmp = idr_get_next(&ss->idr, &tmpid);
+ spin_unlock(&ss->id_lock);
+
+ if (!tmp)
+ break;
+ if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
+ ret = rcu_dereference(tmp->css);
+ if (ret) {
+ *foundid = tmpid;
+ break;
+ }
+ }
+ /* continue to scan from next id */
+ tmpid = tmpid + 1;
+ }
+ return ret;
+}
+
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index daca6209202d..0c92d797baa6 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -40,9 +40,7 @@ static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
{
u64 count;
- cgroup_lock();
count = cgroup_task_count(cont);
- cgroup_unlock();
return count;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 79e40f00dcb8..395b6974dc8d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
- cpu_clear(cpu, cpu_active_map);
+ set_cpu_active(cpu, false);
/*
* Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
err = _cpu_down(cpu, 0);
if (cpu_online(cpu))
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
out:
cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
goto out_notify;
BUG_ON(!cpu_online(cpu));
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(cpu, true);
/* Now call notifier in preparation. */
raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f76db9dcaa05..026faccca869 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -128,10 +128,6 @@ static inline struct cpuset *task_cs(struct task_struct *task)
return container_of(task_subsys_state(task, cpuset_subsys_id),
struct cpuset, css);
}
-struct cpuset_hotplug_scanner {
- struct cgroup_scanner scan;
- struct cgroup *to;
-};
/* bits in struct cpuset flags field */
typedef enum {
@@ -521,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
return 0;
}
+#ifdef CONFIG_SMP
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping cpus_allowed masks?
@@ -815,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
put_online_cpus();
}
+#else /* !CONFIG_SMP */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+}
+
+static int generate_sched_domains(struct cpumask **domains,
+ struct sched_domain_attr **attributes)
+{
+ *domains = NULL;
+ return 1;
+}
+#endif /* CONFIG_SMP */
static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
@@ -1026,101 +1035,70 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
mutex_unlock(&callback_mutex);
}
+/*
+ * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new
+ * nodes if memory_migrate flag is set. Called with cgroup_mutex held.
+ */
+static void cpuset_change_nodemask(struct task_struct *p,
+ struct cgroup_scanner *scan)
+{
+ struct mm_struct *mm;
+ struct cpuset *cs;
+ int migrate;
+ const nodemask_t *oldmem = scan->data;
+
+ mm = get_task_mm(p);
+ if (!mm)
+ return;
+
+ cs = cgroup_cs(scan->cg);
+ migrate = is_memory_migrate(cs);
+
+ mpol_rebind_mm(mm, &cs->mems_allowed);
+ if (migrate)
+ cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
+ mmput(mm);
+}
+
static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
* @oldmem: old mems_allowed of cpuset cs
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
*
* Called with cgroup_mutex held
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
*/
-static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
+static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
+ struct ptr_heap *heap)
{
- struct task_struct *p;
- struct mm_struct **mmarray;
- int i, n, ntasks;
- int migrate;
- int fudge;
- struct cgroup_iter it;
- int retval;
+ struct cgroup_scanner scan;
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
- fudge = 10; /* spare mmarray[] slots */
- fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */
- retval = -ENOMEM;
-
- /*
- * Allocate mmarray[] to hold mm reference for each task
- * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
- * tasklist_lock. We could use GFP_ATOMIC, but with a
- * few more lines of code, we can retry until we get a big
- * enough mmarray[] w/o using GFP_ATOMIC.
- */
- while (1) {
- ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
- ntasks += fudge;
- mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
- if (!mmarray)
- goto done;
- read_lock(&tasklist_lock); /* block fork */
- if (cgroup_task_count(cs->css.cgroup) <= ntasks)
- break; /* got enough */
- read_unlock(&tasklist_lock); /* try again */
- kfree(mmarray);
- }
-
- n = 0;
-
- /* Load up mmarray[] with mm reference for each task in cpuset. */
- cgroup_iter_start(cs->css.cgroup, &it);
- while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
- struct mm_struct *mm;
-
- if (n >= ntasks) {
- printk(KERN_WARNING
- "Cpuset mempolicy rebind incomplete.\n");
- break;
- }
- mm = get_task_mm(p);
- if (!mm)
- continue;
- mmarray[n++] = mm;
- }
- cgroup_iter_end(cs->css.cgroup, &it);
- read_unlock(&tasklist_lock);
+ scan.cg = cs->css.cgroup;
+ scan.test_task = NULL;
+ scan.process_task = cpuset_change_nodemask;
+ scan.heap = heap;
+ scan.data = (nodemask_t *)oldmem;
/*
- * Now that we've dropped the tasklist spinlock, we can
- * rebind the vma mempolicies of each mm in mmarray[] to their
- * new cpuset, and release that mm. The mpol_rebind_mm()
- * call takes mmap_sem, which we couldn't take while holding
- * tasklist_lock. Forks can happen again now - the mpol_dup()
- * cpuset_being_rebound check will catch such forks, and rebind
- * their vma mempolicies too. Because we still hold the global
- * cgroup_mutex, we know that no other rebind effort will
- * be contending for the global variable cpuset_being_rebound.
+ * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
+ * take while holding tasklist_lock. Forks can happen - the
+ * mpol_dup() cpuset_being_rebound check will catch such forks,
+ * and rebind their vma mempolicies too. Because we still hold
+ * the global cgroup_mutex, we know that no other rebind effort
+ * will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
- migrate = is_memory_migrate(cs);
- for (i = 0; i < n; i++) {
- struct mm_struct *mm = mmarray[i];
-
- mpol_rebind_mm(mm, &cs->mems_allowed);
- if (migrate)
- cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
- mmput(mm);
- }
+ cgroup_scan_tasks(&scan);
/* We're done rebinding vmas to this cpuset's new mems_allowed. */
- kfree(mmarray);
cpuset_being_rebound = NULL;
- retval = 0;
-done:
- return retval;
}
/*
@@ -1141,6 +1119,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
{
nodemask_t oldmem;
int retval;
+ struct ptr_heap heap;
/*
* top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -1175,12 +1154,18 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
+ retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+ if (retval < 0)
+ goto done;
+
mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs->mems_allowed;
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- retval = update_tasks_nodemask(cs, &oldmem);
+ update_tasks_nodemask(cs, &oldmem, &heap);
+
+ heap_free(&heap);
done:
return retval;
}
@@ -1192,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
+#ifdef CONFIG_SMP
if (val < -1 || val >= SD_LV_MAX)
return -EINVAL;
+#endif
if (val != cs->relax_domain_level) {
cs->relax_domain_level = val;
@@ -1355,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
struct cgroup *cont, struct task_struct *tsk)
{
struct cpuset *cs = cgroup_cs(cont);
- int ret = 0;
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
- if (tsk->flags & PF_THREAD_BOUND) {
- mutex_lock(&callback_mutex);
- if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
- ret = -EINVAL;
- mutex_unlock(&callback_mutex);
- }
+ /*
+ * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
+ * cannot change their cpu affinity and isolating such threads by their
+ * set of allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for success of
+ * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
+ * be changed.
+ */
+ if (tsk->flags & PF_THREAD_BOUND)
+ return -EINVAL;
- return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
+ return security_task_setscheduler(tsk, 0, NULL);
}
static void cpuset_attach(struct cgroup_subsys *ss,
@@ -1706,6 +1696,7 @@ static struct cftype files[] = {
.read_u64 = cpuset_read_u64,
.write_u64 = cpuset_write_u64,
.private = FILE_MEMORY_PRESSURE,
+ .mode = S_IRUGO,
},
{
@@ -1913,10 +1904,9 @@ int __init cpuset_init(void)
static void cpuset_do_move_task(struct task_struct *tsk,
struct cgroup_scanner *scan)
{
- struct cpuset_hotplug_scanner *chsp;
+ struct cgroup *new_cgroup = scan->data;
- chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
- cgroup_attach_task(chsp->to, tsk);
+ cgroup_attach_task(new_cgroup, tsk);
}
/**
@@ -1932,15 +1922,15 @@ static void cpuset_do_move_task(struct task_struct *tsk,
*/
static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
{
- struct cpuset_hotplug_scanner scan;
+ struct cgroup_scanner scan;
- scan.scan.cg = from->css.cgroup;
- scan.scan.test_task = NULL; /* select all tasks in cgroup */
- scan.scan.process_task = cpuset_do_move_task;
- scan.scan.heap = NULL;
- scan.to = to->css.cgroup;
+ scan.cg = from->css.cgroup;
+ scan.test_task = NULL; /* select all tasks in cgroup */
+ scan.process_task = cpuset_do_move_task;
+ scan.heap = NULL;
+ scan.data = to->css.cgroup;
- if (cgroup_scan_tasks(&scan.scan))
+ if (cgroup_scan_tasks(&scan))
printk(KERN_ERR "move_member_tasks_to_cpuset: "
"cgroup_scan_tasks failed\n");
}
@@ -2033,7 +2023,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
remove_tasks_in_empty_cpuset(cp);
else {
update_tasks_cpumask(cp, NULL);
- update_tasks_nodemask(cp, &oldmems);
+ update_tasks_nodemask(cp, &oldmems, NULL);
}
}
}
@@ -2069,7 +2059,9 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
}
cgroup_lock();
+ mutex_lock(&callback_mutex);
cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+ mutex_unlock(&callback_mutex);
scan_for_empty_cpusets(&top_cpuset);
ndoms = generate_sched_domains(&doms, &attr);
cgroup_unlock();
@@ -2092,11 +2084,12 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
cgroup_lock();
switch (action) {
case MEM_ONLINE:
- top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
- break;
case MEM_OFFLINE:
+ mutex_lock(&callback_mutex);
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
- scan_for_empty_cpusets(&top_cpuset);
+ mutex_unlock(&callback_mutex);
+ if (action == MEM_OFFLINE)
+ scan_for_empty_cpusets(&top_cpuset);
break;
default:
break;
@@ -2206,26 +2199,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
}
/**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
- * If we're in interrupt, yes, we can always allocate. If
- * __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
* Otherwise, no.
*
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
*
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
@@ -2264,20 +2255,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
* GFP_USER - only nodes in current tasks mems allowed ok.
*
* Rule:
- * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
* pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
* the code that might scan up ancestor cpusets and sleep.
*/
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
int allowed; /* is allocation in zone z allowed? */
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
return 1;
- node = zone_to_nid(z);
might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
if (node_isset(node, current->mems_allowed))
return 1;
@@ -2306,15 +2294,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
}
/*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes. Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
*
* The __GFP_THISNODE placement logic is really handled elsewhere,
* by forcibly using a zonelist starting at a specified node, and by
@@ -2322,20 +2310,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
* any node on the zonelist except the first. By the time any such
* calls get to this routine, we should just shut up and say 'yes'.
*
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
* mems_allowed or that we're in interrupt. It does not scan up the
* cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
* It never sleeps.
*/
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
- int node; /* node that zone z is on */
-
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
return 1;
- node = zone_to_nid(z);
if (node_isset(node, current->mems_allowed))
return 1;
/*
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 667c841c2952..c35452cadded 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -18,6 +18,7 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/types.h>
+#include <linux/fs_struct.h>
static void default_handler(int, struct pt_regs *);
@@ -145,28 +146,6 @@ __set_personality(u_long personality)
return 0;
}
- if (atomic_read(&current->fs->count) != 1) {
- struct fs_struct *fsp, *ofsp;
-
- fsp = copy_fs_struct(current->fs);
- if (fsp == NULL) {
- module_put(ep->module);
- return -ENOMEM;
- }
-
- task_lock(current);
- ofsp = current->fs;
- current->fs = fsp;
- task_unlock(current);
-
- put_fs_struct(ofsp);
- }
-
- /*
- * At that point we are guaranteed to be the sole owner of
- * current->fs.
- */
-
current->personality = personality;
oep = current_thread_info()->exec_domain;
current_thread_info()->exec_domain = ep;
diff --git a/kernel/exit.c b/kernel/exit.c
index d2e8239ea187..036e8d740169 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
+#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <trace/sched.h>
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
static void exit_mm(struct task_struct * tsk);
-static inline int task_detached(struct task_struct *p)
-{
- return p->exit_signal == -1;
-}
-
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
@@ -362,16 +358,12 @@ static void reparent_to_kthreadd(void)
void __set_special_pids(struct pid *pid)
{
struct task_struct *curr = current->group_leader;
- pid_t nr = pid_nr(pid);
- if (task_session(curr) != pid) {
+ if (task_session(curr) != pid)
change_pid(curr, PIDTYPE_SID, pid);
- set_task_session(curr, nr);
- }
- if (task_pgrp(curr) != pid) {
+
+ if (task_pgrp(curr) != pid)
change_pid(curr, PIDTYPE_PGID, pid);
- set_task_pgrp(curr, nr);
- }
}
static void set_special_pids(struct pid *pid)
@@ -429,7 +421,6 @@ EXPORT_SYMBOL(disallow_signal);
void daemonize(const char *name, ...)
{
va_list args;
- struct fs_struct *fs;
sigset_t blocked;
va_start(args, name);
@@ -462,11 +453,7 @@ void daemonize(const char *name, ...)
/* Become as one with the init task */
- exit_fs(current); /* current->fs->count--; */
- fs = init_task.fs;
- current->fs = fs;
- atomic_inc(&fs->count);
-
+ daemonize_fs_struct();
exit_files(current);
current->files = init_task.files;
atomic_inc(&current->files->count);
@@ -565,30 +552,6 @@ void exit_files(struct task_struct *tsk)
}
}
-void put_fs_struct(struct fs_struct *fs)
-{
- /* No need to hold fs->lock if we are killing it */
- if (atomic_dec_and_test(&fs->count)) {
- path_put(&fs->root);
- path_put(&fs->pwd);
- kmem_cache_free(fs_cachep, fs);
- }
-}
-
-void exit_fs(struct task_struct *tsk)
-{
- struct fs_struct * fs = tsk->fs;
-
- if (fs) {
- task_lock(tsk);
- tsk->fs = NULL;
- task_unlock(tsk);
- put_fs_struct(fs);
- }
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
#ifdef CONFIG_MM_OWNER
/*
* Task p is exiting and it owned mm, lets find a new owner for it
@@ -732,119 +695,6 @@ static void exit_mm(struct task_struct * tsk)
}
/*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
- int ret;
- struct sighand_struct *psig = parent->sighand;
- unsigned long flags;
- spin_lock_irqsave(&psig->siglock, flags);
- ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
- (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
- spin_unlock_irqrestore(&psig->siglock, flags);
- return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
- struct task_struct *p, *n;
- int ign = -1;
-
- list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
- __ptrace_unlink(p);
-
- if (p->exit_state != EXIT_ZOMBIE)
- continue;
-
- /*
- * If it's a zombie, our attachedness prevented normal
- * parent notification or self-reaping. Do notification
- * now if it would have happened earlier. If it should
- * reap itself, add it to the @dead list. We can't call
- * release_task() here because we already hold tasklist_lock.
- *
- * If it's our own child, there is no notification to do.
- * But if our normal children self-reap, then this child
- * was prevented by ptrace and we must reap it now.
- */
- if (!task_detached(p) && thread_group_empty(p)) {
- if (!same_thread_group(p->real_parent, parent))
- do_notify_parent(p, p->exit_signal);
- else {
- if (ign < 0)
- ign = ignoring_children(parent);
- if (ign)
- p->exit_signal = -1;
- }
- }
-
- if (task_detached(p)) {
- /*
- * Mark it as in the process of being reaped.
- */
- p->exit_state = EXIT_DEAD;
- list_add(&p->ptrace_entry, dead);
- }
- }
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
- struct list_head *dead)
-{
- struct task_struct *p, *n;
-
- BUG_ON(!list_empty(&parent->ptraced));
-
- list_for_each_entry_safe(p, n, dead, ptrace_entry) {
- list_del_init(&p->ptrace_entry);
- release_task(p);
- }
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
- if (p->pdeath_signal)
- /* We already hold the tasklist_lock here. */
- group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
- list_move_tail(&p->sibling, &p->real_parent->children);
-
- /* If this is a threaded reparent there is no need to
- * notify anyone anything has happened.
- */
- if (same_thread_group(p->real_parent, father))
- return;
-
- /* We don't want people slaying init. */
- if (!task_detached(p))
- p->exit_signal = SIGCHLD;
-
- /* If we'd notified the old parent about this child's death,
- * also notify the new parent.
- */
- if (!ptrace_reparented(p) &&
- p->exit_state == EXIT_ZOMBIE &&
- !task_detached(p) && thread_group_empty(p))
- do_notify_parent(p, p->exit_signal);
-
- kill_orphaned_pgrp(p, father);
-}
-
-/*
* When we die, we re-parent all our children.
* Try to give them to another thread in our thread
* group, and if no such member exists, give it to
@@ -883,17 +733,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
return pid_ns->child_reaper;
}
+/*
+* Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+ struct list_head *dead)
+{
+ if (p->pdeath_signal)
+ group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+ list_move_tail(&p->sibling, &p->real_parent->children);
+
+ if (task_detached(p))
+ return;
+ /*
+ * If this is a threaded reparent there is no need to
+ * notify anyone anything has happened.
+ */
+ if (same_thread_group(p->real_parent, father))
+ return;
+
+ /* We don't want people slaying init. */
+ p->exit_signal = SIGCHLD;
+
+ /* If it has exited notify the new parent about this child's death. */
+ if (!p->ptrace &&
+ p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+ do_notify_parent(p, p->exit_signal);
+ if (task_detached(p)) {
+ p->exit_state = EXIT_DEAD;
+ list_move_tail(&p->sibling, dead);
+ }
+ }
+
+ kill_orphaned_pgrp(p, father);
+}
+
static void forget_original_parent(struct task_struct *father)
{
struct task_struct *p, *n, *reaper;
- LIST_HEAD(ptrace_dead);
+ LIST_HEAD(dead_children);
+
+ exit_ptrace(father);
write_lock_irq(&tasklist_lock);
reaper = find_new_reaper(father);
- /*
- * First clean up ptrace if we were using it.
- */
- ptrace_exit(father, &ptrace_dead);
list_for_each_entry_safe(p, n, &father->children, sibling) {
p->real_parent = reaper;
@@ -901,13 +785,16 @@ static void forget_original_parent(struct task_struct *father)
BUG_ON(p->ptrace);
p->parent = p->real_parent;
}
- reparent_thread(p, father);
+ reparent_thread(father, p, &dead_children);
}
-
write_unlock_irq(&tasklist_lock);
+
BUG_ON(!list_empty(&father->children));
- ptrace_exit_finish(father, &ptrace_dead);
+ list_for_each_entry_safe(p, n, &dead_children, sibling) {
+ list_del_init(&p->sibling);
+ release_task(p);
+ }
}
/*
@@ -950,8 +837,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
*/
if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
- tsk->self_exec_id != tsk->parent_exec_id) &&
- !capable(CAP_KILL))
+ tsk->self_exec_id != tsk->parent_exec_id))
tsk->exit_signal = SIGCHLD;
signal = tracehook_notify_death(tsk, &cookie, group_dead);
@@ -1037,6 +923,8 @@ NORET_TYPE void do_exit(long code)
schedule();
}
+ exit_irq_thread();
+
exit_signals(tsk); /* sets PF_EXITING */
/*
* tsk->flags are checked in the futex code to protect against
@@ -1417,6 +1305,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
return retval;
}
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+ if (ptrace) {
+ if (task_is_stopped_or_traced(p))
+ return &p->exit_code;
+ } else {
+ if (p->signal->flags & SIGNAL_STOP_STOPPED)
+ return &p->signal->group_exit_code;
+ }
+ return NULL;
+}
+
/*
* Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
* read_lock(&tasklist_lock) on entry. If we return zero, we still hold
@@ -1427,7 +1327,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
int options, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
- int retval, exit_code, why;
+ int retval, exit_code, *p_code, why;
uid_t uid = 0; /* unneeded, required by compiler */
pid_t pid;
@@ -1437,22 +1337,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
exit_code = 0;
spin_lock_irq(&p->sighand->siglock);
- if (unlikely(!task_is_stopped_or_traced(p)))
- goto unlock_sig;
-
- if (!ptrace && p->signal->group_stop_count > 0)
- /*
- * A group stop is in progress and this is the group leader.
- * We won't report until all threads have stopped.
- */
+ p_code = task_stopped_code(p, ptrace);
+ if (unlikely(!p_code))
goto unlock_sig;
- exit_code = p->exit_code;
+ exit_code = *p_code;
if (!exit_code)
goto unlock_sig;
if (!unlikely(options & WNOWAIT))
- p->exit_code = 0;
+ *p_code = 0;
/* don't need the RCU readlock here as we're holding a spinlock */
uid = __task_cred(p)->uid;
@@ -1609,7 +1503,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
*/
*notask_error = 0;
- if (task_is_stopped_or_traced(p))
+ if (task_stopped_code(p, ptrace))
return wait_task_stopped(ptrace, p, options,
infop, stat_addr, ru);
@@ -1813,7 +1707,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
pid = find_get_pid(-upid);
} else if (upid == 0) {
type = PIDTYPE_PGID;
- pid = get_pid(task_pgrp(current));
+ pid = get_task_pid(current, PIDTYPE_PGID);
} else /* upid > 0 */ {
type = PIDTYPE_PID;
pid = find_get_pid(upid);
diff --git a/kernel/extable.c b/kernel/extable.c
index e136ed8d82ba..7f8f263f8524 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -15,11 +15,22 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/ftrace.h>
+#include <linux/memory.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/init.h>
-#include <linux/ftrace.h>
-#include <asm/uaccess.h>
+
#include <asm/sections.h>
+#include <asm/uaccess.h>
+
+/*
+ * mutex protecting text section modification (dynamic code patching).
+ * some users need to sleep (allocating memory...) while they hold this lock.
+ *
+ * NOT exported to modules - patching kernel text is a really delicate matter.
+ */
+DEFINE_MUTEX(text_mutex);
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
@@ -41,31 +52,50 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
return e;
}
-__notrace_funcgraph int core_kernel_text(unsigned long addr)
+static inline int init_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sinittext &&
+ addr <= (unsigned long)_einittext)
+ return 1;
+ return 0;
+}
+
+int core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr <= (unsigned long)_etext)
return 1;
if (system_state == SYSTEM_BOOTING &&
- addr >= (unsigned long)_sinittext &&
- addr <= (unsigned long)_einittext)
+ init_kernel_text(addr))
return 1;
return 0;
}
-__notrace_funcgraph int __kernel_text_address(unsigned long addr)
+int __kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
- return __module_text_address(addr) != NULL;
+ if (is_module_text_address(addr))
+ return 1;
+ /*
+ * There might be init symbols in saved stacktraces.
+ * Give those symbols a chance to be printed in
+ * backtraces (such as lockdep traces).
+ *
+ * Since we are after the module-symbols check, there's
+ * no danger of address overlap:
+ */
+ if (init_kernel_text(addr))
+ return 1;
+ return 0;
}
int kernel_text_address(unsigned long addr)
{
if (core_kernel_text(addr))
return 1;
- return module_text_address(addr) != NULL;
+ return is_module_text_address(addr);
}
/*
@@ -81,5 +111,5 @@ int func_ptr_is_kernel_text(void *ptr)
addr = (unsigned long) dereference_function_descriptor(ptr);
if (core_kernel_text(addr))
return 1;
- return module_text_address(addr) != NULL;
+ return is_module_text_address(addr);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 6715ebc3761d..b9e2edd00726 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,6 +60,7 @@
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
#include <trace/sched.h>
#include <linux/magic.h>
@@ -284,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
@@ -644,6 +645,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
+#ifdef CONFIG_DETECT_HUNG_TASK
+ tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
+#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
@@ -681,38 +685,21 @@ fail_nomem:
return retval;
}
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
- /* We don't need to lock fs - think why ;-) */
- if (fs) {
- atomic_set(&fs->count, 1);
- rwlock_init(&fs->lock);
- fs->umask = old->umask;
- read_lock(&old->lock);
- fs->root = old->root;
- path_get(&old->root);
- fs->pwd = old->pwd;
- path_get(&old->pwd);
- read_unlock(&old->lock);
- }
- return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
- return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
+ struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
- atomic_inc(&current->fs->count);
+ /* tsk->fs is already what we want */
+ write_lock(&fs->lock);
+ if (fs->in_exec) {
+ write_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+ fs->users++;
+ write_unlock(&fs->lock);
return 0;
}
- tsk->fs = __copy_fs_struct(current->fs);
+ tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
@@ -813,6 +800,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
sig->cputime_expires.virt_exp = cputime_zero;
sig->cputime_expires.sched_exp = 0;
+ if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+ sig->cputime_expires.prof_exp =
+ secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+ sig->cputimer.running = 1;
+ }
+
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -828,11 +821,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
atomic_inc(&current->signal->live);
return 0;
}
- sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
-
- if (sig)
- posix_cpu_timers_init_group(sig);
+ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
@@ -841,6 +831,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
sig->flags = 0;
+ if (clone_flags & CLONE_NEWPID)
+ sig->flags |= SIGNAL_UNKILLABLE;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
@@ -870,6 +862,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
+ posix_cpu_timers_init_group(sig);
+
acct_init_pacct(&sig->pacct);
tty_audit_fork(sig);
@@ -1046,11 +1040,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->default_timer_slack_ns = current->timer_slack_ns;
-#ifdef CONFIG_DETECT_SOFTLOCKUP
- p->last_switch_count = 0;
- p->last_switch_timestamp = 0;
-#endif
-
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
@@ -1125,7 +1114,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
- retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+ retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
@@ -1263,8 +1252,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1488,6 +1475,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
@@ -1543,12 +1531,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
struct fs_struct *fs = current->fs;
- if ((unshare_flags & CLONE_FS) &&
- (fs && atomic_read(&fs->count) > 1)) {
- *new_fsp = __copy_fs_struct(current->fs);
- if (!*new_fsp)
- return -ENOMEM;
- }
+ if (!(unshare_flags & CLONE_FS) || !fs)
+ return 0;
+
+ /* don't need lock here; in the worst case we'll do a useless copy */
+ if (fs->users == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+ if (!*new_fsp)
+ return -ENOMEM;
return 0;
}
@@ -1664,8 +1656,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (new_fs) {
fs = current->fs;
+ write_lock(&fs->lock);
current->fs = new_fs;
- new_fs = fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+ write_unlock(&fs->lock);
}
if (new_mm) {
@@ -1704,7 +1701,7 @@ bad_unshare_cleanup_sigh:
bad_unshare_cleanup_fs:
if (new_fs)
- put_fs_struct(new_fs);
+ free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index 6b50a024bca2..eef8cd26b5e5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -883,7 +883,12 @@ retry_private:
out_unlock:
double_unlock_hb(hb1, hb2);
- /* drop_futex_key_refs() must be called outside the spinlocks. */
+ /*
+ * drop_futex_key_refs() must be called outside the spinlocks. During
+ * the requeue we moved futex_q's from the hash bucket at key1 to the
+ * one at key2 and updated their key pointer. We no longer need to
+ * hold the references to key1.
+ */
while (--drop_count >= 0)
drop_futex_key_refs(&key1);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f394d2a42ca3..cb8a15c19583 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
- spin_unlock(&base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ if (wakeup) {
+ spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ spin_lock(&base->cpu_base->lock);
+ } else
+ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
return 1;
}
+
return 0;
}
@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ int wakeup)
{
return 0;
}
@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
return 0;
}
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer: the timer to be added
- * @tim: expiry time
- * @delta_ns: "slack" range for the timer
- * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
- *
- * Returns:
- * 0 on success
- * 1 when the timer was active
- */
-int
-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
- const enum hrtimer_mode mode)
+int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns, const enum hrtimer_mode mode,
+ int wakeup)
{
struct hrtimer_clock_base *base, *new_base;
unsigned long flags;
@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
* XXX send_remote_softirq() ?
*/
if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
- hrtimer_enqueue_reprogram(timer, new_base);
+ hrtimer_enqueue_reprogram(timer, new_base, wakeup);
unlock_hrtimer_base(timer, &flags);
return ret;
}
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+ * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ * 0 on success
+ * 1 when the timer was active
+ */
+int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns, const enum hrtimer_mode mode)
+{
+ return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
+}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
/**
@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
- return hrtimer_start_range_ns(timer, tim, 0, mode);
+ return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
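[Editor's note] The refactoring above keeps the exported hrtimer interface unchanged while routing it through __hrtimer_start_range_ns() with wakeup=1. A minimal caller-side sketch of that exported interface follows; demo_timer and demo_timer_fn are illustrative names, not part of the patch.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* runs in hrtimer expiry context */
	return HRTIMER_NORESTART;
}

static void demo_arm_timer(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	/* 10 ms expiry with 1 ms of slack; the wrapper passes wakeup=1 */
	hrtimer_start_range_ns(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}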
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
new file mode 100644
index 000000000000..022a4927b785
--- /dev/null
+++ b/kernel/hung_task.c
@@ -0,0 +1,217 @@
+/*
+ * Detect Hung Task
+ *
+ * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+
+/*
+ * The number of tasks checked:
+ */
+unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+
+/*
+ * Limit number of tasks checked in a batch.
+ *
+ * This value controls the preemptibility of khungtaskd since preemption
+ * is disabled during the critical section. It also controls the size of
+ * the RCU grace period, so it needs an upper bound.
+ */
+#define HUNG_TASK_BATCHING 1024
+
+/*
+ * Zero means infinite timeout - no checking done:
+ */
+unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;
+
+unsigned long __read_mostly sysctl_hung_task_warnings = 10;
+
+static int __read_mostly did_panic;
+
+static struct task_struct *watchdog_task;
+
+/*
+ * Should we panic (and reboot, if panic_timeout= is set) when a
+ * hung task is detected:
+ */
+unsigned int __read_mostly sysctl_hung_task_panic =
+ CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;
+
+static int __init hung_task_panic_setup(char *str)
+{
+ sysctl_hung_task_panic = simple_strtoul(str, NULL, 0);
+
+ return 1;
+}
+__setup("hung_task_panic=", hung_task_panic_setup);
+
+static int
+hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ did_panic = 1;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_block = {
+ .notifier_call = hung_task_panic,
+};
+
+static void check_hung_task(struct task_struct *t, unsigned long timeout)
+{
+ unsigned long switch_count = t->nvcsw + t->nivcsw;
+
+ /*
+ * Ensure the task is not frozen.
+	 * Also, when a freshly created task is scheduled once and changes
+	 * its state to TASK_UNINTERRUPTIBLE without ever having been
+	 * switched out, it mustn't be checked.
+ */
+ if (unlikely(t->flags & PF_FROZEN || !switch_count))
+ return;
+
+ if (switch_count != t->last_switch_count) {
+ t->last_switch_count = switch_count;
+ return;
+ }
+ if (!sysctl_hung_task_warnings)
+ return;
+ sysctl_hung_task_warnings--;
+
+ /*
+	 * Ok, the task did not get scheduled for longer than the timeout,
+ * complain:
+ */
+ printk(KERN_ERR "INFO: task %s:%d blocked for more than "
+ "%ld seconds.\n", t->comm, t->pid, timeout);
+ printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+ " disables this message.\n");
+ sched_show_task(t);
+ __debug_show_held_locks(t);
+
+ touch_nmi_watchdog();
+
+ if (sysctl_hung_task_panic)
+ panic("hung_task: blocked tasks");
+}
+
+/*
+ * To avoid extending the RCU grace period for an unbounded amount of time,
+ * periodically exit the critical section and enter a new one.
+ *
+ * For preemptible RCU it is sufficient to call rcu_read_unlock in order
+ * to exit the grace period. For classic RCU, a reschedule is required.
+ */
+static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+{
+ get_task_struct(g);
+ get_task_struct(t);
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+ put_task_struct(t);
+ put_task_struct(g);
+}
+
+/*
+ * Check whether a TASK_UNINTERRUPTIBLE task does not get woken up for
+ * a really long time (120 seconds). If that happens, print out
+ * a warning.
+ */
+static void check_hung_uninterruptible_tasks(unsigned long timeout)
+{
+ int max_count = sysctl_hung_task_check_count;
+ int batch_count = HUNG_TASK_BATCHING;
+ struct task_struct *g, *t;
+
+ /*
+ * If the system crashed already then all bets are off,
+ * do not report extra hung tasks:
+ */
+ if (test_taint(TAINT_DIE) || did_panic)
+ return;
+
+ rcu_read_lock();
+ do_each_thread(g, t) {
+ if (!--max_count)
+ goto unlock;
+ if (!--batch_count) {
+ batch_count = HUNG_TASK_BATCHING;
+ rcu_lock_break(g, t);
+ /* Exit if t or g was unhashed during refresh. */
+ if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+ goto unlock;
+ }
+ /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
+ if (t->state == TASK_UNINTERRUPTIBLE)
+ check_hung_task(t, timeout);
+ } while_each_thread(g, t);
+ unlock:
+ rcu_read_unlock();
+}
+
+static unsigned long timeout_jiffies(unsigned long timeout)
+{
+ /* timeout of 0 will disable the watchdog */
+ return timeout ? timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
+/*
+ * Process updating of timeout sysctl
+ */
+int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+
+ if (ret || !write)
+ goto out;
+
+ wake_up_process(watchdog_task);
+
+ out:
+ return ret;
+}
+
+/*
+ * kthread which checks for tasks stuck in D state
+ */
+static int watchdog(void *dummy)
+{
+ set_user_nice(current, 0);
+
+ for ( ; ; ) {
+ unsigned long timeout = sysctl_hung_task_timeout_secs;
+
+ while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
+ timeout = sysctl_hung_task_timeout_secs;
+
+ check_hung_uninterruptible_tasks(timeout);
+ }
+
+ return 0;
+}
+
+static int __init hung_task_init(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+ watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
+
+ return 0;
+}
+
+module_init(hung_task_init);
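[Editor's note] proc_dohung_task_timeout_secs() above is meant to be referenced from a sysctl table (the matching kernel/sysctl.c changes appear in the diffstat but not in this section). A rough sketch of what such an entry could look like; the field values are assumptions for illustration, not the actual kernel/sysctl.c hunk.

#include <linux/sysctl.h>

extern unsigned long sysctl_hung_task_timeout_secs;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 struct file *filp, void __user *buffer,
					 size_t *lenp, loff_t *ppos);

static struct ctl_table hung_task_table_example[] = {
	{
		.procname	= "hung_task_timeout_secs",
		.data		= &sysctl_hung_task_timeout_secs,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		/* a write wakes khungtaskd via the handler above */
		.proc_handler	= &proc_dohung_task_timeout_secs,
	},
	{ }	/* terminator */
};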
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac98..3394f8f52964 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 38a25b8d8bff..d06df9c41cba 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
}
/**
- * devm_request_irq - allocate an interrupt line for a managed device
+ * devm_request_threaded_irq - allocate an interrupt line for a managed device
* @dev: device to request interrupt for
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ * for devices which handle everything in @handler
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
* If an IRQ allocated with this function needs to be freed
* separately, dev_free_irq() must be used.
*/
-int devm_request_irq(struct device *dev, unsigned int irq,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id)
+int devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id)
{
struct irq_devres *dr;
int rc;
@@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
if (!dr)
return -ENOMEM;
- rc = request_irq(irq, handler, irqflags, devname, dev_id);
+ rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
+ dev_id);
if (rc) {
devres_free(dr);
return rc;
@@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
return 0;
}
-EXPORT_SYMBOL(devm_request_irq);
+EXPORT_SYMBOL(devm_request_threaded_irq);
/**
* devm_free_irq - free an interrupt
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..26e08754744f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
+#include <trace/irq.h>
#include <linux/bootmem.h>
#include "internals.h"
@@ -338,6 +339,18 @@ irqreturn_t no_action(int cpl, void *dev_id)
return IRQ_NONE;
}
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+ if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+ return;
+
+ printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+	       "but no thread function available.\n", irq, action->name);
+}
+
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
@@ -350,15 +363,54 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;
- WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
if (!(action->flags & IRQF_DISABLED))
local_irq_enable_in_hardirq();
do {
+ trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
- if (ret == IRQ_HANDLED)
+ trace_irq_handler_exit(irq, action, ret);
+
+ switch (ret) {
+ case IRQ_WAKE_THREAD:
+ /*
+ * Set result to handled so the spurious check
+ * does not trigger.
+ */
+ ret = IRQ_HANDLED;
+
+ /*
+ * Catch drivers which return WAKE_THREAD but
+ * did not set up a thread function
+ */
+ if (unlikely(!action->thread_fn)) {
+ warn_no_thread(irq, action);
+ break;
+ }
+
+ /*
+ * Wake up the handler thread for this
+ * action. In case the thread crashed and was
+ * killed we just pretend that we handled the
+ * interrupt. The hardirq handler above has
+ * disabled the device interrupt, so no irq
+ * storm is lurking.
+ */
+ if (likely(!test_bit(IRQTF_DIED,
+ &action->thread_flags))) {
+ set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+ wake_up_process(action->thread);
+ }
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
status |= action->flags;
+ break;
+
+ default:
+ break;
+ }
+
retval |= ret;
action = action->next;
} while (action);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ee1aa9f8e8b9..01ce20eab38f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,6 +12,8 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6458e99984c0..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
*/
#include <linux/irq.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include "internals.h"
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
+
+ /*
+ * We made sure that no hardirq handler is running. Now verify
+ * that no threaded handlers are active.
+ */
+ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
/**
* irq_can_set_affinity - Check if the affinity of a given irq can be set
* @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
return 1;
}
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+ struct irqaction *action = desc->action;
+
+ while (action) {
+ if (action->thread)
+ set_cpus_allowed_ptr(action->thread, cpumask);
+ action = action->next;
+ }
+}
+
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
@@ -89,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
- cpumask_copy(desc->affinity, cpumask);
+ if (desc->status & IRQ_MOVE_PCNTXT)
desc->chip->set_affinity(irq, cpumask);
- } else {
+ else {
desc->status |= IRQ_MOVE_PENDING;
cpumask_copy(desc->pending_mask, cpumask);
}
@@ -100,6 +119,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
cpumask_copy(desc->affinity, cpumask);
desc->chip->set_affinity(irq, cpumask);
#endif
+ irq_set_thread_affinity(desc, cpumask);
desc->status |= IRQ_AFFINITY_SET;
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
@@ -150,6 +170,8 @@ int irq_select_affinity_usr(unsigned int irq)
spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
+ if (!ret)
+ irq_set_thread_affinity(desc, desc->affinity);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
@@ -162,6 +184,20 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
}
#endif
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+ if (suspend) {
+ if (!desc->action || (desc->action->flags & IRQF_TIMER))
+ return;
+ desc->status |= IRQ_SUSPENDED;
+ }
+
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->chip->disable(irq);
+ }
+}
+
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
@@ -182,10 +218,7 @@ void disable_irq_nosync(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
- }
+ __disable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +248,21 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL(disable_irq);
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
+ if (resume)
+ desc->status &= ~IRQ_SUSPENDED;
+
switch (desc->depth) {
case 0:
+ err_out:
WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
break;
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
+ if (desc->status & IRQ_SUSPENDED)
+ goto err_out;
/* Prevent probing on this irq: */
desc->status = status | IRQ_NOPROBE;
check_irq_resend(desc, irq);
@@ -253,7 +292,7 @@ void enable_irq(unsigned int irq)
return;
spin_lock_irqsave(&desc->lock, flags);
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
@@ -384,6 +423,90 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (test_and_clear_bit(IRQTF_RUNTHREAD,
+ &action->thread_flags)) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+ schedule();
+ }
+ return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+ struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ int wake;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ current->irqaction = action;
+
+ while (!irq_wait_for_interrupt(action)) {
+
+ atomic_inc(&desc->threads_active);
+
+ spin_lock_irq(&desc->lock);
+ if (unlikely(desc->status & IRQ_DISABLED)) {
+ /*
+ * CHECKME: We might need a dedicated
+ * IRQ_THREAD_PENDING flag here, which
+ * retriggers the thread in check_irq_resend()
+ * but AFAICT IRQ_PENDING should be fine as it
+ * retriggers the interrupt itself --- tglx
+ */
+ desc->status |= IRQ_PENDING;
+ spin_unlock_irq(&desc->lock);
+ } else {
+ spin_unlock_irq(&desc->lock);
+
+ action->thread_fn(action->irq, action->dev_id);
+ }
+
+ wake = atomic_dec_and_test(&desc->threads_active);
+
+ if (wake && waitqueue_active(&desc->wait_for_threads))
+ wake_up(&desc->wait_for_threads);
+ }
+
+ /*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * a fuss about an active irq thread going into nirvana.
+ */
+ current->irqaction = NULL;
+ return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+ struct task_struct *tsk = current;
+
+ if (!tsk->irqaction)
+ return;
+
+ printk(KERN_ERR
+ "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+ tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+ /*
+ * Set the THREAD DIED flag to prevent further wakeups of the
+ * soon to be gone threaded handler.
+ */
+ set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+}
+
/*
* Internal function to register an irqaction - typically used to
* allocate special interrupts that are part of the architecture.
@@ -420,6 +543,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
/*
+ * Threaded handler ?
+ */
+ if (new->thread_fn) {
+ struct task_struct *t;
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ /*
+ * We keep the reference to the task struct even if
+		 * the thread dies so that the interrupt code does not
+		 * reference an already freed task_struct.
+ */
+ get_task_struct(t);
+ new->thread = t;
+ wake_up_process(t);
+ }
+
+ /*
* The following block of code has to be executed atomically
*/
spin_lock_irqsave(&desc->lock, flags);
@@ -456,15 +599,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!shared) {
irq_chip_set_defaults(desc->chip);
+ init_waitqueue_head(&desc->wait_for_threads);
+
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
ret = __irq_set_trigger(desc, irq,
new->flags & IRQF_TRIGGER_MASK);
- if (ret) {
- spin_unlock_irqrestore(&desc->lock, flags);
- return ret;
- }
+ if (ret)
+ goto out_thread;
} else
compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
@@ -511,7 +654,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
desc->status &= ~IRQ_SPURIOUS_DISABLED;
- __enable_irq(desc, irq);
+ __enable_irq(desc, irq, false);
}
spin_unlock_irqrestore(&desc->lock, flags);
@@ -532,8 +675,19 @@ mismatch:
dump_stack();
}
#endif
+ ret = -EBUSY;
+
+out_thread:
spin_unlock_irqrestore(&desc->lock, flags);
- return -EBUSY;
+ if (new->thread) {
+ struct task_struct *t = new->thread;
+
+ new->thread = NULL;
+ if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+ kthread_stop(t);
+ put_task_struct(t);
+ }
+ return ret;
}
/**
@@ -559,6 +713,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action, **action_ptr;
+ struct task_struct *irqthread;
unsigned long flags;
WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -605,6 +760,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
else
desc->chip->disable(irq);
}
+
+ irqthread = action->thread;
+ action->thread = NULL;
+
spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
@@ -612,6 +771,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
/* Make sure it's not being used on another CPU: */
synchronize_irq(irq);
+ if (irqthread) {
+ if (!test_bit(IRQTF_DIED, &action->thread_flags))
+ kthread_stop(irqthread);
+ put_task_struct(irqthread);
+ }
+
#ifdef CONFIG_DEBUG_SHIRQ
/*
* It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -664,9 +829,12 @@ void free_irq(unsigned int irq, void *dev_id)
EXPORT_SYMBOL(free_irq);
/**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * @thread_fn: Function called from the irq handler thread
+ * If NULL, no irq thread is created
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
@@ -678,6 +846,15 @@ EXPORT_SYMBOL(free_irq);
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler is
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If it does, it
+ * needs to disable the interrupt on the device and return
+ * IRQ_WAKE_THREAD, which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
@@ -693,8 +870,9 @@ EXPORT_SYMBOL(free_irq);
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn, unsigned long irqflags,
+ const char *devname, void *dev_id)
{
struct irqaction *action;
struct irq_desc *desc;
@@ -742,6 +920,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
return -ENOMEM;
action->handler = handler;
+ action->thread_fn = thread_fn;
action->flags = irqflags;
action->name = devname;
action->dev_id = dev_id;
@@ -771,4 +950,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
#endif
return retval;
}
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
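[Editor's note] From a driver's point of view, the new API splits interrupt handling into a non-sleeping primary handler and a threaded handler. A hedged usage sketch follows; struct mydev and the mydev_* helpers are hypothetical, and only the calling pattern and return values come from the code above.

#include <linux/interrupt.h>

struct mydev;					/* hypothetical device */
extern bool mydev_irq_pending(struct mydev *d);	/* hypothetical helpers */
extern void mydev_mask_irq(struct mydev *d);
extern void mydev_handle_work(struct mydev *d);	/* may sleep */

static irqreturn_t mydev_hardirq(int irq, void *dev_id)
{
	struct mydev *d = dev_id;

	if (!mydev_irq_pending(d))
		return IRQ_NONE;	/* not ours (shared line) */

	mydev_mask_irq(d);		/* quiet the device */
	return IRQ_WAKE_THREAD;		/* run mydev_thread_fn() in a thread */
}

static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
{
	mydev_handle_work(dev_id);	/* process context, may sleep */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *d, unsigned int irq)
{
	return request_threaded_irq(irq, mydev_hardirq, mydev_thread_fn,
				    IRQF_SHARED, "mydev", d);
}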
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 243d6121e50e..44bbdcbaf8d2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
free_kstat_irqs(old_desc, desc);
+ free_desc_masks(old_desc, desc);
arch_free_chip_data(old_desc, desc);
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 000000000000..638d8bedec14
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,79 @@
+/*
+ * linux/kernel/irq/pm.c
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file contains power management functions related to interrupts.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
+
+/**
+ * suspend_device_irqs - disable all currently enabled interrupt lines
+ *
+ * During system-wide suspend or hibernation device interrupts need to be
+ * disabled at the chip level and this function is provided for this purpose.
+ * It disables all interrupt lines that are enabled at the moment and sets the
+ * IRQ_SUSPENDED flag for them.
+ */
+void suspend_device_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ __disable_irq(desc, irq, true);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ for_each_irq_desc(irq, desc)
+ if (desc->status & IRQ_SUSPENDED)
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(suspend_device_irqs);
+
+/**
+ * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
+ *
+ * Enable all interrupt lines previously disabled by suspend_device_irqs() that
+ * have the IRQ_SUSPENDED flag set.
+ */
+void resume_device_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc) {
+ unsigned long flags;
+
+ if (!(desc->status & IRQ_SUSPENDED))
+ continue;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ __enable_irq(desc, irq, true);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(resume_device_irqs);
+
+/**
+ * check_wakeup_irqs - check if any wake-up interrupts are pending
+ */
+int check_wakeup_irqs(void)
+{
+ struct irq_desc *desc;
+ int irq;
+
+ for_each_irq_desc(irq, desc)
+ if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
+ return -EBUSY;
+
+ return 0;
+}
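[Editor's note] A sketch of the call pattern these helpers are designed for; example_enter_sleep() and example_platform_enter() are illustrative, and the real callers are in the kernel/power changes listed in the diffstat.

#include <linux/interrupt.h>

extern int example_platform_enter(void);	/* hypothetical low-level enter */

static int example_enter_sleep(void)
{
	int error;

	/* disable and flag all currently enabled lines (sets IRQ_SUSPENDED) */
	suspend_device_irqs();

	error = example_platform_enter();

	/* re-enable only the lines that suspend_device_irqs() disabled */
	resume_device_irqs();

	return error;
}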
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7b8b0f21a5b1..374faf9bfdc7 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -161,6 +161,25 @@ unsigned long kallsyms_lookup_name(const char *name)
return module_kallsyms_lookup_name(name);
}
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long),
+ void *data)
+{
+ char namebuf[KSYM_NAME_LEN];
+ unsigned long i;
+ unsigned int off;
+ int ret;
+
+ for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
+ off = kallsyms_expand_symbol(off, namebuf);
+ ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
+ if (ret != 0)
+ return ret;
+ }
+ return module_kallsyms_on_each_symbol(fn, data);
+}
+EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);
+
static unsigned long get_symbol_pos(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
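[Editor's note] A hedged sketch of a kallsyms_on_each_symbol() caller, assuming the matching declaration lands in linux/kallsyms.h as part of this series; struct prefix_count and count_prefix() are illustrative. The callback receives the opaque data pointer, the symbol name, the owning module (NULL for core kernel symbols) and the address; a non-zero return value stops the walk.

#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/string.h>

struct prefix_count {
	const char *prefix;
	unsigned long hits;
};

static int count_prefix(void *data, const char *name,
			struct module *mod, unsigned long addr)
{
	struct prefix_count *pc = data;

	if (strncmp(name, pc->prefix, strlen(pc->prefix)) == 0)
		pc->hits++;

	return 0;	/* non-zero would abort the iteration */
}

static unsigned long count_symbols_with_prefix(const char *prefix)
{
	struct prefix_count pc = { .prefix = prefix, .hits = 0 };

	kallsyms_on_each_symbol(count_prefix, &pc);
	return pc.hits;
}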
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c7fd6692939d..5a758c6e4950 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -42,7 +42,7 @@
note_buf_t* crash_notes;
/* vmcoreinfo stuff */
-unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
@@ -1409,6 +1409,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_OFFSET(vm_struct, addr);
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ log_buf_kexec_setup();
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
VMCOREINFO_NUMBER(PG_lru);
@@ -1450,11 +1451,7 @@ int kernel_kexec(void)
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = disable_nonboot_cpus();
- if (error)
- goto Resume_devices;
device_pm_lock();
- local_irq_disable();
/* At this point, device_suspend() has been called,
* but *not* device_power_down(). We *must*
* device_power_down() now. Otherwise, drivers for
@@ -1464,12 +1461,15 @@ int kernel_kexec(void)
*/
error = device_power_down(PMSG_FREEZE);
if (error)
- goto Enable_irqs;
-
+ goto Resume_devices;
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Enable_cpus;
+ local_irq_disable();
/* Suspend system devices */
error = sysdev_suspend(PMSG_FREEZE);
if (error)
- goto Power_up_devices;
+ goto Enable_irqs;
} else
#endif
{
@@ -1483,13 +1483,13 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
sysdev_resume();
- Power_up_devices:
- device_power_up(PMSG_RESTORE);
Enable_irqs:
local_irq_enable();
- device_pm_unlock();
+ Enable_cpus:
enable_nonboot_cpus();
+ device_power_up(PMSG_RESTORE);
Resume_devices:
+ device_pm_unlock();
device_resume(PMSG_RESTORE);
Resume_console:
resume_console();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a27a5f64443d..b750675251e5 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -50,7 +50,8 @@ static struct workqueue_struct *khelper_wq;
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
/**
- * request_module - try to load a kernel module
+ * __request_module - try to load a kernel module
+ * @wait: wait (or not) for the operation to complete
* @fmt: printf style format string for the name of the module
* @...: arguments as specified in the format string
*
@@ -63,7 +64,7 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
* If module auto-loading support is disabled then this function
* becomes a no-operation.
*/
-int request_module(const char *fmt, ...)
+int __request_module(bool wait, const char *fmt, ...)
{
va_list args;
char module_name[MODULE_NAME_LEN];
@@ -108,11 +109,12 @@ int request_module(const char *fmt, ...)
return -ENOMEM;
}
- ret = call_usermodehelper(modprobe_path, argv, envp, 1);
+ ret = call_usermodehelper(modprobe_path, argv, envp,
+ wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
atomic_dec(&kmod_concurrent);
return ret;
}
-EXPORT_SYMBOL(request_module);
+EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */
struct subprocess_info {
@@ -167,7 +169,7 @@ static int ____call_usermodehelper(void *data)
}
/* We can run anywhere, unlike our parent keventd(). */
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Our parent is keventd, which runs with elevated scheduling priority.
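[Editor's note] A brief usage sketch of the reworked module loader entry point; "foo-driver" is an illustrative alias, and the request_module()/request_module_nowait() convenience wrappers are assumed to live in include/linux/kmod.h in this series rather than in this hunk.

#include <linux/kmod.h>

static void example_load_helpers(void)
{
	/* synchronous: waits for modprobe to finish (UMH_WAIT_PROC) */
	__request_module(true, "foo-driver");

	/* fire and forget: returns once modprobe has been exec'd */
	__request_module(false, "foo-%s", "driver");
}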
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7ba8cd9845cb..a5e74ddee0e2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -43,6 +43,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
+#include <linux/memory.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -67,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -327,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->pre_handler && !kprobe_gone(kp)) {
+ if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
@@ -343,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler && !kprobe_gone(kp)) {
+ if (kp->post_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
reset_kprobe_instance();
@@ -517,20 +518,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
}
/*
-* Add the new probe to old_p->list. Fail if this is the
+* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
+ BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
if (p->break_handler) {
- if (old_p->break_handler)
+ if (ap->break_handler)
return -EEXIST;
- list_add_tail_rcu(&p->list, &old_p->list);
- old_p->break_handler = aggr_break_handler;
+ list_add_tail_rcu(&p->list, &ap->list);
+ ap->break_handler = aggr_break_handler;
} else
- list_add_rcu(&p->list, &old_p->list);
- if (p->post_handler && !old_p->post_handler)
- old_p->post_handler = aggr_post_handler;
+ list_add_rcu(&p->list, &ap->list);
+ if (p->post_handler && !ap->post_handler)
+ ap->post_handler = aggr_post_handler;
+
+ if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
+ ap->flags &= ~KPROBE_FLAG_DISABLED;
+ if (!kprobes_all_disarmed)
+ /* Arm the breakpoint again. */
+ arch_arm_kprobe(ap);
+ }
return 0;
}
@@ -543,6 +552,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
copy_kprobe(p, ap);
flush_insn_slot(ap);
ap->addr = p->addr;
+ ap->flags = p->flags;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
@@ -565,44 +575,59 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p)
{
int ret = 0;
- struct kprobe *ap;
+ struct kprobe *ap = old_p;
- if (kprobe_gone(old_p)) {
+ if (old_p->pre_handler != aggr_pre_handler) {
+ /* If old_p is not an aggr_probe, create new aggr_kprobe. */
+ ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+ if (!ap)
+ return -ENOMEM;
+ add_aggr_kprobe(ap, old_p);
+ }
+
+ if (kprobe_gone(ap)) {
/*
* Attempting to insert new probe at the same location that
* had a probe in the module vaddr area which already
* freed. So, the instruction slot has already been
* released. We need a new slot for the new probe.
*/
- ret = arch_prepare_kprobe(old_p);
+ ret = arch_prepare_kprobe(ap);
if (ret)
+ /*
+			 * Even if we fail to allocate a new slot, we don't
+			 * need to free the aggr_probe. It will be used next
+			 * time, or freed by unregister_kprobe.
+ */
return ret;
- }
- if (old_p->pre_handler == aggr_pre_handler) {
- copy_kprobe(old_p, p);
- ret = add_new_kprobe(old_p, p);
- ap = old_p;
- } else {
- ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
- if (!ap) {
- if (kprobe_gone(old_p))
- arch_remove_kprobe(old_p);
- return -ENOMEM;
- }
- add_aggr_kprobe(ap, old_p);
- copy_kprobe(ap, p);
- ret = add_new_kprobe(ap, p);
- }
- if (kprobe_gone(old_p)) {
+
/*
- * If the old_p has gone, its breakpoint has been disarmed.
- * We have to arm it again after preparing real kprobes.
+ * Clear gone flag to prevent allocating new slot again, and
+ * set disabled flag because it is not armed yet.
*/
- ap->flags &= ~KPROBE_FLAG_GONE;
- if (kprobe_enabled)
- arch_arm_kprobe(ap);
+ ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+ | KPROBE_FLAG_DISABLED;
}
- return ret;
+
+ copy_kprobe(ap, p);
+ return add_new_kprobe(ap, p);
+}
+
+/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
+static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ if (!kprobe_disabled(kp))
+ /*
+ * There is an active probe on the list.
+ * We can't disable aggr_kprobe.
+ */
+ return 0;
+ }
+ p->flags |= KPROBE_FLAG_DISABLED;
+ return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -663,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p)
return -EINVAL;
}
- p->flags = 0;
+ /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+ p->flags &= KPROBE_FLAG_DISABLED;
+
/*
* Check if are we probing a module.
*/
@@ -699,17 +726,20 @@ int __kprobes register_kprobe(struct kprobe *p)
goto out;
}
+ mutex_lock(&text_mutex);
ret = arch_prepare_kprobe(p);
if (ret)
- goto out;
+ goto out_unlock_text;
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed && !kprobe_disabled(p))
arch_arm_kprobe(p);
+out_unlock_text:
+ mutex_unlock(&text_mutex);
out:
mutex_unlock(&kprobe_mutex);
@@ -718,26 +748,39 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobe);
-/*
- * Unregister a kprobe without a scheduler synchronization.
- */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
struct kprobe *old_p, *list_p;
old_p = get_kprobe(p->addr);
if (unlikely(!old_p))
- return -EINVAL;
+ return NULL;
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
- goto valid_p;
- return -EINVAL;
+ goto valid;
+ return NULL;
}
-valid_p:
+valid:
+ return old_p;
+}
+
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+{
+ struct kprobe *old_p, *list_p;
+
+ old_p = __get_valid_kprobe(p);
+ if (old_p == NULL)
+ return -EINVAL;
+
if (old_p == p ||
(old_p->pre_handler == aggr_pre_handler &&
list_is_singular(&old_p->list))) {
@@ -746,8 +789,11 @@ valid_p:
* enabled and not gone - otherwise, the breakpoint would
* already have been removed. We save on flushing icache.
*/
- if (kprobe_enabled && !kprobe_gone(old_p))
+ if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
+ mutex_lock(&text_mutex);
arch_disarm_kprobe(p);
+ mutex_unlock(&text_mutex);
+ }
hlist_del_rcu(&old_p->hlist);
} else {
if (p->break_handler && !kprobe_gone(p))
@@ -761,6 +807,11 @@ valid_p:
}
noclean:
list_del_rcu(&p->list);
+ if (!kprobe_disabled(old_p)) {
+ try_to_disable_aggr_kprobe(old_p);
+ if (!kprobes_all_disarmed && kprobe_disabled(old_p))
+ arch_disarm_kprobe(old_p);
+ }
}
return 0;
}
@@ -796,11 +847,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
unregister_kprobes(&p, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kprobe);
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
@@ -819,6 +872,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
}
+EXPORT_SYMBOL_GPL(unregister_kprobes);
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
@@ -858,16 +912,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
return register_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(register_jprobe);
void __kprobes unregister_jprobe(struct jprobe *jp)
{
unregister_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_jprobe);
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
@@ -887,6 +944,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
__unregister_kprobe_bottom(&jps[i]->kp);
}
}
+EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
@@ -912,10 +970,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
ri->rp = rp;
ri->task = current;
- if (rp->entry_handler && rp->entry_handler(ri, regs)) {
- spin_unlock_irqrestore(&rp->lock, flags);
+ if (rp->entry_handler && rp->entry_handler(ri, regs))
return 0;
- }
arch_prepare_kretprobe(ri, regs);
@@ -982,6 +1038,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
free_rp_inst(rp);
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
@@ -999,11 +1056,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
@@ -1025,24 +1084,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
}
}
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
+
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct pt_regs *regs)
@@ -1056,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
static void __kprobes kill_kprobe(struct kprobe *p)
{
struct kprobe *kp;
+
p->flags |= KPROBE_FLAG_GONE;
if (p->pre_handler == aggr_pre_handler) {
/*
@@ -1168,8 +1234,8 @@ static int __init init_kprobes(void)
}
}
- /* By default, kprobes are enabled */
- kprobe_enabled = true;
+ /* By default, kprobes are armed */
+ kprobes_all_disarmed = false;
err = arch_init_kprobes();
if (!err)
@@ -1197,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
else
kprobe_type = "k";
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
- sym, offset, (modname ? modname : " "),
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+ p->addr, kprobe_type, sym, offset,
+ (modname ? modname : " "),
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
else
- seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %p %s%s\n",
+ p->addr, kprobe_type, p->addr,
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1267,7 +1339,72 @@ static struct file_operations debugfs_kprobes_operations = {
.release = seq_release,
};
-static void __kprobes enable_all_kprobes(void)
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If the probe is already disabled (or gone), just return */
+ if (kprobe_disabled(kp))
+ goto out;
+
+ kp->flags |= KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ /* When kp != p, p is always enabled. */
+ try_to_disable_aggr_kprobe(p);
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_disarm_kprobe(p);
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (kprobe_gone(kp)) {
+		/* This kprobe has gone, we can't enable it. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_arm_kprobe(p);
+
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ kp->flags &= ~KPROBE_FLAG_DISABLED;
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
+static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1276,18 +1413,20 @@ static void __kprobes enable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already enabled, just return */
- if (kprobe_enabled)
+ /* If kprobes are armed, just return */
+ if (!kprobes_all_disarmed)
goto already_enabled;
+ mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
- if (!kprobe_gone(p))
+ if (!kprobe_disabled(p))
arch_arm_kprobe(p);
}
+ mutex_unlock(&text_mutex);
- kprobe_enabled = true;
+ kprobes_all_disarmed = false;
printk(KERN_INFO "Kprobes globally enabled\n");
already_enabled:
@@ -1295,7 +1434,7 @@ already_enabled:
return;
}
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1304,20 +1443,22 @@ static void __kprobes disable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already disabled, just return */
- if (!kprobe_enabled)
+ /* If kprobes are already disarmed, just return */
+ if (kprobes_all_disarmed)
goto already_disabled;
- kprobe_enabled = false;
+ kprobes_all_disarmed = true;
printk(KERN_INFO "Kprobes globally disabled\n");
+ mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
arch_disarm_kprobe(p);
}
}
+ mutex_unlock(&text_mutex);
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
@@ -1338,7 +1479,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
{
char buf[3];
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed)
buf[0] = '1';
else
buf[0] = '0';
@@ -1361,12 +1502,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- enable_all_kprobes();
+ arm_all_kprobes();
break;
case 'n':
case 'N':
case '0':
- disable_all_kprobes();
+ disarm_all_kprobes();
break;
}
@@ -1409,16 +1550,5 @@ late_initcall(debugfs_kprobe_init);
module_init(init_kprobes);
-EXPORT_SYMBOL_GPL(register_kprobe);
-EXPORT_SYMBOL_GPL(unregister_kprobe);
-EXPORT_SYMBOL_GPL(register_kprobes);
-EXPORT_SYMBOL_GPL(unregister_kprobes);
-EXPORT_SYMBOL_GPL(register_jprobe);
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-EXPORT_SYMBOL_GPL(register_jprobes);
-EXPORT_SYMBOL_GPL(unregister_jprobes);
+/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
-EXPORT_SYMBOL_GPL(register_kretprobe);
-EXPORT_SYMBOL_GPL(unregister_kretprobe);
-EXPORT_SYMBOL_GPL(register_kretprobes);
-EXPORT_SYMBOL_GPL(unregister_kretprobes);
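For reference, a minimal sketch of how a module might use the per-probe disable_kprobe()/enable_kprobe() API added above. The probed symbol, handler and module boilerplate are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/kprobes.h>

static unsigned long hits;	/* illustrative counter */

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	hits++;
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",		/* assumed probe target */
	.pre_handler	= handler_pre,
};

static int __init kp_toggle_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret < 0)
		return ret;

	/* Keep the probe registered but disarmed until it is needed. */
	disable_kprobe(&kp);

	/* Re-arm it later without unregistering and re-registering. */
	enable_kprobe(&kp);
	return 0;
}

static void __exit kp_toggle_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_toggle_init);
module_exit(kp_toggle_exit);
MODULE_LICENSE("GPL");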
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4fbc456f393d..4ebaf8519abf 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -76,6 +76,7 @@ static int kthread(void *_create)
/* OK, tell user we're spawned, wait for stop or wakeup */
__set_current_state(TASK_UNINTERRUPTIBLE);
+ create->result = current;
complete(&create->started);
schedule();
@@ -96,22 +97,10 @@ static void create_kthread(struct kthread_create_info *create)
/* We want our own signal handler (we take no signals by default). */
pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
- if (pid < 0) {
+ if (pid < 0)
create->result = ERR_PTR(pid);
- } else {
- struct sched_param param = { .sched_priority = 0 };
+ else
wait_for_completion(&create->started);
- read_lock(&tasklist_lock);
- create->result = find_task_by_pid_ns(pid, &init_pid_ns);
- read_unlock(&tasklist_lock);
- /*
- * root may have changed our (kthreadd's) priority or CPU mask.
- * The kernel thread should not inherit these properties.
- */
- sched_setscheduler(create->result, SCHED_NORMAL, &param);
- set_user_nice(create->result, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
- }
complete(&create->done);
}
@@ -154,11 +143,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
wait_for_completion(&create.done);
if (!IS_ERR(create.result)) {
+ struct sched_param param = { .sched_priority = 0 };
va_list args;
+
va_start(args, namefmt);
vsnprintf(create.result->comm, sizeof(create.result->comm),
namefmt, args);
va_end(args);
+ /*
+ * root may have changed our (kthreadd's) priority or CPU mask.
+ * The kernel thread should not inherit these properties.
+ */
+ sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
+ set_user_nice(create.result, KTHREAD_NICE_LEVEL);
+ set_cpus_allowed_ptr(create.result, cpu_all_mask);
}
return create.result;
}
@@ -240,7 +238,7 @@ int kthreadd(void *unused)
set_task_comm(tsk, "kthreadd");
ignore_signals(tsk);
set_user_nice(tsk, KTHREAD_NICE_LEVEL);
- set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(tsk, cpu_all_mask);
current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
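For context, a sketch of the unchanged caller-side pattern the reordering above serves: kthread_create() now hands back a task that already has default priority and an unrestricted CPU mask before the caller wakes it. The worker function and thread name are illustrative.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* sleep until woken or stopped */
	}
	return 0;
}

static struct task_struct *start_worker(void)
{
	struct task_struct *tsk;

	tsk = kthread_create(worker_fn, NULL, "example-worker");
	if (!IS_ERR(tsk))
		wake_up_process(tsk);	/* only now does the thread run */
	return tsk;
}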
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,8 @@
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <trace/lockdep.h>
#include <asm/sections.h>
@@ -310,12 +312,14 @@ EXPORT_SYMBOL(lockdep_on);
#if VERBOSE
# define HARDIRQ_VERBOSE 1
# define SOFTIRQ_VERBOSE 1
+# define RECLAIM_VERBOSE 1
#else
# define HARDIRQ_VERBOSE 0
# define SOFTIRQ_VERBOSE 0
+# define RECLAIM_VERBOSE 0
#endif
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
* Quick filtering for interesting events:
*/
@@ -430,30 +434,24 @@ atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr) atomic_inc(ptr)
-# define debug_atomic_dec(ptr) atomic_dec(ptr)
-# define debug_atomic_read(ptr) atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr) do { } while (0)
-# define debug_atomic_dec(ptr) do { } while (0)
-# define debug_atomic_read(ptr) 0
#endif
/*
* Locking printouts:
*/
+#define __USAGE(__STATE) \
+ [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
+ [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
+ [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+ [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
static const char *usage_str[] =
{
- [LOCK_USED] = "initial-use ",
- [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
- [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
- [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
- [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
- [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
- [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
- [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
- [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+ [LOCK_USED] = "INITIAL USE",
};
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -461,46 +459,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
- *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
- if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
- *c1 = '+';
- else
- if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
- *c1 = '-';
+ return 1UL << bit;
+}
- if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
- *c2 = '+';
- else
- if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
- *c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+ char c = '.';
- if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
- *c3 = '-';
- if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
- *c3 = '+';
- if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
- *c3 = '?';
+ if (class->usage_mask & lock_flag(bit + 2))
+ c = '+';
+ if (class->usage_mask & lock_flag(bit)) {
+ c = '-';
+ if (class->usage_mask & lock_flag(bit + 2))
+ c = '?';
}
- if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
- *c4 = '-';
- if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
- *c4 = '+';
- if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
- *c4 = '?';
- }
+ return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+ int i = 0;
+
+#define LOCKDEP_STATE(__STATE) \
+ usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
+ usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+ usage[i] = '\0';
}
static void print_lock_name(struct lock_class *class)
{
- char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+ char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
const char *name;
- get_usage_chars(class, &c1, &c2, &c3, &c4);
+ get_usage_chars(class, usage);
name = class->name;
if (!name) {
@@ -513,7 +510,7 @@ static void print_lock_name(struct lock_class *class)
if (class->subclass)
printk("/%d", class->subclass);
}
- printk("){%c%c%c%c}", c1, c2, c3, c4);
+ printk("){%s}", usage);
}
static void print_lockdep_cache(struct lockdep_map *lock)
@@ -796,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return NULL;
}
class = lock_classes + nr_lock_classes++;
@@ -859,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return NULL;
}
return list_entries + nr_list_entries++;
@@ -1263,9 +1262,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
bit_backwards, bit_forwards, irqclass);
}
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+ __stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+ __stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+ return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+ /*
+ * USED_IN
+ * USED_IN_READ
+ * ENABLED
+ * ENABLED_READ
+ *
+ * bit 0 - write/read
+ * bit 1 - used_in/enabled
+ * bit 2+ state
+ */
+
+ int state = new_bit & ~3;
+ int dir = new_bit & 2;
+
+ /*
+ * Keep the state, flip the direction bit, and strip the read bit.
+ */
+ return state | (dir ^ 2);
+}
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+ struct held_lock *next, enum lock_usage_bit bit)
{
/*
* Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1312,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
- if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
- LOCK_ENABLED_HARDIRQS, "hard"))
+ if (!check_usage(curr, prev, next, bit,
+ exclusive_bit(bit), state_name(bit)))
return 0;
+ bit++; /* _READ */
+
/*
* Prove that the new dependency does not connect a hardirq-safe-read
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
- if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
- LOCK_ENABLED_HARDIRQS, "hard-read"))
+ if (!check_usage(curr, prev, next, bit,
+ exclusive_bit(bit), state_name(bit)))
return 0;
- /*
- * Prove that the new dependency does not connect a softirq-safe
- * lock with a softirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
- LOCK_ENABLED_SOFTIRQS, "soft"))
- return 0;
- /*
- * Prove that the new dependency does not connect a softirq-safe-read
- * lock with a softirq-unsafe lock - to achieve this we search
- * the backwards-subgraph starting at <prev>, and the
- * forwards-subgraph starting at <next>:
- */
- if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
- LOCK_ENABLED_SOFTIRQS, "soft"))
+ return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+ struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE) \
+ if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
return 1;
}
@@ -1649,6 +1684,7 @@ cache_hit:
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
chain = lock_chains + nr_lock_chains++;
@@ -1861,9 +1897,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
curr->comm, task_pid_nr(curr));
print_lock(this);
if (forwards)
- printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+ printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
- printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+ printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
print_lock_name(other);
printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
@@ -1933,7 +1969,7 @@ void print_irqtrace_events(struct task_struct *curr)
print_ip_sym(curr->softirq_disable_ip);
}
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
return class_filter(class);
@@ -1941,7 +1977,7 @@ static int hardirq_verbose(struct lock_class *class)
return 0;
}
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
return class_filter(class);
@@ -1949,185 +1985,95 @@ static int softirq_verbose(struct lock_class *class)
return 0;
}
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+ return class_filter(class);
+#endif
+ return 0;
+}
+
#define STRICT_READ_CHECKS 1
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+ __STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+ struct lock_class *class)
+{
+ return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+ enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
- int ret = 1;
+ int excl_bit = exclusive_bit(new_bit);
+ int read = new_bit & 1;
+ int dir = new_bit & 2;
- switch(new_bit) {
- case LOCK_USED_IN_HARDIRQ:
- if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
- return 0;
- if (!valid_state(curr, this, new_bit,
- LOCK_ENABLED_HARDIRQS_READ))
- return 0;
- /*
- * just marked it hardirq-safe, check that this lock
- * took no hardirq-unsafe lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_HARDIRQS, "hard"))
- return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it hardirq-safe, check that this lock
- * took no hardirq-unsafe-read lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
- return 0;
-#endif
- if (hardirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_USED_IN_SOFTIRQ:
- if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
- return 0;
- if (!valid_state(curr, this, new_bit,
- LOCK_ENABLED_SOFTIRQS_READ))
- return 0;
- /*
- * just marked it softirq-safe, check that this lock
- * took no softirq-unsafe lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_SOFTIRQS, "soft"))
- return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it softirq-safe, check that this lock
- * took no softirq-unsafe-read lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
- return 0;
-#endif
- if (softirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_USED_IN_HARDIRQ_READ:
- if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
- return 0;
- /*
- * just marked it hardirq-read-safe, check that this lock
- * took no hardirq-unsafe lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_HARDIRQS, "hard"))
- return 0;
- if (hardirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_USED_IN_SOFTIRQ_READ:
- if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
- return 0;
- /*
- * just marked it softirq-read-safe, check that this lock
- * took no softirq-unsafe lock in the past:
- */
- if (!check_usage_forwards(curr, this,
- LOCK_ENABLED_SOFTIRQS, "soft"))
- return 0;
- if (softirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_ENABLED_HARDIRQS:
- if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
- return 0;
- if (!valid_state(curr, this, new_bit,
- LOCK_USED_IN_HARDIRQ_READ))
- return 0;
- /*
- * just marked it hardirq-unsafe, check that no hardirq-safe
- * lock in the system ever took it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_HARDIRQ, "hard"))
- return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it hardirq-unsafe, check that no
- * hardirq-safe-read lock in the system ever took
- * it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
- return 0;
-#endif
- if (hardirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_ENABLED_SOFTIRQS:
- if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
- return 0;
- if (!valid_state(curr, this, new_bit,
- LOCK_USED_IN_SOFTIRQ_READ))
- return 0;
- /*
- * just marked it softirq-unsafe, check that no softirq-safe
- * lock in the system ever took it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_SOFTIRQ, "soft"))
- return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it softirq-unsafe, check that no
- * softirq-safe-read lock in the system ever took
- * it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
- return 0;
-#endif
- if (softirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_ENABLED_HARDIRQS_READ:
- if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
- return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it hardirq-read-unsafe, check that no
- * hardirq-safe lock in the system ever took it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_HARDIRQ, "hard"))
- return 0;
-#endif
- if (hardirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- case LOCK_ENABLED_SOFTIRQS_READ:
- if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
+ /*
+ * mark USED_IN has to look forwards -- to ensure no dependency
+ * has ENABLED state, which would allow recursion deadlocks.
+ *
+ * mark ENABLED has to look backwards -- to ensure no dependee
+ * has USED_IN state, which, again, would allow recursion deadlocks.
+ */
+ check_usage_f usage = dir ?
+ check_usage_backwards : check_usage_forwards;
+
+ /*
+ * Validate that this particular lock does not have conflicting
+ * usage states.
+ */
+ if (!valid_state(curr, this, new_bit, excl_bit))
+ return 0;
+
+ /*
+ * Validate that the lock dependencies don't have conflicting usage
+ * states.
+ */
+ if ((!read || !dir || STRICT_READ_CHECKS) &&
+ !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+ return 0;
+
+ /*
+ * Check for read in write conflicts
+ */
+ if (!read) {
+ if (!valid_state(curr, this, new_bit, excl_bit + 1))
return 0;
-#if STRICT_READ_CHECKS
- /*
- * just marked it softirq-read-unsafe, check that no
- * softirq-safe lock in the system ever took it in the past:
- */
- if (!check_usage_backwards(curr, this,
- LOCK_USED_IN_SOFTIRQ, "soft"))
+
+ if (STRICT_READ_CHECKS &&
+ !usage(curr, this, excl_bit + 1,
+ state_name(new_bit + 1)))
return 0;
-#endif
- if (softirq_verbose(hlock_class(this)))
- ret = 2;
- break;
- default:
- WARN_ON(1);
- break;
}
- return ret;
+ if (state_verbose(new_bit, hlock_class(this)))
+ return 2;
+
+ return 1;
}
+enum mark_type {
+#define LOCKDEP_STATE(__STATE) __STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
/*
* Mark all held locks with a usage bit:
*/
static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
enum lock_usage_bit usage_bit;
struct held_lock *hlock;
@@ -2136,17 +2082,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
- if (hardirq) {
- if (hlock->read)
- usage_bit = LOCK_ENABLED_HARDIRQS_READ;
- else
- usage_bit = LOCK_ENABLED_HARDIRQS;
- } else {
- if (hlock->read)
- usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
- else
- usage_bit = LOCK_ENABLED_SOFTIRQS;
- }
+ usage_bit = 2 + (mark << 2); /* ENABLED */
+ if (hlock->read)
+ usage_bit += 1; /* READ */
+
+ BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
if (!mark_lock(curr, hlock, usage_bit))
return 0;
}
@@ -2200,7 +2141,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
- if (!mark_held_locks(curr, 1))
+ if (!mark_held_locks(curr, HARDIRQ))
return;
/*
* If we have softirqs enabled, then set the usage
@@ -2208,7 +2149,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
* this bit from being set before)
*/
if (curr->softirqs_enabled)
- if (!mark_held_locks(curr, 0))
+ if (!mark_held_locks(curr, SOFTIRQ))
return;
curr->hardirq_enable_ip = ip;
@@ -2288,7 +2229,7 @@ void trace_softirqs_on(unsigned long ip)
* enabled too:
*/
if (curr->hardirqs_enabled)
- mark_held_locks(curr, 0);
+ mark_held_locks(curr, SOFTIRQ);
}
/*
@@ -2317,6 +2258,48 @@ void trace_softirqs_off(unsigned long ip)
debug_atomic_inc(&redundant_softirqs_off);
}
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
+{
+ struct task_struct *curr = current;
+
+ if (unlikely(!debug_locks))
+ return;
+
+ /* no reclaim without waiting on it */
+ if (!(gfp_mask & __GFP_WAIT))
+ return;
+
+ /* this guy won't enter reclaim */
+ if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+ return;
+
+ /* We're only interested in __GFP_FS allocations for now */
+ if (!(gfp_mask & __GFP_FS))
+ return;
+
+ if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
+ return;
+
+ mark_held_locks(curr, RECLAIM_FS);
+}
+
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+ unsigned long flags;
+
+ if (unlikely(current->lockdep_recursion))
+ return;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+ current->lockdep_recursion = 1;
+ __lockdep_trace_alloc(gfp_mask, flags);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+}
+
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
/*
@@ -2345,19 +2328,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
if (!hlock->hardirqs_off) {
if (hlock->read) {
if (!mark_lock(curr, hlock,
- LOCK_ENABLED_HARDIRQS_READ))
+ LOCK_ENABLED_HARDIRQ_READ))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
- LOCK_ENABLED_SOFTIRQS_READ))
+ LOCK_ENABLED_SOFTIRQ_READ))
return 0;
} else {
if (!mark_lock(curr, hlock,
- LOCK_ENABLED_HARDIRQS))
+ LOCK_ENABLED_HARDIRQ))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
- LOCK_ENABLED_SOFTIRQS))
+ LOCK_ENABLED_SOFTIRQ))
+ return 0;
+ }
+ }
+
+ /*
+ * We reuse the irq context infrastructure more broadly as general
+ * context-checking code. This tests GFP_FS recursion (a lock taken
+ * during reclaim for a GFP_FS allocation is held over a GFP_FS
+ * allocation).
+ */
+ if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+ if (hlock->read) {
+ if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+ return 0;
+ } else {
+ if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
return 0;
}
}
@@ -2412,6 +2411,10 @@ static inline int separate_irq_context(struct task_struct *curr,
return 0;
}
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
#endif
/*
@@ -2445,14 +2448,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
switch (new_bit) {
- case LOCK_USED_IN_HARDIRQ:
- case LOCK_USED_IN_SOFTIRQ:
- case LOCK_USED_IN_HARDIRQ_READ:
- case LOCK_USED_IN_SOFTIRQ_READ:
- case LOCK_ENABLED_HARDIRQS:
- case LOCK_ENABLED_SOFTIRQS:
- case LOCK_ENABLED_HARDIRQS_READ:
- case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE) \
+ case LOCK_USED_IN_##__STATE: \
+ case LOCK_USED_IN_##__STATE##_READ: \
+ case LOCK_ENABLED_##__STATE: \
+ case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
@@ -2488,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- if (unlikely(!debug_locks))
+ lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+ lock->cpu = raw_smp_processor_id();
+#endif
+
+ if (DEBUG_LOCKS_WARN_ON(!name)) {
+ lock->name = "NULL";
return;
+ }
+
+ lock->name = name;
if (DEBUG_LOCKS_WARN_ON(!key))
return;
- if (DEBUG_LOCKS_WARN_ON(!name))
- return;
/*
* Sanity check, the lock-class key must be persistent:
*/
@@ -2503,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
DEBUG_LOCKS_WARN_ON(1);
return;
}
- lock->name = name;
lock->key = key;
- lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
- lock->cpu = raw_smp_processor_id();
-#endif
+
+ if (unlikely(!debug_locks))
+ return;
+
if (subclass)
register_lock_class(lock, subclass, 1);
}
@@ -2542,6 +2550,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
debug_locks_off();
printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
@@ -2638,6 +2647,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
debug_locks_off();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
+ dump_stack();
return 0;
}
@@ -2925,6 +2935,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
}
EXPORT_SYMBOL_GPL(lock_set_class);
+DEFINE_TRACE(lock_acquire);
+
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
@@ -2935,6 +2947,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
{
unsigned long flags;
+ trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
if (unlikely(current->lockdep_recursion))
return;
@@ -2949,11 +2963,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
EXPORT_SYMBOL_GPL(lock_acquire);
+DEFINE_TRACE(lock_release);
+
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
{
unsigned long flags;
+ trace_lock_release(lock, nested, ip);
+
if (unlikely(current->lockdep_recursion))
return;
@@ -2966,6 +2984,16 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+ current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+ current->lockdep_reclaim_gfp = 0;
+}
+
#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
@@ -3092,10 +3120,14 @@ found_it:
lock->ip = ip;
}
+DEFINE_TRACE(lock_contended);
+
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_contended(lock, ip);
+
if (unlikely(!lock_stat))
return;
@@ -3111,10 +3143,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
}
EXPORT_SYMBOL_GPL(lock_contended);
+DEFINE_TRACE(lock_acquired);
+
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
+ trace_lock_acquired(lock, ip);
+
if (unlikely(!lock_stat))
return;
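A hedged sketch of how the new reclaim annotations above are meant to be driven: only lockdep_set_current_reclaim_state(), lockdep_clear_current_reclaim_state() and lockdep_trace_alloc() come from this patch; the surrounding functions are hypothetical stand-ins for the allocator and reclaim paths.

#include <linux/lockdep.h>
#include <linux/gfp.h>
#include <linux/slab.h>

/* Hypothetical direct-reclaim entry: while reclaiming on behalf of a
 * __GFP_FS allocation, locks taken here are marked USED_IN_RECLAIM_FS
 * via current->lockdep_reclaim_gfp. */
static void example_enter_reclaim(gfp_t gfp_mask)
{
	lockdep_set_current_reclaim_state(gfp_mask);

	/* reclaim work that may take filesystem locks goes here */

	lockdep_clear_current_reclaim_state();
}

/* Hypothetical allocation wrapper: lockdep_trace_alloc() marks every
 * currently held lock ENABLED_RECLAIM_FS for a waitable __GFP_FS
 * allocation, which is the other edge of the potential deadlock. */
static void *example_alloc(size_t size, gfp_t gfp_mask)
{
	lockdep_trace_alloc(gfp_mask);
	return kmalloc(size, gfp_mask);
}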
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..a2cc7e9a6e84 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -7,6 +7,45 @@
*/
/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit {
+#define LOCKDEP_STATE(__STATE) \
+ LOCK_USED_IN_##__STATE, \
+ LOCK_USED_IN_##__STATE##_READ, \
+ LOCK_ENABLED_##__STATE, \
+ LOCK_ENABLED_##__STATE##_READ,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+ LOCK_USED,
+ LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),
+
+enum {
+#define LOCKDEP_STATE(__STATE) \
+ __LOCKF(USED_IN_##__STATE) \
+ __LOCKF(USED_IN_##__STATE##_READ) \
+ __LOCKF(ENABLED_##__STATE) \
+ __LOCKF(ENABLED_##__STATE##_READ)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+ __LOCKF(USED)
+};
+
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_ENABLED_IRQ_READ \
+ (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+/*
* MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
* we track.
*
@@ -31,8 +70,10 @@
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];
-extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+
+extern void get_usage_chars(struct lock_class *class,
+ char usage[LOCK_USAGE_CHARS]);
extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
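To make the generated numbering concrete, a small self-check sketch under the assumption that lockdep_states.h lists HARDIRQ, SOFTIRQ and RECLAIM_FS in that order (four usage bits per state, read in bit 0, enabled in bit 1); the selftest function itself is hypothetical.

#include <linux/kernel.h>

static void lockdep_usage_bit_selftest(void)
{
	/* Three states of four bits each, then LOCK_USED. */
	BUILD_BUG_ON(LOCK_USED_IN_HARDIRQ      != 0);
	BUILD_BUG_ON(LOCK_ENABLED_HARDIRQ      != 2);
	BUILD_BUG_ON(LOCK_USED_IN_SOFTIRQ      != 4);
	BUILD_BUG_ON(LOCK_ENABLED_SOFTIRQ_READ != 7);
	BUILD_BUG_ON(LOCK_USED_IN_RECLAIM_FS   != 8);
	BUILD_BUG_ON(LOCK_USED                 != 12);
	BUILD_BUG_ON(LOCK_USAGE_STATES         != 13);

	/* Two usage characters per state plus the terminating NUL. */
	BUILD_BUG_ON(LOCK_USAGE_CHARS != 7);
}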
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..d7135aa2d2c4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
{
struct lock_class *class = v;
struct lock_list *entry;
- char c1, c2, c3, c4;
+ char usage[LOCK_USAGE_CHARS];
if (v == SEQ_START_TOKEN) {
seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
#endif
- get_usage_chars(class, &c1, &c2, &c3, &c4);
- seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+ get_usage_chars(class, usage);
+ seq_printf(m, " %s", usage);
seq_printf(m, ": ");
print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_uncategorized++;
if (class->usage_mask & LOCKF_USED_IN_IRQ)
nr_irq_safe++;
- if (class->usage_mask & LOCKF_ENABLED_IRQS)
+ if (class->usage_mask & LOCKF_ENABLED_IRQ)
nr_irq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
nr_softirq_safe++;
- if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+ if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
nr_softirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
nr_hardirq_safe++;
- if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+ if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
nr_hardirq_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
nr_irq_read_safe++;
- if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
+ if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
nr_irq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
nr_softirq_read_safe++;
- if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+ if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
nr_softirq_read_unsafe++;
if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
nr_hardirq_read_safe++;
- if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+ if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
#ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
static void seq_header(struct seq_file *m)
{
seq_printf(m, "lock_stat version 0.3\n");
+
+ if (unlikely(!debug_locks))
+ seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
+
seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
"%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644
index 000000000000..995b0cc2b84c
--- /dev/null
+++ b/kernel/lockdep_states.h
@@ -0,0 +1,9 @@
+/*
+ * Lockdep states,
+ *
+ * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
+ * you add one, or come up with a nice dynamic solution.
+ */
+LOCKDEP_STATE(HARDIRQ)
+LOCKDEP_STATE(SOFTIRQ)
+LOCKDEP_STATE(RECLAIM_FS)
diff --git a/kernel/module.c b/kernel/module.c
index ee7ab612dafa..cb3887e770e2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -68,7 +68,8 @@
/* List of modules, protected by module_mutex or preempt_disable
* (delete uses stop_machine/add uses RCU list operations). */
-static DEFINE_MUTEX(module_mutex);
+DEFINE_MUTEX(module_mutex);
+EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
/* Block module loading/unloading? */
@@ -79,7 +80,7 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
-/* Bounds of module allocation, for speeding __module_text_address */
+/* Bounds of module allocation, for speeding __module_address */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
int register_module_notifier(struct notifier_block * nb)
@@ -189,17 +190,6 @@ extern const unsigned long __start___kcrctab_unused_gpl[];
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const unsigned long *crcs;
- enum {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } licence;
- bool unused;
-};
-
static bool each_symbol_in_section(const struct symsearch *arr,
unsigned int arrsize,
struct module *owner,
@@ -220,10 +210,8 @@ static bool each_symbol_in_section(const struct symsearch *arr,
}
/* Returns true as soon as fn returns true, otherwise false. */
-static bool each_symbol(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- unsigned int symnum, void *data),
- void *data)
+bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
+ unsigned int symnum, void *data), void *data)
{
struct module *mod;
const struct symsearch arr[] = {
@@ -276,6 +264,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr,
}
return false;
}
+EXPORT_SYMBOL_GPL(each_symbol);
struct find_symbol_arg {
/* Input */
@@ -286,7 +275,7 @@ struct find_symbol_arg {
/* Output */
struct module *owner;
const unsigned long *crc;
- unsigned long value;
+ const struct kernel_symbol *sym;
};
static bool find_symbol_in_section(const struct symsearch *syms,
@@ -327,17 +316,17 @@ static bool find_symbol_in_section(const struct symsearch *syms,
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, symnum);
- fsa->value = syms->start[symnum].value;
+ fsa->sym = &syms->start[symnum];
return true;
}
-/* Find a symbol, return value, (optional) crc and (optional) module
- * which owns it */
-static unsigned long find_symbol(const char *name,
- struct module **owner,
- const unsigned long **crc,
- bool gplok,
- bool warn)
+/* Find a symbol and return it, along with, (optional) crc and
+ * (optional) module which owns it */
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn)
{
struct find_symbol_arg fsa;
@@ -350,15 +339,16 @@ static unsigned long find_symbol(const char *name,
*owner = fsa.owner;
if (crc)
*crc = fsa.crc;
- return fsa.value;
+ return fsa.sym;
}
DEBUGP("Failed to find symbol %s\n", name);
- return -ENOENT;
+ return NULL;
}
+EXPORT_SYMBOL_GPL(find_symbol);
/* Search for module by name: must hold module_mutex. */
-static struct module *find_module(const char *name)
+struct module *find_module(const char *name)
{
struct module *mod;
@@ -368,6 +358,7 @@ static struct module *find_module(const char *name)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
@@ -644,7 +635,7 @@ static int already_uses(struct module *a, struct module *b)
}
/* Module a uses b */
-static int use_module(struct module *a, struct module *b)
+int use_module(struct module *a, struct module *b)
{
struct module_use *use;
int no_warn, err;
@@ -677,6 +668,7 @@ static int use_module(struct module *a, struct module *b)
no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
return 1;
}
+EXPORT_SYMBOL_GPL(use_module);
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
@@ -897,7 +889,7 @@ void __symbol_put(const char *symbol)
struct module *owner;
preempt_disable();
- if (IS_ERR_VALUE(find_symbol(symbol, &owner, NULL, true, false)))
+ if (!find_symbol(symbol, &owner, NULL, true, false))
BUG();
module_put(owner);
preempt_enable();
@@ -911,8 +903,10 @@ void symbol_put_addr(void *addr)
if (core_kernel_text((unsigned long)addr))
return;
- if (!(modaddr = module_text_address((unsigned long)addr)))
- BUG();
+ /* module_text_address is safe here: we're supposed to hold a reference
+ * to the module from symbol_get, so it can't go away. */
+ modaddr = __module_text_address((unsigned long)addr);
+ BUG_ON(!modaddr);
module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
@@ -952,10 +946,11 @@ static inline void module_unload_free(struct module *mod)
{
}
-static inline int use_module(struct module *a, struct module *b)
+int use_module(struct module *a, struct module *b)
{
return strong_try_module_get(b) == 0;
}
+EXPORT_SYMBOL_GPL(use_module);
static inline void module_unload_init(struct module *mod)
{
@@ -998,12 +993,12 @@ static struct module_attribute *modinfo_attrs[] = {
static const char vermagic[] = VERMAGIC_STRING;
-static int try_to_force_load(struct module *mod, const char *symname)
+static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
if (!test_taint(TAINT_FORCED_MODULE))
- printk("%s: no version for \"%s\" found: kernel tainted.\n",
- mod->name, symname);
+ printk(KERN_WARNING "%s: %s: kernel tainted.\n",
+ mod->name, reason);
add_taint_module(mod, TAINT_FORCED_MODULE);
return 0;
#else
@@ -1060,9 +1055,9 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
{
const unsigned long *crc;
- if (IS_ERR_VALUE(find_symbol("struct_module", NULL, &crc, true, false)))
+ if (!find_symbol("module_layout", NULL, &crc, true, false))
BUG();
- return check_version(sechdrs, versindex, "struct_module", mod, crc);
+ return check_version(sechdrs, versindex, "module_layout", mod, crc);
}
/* First part is kernel version, which we ignore if module has crcs. */
@@ -1101,25 +1096,25 @@ static inline int same_magic(const char *amagic, const char *bmagic,
/* Resolve a symbol for this module. I.e. if we find one, record usage.
Must be holding module_mutex. */
-static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
- unsigned int versindex,
- const char *name,
- struct module *mod)
+static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
+ unsigned int versindex,
+ const char *name,
+ struct module *mod)
{
struct module *owner;
- unsigned long ret;
+ const struct kernel_symbol *sym;
const unsigned long *crc;
- ret = find_symbol(name, &owner, &crc,
+ sym = find_symbol(name, &owner, &crc,
!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
- if (!IS_ERR_VALUE(ret)) {
- /* use_module can fail due to OOM,
- or module initialization or unloading */
+ /* use_module can fail due to OOM,
+ or module initialization or unloading */
+ if (sym) {
if (!check_version(sechdrs, versindex, name, mod, crc) ||
!use_module(mod, owner))
- ret = -EINVAL;
+ sym = NULL;
}
- return ret;
+ return sym;
}
/*
@@ -1494,6 +1489,9 @@ static void free_module(struct module *mod)
/* Module unload stuff */
module_unload_free(mod);
+ /* Free any allocated parameters. */
+ destroy_params(mod->kp, mod->num_kp);
+
/* release any pointers to mcount in this module */
ftrace_release(mod->module_core, mod->core_size);
@@ -1516,17 +1514,15 @@ static void free_module(struct module *mod)
void *__symbol_get(const char *symbol)
{
struct module *owner;
- unsigned long value;
+ const struct kernel_symbol *sym;
preempt_disable();
- value = find_symbol(symbol, &owner, NULL, true, true);
- if (IS_ERR_VALUE(value))
- value = 0;
- else if (strong_try_module_get(owner))
- value = 0;
+ sym = find_symbol(symbol, &owner, NULL, true, true);
+ if (sym && strong_try_module_get(owner))
+ sym = NULL;
preempt_enable();
- return (void *)value;
+ return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
@@ -1554,8 +1550,7 @@ static int verify_export_symbols(struct module *mod)
for (i = 0; i < ARRAY_SIZE(arr); i++) {
for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
- if (!IS_ERR_VALUE(find_symbol(s->name, &owner,
- NULL, true, false))) {
+ if (find_symbol(s->name, &owner, NULL, true, false)) {
printk(KERN_ERR
"%s: exports duplicate symbol %s"
" (owned by %s)\n",
@@ -1579,6 +1574,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned long secbase;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
int ret = 0;
+ const struct kernel_symbol *ksym;
for (i = 1; i < n; i++) {
switch (sym[i].st_shndx) {
@@ -1598,13 +1594,14 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
break;
case SHN_UNDEF:
- sym[i].st_value
- = resolve_symbol(sechdrs, versindex,
- strtab + sym[i].st_name, mod);
-
+ ksym = resolve_symbol(sechdrs, versindex,
+ strtab + sym[i].st_name, mod);
/* Ok if resolved. */
- if (!IS_ERR_VALUE(sym[i].st_value))
+ if (ksym) {
+ sym[i].st_value = ksym->value;
break;
+ }
+
/* Ok if weak. */
if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
break;
@@ -1679,8 +1676,7 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || strncmp(secstrings + s->sh_name,
- ".init", 5) == 0)
+ || strstarts(secstrings + s->sh_name, ".init"))
continue;
s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
DEBUGP("\t%s\n", secstrings + s->sh_name);
@@ -1697,8 +1693,7 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || strncmp(secstrings + s->sh_name,
- ".init", 5) != 0)
+ || !strstarts(secstrings + s->sh_name, ".init"))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
| INIT_OFFSET_MASK);
@@ -1831,8 +1826,7 @@ static char elf_type(const Elf_Sym *sym,
else
return 'b';
}
- if (strncmp(secstrings + sechdrs[sym->st_shndx].sh_name,
- ".debug", strlen(".debug")) == 0)
+ if (strstarts(secstrings + sechdrs[sym->st_shndx].sh_name, ".debug"))
return 'n';
return '?';
}
@@ -1901,8 +1895,7 @@ static noinline struct module *load_module(void __user *umod,
unsigned int symindex = 0;
unsigned int strindex = 0;
unsigned int modindex, versindex, infoindex, pcpuindex;
- unsigned int num_kp, num_mcount;
- struct kernel_param *kp;
+ unsigned int num_mcount;
struct module *mod;
long err = 0;
void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -1919,12 +1912,6 @@ static noinline struct module *load_module(void __user *umod,
if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
return ERR_PTR(-ENOMEM);
- /* Create stop_machine threads since the error path relies on
- * a non-failing stop_machine call. */
- err = stop_machine_create();
- if (err)
- goto free_hdr;
-
if (copy_from_user(hdr, umod, len) != 0) {
err = -EFAULT;
goto free_hdr;
@@ -1965,7 +1952,7 @@ static noinline struct module *load_module(void __user *umod,
}
#ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */
- if (strncmp(secstrings+sechdrs[i].sh_name, ".exit", 5) == 0)
+ if (strstarts(secstrings+sechdrs[i].sh_name, ".exit"))
sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
}
@@ -2009,7 +1996,7 @@ static noinline struct module *load_module(void __user *umod,
modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
/* This is allowed: modprobe --force will invalidate it. */
if (!modmagic) {
- err = try_to_force_load(mod, "magic");
+ err = try_to_force_load(mod, "bad vermagic");
if (err)
goto free_hdr;
} else if (!same_magic(modmagic, vermagic, versindex)) {
@@ -2147,8 +2134,8 @@ static noinline struct module *load_module(void __user *umod,
/* Now we've got everything in the final locations, we can
* find optional sections. */
- kp = section_objs(hdr, sechdrs, secstrings, "__param", sizeof(*kp),
- &num_kp);
+ mod->kp = section_objs(hdr, sechdrs, secstrings, "__param",
+ sizeof(*mod->kp), &mod->num_kp);
mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
sizeof(*mod->syms), &mod->num_syms);
mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");
@@ -2198,8 +2185,8 @@ static noinline struct module *load_module(void __user *umod,
|| (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
) {
- printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
- err = try_to_force_load(mod, "nocrc");
+ err = try_to_force_load(mod,
+ "no versions for exported symbols");
if (err)
goto cleanup;
}
@@ -2294,11 +2281,11 @@ static noinline struct module *load_module(void __user *umod,
*/
list_add_rcu(&mod->list, &modules);
- err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
+ err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
if (err < 0)
goto unlink;
- err = mod_sysfs_setup(mod, kp, num_kp);
+ err = mod_sysfs_setup(mod, mod->kp, mod->num_kp);
if (err < 0)
goto unlink;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
@@ -2307,12 +2294,13 @@ static noinline struct module *load_module(void __user *umod,
/* Get rid of temporary copy */
vfree(hdr);
- stop_machine_destroy();
/* Done! */
return mod;
unlink:
- stop_machine(__unlink_module, mod, NULL);
+ /* Unlink carefully: kallsyms could be walking the list. */
+ list_del_rcu(&mod->list);
+ synchronize_sched();
module_arch_cleanup(mod);
cleanup:
kobject_del(&mod->mkobj.kobj);
@@ -2320,8 +2308,8 @@ static noinline struct module *load_module(void __user *umod,
ftrace_release(mod->module_core, mod->core_size);
free_unload:
module_unload_free(mod);
- free_init:
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+ free_init:
percpu_modfree(mod->refptr);
#endif
module_free(mod, mod->module_init);
@@ -2335,7 +2323,6 @@ static noinline struct module *load_module(void __user *umod,
kfree(args);
free_hdr:
vfree(hdr);
- stop_machine_destroy();
return ERR_PTR(err);
truncated:
@@ -2404,6 +2391,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_LIVE, mod);
+ /* We need to finish all async code before the module init sequence is done */
+ async_synchronize_full();
+
mutex_lock(&module_mutex);
/* Drop initial reference. */
module_put(mod);
@@ -2612,6 +2602,25 @@ unsigned long module_kallsyms_lookup_name(const char *name)
preempt_enable();
return ret;
}
+
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data)
+{
+ struct module *mod;
+ unsigned int i;
+ int ret;
+
+ list_for_each_entry(mod, &modules, list) {
+ for (i = 0; i < mod->num_symtab; i++) {
+ ret = fn(data, mod->strtab + mod->symtab[i].st_name,
+ mod, mod->symtab[i].st_value);
+ if (ret != 0)
+ return ret;
+ }
+ }
+ return 0;
+}
#endif /* CONFIG_KALLSYMS */
static char *module_flags(struct module *mod, char *buf)
@@ -2747,29 +2756,31 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
}
/*
- * Is this a valid module address?
+ * is_module_address - is this address inside a module?
+ * @addr: the address to check.
+ *
+ * See is_module_text_address() if you simply want to see if the address
+ * is code (not data).
*/
-int is_module_address(unsigned long addr)
+bool is_module_address(unsigned long addr)
{
- struct module *mod;
+ bool ret;
preempt_disable();
-
- list_for_each_entry_rcu(mod, &modules, list) {
- if (within_module_core(addr, mod)) {
- preempt_enable();
- return 1;
- }
- }
-
+ ret = __module_address(addr) != NULL;
preempt_enable();
- return 0;
+ return ret;
}
-
-/* Is this a valid kernel address? */
-__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
+/*
+ * __module_address - get the module which contains an address.
+ * @addr: the address.
+ *
+ * Must be called with preempt disabled or module mutex held so that
+ * module doesn't get freed during this.
+ */
+struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -2777,22 +2788,51 @@ __notrace_funcgraph struct module *__module_text_address(unsigned long addr)
return NULL;
list_for_each_entry_rcu(mod, &modules, list)
- if (within(addr, mod->module_init, mod->init_text_size)
- || within(addr, mod->module_core, mod->core_text_size))
+ if (within_module_core(addr, mod)
+ || within_module_init(addr, mod))
return mod;
return NULL;
}
+EXPORT_SYMBOL_GPL(__module_address);
-struct module *module_text_address(unsigned long addr)
+/*
+ * is_module_text_address - is this address inside module code?
+ * @addr: the address to check.
+ *
+ * See is_module_address() if you simply want to see if the address is
+ * anywhere in a module. See kernel_text_address() for testing if an
+ * address corresponds to kernel or module code.
+ */
+bool is_module_text_address(unsigned long addr)
{
- struct module *mod;
+ bool ret;
preempt_disable();
- mod = __module_text_address(addr);
+ ret = __module_text_address(addr) != NULL;
preempt_enable();
+ return ret;
+}
+
+/*
+ * __module_text_address - get the module whose code contains an address.
+ * @addr: the address.
+ *
+ * Must be called with preempt disabled or module mutex held so that
+ * module doesn't get freed during this.
+ */
+struct module *__module_text_address(unsigned long addr)
+{
+ struct module *mod = __module_address(addr);
+ if (mod) {
+ /* Make sure it's within the text section. */
+ if (!within(addr, mod->module_init, mod->init_text_size)
+ && !within(addr, mod->module_core, mod->core_text_size))
+ mod = NULL;
+ }
return mod;
}
+EXPORT_SYMBOL_GPL(__module_text_address);
/* Don't grab lock, we're oopsing. */
void print_modules(void)
@@ -2812,9 +2852,17 @@ void print_modules(void)
}
#ifdef CONFIG_MODVERSIONS
-/* Generate the signature for struct module here, too, for modversions. */
-void struct_module(struct module *mod) { return; }
-EXPORT_SYMBOL(struct_module);
+/* Generate the signature for all relevant module structures here.
+ * If these change, we don't want to try to parse the module. */
+void module_layout(struct module *mod,
+ struct modversion_info *ver,
+ struct kernel_param *kp,
+ struct kernel_symbol *ks,
+ struct marker *marker,
+ struct tracepoint *tp)
+{
+}
+EXPORT_SYMBOL(module_layout);
#endif
#ifdef CONFIG_MARKERS
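A hedged sketch of the new find_symbol() calling convention: callers now test the returned struct kernel_symbol pointer rather than an IS_ERR_VALUE()-encoded address. The looked-up symbol here is only an example.

#include <linux/module.h>
#include <linux/kernel.h>

static void example_lookup(void)
{
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	struct module *owner;

	preempt_disable();	/* pin the module list, as __symbol_get() does */
	sym = find_symbol("printk", &owner, &crc, true, true);
	if (sym)
		printk(KERN_DEBUG "printk is at %p (crc %s)\n",
		       (void *)sym->value, crc ? "present" : "absent");
	preempt_enable();
}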
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160eb532..50d022e5a560 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
/*
* Must be called with lock->wait_lock held.
*/
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
- lock->owner = new_owner;
-}
-
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
/* Mark the current thread as blocked on the lock: */
ti->task->blocked_on = waiter;
- waiter->lock = lock;
}
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+ mutex_clear_owner(lock);
}
void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->owner = NULL;
lock->magic = lock;
}
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdfc534b..6b2d735846a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
/*
* This must be called with lock->wait_lock held.
*/
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
- lock->owner = NULL;
-}
-
extern void debug_mutex_lock_common(struct mutex *lock,
struct mutex_waiter *waiter);
extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
+static inline void mutex_set_owner(struct mutex *lock)
+{
+ lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+ lock->owner = NULL;
+}
+
#define spin_lock_mutex(lock, flags) \
do { \
struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..507cf2b5e9f1 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
* Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
* David Howells for suggestions and improvements.
*
+ * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ * from the -rt tree, where it was originally implemented for rtmutexes
+ * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ * and Sven Dietrich.)
+ *
* Also see Documentation/mutex-design.txt.
*/
#include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
atomic_set(&lock->count, 1);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
+ mutex_clear_owner(lock);
debug_mutex_init(lock, name, key);
}
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
* 'unlocked' into 'locked' state.
*/
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+ mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
* The unlocking fastpath is the 0->1 transition from 'locked'
* into 'unlocked' state:
*/
+#ifndef CONFIG_DEBUG_MUTEXES
+ /*
+ * When debugging is enabled we must not clear the owner before time,
+ * the slow path will always be taken, and that clears the owner field
+ * after verifying that it was indeed current.
+ */
+ mutex_clear_owner(lock);
+#endif
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
@@ -129,21 +144,76 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
{
struct task_struct *task = current;
struct mutex_waiter waiter;
- unsigned int old_val;
unsigned long flags;
+ preempt_disable();
+ mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+ !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
+ /*
+ * Optimistic spinning.
+ *
+ * We try to spin for acquisition when we find that there are no
+ * pending waiters and the lock owner is currently running on a
+ * (different) CPU.
+ *
+ * The rationale is that if the lock owner is running, it is likely to
+ * release the lock soon.
+ *
+ * Since this needs the lock owner, and this mutex implementation
+ * doesn't track the owner atomically in the lock field, we need to
+ * track it non-atomically.
+ *
+ * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+ * to serialize everything.
+ */
+
+ for (;;) {
+ struct thread_info *owner;
+
+ /*
+ * If there's an owner, wait for it to either
+ * release the lock or go to sleep.
+ */
+ owner = ACCESS_ONCE(lock->owner);
+ if (owner && !mutex_spin_on_owner(lock, owner))
+ break;
+
+ if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+ lock_acquired(&lock->dep_map, ip);
+ mutex_set_owner(lock);
+ preempt_enable();
+ return 0;
+ }
+
+ /*
+ * When there's no owner, we might have preempted between the
+ * owner acquiring the lock and setting the owner field. If we're
+ * an RT task, we will live-lock because we won't let the owner
+ * complete.
+ */
+ if (!owner && (need_resched() || rt_task(task)))
+ break;
+
+ /*
+ * The cpu_relax() call is a compiler barrier which forces
+ * everything in this loop to be re-loaded. We don't need
+ * memory barriers as we'll eventually observe the right
+ * values at the cost of a few extra spins.
+ */
+ cpu_relax();
+ }
+#endif
spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_lock_common(lock, &waiter);
- mutex_acquire(&lock->dep_map, subclass, 0, ip);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
- old_val = atomic_xchg(&lock->count, -1);
- if (old_val == 1)
+ if (atomic_xchg(&lock->count, -1) == 1)
goto done;
lock_contended(&lock->dep_map, ip);
@@ -158,8 +228,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* that when we release the lock, we properly wake up the
* other waiters:
*/
- old_val = atomic_xchg(&lock->count, -1);
- if (old_val == 1)
+ if (atomic_xchg(&lock->count, -1) == 1)
break;
/*
@@ -173,21 +242,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
+ preempt_enable();
return -EINTR;
}
__set_task_state(task, state);
/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- schedule();
+ __schedule();
spin_lock_mutex(&lock->wait_lock, flags);
}
done:
lock_acquired(&lock->dep_map, ip);
/* got the lock - rejoice! */
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
- debug_mutex_set_owner(lock, task_thread_info(task));
+ mutex_remove_waiter(lock, &waiter, current_thread_info());
+ mutex_set_owner(lock);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +266,7 @@ done:
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
+ preempt_enable();
return 0;
}
@@ -222,7 +293,8 @@ int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
- return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+ subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +332,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
wake_up_process(waiter->task);
}
- debug_mutex_clear_owner(lock);
-
spin_unlock_mutex(&lock->wait_lock, flags);
}
@@ -298,18 +368,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
{
+ int ret;
+
might_sleep();
- return __mutex_fastpath_lock_retval
+ ret = __mutex_fastpath_lock_retval
(&lock->count, __mutex_lock_interruptible_slowpath);
+ if (!ret)
+ mutex_set_owner(lock);
+
+ return ret;
}
EXPORT_SYMBOL(mutex_lock_interruptible);
int __sched mutex_lock_killable(struct mutex *lock)
{
+ int ret;
+
might_sleep();
- return __mutex_fastpath_lock_retval
+ ret = __mutex_fastpath_lock_retval
(&lock->count, __mutex_lock_killable_slowpath);
+ if (!ret)
+ mutex_set_owner(lock);
+
+ return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
@@ -352,9 +434,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
prev = atomic_xchg(&lock->count, -1);
if (likely(prev == 1)) {
- debug_mutex_set_owner(lock, current_thread_info());
+ mutex_set_owner(lock);
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
}
+
/* Set it back to 0 if there are no waiters: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
@@ -380,8 +463,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
*/
int __sched mutex_trylock(struct mutex *lock)
{
- return __mutex_fastpath_trylock(&lock->count,
- __mutex_trylock_slowpath);
+ int ret;
+
+ ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
+ if (ret)
+ mutex_set_owner(lock);
+
+ return ret;
}
EXPORT_SYMBOL(mutex_trylock);
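The comment in __mutex_lock_common above explains the optimistic-spinning policy: spin only while there is no queued waiter and the current owner is still running on another CPU, and otherwise fall back to the sleeping slowpath. Below is a minimal userspace model of that decision loop; all names are invented and C11 atomics stand in for the kernel's cmpxchg and owner tracking, so this is a sketch of the policy, not the kernel code.

/*
 * Simplified userspace model of the spin-on-owner policy added above:
 * keep trying the fastpath cmpxchg while the owner is still on a CPU,
 * otherwise give up and take the sleeping slowpath.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_mutex {
	atomic_int count;		/* 1: unlocked, 0: locked, -1: waiters */
	atomic_int owner_on_cpu;	/* set while the owner task is running */
};

/* Returns true if the lock was taken by spinning, false for the slowpath. */
static bool toy_spin_acquire(struct toy_mutex *m)
{
	for (;;) {
		int expected = 1;

		/* Same cmpxchg the mutex fastpath would try. */
		if (atomic_compare_exchange_strong(&m->count, &expected, 0))
			return true;

		/* Owner sleeping or preempted: spinning cannot help. */
		if (!atomic_load(&m->owner_on_cpu))
			return false;

		sched_yield();		/* stands in for cpu_relax() */
	}
}

int main(void)
{
	struct toy_mutex m = { 1, 1 };	/* unlocked, owner notionally running */

	return toy_spin_acquire(&m) ? 0 : 1;
}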
diff --git a/kernel/mutex.h b/kernel/mutex.h
index a075dafbb290..67578ca48f94 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,8 +16,26 @@
#define mutex_remove_waiter(lock, waiter, ti) \
__list_del((waiter)->list.prev, (waiter)->list.next)
-#define debug_mutex_set_owner(lock, new_owner) do { } while (0)
-#define debug_mutex_clear_owner(lock) do { } while (0)
+#ifdef CONFIG_SMP
+static inline void mutex_set_owner(struct mutex *lock)
+{
+ lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+ lock->owner = NULL;
+}
+#else
+static inline void mutex_set_owner(struct mutex *lock)
+{
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+}
+#endif
+
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 78bc3fdac0d2..5aa854f9e5ae 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -34,7 +34,7 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
/*
* Rules:
- * 1. you can only enter a cgroup which is a child of your current
+ * 1. you can only enter a cgroup which is a descendant of your current
* cgroup
* 2. you can only place another process into a cgroup if
* a. you have CAP_SYS_ADMIN
@@ -45,21 +45,15 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
static int ns_can_attach(struct cgroup_subsys *ss,
struct cgroup *new_cgroup, struct task_struct *task)
{
- struct cgroup *orig;
-
if (current != task) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!cgroup_is_descendant(new_cgroup))
+ if (!cgroup_is_descendant(new_cgroup, current))
return -EPERM;
}
- if (atomic_read(&new_cgroup->count) != 0)
- return -EPERM;
-
- orig = task_cgroup(task, ns_subsys_id);
- if (orig && orig != new_cgroup->parent)
+ if (!cgroup_is_descendant(new_cgroup, task))
return -EPERM;
return 0;
@@ -77,7 +71,7 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
- if (!cgroup_is_descendant(cgroup))
+ if (!cgroup_is_descendant(cgroup, current))
return ERR_PTR(-EPERM);
ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
diff --git a/kernel/panic.c b/kernel/panic.c
index 32fe4eff1b89..874ecf1307ae 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -8,19 +8,19 @@
* This function is used through-out the kernel (including mm and fs)
* to indicate a major problem.
*/
+#include <linux/debug_locks.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
+#include <linux/random.h>
#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kexec.h>
+#include <linux/sched.h>
#include <linux/sysrq.h>
-#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/nmi.h>
-#include <linux/kexec.h>
-#include <linux/debug_locks.h>
-#include <linux/random.h>
-#include <linux/kallsyms.h>
#include <linux/dmi.h>
int panic_on_oops;
@@ -52,19 +52,15 @@ EXPORT_SYMBOL(panic_blink);
*
* This function never returns.
*/
-
NORET_TYPE void panic(const char * fmt, ...)
{
- long i;
static char buf[1024];
va_list args;
-#if defined(CONFIG_S390)
- unsigned long caller = (unsigned long) __builtin_return_address(0);
-#endif
+ long i;
/*
- * It's possible to come here directly from a panic-assertion and not
- * have preempt disabled. Some functions called from here want
+ * It's possible to come here directly from a panic-assertion and
+ * not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
*/
preempt_disable();
@@ -77,7 +73,6 @@ NORET_TYPE void panic(const char * fmt, ...)
#ifdef CONFIG_DEBUG_BUGVERBOSE
dump_stack();
#endif
- bust_spinlocks(0);
/*
* If we have crashed and we have a crash kernel loaded let it handle
@@ -86,14 +81,12 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
crash_kexec(NULL);
-#ifdef CONFIG_SMP
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
* situation.
*/
smp_send_stop();
-#endif
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
@@ -102,19 +95,21 @@ NORET_TYPE void panic(const char * fmt, ...)
if (panic_timeout > 0) {
/*
- * Delay timeout seconds before rebooting the machine.
- * We can't use the "normal" timers since we just panicked..
- */
- printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
+ * Delay timeout seconds before rebooting the machine.
+ * We can't use the "normal" timers since we just panicked.
+ */
+ printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
+
for (i = 0; i < panic_timeout*1000; ) {
touch_nmi_watchdog();
i += panic_blink(i);
mdelay(1);
i++;
}
- /* This will not be a clean reboot, with everything
- * shutting down. But if there is a chance of
- * rebooting the system it will be rebooted.
+ /*
+ * This will not be a clean reboot, with everything
+ * shutting down. But if there is a chance of
+ * rebooting the system it will be rebooted.
*/
emergency_restart();
}
@@ -127,38 +122,44 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#endif
#if defined(CONFIG_S390)
- disabled_wait(caller);
+ {
+ unsigned long caller;
+
+ caller = (unsigned long)__builtin_return_address(0);
+ disabled_wait(caller);
+ }
#endif
local_irq_enable();
- for (i = 0;;) {
+ for (i = 0; ; ) {
touch_softlockup_watchdog();
i += panic_blink(i);
mdelay(1);
i++;
}
+ bust_spinlocks(0);
}
EXPORT_SYMBOL(panic);
struct tnt {
- u8 bit;
- char true;
- char false;
+ u8 bit;
+ char true;
+ char false;
};
static const struct tnt tnts[] = {
- { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
- { TAINT_FORCED_MODULE, 'F', ' ' },
- { TAINT_UNSAFE_SMP, 'S', ' ' },
- { TAINT_FORCED_RMMOD, 'R', ' ' },
- { TAINT_MACHINE_CHECK, 'M', ' ' },
- { TAINT_BAD_PAGE, 'B', ' ' },
- { TAINT_USER, 'U', ' ' },
- { TAINT_DIE, 'D', ' ' },
- { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
- { TAINT_WARN, 'W', ' ' },
- { TAINT_CRAP, 'C', ' ' },
+ { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+ { TAINT_FORCED_MODULE, 'F', ' ' },
+ { TAINT_UNSAFE_SMP, 'S', ' ' },
+ { TAINT_FORCED_RMMOD, 'R', ' ' },
+ { TAINT_MACHINE_CHECK, 'M', ' ' },
+ { TAINT_BAD_PAGE, 'B', ' ' },
+ { TAINT_USER, 'U', ' ' },
+ { TAINT_DIE, 'D', ' ' },
+ { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+ { TAINT_WARN, 'W', ' ' },
+ { TAINT_CRAP, 'C', ' ' },
};
/**
@@ -195,7 +196,8 @@ const char *print_tainted(void)
*s = 0;
} else
snprintf(buf, sizeof(buf), "Not tainted");
- return(buf);
+
+ return buf;
}
int test_taint(unsigned flag)
@@ -211,7 +213,16 @@ unsigned long get_taint(void)
void add_taint(unsigned flag)
{
- debug_locks = 0; /* can't trust the integrity of the kernel anymore */
+ /*
+ * Can't trust the integrity of the kernel anymore.
+ * We don't call debug_locks_off() directly because the issue
+ * is not necessarily serious enough to set oops_in_progress to 1.
+ * Also we want to keep lockdep alive for the staging (TAINT_CRAP)
+ * and post-warning (TAINT_WARN) cases.
+ */
+ if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+
set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);
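add_taint() above sets one bit per taint reason, and print_tainted() renders those bits through the tnts[] table into the familiar one-letter flags. A small userspace model of that rendering follows; the bit numbers and the reduced table are made up for the example.

/* Userspace model of how the taint bits set by add_taint() are rendered
 * by print_tainted(); bit numbers here are illustrative only. */
#include <stdio.h>

struct toy_tnt { unsigned bit; char yes; char no; };

static const struct toy_tnt toy_tnts[] = {
	{ 0, 'P', 'G' },	/* proprietary module loaded */
	{ 1, 'F', ' ' },	/* module was force-loaded */
	{ 2, 'W', ' ' },	/* a WARN() has fired */
};

static void print_tainted_model(unsigned long mask)
{
	char buf[sizeof(toy_tnts) / sizeof(toy_tnts[0]) + 1];
	unsigned int i;

	for (i = 0; i < sizeof(toy_tnts) / sizeof(toy_tnts[0]); i++)
		buf[i] = (mask & (1UL << toy_tnts[i].bit)) ?
			 toy_tnts[i].yes : toy_tnts[i].no;
	buf[i] = '\0';
	printf("Tainted: %s\n", buf);
}

int main(void)
{
	print_tainted_model(1UL << 2);	/* prints "Tainted: G W" */
	return 0;
}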
@@ -266,8 +277,8 @@ static void do_oops_enter_exit(void)
}
/*
- * Return true if the calling CPU is allowed to print oops-related info. This
- * is a bit racy..
+ * Return true if the calling CPU is allowed to print oops-related info.
+ * This is a bit racy..
*/
int oops_may_print(void)
{
@@ -276,20 +287,22 @@ int oops_may_print(void)
/*
* Called when the architecture enters its oops handler, before it prints
- * anything. If this is the first CPU to oops, and it's oopsing the first time
- * then let it proceed.
+ * anything. If this is the first CPU to oops, and it's oopsing the first
+ * time then let it proceed.
*
- * This is all enabled by the pause_on_oops kernel boot option. We do all this
- * to ensure that oopses don't scroll off the screen. It has the side-effect
- * of preventing later-oopsing CPUs from mucking up the display, too.
+ * This is all enabled by the pause_on_oops kernel boot option. We do all
+ * this to ensure that oopses don't scroll off the screen. It has the
+ * side-effect of preventing later-oopsing CPUs from mucking up the display,
+ * too.
*
- * It turns out that the CPU which is allowed to print ends up pausing for the
- * right duration, whereas all the other CPUs pause for twice as long: once in
- * oops_enter(), once in oops_exit().
+ * It turns out that the CPU which is allowed to print ends up pausing for
+ * the right duration, whereas all the other CPUs pause for twice as long:
+ * once in oops_enter(), once in oops_exit().
*/
void oops_enter(void)
{
- debug_locks_off(); /* can't trust the integrity of the kernel anymore */
+ /* can't trust the integrity of the kernel anymore: */
+ debug_locks_off();
do_oops_enter_exit();
}
@@ -327,7 +340,7 @@ void oops_exit(void)
}
#ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
va_list args;
char function[KSYM_SYMBOL_LEN];
@@ -343,7 +356,7 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
if (board)
printk(KERN_WARNING "Hardware name: %s\n", board);
- if (fmt) {
+ if (*fmt) {
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
@@ -354,7 +367,14 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
print_oops_end_marker();
add_taint(TAINT_WARN);
}
-EXPORT_SYMBOL(warn_slowpath);
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+ static const char *empty = "";
+ warn_slowpath_fmt(file, line, empty);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
#endif
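Splitting warn_slowpath() into warn_slowpath_fmt() and warn_slowpath_null() lets callers without a message skip the varargs handling, while the '*fmt' test above suppresses the empty format. A hedged userspace model of that split, with invented names and printf() standing in for printk():

/* Userspace model: formatted warnings go through the varargs helper,
 * bare callers use the cheap no-argument wrapper. */
#include <stdarg.h>
#include <stdio.h>

static void toy_warn_fmt(const char *file, int line, const char *fmt, ...)
{
	va_list args;

	printf("WARNING: at %s:%d\n", file, line);
	if (*fmt) {			/* same "*fmt" test as above */
		va_start(args, fmt);
		vprintf(fmt, args);
		va_end(args);
	}
}

static void toy_warn_null(const char *file, int line)
{
	toy_warn_fmt(file, line, "");	/* empty format: nothing extra printed */
}

int main(void)
{
	toy_warn_fmt(__FILE__, __LINE__, "value was %d\n", 42);
	toy_warn_null(__FILE__, __LINE__);
	return 0;
}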
#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/params.c b/kernel/params.c
index a1e3025b19a9..de273ec85bd2 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,6 +24,9 @@
#include <linux/err.h>
#include <linux/slab.h>
+/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
+#define KPARAM_KMALLOCED 0x80000000
+
#if 0
#define DEBUGP printk
#else
@@ -217,7 +220,19 @@ int param_set_charp(const char *val, struct kernel_param *kp)
return -ENOSPC;
}
- *(char **)kp->arg = (char *)val;
+ if (kp->perm & KPARAM_KMALLOCED)
+ kfree(*(char **)kp->arg);
+
+ /* This is a hack. We can't strdup in early boot, and we
+ * don't need to; this mangled commandline is preserved. */
+ if (slab_is_available()) {
+ kp->perm |= KPARAM_KMALLOCED;
+ *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
+ if (!*(char **)kp->arg)
+ return -ENOMEM;
+ } else
+ *(const char **)kp->arg = val;
+
return 0;
}
@@ -571,6 +586,15 @@ void module_param_sysfs_remove(struct module *mod)
}
#endif
+void destroy_params(const struct kernel_param *params, unsigned num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (params[i].perm & KPARAM_KMALLOCED)
+ kfree(*(char **)params[i].arg);
+}
+
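The KPARAM_KMALLOCED bit above records, in the otherwise unused high bit of perm, whether the charp value was duplicated with kstrdup() and therefore must be freed on the next set or in destroy_params(). A userspace model of that ownership flag; names are invented and strdup()/free() stand in for kstrdup()/kfree().

/* Userspace model of the ownership flag stashed in "perm". */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_KMALLOCED 0x80000000u

struct toy_param {
	unsigned int perm;
	char *str;
};

static int toy_set_charp(struct toy_param *p, const char *val, int heap_ok)
{
	if (p->perm & TOY_KMALLOCED)
		free(p->str);			/* drop the previous duplicate */

	if (heap_ok) {				/* stands in for slab_is_available() */
		p->str = strdup(val);
		if (!p->str)
			return -1;
		p->perm |= TOY_KMALLOCED;
	} else {
		p->str = (char *)val;		/* early boot: alias the command line */
		p->perm &= ~TOY_KMALLOCED;
	}
	return 0;
}

int main(void)
{
	struct toy_param p = { .perm = 0644, .str = NULL };

	toy_set_charp(&p, "first", 1);
	printf("%s kmalloced=%d\n", p.str, !!(p.perm & TOY_KMALLOCED));
	toy_set_charp(&p, "second", 0);	/* frees "first", aliases "second" */
	printf("%s kmalloced=%d\n", p.str, !!(p.perm & TOY_KMALLOCED));
	return 0;
}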
static void __init kernel_add_sysfs_param(const char *name,
struct kernel_param *kparam,
unsigned int name_skip)
diff --git a/kernel/pid.c b/kernel/pid.c
index 1b3586fe753a..b2e5f78fd281 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -403,6 +403,8 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
struct pid *pid;
rcu_read_lock();
+ if (type != PIDTYPE_PID)
+ task = task->group_leader;
pid = get_pid(task->pids[type].pid);
rcu_read_unlock();
return pid;
@@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid)
}
EXPORT_SYMBOL_GPL(pid_vnr);
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns)
{
- return pid_nr_ns(task_pid(tsk), ns);
+ pid_t nr = 0;
+
+ rcu_read_lock();
+ if (!ns)
+ ns = current->nsproxy->pid_ns;
+ if (likely(pid_alive(task))) {
+ if (type != PIDTYPE_PID)
+ task = task->group_leader;
+ nr = pid_nr_ns(task->pids[type].pid, ns);
+ }
+ rcu_read_unlock();
+
+ return nr;
}
-EXPORT_SYMBOL(task_pid_nr_ns);
+EXPORT_SYMBOL(__task_pid_nr_ns);
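With __task_pid_nr_ns() taking an explicit pid_type, the removed task_pgrp_nr_ns()/task_session_nr_ns() exports can become thin wrappers in a header. A kernel-style sketch of what such wrappers could look like; this is assumed, not shown in this hunk.

/* Kernel-style sketch only: per-type wrappers expressed via the new helper,
 * e.g. in include/linux/pid.h or similar. */
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
				    struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
				       struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}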
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
@@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
}
EXPORT_SYMBOL(task_tgid_nr_ns);
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return pid_nr_ns(task_pgrp(tsk), ns);
-}
-EXPORT_SYMBOL(task_pgrp_nr_ns);
-
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return pid_nr_ns(task_session(tsk), ns);
-}
-EXPORT_SYMBOL(task_session_nr_ns);
-
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
return ns_of_pid(task_pid(tsk));
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index fab8ea86fac3..2d1001b4858d 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -152,6 +152,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
int nr;
int rc;
+ struct task_struct *task;
/*
* The last thread in the cgroup-init thread group is terminating.
@@ -169,7 +170,19 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
read_lock(&tasklist_lock);
nr = next_pidmap(pid_ns, 1);
while (nr > 0) {
- kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
+ rcu_read_lock();
+
+ /*
+ * Use force_sig() since it clears SIGNAL_UNKILLABLE, ensuring
+ * that any nested container's init processes don't ignore the
+ * signal.
+ */
+ task = pid_task(find_vpid(nr), PIDTYPE_PID);
+ if (task)
+ force_sig(SIGKILL, task);
+
+ rcu_read_unlock();
+
nr = next_pidmap(pid_ns, nr);
}
read_unlock(&tasklist_lock);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 8e5d9a68b022..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new)
cputime = secs_to_cputime(rlim_new);
if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
- cputime_lt(current->signal->it_prof_expires, cputime)) {
+ cputime_gt(current->signal->it_prof_expires, cputime)) {
spin_lock_irq(&current->sighand->siglock);
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
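The cputime_lt() to cputime_gt() change above means the RLIMIT_CPU timer is only re-armed when the new limit would fire sooner than the one already programmed, never pushed later. A tiny userspace model of that rule:

/* Userspace model of the corrected comparison. */
#include <stdio.h>

static unsigned long armed_expires;	/* 0 means nothing armed yet */

static void toy_update_rlimit_cpu(unsigned long new_expires)
{
	if (armed_expires == 0 || armed_expires > new_expires)
		armed_expires = new_expires;	/* move earlier, never later */
}

int main(void)
{
	toy_update_rlimit_cpu(100);
	toy_update_rlimit_cpu(500);	/* looser limit: keep the earlier expiry */
	printf("%lu\n", armed_expires);	/* prints 100 */
	return 0;
}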
@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
cpu->cpu = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
- cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
+ cpu->sched = task_sched_runtime(p);
break;
}
return 0;
@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
{
struct task_cputime cputime;
- thread_group_cputime(p, &cputime);
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
+ thread_group_cputime(p, &cputime);
cpu->cpu = cputime_add(cputime.utime, cputime.stime);
break;
case CPUCLOCK_VIRT:
+ thread_group_cputime(p, &cputime);
cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
- cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+ cpu->sched = thread_group_sched_runtime(p);
break;
}
return 0;
@@ -1419,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
- int firing;
+ int cpu_firing;
+
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
- firing = timer->it.cpu.firing;
+ cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
- if (likely(firing >= 0)) {
+ if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
- }
spin_unlock(&timer->it_lock);
}
}
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 4a4a206b1979..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,8 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
+#include <scsi/scsi_scan.h>
+#include <asm/suspend.h>
#include "power.h"
@@ -214,7 +216,7 @@ static int create_image(int platform_mode)
return error;
device_pm_lock();
- local_irq_disable();
+
/* At this point, device_suspend() has been called, but *not*
* device_power_down(). We *must* call device_power_down() now.
* Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -225,13 +227,25 @@ static int create_image(int platform_mode)
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting hibernation\n");
- goto Enable_irqs;
+ goto Unlock;
}
+
+ error = platform_pre_snapshot(platform_mode);
+ if (error || hibernation_test(TEST_PLATFORM))
+ goto Platform_finish;
+
+ error = disable_nonboot_cpus();
+ if (error || hibernation_test(TEST_CPUS)
+ || hibernation_testmode(HIBERNATION_TEST))
+ goto Enable_cpus;
+
+ local_irq_disable();
+
sysdev_suspend(PMSG_FREEZE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting hibernation\n");
- goto Power_up_devices;
+ goto Enable_irqs;
}
if (hibernation_test(TEST_CORE))
@@ -247,17 +261,28 @@ static int create_image(int platform_mode)
restore_processor_state();
if (!in_suspend)
platform_leave(platform_mode);
+
Power_up:
sysdev_resume();
/* NOTE: device_power_up() is just a resume() for devices
* that suspended with irqs off ... no overall powerup.
*/
- Power_up_devices:
- device_power_up(in_suspend ?
- (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
Enable_irqs:
local_irq_enable();
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Platform_finish:
+ platform_finish(platform_mode);
+
+ device_power_up(in_suspend ?
+ (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
+
+ Unlock:
device_pm_unlock();
+
return error;
}
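The reworked create_image() above relies on the usual goto-ladder unwind: a setup step that fails jumps to the label that undoes only what has already been done, and the labels fall through in reverse order of setup. A compact userspace model of that pattern, with invented step names:

/* Userspace model of the error-unwind ladder. */
#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("do   %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int create_image_model(void)
{
	int error;

	error = step("power down devices", 0);
	if (error)
		goto Unlock;

	error = step("platform pre-snapshot", 0);
	if (error)
		goto Platform_finish;

	error = step("disable nonboot cpus", 1);	/* simulate a failure */
	if (error)
		goto Enable_cpus;

	/* ... the snapshot itself would happen here ... */

 Enable_cpus:
	undo("nonboot cpus");
 Platform_finish:
	undo("platform");
	undo("devices");
 Unlock:
	undo("pm lock");
	return error;
}

int main(void)
{
	return create_image_model() ? 1 : 0;
}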
@@ -265,7 +290,7 @@ static int create_image(int platform_mode)
* hibernation_snapshot - quiesce devices and create the hibernation
* snapshot image.
* @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform frimware for the power transition.
+ * prepare the platform firmware for the power transition.
*
* Must be called with pm_mutex held
*/
@@ -291,25 +316,9 @@ int hibernation_snapshot(int platform_mode)
if (hibernation_test(TEST_DEVICES))
goto Recover_platform;
- error = platform_pre_snapshot(platform_mode);
- if (error || hibernation_test(TEST_PLATFORM))
- goto Finish;
+ error = create_image(platform_mode);
+ /* Control returns here after successful restore */
- error = disable_nonboot_cpus();
- if (!error) {
- if (hibernation_test(TEST_CPUS))
- goto Enable_cpus;
-
- if (hibernation_testmode(HIBERNATION_TEST))
- goto Enable_cpus;
-
- error = create_image(platform_mode);
- /* Control returns here after successful restore */
- }
- Enable_cpus:
- enable_nonboot_cpus();
- Finish:
- platform_finish(platform_mode);
Resume_devices:
device_resume(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
@@ -331,19 +340,33 @@ int hibernation_snapshot(int platform_mode)
* kernel.
*/
-static int resume_target_kernel(void)
+static int resume_target_kernel(bool platform_mode)
{
int error;
device_pm_lock();
- local_irq_disable();
+
error = device_power_down(PMSG_QUIESCE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting resume\n");
- goto Enable_irqs;
+ goto Unlock;
}
- sysdev_suspend(PMSG_QUIESCE);
+
+ error = platform_pre_restore(platform_mode);
+ if (error)
+ goto Cleanup;
+
+ error = disable_nonboot_cpus();
+ if (error)
+ goto Enable_cpus;
+
+ local_irq_disable();
+
+ error = sysdev_suspend(PMSG_QUIESCE);
+ if (error)
+ goto Enable_irqs;
+
/* We'll ignore saved state, but this gets preempt count (etc) right */
save_processor_state();
error = restore_highmem();
@@ -366,11 +389,23 @@ static int resume_target_kernel(void)
swsusp_free();
restore_processor_state();
touch_softlockup_watchdog();
+
sysdev_resume();
- device_power_up(PMSG_RECOVER);
+
Enable_irqs:
local_irq_enable();
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Cleanup:
+ platform_restore_cleanup(platform_mode);
+
+ device_power_up(PMSG_RECOVER);
+
+ Unlock:
device_pm_unlock();
+
return error;
}
@@ -378,7 +413,7 @@ static int resume_target_kernel(void)
* hibernation_restore - quiesce devices and restore the hibernation
* snapshot image. If successful, control returns in hibernation_snaphot()
* @platform_mode - if set, use the platform driver, if available, to
- * prepare the platform frimware for the transition.
+ * prepare the platform firmware for the transition.
*
* Must be called with pm_mutex held
*/
@@ -390,19 +425,10 @@ int hibernation_restore(int platform_mode)
pm_prepare_console();
suspend_console();
error = device_suspend(PMSG_QUIESCE);
- if (error)
- goto Finish;
-
- error = platform_pre_restore(platform_mode);
if (!error) {
- error = disable_nonboot_cpus();
- if (!error)
- error = resume_target_kernel();
- enable_nonboot_cpus();
+ error = resume_target_kernel(platform_mode);
+ device_resume(PMSG_RECOVER);
}
- platform_restore_cleanup(platform_mode);
- device_resume(PMSG_RECOVER);
- Finish:
resume_console();
pm_restore_console();
return error;
@@ -438,38 +464,46 @@ int hibernation_platform_enter(void)
goto Resume_devices;
}
+ device_pm_lock();
+
+ error = device_power_down(PMSG_HIBERNATE);
+ if (error)
+ goto Unlock;
+
error = hibernation_ops->prepare();
if (error)
- goto Resume_devices;
+ goto Platform_finish;
error = disable_nonboot_cpus();
if (error)
- goto Finish;
+ goto Platform_finish;
- device_pm_lock();
local_irq_disable();
- error = device_power_down(PMSG_HIBERNATE);
- if (!error) {
- sysdev_suspend(PMSG_HIBERNATE);
- hibernation_ops->enter();
- /* We should never get here */
- while (1);
- }
- local_irq_enable();
- device_pm_unlock();
+ sysdev_suspend(PMSG_HIBERNATE);
+ hibernation_ops->enter();
+ /* We should never get here */
+ while (1);
/*
* We don't need to reenable the nonboot CPUs or resume consoles, since
* the system is going to be halted anyway.
*/
- Finish:
+ Platform_finish:
hibernation_ops->finish();
+
+ device_power_up(PMSG_RESTORE);
+
+ Unlock:
+ device_pm_unlock();
+
Resume_devices:
entering_platform_hibernation = false;
device_resume(PMSG_RESTORE);
resume_console();
+
Close:
hibernation_ops->end();
+
return error;
}
@@ -622,32 +656,42 @@ static int software_resume(void)
* here to avoid lockdep complaining.
*/
mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+ if (swsusp_resume_device)
+ goto Check_image;
+
+ if (!strlen(resume_file)) {
+ error = -ENOENT;
+ goto Unlock;
+ }
+
+ pr_debug("PM: Checking image partition %s\n", resume_file);
+
+ /* Check if the device is there */
+ swsusp_resume_device = name_to_dev_t(resume_file);
if (!swsusp_resume_device) {
- if (!strlen(resume_file)) {
- mutex_unlock(&pm_mutex);
- return -ENOENT;
- }
/*
* Some device discovery might still be in progress; we need
* to wait for this to finish.
*/
wait_for_device_probe();
+ /*
+ * We can't depend on SCSI devices being available after loading
+ * one of their modules until scsi_complete_async_scans() is
+ * called, and the resume device usually is a SCSI one.
+ */
+ scsi_complete_async_scans();
+
swsusp_resume_device = name_to_dev_t(resume_file);
- pr_debug("PM: Resume from partition %s\n", resume_file);
- } else {
- pr_debug("PM: Resume from partition %d:%d\n",
- MAJOR(swsusp_resume_device),
- MINOR(swsusp_resume_device));
+ if (!swsusp_resume_device) {
+ error = -ENODEV;
+ goto Unlock;
+ }
}
- if (noresume) {
- /**
- * FIXME: If noresume is specified, we need to find the
- * partition and reset it back to normal swap space.
- */
- mutex_unlock(&pm_mutex);
- return 0;
- }
+ Check_image:
+ pr_debug("PM: Resume from partition %d:%d\n",
+ MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
pr_debug("PM: Checking hibernation image.\n");
error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index c9632f841f64..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,17 +287,38 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
*/
static int suspend_enter(suspend_state_t state)
{
- int error = 0;
+ int error;
device_pm_lock();
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
- if ((error = device_power_down(PMSG_SUSPEND))) {
+ if (suspend_ops->prepare) {
+ error = suspend_ops->prepare();
+ if (error)
+ goto Done;
+ }
+
+ error = device_power_down(PMSG_SUSPEND);
+ if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
- goto Done;
+ goto Platform_finish;
+ }
+
+ if (suspend_ops->prepare_late) {
+ error = suspend_ops->prepare_late();
+ if (error)
+ goto Power_up_devices;
}
+ if (suspend_test(TEST_PLATFORM))
+ goto Platform_wake;
+
+ error = disable_nonboot_cpus();
+ if (error || suspend_test(TEST_CPUS))
+ goto Enable_cpus;
+
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
error = sysdev_suspend(PMSG_SUSPEND);
if (!error) {
if (!suspend_test(TEST_CORE))
@@ -305,11 +326,26 @@ static int suspend_enter(suspend_state_t state)
sysdev_resume();
}
- device_power_up(PMSG_RESUME);
- Done:
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Platform_wake:
+ if (suspend_ops->wake)
+ suspend_ops->wake();
+
+ Power_up_devices:
+ device_power_up(PMSG_RESUME);
+
+ Platform_finish:
+ if (suspend_ops->finish)
+ suspend_ops->finish();
+
+ Done:
device_pm_unlock();
+
return error;
}
@@ -341,23 +377,8 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- if (suspend_ops->prepare) {
- error = suspend_ops->prepare();
- if (error)
- goto Resume_devices;
- }
-
- if (suspend_test(TEST_PLATFORM))
- goto Finish;
+ suspend_enter(state);
- error = disable_nonboot_cpus();
- if (!error && !suspend_test(TEST_CPUS))
- suspend_enter(state);
-
- enable_nonboot_cpus();
- Finish:
- if (suspend_ops->finish)
- suspend_ops->finish();
Resume_devices:
suspend_test_start();
device_resume(PMSG_RESUME);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f5fc2d7680f2..33e2e4a819f9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
INIT_LIST_HEAD(list);
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
unsigned long zone_start, zone_end;
struct mem_extent *ext, *cur, *aux;
- if (!populated_zone(zone))
- continue;
-
zone_start = zone->zone_start_pfn;
zone_end = zone->zone_start_pfn + zone->spanned_pages;
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
struct zone *zone;
unsigned int cnt = 0;
- for_each_zone(zone)
- if (populated_zone(zone) && is_highmem(zone))
+ for_each_populated_zone(zone)
+ if (is_highmem(zone))
cnt += zone_page_state(zone, NR_FREE_PAGES);
return cnt;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
- if (!bio)
- return -ENOMEM;
bio->bi_sector = page_off * (PAGE_SIZE >> 9);
bio->bi_bdev = resume_bdev;
bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index a92c91451559..78c35047586d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -51,6 +51,7 @@
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>
+#include <linux/io.h>
#include "power.h"
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void)
size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
tmp = size;
size += highmem_size;
- for_each_zone (zone)
- if (populated_zone(zone)) {
- tmp += snapshot_additional_pages(zone);
- if (is_highmem(zone)) {
- highmem_size -=
+ for_each_populated_zone(zone) {
+ tmp += snapshot_additional_pages(zone);
+ if (is_highmem(zone)) {
+ highmem_size -=
zone_page_state(zone, NR_FREE_PAGES);
- } else {
- tmp -= zone_page_state(zone, NR_FREE_PAGES);
- tmp += zone->lowmem_reserve[ZONE_NORMAL];
- }
+ } else {
+ tmp -= zone_page_state(zone, NR_FREE_PAGES);
+ tmp += zone->lowmem_reserve[ZONE_NORMAL];
}
+ }
if (highmem_size < 0)
highmem_size = 0;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6c85359364f2..ed97375daae9 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,6 +24,7 @@
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/smp_lock.h>
+#include <scsi/scsi_scan.h>
#include <asm/uaccess.h>
@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
filp->private_data = data;
memset(&data->handle, 0, sizeof(struct snapshot_handle));
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+ /* Hibernating. The image device should be accessible. */
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
} else {
+ /*
+ * Resuming. We may need to wait for the image device to
+ * appear.
+ */
+ wait_for_device_probe();
+ scsi_complete_async_scans();
+
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/kernel/printk.c b/kernel/printk.c
index e3602d0755b0..5052b5497c67 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
+#include <linux/kexec.h>
#include <asm/uaccess.h>
@@ -135,6 +136,24 @@ static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
+#ifdef CONFIG_KEXEC
+/*
+ * This appends the listed symbols to /proc/vmcoreinfo
+ *
+ * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile, to
+ * obtain access to symbols that are otherwise very difficult to locate. These
+ * symbols are specifically used so that utilities can access and extract the
+ * dmesg log from a vmcore file after a crash.
+ */
+void log_buf_kexec_setup(void)
+{
+ VMCOREINFO_SYMBOL(log_buf);
+ VMCOREINFO_SYMBOL(log_end);
+ VMCOREINFO_SYMBOL(log_buf_len);
+ VMCOREINFO_SYMBOL(logged_chars);
+}
+#endif
+
static int __init log_buf_len_setup(char *str)
{
unsigned size = memparse(str, &str);
@@ -1292,8 +1311,11 @@ EXPORT_SYMBOL(printk_ratelimit);
bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msecs)
{
- if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) {
- *caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs);
+ if (*caller_jiffies == 0
+ || !time_in_range(jiffies, *caller_jiffies,
+ *caller_jiffies
+ + msecs_to_jiffies(interval_msecs))) {
+ *caller_jiffies = jiffies;
return true;
}
return false;
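With the time_in_range() check above, printk_timed_ratelimit() opens a new window only after a full interval has elapsed since the last accepted message. A kernel-style usage sketch of the helper, with an invented caller (not compiled here):

/* Kernel-style usage sketch: emit this diagnostic at most once per 5 s.
 * The static jiffies stamp is the state printk_timed_ratelimit() maintains. */
static void report_overrun(int cpu)
{
	static unsigned long last;

	if (printk_timed_ratelimit(&last, 5 * MSEC_PER_SEC))
		printk(KERN_WARNING "cpu%d: queue overrun\n", cpu);
}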
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c9cf48b21f05..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -21,9 +21,7 @@
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
@@ -48,7 +46,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
list_add(&child->ptrace_entry, &new_parent->ptraced);
child->parent = new_parent;
}
-
+
/*
* Turn a tracing stop into a normal stop now, since with no tracer there
* would be no way to wake it up with SIGCONT or SIGKILL. If there was a
@@ -60,11 +58,15 @@ static void ptrace_untrace(struct task_struct *child)
{
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
- if (child->signal->flags & SIGNAL_STOP_STOPPED) {
+ /*
+ * If the group stop is completed or in progress,
+ * this thread was already counted as stopped.
+ */
+ if (child->signal->flags & SIGNAL_STOP_STOPPED ||
+ child->signal->group_stop_count)
__set_task_state(child, TASK_STOPPED);
- } else {
+ else
signal_wake_up(child, 1);
- }
}
spin_unlock(&child->sighand->siglock);
}
@@ -169,7 +171,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
task_lock(task);
err = __ptrace_may_access(task, mode);
task_unlock(task);
- return (!err ? true : false);
+ return !err;
}
int ptrace_attach(struct task_struct *task)
@@ -186,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
/* Protect exec's credential calculations against our interference;
* SUID, SGID and LSM creds get determined differently under ptrace.
*/
- retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+ retval = mutex_lock_interruptible(&task->cred_exec_mutex);
if (retval < 0)
goto out;
@@ -230,23 +232,63 @@ repeat:
bad:
write_unlock_irqrestore(&tasklist_lock, flags);
task_unlock(task);
- mutex_unlock(&current->cred_exec_mutex);
+ mutex_unlock(&task->cred_exec_mutex);
out:
return retval;
}
-static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
+/*
+ * Called with irqs disabled, returns true if children should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
{
- child->exit_code = data;
- /* .. re-parent .. */
- __ptrace_unlink(child);
- /* .. and wake it up. */
- if (child->exit_state != EXIT_ZOMBIE)
- wake_up_process(child);
+ int ret;
+ spin_lock(&sigh->siglock);
+ ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+ (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
+ spin_unlock(&sigh->siglock);
+ return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping. Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do.
+ * But if our normal children self-reap, then this child
+ * was prevented by ptrace and we must reap it now.
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+ __ptrace_unlink(p);
+
+ if (p->exit_state == EXIT_ZOMBIE) {
+ if (!task_detached(p) && thread_group_empty(p)) {
+ if (!same_thread_group(p->real_parent, tracer))
+ do_notify_parent(p, p->exit_signal);
+ else if (ignoring_children(tracer->sighand))
+ p->exit_signal = -1;
+ }
+ if (task_detached(p)) {
+ /* Mark it as in the process of being reaped. */
+ p->exit_state = EXIT_DEAD;
+ return true;
+ }
+ }
+
+ return false;
}
int ptrace_detach(struct task_struct *child, unsigned int data)
{
+ bool dead = false;
+
if (!valid_signal(data))
return -EIO;
@@ -255,14 +297,45 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
write_lock_irq(&tasklist_lock);
- /* protect against de_thread()->release_task() */
- if (child->ptrace)
- __ptrace_detach(child, data);
+ /*
+ * This child can be already killed. Make sure de_thread() or
+ * our sub-thread doing do_wait() didn't do release_task() yet.
+ */
+ if (child->ptrace) {
+ child->exit_code = data;
+ dead = __ptrace_detach(current, child);
+ }
write_unlock_irq(&tasklist_lock);
+ if (unlikely(dead))
+ release_task(child);
+
return 0;
}
+/*
+ * Detach all tasks we were using ptrace on.
+ */
+void exit_ptrace(struct task_struct *tracer)
+{
+ struct task_struct *p, *n;
+ LIST_HEAD(ptrace_dead);
+
+ write_lock_irq(&tasklist_lock);
+ list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
+ if (__ptrace_detach(tracer, p))
+ list_add(&p->ptrace_entry, &ptrace_dead);
+ }
+ write_unlock_irq(&tasklist_lock);
+
+ BUG_ON(!list_empty(&tracer->ptraced));
+
+ list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
+ list_del_init(&p->ptrace_entry);
+ release_task(p);
+ }
+}
+
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
int copied = 0;
@@ -283,7 +356,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
copied += retval;
src += retval;
dst += retval;
- len -= retval;
+ len -= retval;
}
return copied;
}
@@ -308,7 +381,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
copied += retval;
src += retval;
dst += retval;
- len -= retval;
+ len -= retval;
}
return copied;
}
@@ -421,9 +494,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data)
if (unlikely(!arch_has_single_step()))
return -EIO;
user_enable_single_step(child);
- }
- else
+ } else {
user_disable_single_step(child);
+ }
child->exit_code = data;
wake_up_process(child);
@@ -531,10 +604,11 @@ repeat:
ret = security_ptrace_traceme(current->parent);
/*
- * Set the ptrace bit in the process ptrace flags.
- * Then link us on our parent's ptraced list.
+ * Check PF_EXITING to ensure ->real_parent has not passed
+ * exit_ptrace(). Otherwise we don't report the error but
+ * pretend ->real_parent untraces us right after return.
*/
- if (!ret) {
+ if (!ret && !(current->real_parent->flags & PF_EXITING)) {
current->ptrace |= PT_PTRACED;
__ptrace_link(current, current->real_parent);
}
@@ -612,8 +686,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
goto out_put_task_struct;
ret = arch_ptrace(child, request, addr, data);
- if (ret < 0)
- goto out_put_task_struct;
out_put_task_struct:
put_task_struct(child);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 654c640a6b9c..0f2b0b311304 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -65,6 +65,7 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
.cpumask = CPU_BITS_NONE,
};
+
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.cur = -300,
.completed = -300,
@@ -73,8 +74,26 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
.cpumask = CPU_BITS_NONE,
};
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerate: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+}
static int blimit = 10;
static int qhimark = 10000;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cae8a059cf47..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
/*
* Awaken the corresponding synchronize_rcu() instance now that a
* grace period has elapsed.
@@ -122,6 +126,11 @@ static void rcu_barrier_func(void *type)
}
}
+static inline void wait_migrated_callbacks(void)
+{
+ wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
+
/*
* Orchestrate the specified type of RCU barrier, waiting for all
* RCU callbacks of the specified type to complete.
@@ -147,6 +156,7 @@ static void _rcu_barrier(enum rcu_barrier type)
complete(&rcu_barrier_completion);
wait_for_completion(&rcu_barrier_completion);
mutex_unlock(&rcu_barrier_mutex);
+ wait_migrated_callbacks();
}
/**
@@ -176,9 +186,41 @@ void rcu_barrier_sched(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+ if (atomic_dec_and_test(&rcu_migrate_type_count))
+ wake_up(&rcu_migrate_wq);
+}
+
+static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_DYING) {
+ /*
+ * preempt_disable() in on_each_cpu() prevents stop_machine(),
+ * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+ * returns, all online cpus have queued rcu_barrier_func(),
+ * and the dead CPU (if it exists) queues rcu_migrate_callback()s.
+ *
+ * These callbacks ensure _rcu_barrier() waits for all
+ * RCU callbacks of the specified type to complete.
+ */
+ atomic_set(&rcu_migrate_type_count, 3);
+ call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+ call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+ call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+ } else if (action == CPU_POST_DEAD) {
+ /* rcu_migrate_head is protected by cpu_add_remove_lock */
+ wait_migrated_callbacks();
+ }
+
+ return NOTIFY_OK;
+}
+
void __init rcu_init(void)
{
__rcu_init();
+ hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
}
void rcu_scheduler_starting(void)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb71..ce97a4df64d3 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */
};
+struct rcu_dyntick_sched {
+ int dynticks;
+ int dynticks_snap;
+ int sched_qs;
+ int sched_qs_snap;
+ int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+ .dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+ rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+ smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+ static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+ WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+ &rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
static struct rcu_ctrlblk rcu_ctrlblk = {
.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
}
}
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
- .dynticks = 1,
-};
-
#ifdef CONFIG_NO_HZ
static DEFINE_PER_CPU(int, rcu_update_flag);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0a..9b4a975a4b4a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test = 0;
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
}
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ firsterr = -ENOMEM;
+ VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+ goto unwind;
+ }
+
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
+ free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;
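Moving the shuffle mask off the stack follows the usual cpumask_var_t pattern: allocate once with alloc_cpumask_var(), reuse the mask, and release it with free_cpumask_var() on teardown. A kernel-style sketch of that pattern in isolation, with invented function names:

/* Kernel-style sketch of the cpumask_var_t allocate/reuse/free pattern. */
static cpumask_var_t tmp_mask;

static int toy_mask_init(void)
{
	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(tmp_mask);
	return 0;
}

static void toy_mask_exit(void)
{
	free_cpumask_var(tmp_mask);
}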
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97ce31579ec0..d2a372fb0b9b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -78,6 +78,26 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data);
struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerate: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = 1,
@@ -510,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
rdp->qs_pending = 1;
rdp->passed_quiesc = 0;
rdp->gpnum = rsp->gpnum;
- rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
- RCU_JIFFIES_TILL_FORCE_QS;
}
/*
@@ -558,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
rsp->gpnum++;
rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
- rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
- RCU_JIFFIES_TILL_FORCE_QS;
record_gp_stall_check_time(rsp);
dyntick_record_completed(rsp, rsp->completed - 1);
note_new_gpnum(rsp, rdp);
@@ -1035,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
unsigned long flags;
long lastcomp;
- struct rcu_data *rdp = rsp->rda[smp_processor_id()];
struct rcu_node *rnp = rcu_get_root(rsp);
u8 signaled;
@@ -1046,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
return; /* Someone else is already on the job. */
}
if (relaxed &&
- (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
- (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+ (long)(rsp->jiffies_force_qs - jiffies) >= 0)
goto unlock_ret; /* no emergency and done recently. */
rsp->n_force_qs++;
spin_lock(&rnp->lock);
lastcomp = rsp->completed;
signaled = rsp->signaled;
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
- rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
- RCU_JIFFIES_TILL_FORCE_QS;
if (lastcomp == rsp->gpnum) {
rsp->n_force_qs_ngp++;
spin_unlock(&rnp->lock);
@@ -1124,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
* If an RCU GP has gone long enough, go check for dyntick
* idle CPUs and, if needed, send resched IPIs.
*/
- if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
- (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
force_quiescent_state(rsp, 1);
/*
@@ -1210,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
if (unlikely(++rdp->qlen > qhimark)) {
rdp->blimit = LONG_MAX;
force_quiescent_state(rsp, 0);
- } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
- (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+ } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
force_quiescent_state(rsp, 1);
local_irq_restore(flags);
}
@@ -1270,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Has an RCU GP gone long enough to send resched IPIs &c? */
if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
- ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
- (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+ ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
return 1;
/* nothing to do */
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
new file mode 100644
index 000000000000..5e872bbf07f5
--- /dev/null
+++ b/kernel/rcutree.h
@@ -0,0 +1,10 @@
+
+/*
+ * RCU implementation internal declarations:
+ */
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d6db3e837826..4b1875ba9404 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,18 +43,18 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "rcutree.h"
+
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
return;
- seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+ seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
rdp->completed, rdp->gpnum,
rdp->passed_quiesc, rdp->passed_quiesc_completed,
- rdp->qs_pending,
- rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
- (int)(rdp->n_rcu_pending & 0xffff));
+ rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, " dt=%d/%d dn=%d df=%lu",
rdp->dynticks->dynticks,
@@ -100,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
{
if (!rdp->beenonline)
return;
- seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+ seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
rdp->completed, rdp->gpnum,
rdp->passed_quiesc, rdp->passed_quiesc_completed,
- rdp->qs_pending,
- rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
- rdp->n_rcu_pending);
+ rdp->qs_pending);
#ifdef CONFIG_NO_HZ
seq_printf(m, ",%d,%d,%d,%lu",
rdp->dynticks->dynticks,
@@ -121,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
- seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+ seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
#ifdef CONFIG_NO_HZ
seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
#endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/relay.c b/kernel/relay.c
index 8f2179c8056f..bc188549788f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -677,9 +677,7 @@ int relay_late_setup_files(struct rchan *chan,
*/
for_each_online_cpu(i) {
if (unlikely(!chan->buf[i])) {
- printk(KERN_ERR "relay_late_setup_files: CPU %u "
- "has no buffer, it must have!\n", i);
- BUG();
+ WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
err = -EINVAL;
break;
}
@@ -797,13 +795,15 @@ void relay_subbufs_consumed(struct rchan *chan,
if (!chan)
return;
- if (cpu >= NR_CPUS || !chan->buf[cpu])
+ if (cpu >= NR_CPUS || !chan->buf[cpu] ||
+ subbufs_consumed > chan->n_subbufs)
return;
buf = chan->buf[cpu];
- buf->subbufs_consumed += subbufs_consumed;
- if (buf->subbufs_consumed > buf->subbufs_produced)
+ if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
buf->subbufs_consumed = buf->subbufs_produced;
+ else
+ buf->subbufs_consumed += subbufs_consumed;
}
EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
res->end = end;
res->flags = IORESOURCE_BUSY;
- for (;;) {
- conflict = __request_resource(parent, res);
- if (!conflict)
- break;
- if (conflict != parent) {
- parent = conflict;
- if (!(conflict->flags & IORESOURCE_BUSY))
- continue;
- }
-
- /* Uhhuh, that didn't work out.. */
- kfree(res);
- res = NULL;
- break;
- }
-
- if (!res) {
- /* failed, split and try again */
-
- /* conflict covered whole area */
- if (conflict->start <= start && conflict->end >= end)
- return;
+ conflict = __request_resource(parent, res);
+ if (!conflict)
+ return;
- if (conflict->start > start)
- __reserve_region_with_split(root, start, conflict->start-1, name);
- if (!(conflict->flags & IORESOURCE_BUSY)) {
- resource_size_t common_start, common_end;
+ /* failed, split and try again */
+ kfree(res);
- common_start = max(conflict->start, start);
- common_end = min(conflict->end, end);
- if (common_start < common_end)
- __reserve_region_with_split(root, common_start, common_end, name);
- }
- if (conflict->end < end)
- __reserve_region_with_split(root, conflict->end+1, end, name);
- }
+ /* conflict covered whole area */
+ if (conflict->start <= start && conflict->end >= end)
+ return;
+ if (conflict->start > start)
+ __reserve_region_with_split(root, start, conflict->start-1, name);
+ if (conflict->end < end)
+ __reserve_region_with_split(root, conflict->end+1, end, name);
}
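The simplified __reserve_region_with_split() above recurses into the gaps on either side of a conflicting resource instead of iterating up the tree. A userspace model of that splitting recursion, with a single hard-coded busy range standing in for the resource tree:

/* Userspace model of the splitting recursion. */
#include <stdio.h>

struct range { long start, end; };

static const struct range busy = { 100, 199 };

static void reserve_with_split(long start, long end)
{
	/* No overlap: the whole request can be reserved as-is. */
	if (end < busy.start || start > busy.end) {
		printf("reserved [%ld, %ld]\n", start, end);
		return;
	}
	/* Conflict covers the whole request: nothing left to reserve. */
	if (busy.start <= start && busy.end >= end)
		return;
	/* Otherwise reserve whatever fits on either side of the conflict. */
	if (busy.start > start)
		reserve_with_split(start, busy.start - 1);
	if (busy.end < end)
		reserve_with_split(busy.end + 1, end);
}

int main(void)
{
	reserve_with_split(0, 300);	/* reserves [0, 99] and [200, 300] */
	return 0;
}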
void __init reserve_region_with_split(struct resource *root,
diff --git a/kernel/sched.c b/kernel/sched.c
index f4c413bdd38d..26efa475bdc1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
+ unsigned long delta;
+ ktime_t soft, hard;
+
if (hrtimer_active(&rt_b->rt_period_timer))
break;
now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
- hrtimer_start_expires(&rt_b->rt_period_timer,
- HRTIMER_MODE_ABS);
+
+ soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+ hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
+ __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+ HRTIMER_MODE_ABS, 0);
}
spin_unlock(&rt_b->rt_runtime_lock);
}
@@ -1110,7 +1117,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
if (rq == this_rq()) {
hrtimer_restart(timer);
} else if (!rq->hrtick_csd_pending) {
- __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+ __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
}
}
@@ -1146,7 +1153,8 @@ static __init void init_hrtick(void)
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
- hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+ HRTIMER_MODE_REL, 0);
}
static inline void init_hrtick(void)
@@ -1410,10 +1418,22 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct rq_iterator *iterator);
#endif
+/* Time spent by the tasks of the cpu accounting group executing in ... */
+enum cpuacct_stat_index {
+ CPUACCT_STAT_USER, /* ... user mode */
+ CPUACCT_STAT_SYSTEM, /* ... kernel mode */
+
+ CPUACCT_STAT_NSTATS,
+};
+
#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+static void cpuacct_update_stats(struct task_struct *tsk,
+ enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+static inline void cpuacct_update_stats(struct task_struct *tsk,
+ enum cpuacct_stat_index idx, cputime_t val) {}
#endif
static inline void inc_cpu_load(struct rq *rq, unsigned long load)
@@ -3190,7 +3210,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
/********** Helpers for find_busiest_group ************************/
-/**
+/*
* sd_lb_stats - Structure to store the statistics of a sched_domain
* during load balancing.
*/
@@ -3222,7 +3242,7 @@ struct sd_lb_stats {
#endif
};
-/**
+/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
struct sg_lb_stats {
@@ -3360,16 +3380,17 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
}
/**
- * check_power_save_busiest_group - Check if we have potential to perform
- * some power-savings balance. If yes, set the busiest group to be
- * the least loaded group in the sched_domain, so that it's CPUs can
- * be put to idle.
- *
+ * check_power_save_busiest_group - see if there is potential for some power-savings balance
* @sds: Variable containing the statistics of the sched_domain
* under consideration.
* @this_cpu: Cpu at which we're currently performing load-balancing.
* @imbalance: Variable to store the imbalance.
*
+ * Description:
+ * Check if we have potential to perform some power-savings balance.
+ * If yes, set the busiest group to be the least loaded group in the
+ * sched_domain, so that its CPUs can be put to idle.
+ *
* Returns 1 if there is potential to perform power-savings balance.
* Else returns 0.
*/
@@ -3817,19 +3838,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
*/
#define MAX_PINNED_INTERVAL 512
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance, struct cpumask *cpus)
+ int *balance)
{
int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
unsigned long flags;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
cpumask_setall(cpus);
@@ -3984,8 +4009,7 @@ out:
* this_rq is locked.
*/
static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
- struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
{
struct sched_group *group;
struct rq *busiest = NULL;
@@ -3993,6 +4017,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
int ld_moved = 0;
int sd_idle = 0;
int all_pinned = 0;
+ struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
cpumask_setall(cpus);
@@ -4133,10 +4158,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
unsigned long next_balance = jiffies + HZ;
- cpumask_var_t tmpmask;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return;
for_each_domain(this_cpu, sd) {
unsigned long interval;
@@ -4147,7 +4168,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
if (sd->flags & SD_BALANCE_NEWIDLE)
/* If we've pulled tasks over stop searching: */
pulled_task = load_balance_newidle(this_cpu, this_rq,
- sd, tmpmask);
+ sd);
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
@@ -4162,7 +4183,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
*/
this_rq->next_balance = next_balance;
}
- free_cpumask_var(tmpmask);
}
/*
@@ -4312,11 +4332,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
int need_serialize;
- cpumask_var_t tmp;
-
- /* Fails alloc? Rebalancing probably not a priority right now. */
- if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
- return;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
@@ -4341,7 +4356,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+ if (load_balance(cpu, rq, sd, idle, &balance)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
@@ -4375,8 +4390,6 @@ out:
*/
if (likely(update_next_balance))
rq->next_balance = next_balance;
-
- free_cpumask_var(tmp);
}
/*
@@ -4510,9 +4523,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
/*
- * Return any ns on the sched_clock that have not yet been banked in
+ * Return any ns on the sched_clock that have not yet been accounted in
* @p in case that task is currently running.
+ *
+ * Called with task_rq_lock() held on @rq.
*/
+static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
+{
+ u64 ns = 0;
+
+ if (task_current(rq, p)) {
+ update_rq_clock(rq);
+ ns = rq->clock - p->se.exec_start;
+ if ((s64)ns < 0)
+ ns = 0;
+ }
+
+ return ns;
+}
+
unsigned long long task_delta_exec(struct task_struct *p)
{
unsigned long flags;
@@ -4520,16 +4549,49 @@ unsigned long long task_delta_exec(struct task_struct *p)
u64 ns = 0;
rq = task_rq_lock(p, &flags);
+ ns = do_task_delta_exec(p, rq);
+ task_rq_unlock(rq, &flags);
- if (task_current(rq, p)) {
- u64 delta_exec;
+ return ns;
+}
- update_rq_clock(rq);
- delta_exec = rq->clock - p->se.exec_start;
- if ((s64)delta_exec > 0)
- ns = delta_exec;
- }
+/*
+ * Return accounted runtime for the task.
+ * In case the task is currently running, return the runtime plus current's
+ * pending runtime that has not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+ unsigned long flags;
+ struct rq *rq;
+ u64 ns = 0;
+
+ rq = task_rq_lock(p, &flags);
+ ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+ task_rq_unlock(rq, &flags);
+
+ return ns;
+}
+/*
+ * Return sum_exec_runtime for the thread group.
+ * In case the task is currently running, return the sum plus current's
+ * pending runtime that has not been accounted yet.
+ *
+ * Note that the thread group might have other running tasks as well,
+ * so the return value does not include pending runtime that other
+ * running tasks might have.
+ */
+unsigned long long thread_group_sched_runtime(struct task_struct *p)
+{
+ struct task_cputime totals;
+ unsigned long flags;
+ struct rq *rq;
+ u64 ns;
+
+ rq = task_rq_lock(p, &flags);
+ thread_group_cputime(p, &totals);
+ ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
task_rq_unlock(rq, &flags);
return ns;
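do_task_delta_exec() is the shared piece: already-banked runtime lives in se.sum_exec_runtime, and if the task is on a CPU right now the slice since exec_start has not been banked yet, so it is added on top. The arithmetic is simple enough to check in a self-contained sketch (plain structs and a fake clock, not scheduler code):

#include <stdio.h>

struct fake_task {
	unsigned long long sum_exec_runtime;	/* ns already banked */
	unsigned long long exec_start;		/* clock when it went on-CPU */
	int on_cpu;
};

/* ns accumulated since the runtime was last banked */
static unsigned long long delta_exec(const struct fake_task *t,
				     unsigned long long now)
{
	if (!t->on_cpu || now < t->exec_start)
		return 0;
	return now - t->exec_start;
}

static unsigned long long sched_runtime(const struct fake_task *t,
					unsigned long long now)
{
	return t->sum_exec_runtime + delta_exec(t, now);
}

int main(void)
{
	struct fake_task t = {
		.sum_exec_runtime = 1000, .exec_start = 400, .on_cpu = 1,
	};

	printf("%llu\n", sched_runtime(&t, 700));	/* 1000 + 300 = 1300 */
	return 0;
}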
@@ -4558,6 +4620,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
+
+ cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
/* Account for user time used */
acct_update_integrals(p);
}
@@ -4619,6 +4683,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
else
cpustat->system = cputime64_add(cpustat->system, tmp);
+ cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+
/* Account for system time used */
acct_update_integrals(p);
}
@@ -4666,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
if (user_tick)
account_user_time(p, one_jiffy, one_jiffy_scaled);
- else if (p != rq->idle)
+ else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
one_jiffy_scaled);
else
@@ -4780,10 +4846,7 @@ void scheduler_tick(void)
#endif
}
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
- defined(CONFIG_PREEMPT_TRACER))
-
-static inline unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
{
if (in_lock_functions(addr)) {
addr = CALLER_ADDR2;
@@ -4793,6 +4856,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
return addr;
}
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+ defined(CONFIG_PREEMPT_TRACER))
+
void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
@@ -4941,15 +5007,13 @@ pick_next_task(struct rq *rq)
/*
* schedule() is the main scheduler function.
*/
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
-need_resched:
- preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_qsctr_inc(cpu);
@@ -5006,13 +5070,80 @@ need_resched_nonpreemptible:
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
+}
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+ preempt_disable();
+ __schedule();
preempt_enable_no_resched();
if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
goto need_resched;
}
EXPORT_SYMBOL(schedule);
+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+ unsigned int cpu;
+ struct rq *rq;
+
+ if (!sched_feat(OWNER_SPIN))
+ return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ /*
+ * Need to access the cpu field knowing that
+ * DEBUG_PAGEALLOC could have unmapped it if
+ * the mutex owner just released it and exited.
+ */
+ if (probe_kernel_address(&owner->cpu, cpu))
+ goto out;
+#else
+ cpu = owner->cpu;
+#endif
+
+ /*
+ * Even if the access succeeded (likely case),
+ * the cpu field may no longer be valid.
+ */
+ if (cpu >= nr_cpumask_bits)
+ goto out;
+
+ /*
+ * We need to validate that we can do a
+ * get_cpu() and that we have the percpu area.
+ */
+ if (!cpu_online(cpu))
+ goto out;
+
+ rq = cpu_rq(cpu);
+
+ for (;;) {
+ /*
+ * Owner changed, break to re-assess state.
+ */
+ if (lock->owner != owner)
+ break;
+
+ /*
+ * Is that owner really running on that cpu?
+ */
+ if (task_thread_info(rq->curr) != owner || need_resched())
+ return 0;
+
+ cpu_relax();
+ }
+out:
+ return 1;
+}
+#endif
+
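mutex_spin_on_owner() reports 1 while spinning is still worthwhile (the owner released the lock or handed it over) and 0 once the owner is no longer on a CPU or we need to reschedule. Its caller is the adaptive-spin path added to kernel/mutex.c elsewhere in this merge; very roughly, and only as a sketch of the control flow (lock->owner exists only when the new spinning support is configured in, and the real code carries extra lockdep and RT-task handling):

static int mutex_optimistic_spin(struct mutex *lock)
{
	for (;;) {
		struct thread_info *owner;

		/* snapshot the (speculative) owner pointer */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			return 0;	/* owner scheduled out: caller should block */

		/* the lock may have been released while we spun */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 1;	/* acquired without sleeping */

		if (need_resched())
			return 0;

		cpu_relax();		/* compiler barrier; re-load owner next pass */
	}
}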
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
@@ -5130,11 +5261,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
__wake_up_common(q, mode, 1, 0, NULL);
}
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+ __wake_up_common(q, mode, 1, 0, key);
+}
+
/**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
*
* The sync wakeup differs that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
@@ -5143,8 +5280,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
*
* On UP it can prevent extra preemption.
*/
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, void *key)
{
unsigned long flags;
int sync = 1;
@@ -5156,9 +5293,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
sync = 0;
spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+ __wake_up_common(q, mode, nr_exclusive, sync, key);
spin_unlock_irqrestore(&q->lock, flags);
}
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+ __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
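The *_key variants hand an opaque pointer through to each waiter's wake function, so one waitqueue can carry waiters interested in different events and the waker can target only the relevant ones. A hedged sketch of a key-aware waiter; the bitmask encoding of the key is purely illustrative, only the function signatures follow wait.h of this era:

struct keyed_waiter {
	wait_queue_t	wait;
	unsigned long	interested;	/* event bits this waiter cares about */
};

static int keyed_wake_function(wait_queue_t *wait, unsigned mode,
			       int sync, void *key)
{
	struct keyed_waiter *kw = container_of(wait, struct keyed_waiter, wait);

	if (key && !((unsigned long)key & kw->interested))
		return 0;		/* not our event: keep sleeping */

	return autoremove_wake_function(wait, mode, sync, key);
}

/* waker side: only waiters whose mask overlaps bit 0 are considered */
static void kick_readers(wait_queue_head_t *wq)
{
	__wake_up_sync_key(wq, TASK_NORMAL, 1, (void *)0x1UL);
}

The waiter installs keyed_wake_function with init_waitqueue_func_entry() before adding itself to the queue; waiters set up with DEFINE_WAIT() keep the default behaviour and simply ignore the key.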
/**
@@ -7221,7 +7367,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpumask_or(groupmask, groupmask, sched_group_cpus(group));
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
+
printk(KERN_CONT " %s", str);
+ if (group->__cpu_power != SCHED_LOAD_SCALE) {
+ printk(KERN_CONT " (__cpu_power = %d)",
+ group->__cpu_power);
+ }
group = group->next;
} while (group != sd->groups);
@@ -7647,7 +7798,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
{
int group;
- cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
if (sg)
*sg = &per_cpu(sched_group_core, group).sg;
@@ -7676,7 +7827,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
#else
group = cpu;
@@ -8019,7 +8170,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
SD_INIT(sd, SIBLING);
set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd),
- &per_cpu(cpu_sibling_map, i), cpu_map);
+ topology_thread_cpumask(i), cpu_map);
sd->parent = p;
p->child = sd;
cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -8030,7 +8181,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
/* Set up CPU (sibling) groups */
for_each_cpu(i, cpu_map) {
cpumask_and(this_sibling_map,
- &per_cpu(cpu_sibling_map, i), cpu_map);
+ topology_thread_cpumask(i), cpu_map);
if (i != cpumask_first(this_sibling_map))
continue;
@@ -8706,6 +8857,9 @@ void __init sched_init(void)
#ifdef CONFIG_USER_SCHED
alloc_size *= 2;
#endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ alloc_size += num_possible_cpus() * cpumask_size();
+#endif
/*
* As sched_init() is called before page_alloc is setup,
* we use alloc_bootmem().
@@ -8743,6 +8897,12 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+ ptr += cpumask_size();
+ }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
}
#ifdef CONFIG_SMP
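The temporary balancing masks are now one statically declared per-CPU cpumask_var_t instead of per-call allocations; with CONFIG_CPUMASK_OFFSTACK the variable is only a pointer, which is why sched_init() carves backing storage for every possible CPU out of the bootmem block it already allocates. Outside early boot the same pattern is usually set up with the allocator instead; a hedged sketch with illustrative names:

static DEFINE_PER_CPU(cpumask_var_t, my_scratch_mask);

static int __init my_scratch_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* only allocates real storage when CONFIG_CPUMASK_OFFSTACK=y */
		if (!alloc_cpumask_var_node(&per_cpu(my_scratch_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu)))
			return -ENOMEM;
	}
	return 0;
}

static void my_balance_step(void)
{
	/* callers run with preemption disabled, so this CPU's mask is ours */
	struct cpumask *mask = __get_cpu_var(my_scratch_mask);

	cpumask_setall(mask);
	/* ... narrow 'mask' down and use it as scratch space ... */
}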
@@ -9835,6 +9995,7 @@ struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds pointer to a u64-type object on every cpu */
u64 *cpuusage;
+ struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
};
@@ -9859,20 +10020,32 @@ static struct cgroup_subsys_state *cpuacct_create(
struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ int i;
if (!ca)
- return ERR_PTR(-ENOMEM);
+ goto out;
ca->cpuusage = alloc_percpu(u64);
- if (!ca->cpuusage) {
- kfree(ca);
- return ERR_PTR(-ENOMEM);
- }
+ if (!ca->cpuusage)
+ goto out_free_ca;
+
+ for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
+ if (percpu_counter_init(&ca->cpustat[i], 0))
+ goto out_free_counters;
if (cgrp->parent)
ca->parent = cgroup_ca(cgrp->parent);
return &ca->css;
+
+out_free_counters:
+ while (--i >= 0)
+ percpu_counter_destroy(&ca->cpustat[i]);
+ free_percpu(ca->cpuusage);
+out_free_ca:
+ kfree(ca);
+out:
+ return ERR_PTR(-ENOMEM);
}
/* destroy an existing cpu accounting group */
@@ -9880,7 +10053,10 @@ static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = cgroup_ca(cgrp);
+ int i;
+ for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
+ percpu_counter_destroy(&ca->cpustat[i]);
free_percpu(ca->cpuusage);
kfree(ca);
}
@@ -9967,6 +10143,25 @@ static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
return 0;
}
+static const char *cpuacct_stat_desc[] = {
+ [CPUACCT_STAT_USER] = "user",
+ [CPUACCT_STAT_SYSTEM] = "system",
+};
+
+static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
+ struct cgroup_map_cb *cb)
+{
+ struct cpuacct *ca = cgroup_ca(cgrp);
+ int i;
+
+ for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
+ s64 val = percpu_counter_read(&ca->cpustat[i]);
+ val = cputime64_to_clock_t(val);
+ cb->fill(cb, cpuacct_stat_desc[i], val);
+ }
+ return 0;
+}
+
static struct cftype files[] = {
{
.name = "usage",
@@ -9977,7 +10172,10 @@ static struct cftype files[] = {
.name = "usage_percpu",
.read_seq_string = cpuacct_percpu_seq_read,
},
-
+ {
+ .name = "stat",
+ .read_map = cpuacct_stats_show,
+ },
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -9999,12 +10197,38 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
return;
cpu = task_cpu(tsk);
+
+ rcu_read_lock();
+
ca = task_ca(tsk);
for (; ca; ca = ca->parent) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
}
+
+ rcu_read_unlock();
+}
+
+/*
+ * Charge the system/user time to the task's accounting group.
+ */
+static void cpuacct_update_stats(struct task_struct *tsk,
+ enum cpuacct_stat_index idx, cputime_t val)
+{
+ struct cpuacct *ca;
+
+ if (unlikely(!cpuacct_subsys.active))
+ return;
+
+ rcu_read_lock();
+ ca = task_ca(tsk);
+
+ do {
+ percpu_counter_add(&ca->cpustat[idx], val);
+ ca = ca->parent;
+ } while (ca);
+ rcu_read_unlock();
}
struct cgroup_subsys cpuacct_subsys = {
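cpuacct now keeps a percpu_counter per statistic and charges the whole ancestry chain on every accounting call, and its create path shows the usual unwind-what-you-initialised error handling. The lifecycle boils down to a small pattern; a hedged sketch with generic names rather than the cpuacct structures (percpu_counter_init() here takes only the initial value, as in this kernel):

#define MY_NSTATS 2

struct my_group {
	struct percpu_counter	stat[MY_NSTATS];
	struct my_group		*parent;
};

static int my_group_init(struct my_group *g, struct my_group *parent)
{
	int i;

	for (i = 0; i < MY_NSTATS; i++)
		if (percpu_counter_init(&g->stat[i], 0))
			goto undo;
	g->parent = parent;
	return 0;

undo:
	while (--i >= 0)
		percpu_counter_destroy(&g->stat[i]);
	return -ENOMEM;
}

/* charge 'val' to the group and every ancestor: a cheap per-cpu add each */
static void my_group_charge(struct my_group *g, int idx, s64 val)
{
	for (; g; g = g->parent)
		percpu_counter_add(&g->stat[idx], val);
}

Reads via percpu_counter_read() are approximate by design; cpuacct converts the raw cputime with cputime64_to_clock_t() before reporting, so the new cpuacct.stat file shows user/system values in clock ticks.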
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 390f33234bd0..819f17ac796e 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -25,6 +25,7 @@
* consistent between cpus (never more than 2 jiffies difference).
*/
#include <linux/spinlock.h>
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
@@ -154,6 +155,17 @@ u64 sched_clock_cpu(int cpu)
return sched_clock();
scd = cpu_sdc(cpu);
+
+ /*
+ * Normally this is not called in NMI context - but if it is,
+ * trying to do any locking here is totally lethal.
+ */
+ if (unlikely(in_nmi()))
+ return scd->clock;
+
+ if (unlikely(!sched_clock_running))
+ return 0ull;
+
WARN_ON_ONCE(!irqs_disabled());
now = sched_clock();
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 1e00bfacf9b8..cdd3c89574cd 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
* cpupri_find - find the best (lowest-pri) CPU in the system
* @cp: The cpupri context
* @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
*
* Note: This function returns the recommended CPUs as calculated during the
* current invokation. By the time the call returns, the CPUs may have in
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
continue;
- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+ if (lowest_mask)
+ cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
return 1;
}
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 642a94ef8a0a..9a7e859b8fbf 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -25,7 +25,7 @@ struct cpupri {
#ifdef CONFIG_SMP
int cpupri_find(struct cpupri *cp,
- struct task_struct *p, cpumask_t *lowest_mask);
+ struct task_struct *p, struct cpumask *lowest_mask);
void cpupri_set(struct cpupri *cp, int cpu, int pri);
int cpupri_init(struct cpupri *cp, bool bootmem);
void cpupri_cleanup(struct cpupri *cp);
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 76f61756e677..4569bfa7df9b 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -14,3 +14,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)
SCHED_FEAT(WAKEUP_OVERLAP, 0)
SCHED_FEAT(LAST_BUDDY, 1)
+SCHED_FEAT(OWNER_SPIN, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 299d012b4394..f2c66f8f9712 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -948,20 +948,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
- cpumask_var_t mask;
-
if (rq->curr->rt.nr_cpus_allowed == 1)
return;
- if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
- return;
-
if (p->rt.nr_cpus_allowed != 1
- && cpupri_find(&rq->rd->cpupri, p, mask))
- goto free;
+ && cpupri_find(&rq->rd->cpupri, p, NULL))
+ return;
- if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
- goto free;
+ if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+ return;
/*
* There appears to be other cpus that can accept
@@ -970,8 +965,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
*/
requeue_task_rt(rq, p, 1);
resched_task(rq->curr);
-free:
- free_cpumask_var(mask);
}
#endif /* CONFIG_SMP */
diff --git a/kernel/signal.c b/kernel/signal.c
index f93efec14ff5..d2dd9cf5dcc6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -55,10 +55,22 @@ static int sig_handler_ignored(void __user *handler, int sig)
(handler == SIG_DFL && sig_kernel_ignore(sig));
}
-static int sig_ignored(struct task_struct *t, int sig)
+static int sig_task_ignored(struct task_struct *t, int sig,
+ int from_ancestor_ns)
{
void __user *handler;
+ handler = sig_handler(t, sig);
+
+ if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+ handler == SIG_DFL && !from_ancestor_ns)
+ return 1;
+
+ return sig_handler_ignored(handler, sig);
+}
+
+static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
+{
/*
* Blocked signals are never ignored, since the
* signal handler may change by the time it is
@@ -67,14 +79,13 @@ static int sig_ignored(struct task_struct *t, int sig)
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
- handler = sig_handler(t, sig);
- if (!sig_handler_ignored(handler, sig))
+ if (!sig_task_ignored(t, sig, from_ancestor_ns))
return 0;
/*
* Tracers may want to know about even ignored signals.
*/
- return !tracehook_consider_ignored_signal(t, sig, handler);
+ return !tracehook_consider_ignored_signal(t, sig);
}
/*
@@ -323,7 +334,7 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
return 0;
- return !tracehook_consider_fatal_signal(tsk, sig, handler);
+ return !tracehook_consider_fatal_signal(tsk, sig);
}
@@ -629,7 +640,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
* Returns true if the signal should be actually delivered, otherwise
* it should be dropped.
*/
-static int prepare_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
struct signal_struct *signal = p->signal;
struct task_struct *t;
@@ -713,7 +724,7 @@ static int prepare_signal(int sig, struct task_struct *p)
}
}
- return !sig_ignored(p, sig);
+ return !sig_ignored(p, sig, from_ancestor_ns);
}
/*
@@ -782,7 +793,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
!(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL ||
- !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
+ !tracehook_consider_fatal_signal(t, sig))) {
/*
* This signal will be fatal to the whole group.
*/
@@ -818,8 +829,8 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
- int group)
+static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ int group, int from_ancestor_ns)
{
struct sigpending *pending;
struct sigqueue *q;
@@ -827,7 +838,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
trace_sched_signal_send(sig, t);
assert_spin_locked(&t->sighand->siglock);
- if (!prepare_signal(sig, t))
+
+ if (!prepare_signal(sig, t, from_ancestor_ns))
return 0;
pending = group ? &t->signal->shared_pending : &t->pending;
@@ -876,6 +888,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
break;
default:
copy_siginfo(&q->info, info);
+ if (from_ancestor_ns)
+ q->info.si_pid = 0;
break;
}
} else if (!is_si_special(info)) {
@@ -894,6 +908,20 @@ out_set:
return 0;
}
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ int group)
+{
+ int from_ancestor_ns = 0;
+
+#ifdef CONFIG_PID_NS
+ if (!is_si_special(info) && SI_FROMUSER(info) &&
+ task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
+ from_ancestor_ns = 1;
+#endif
+
+ return __send_signal(sig, info, t, group, from_ancestor_ns);
+}
+
int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -1138,7 +1166,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
if (sig && p->sighand) {
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
- ret = __group_send_sig_info(sig, info, p);
+ ret = __send_signal(sig, info, p, 1, 0);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
out_unlock:
@@ -1325,7 +1353,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
goto ret;
ret = 1; /* the signal is ignored */
- if (!prepare_signal(sig, t))
+ if (!prepare_signal(sig, t, 0))
goto out;
ret = 0;
@@ -1849,9 +1877,16 @@ relock:
/*
* Global init gets no signals it doesn't want.
+ * Container-init gets no signals it doesn't want from same
+ * container.
+ *
+ * Note that if global/container-init sees a sig_kernel_only()
+ * signal here, the signal must have been generated internally
+ * or must have come from an ancestor namespace. In either
+ * case, the signal cannot be dropped.
*/
if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
- !signal_group_exit(signal))
+ !sig_kernel_only(signr))
continue;
if (sig_kernel_stop(signr)) {
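Taken together, the signal.c hunks encode one rule for an unkillable (container-init) task: a signal whose disposition is the default action is dropped when it was generated inside the init's own namespace, but delivered when it came from an ancestor namespace, and in the delivery loop only sig_kernel_only() signals get through the SIGNAL_UNKILLABLE check. Stripped of the blocked-signal and tracer checks, the added test in sig_task_ignored() reduces to a two-input decision; a plain C rendering of just that predicate, with illustrative ints instead of kernel types:

#include <stdio.h>

/* 1 = drop the signal for an unkillable task, 0 = let delivery proceed */
static int unkillable_drops(int handler_is_default, int from_ancestor_ns)
{
	return handler_is_default && !from_ancestor_ns;
}

int main(void)
{
	printf("%d\n", unkillable_drops(1, 0));	/* same container, SIG_DFL: dropped */
	printf("%d\n", unkillable_drops(1, 1));	/* ancestor namespace: delivered */
	printf("%d\n", unkillable_drops(0, 0));	/* handler installed: delivered */
	return 0;
}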
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
new file mode 100644
index 000000000000..b28d19135f43
--- /dev/null
+++ b/kernel/slow-work.c
@@ -0,0 +1,640 @@
+/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ *
+ * See Documentation/slow-work.txt
+ */
+
+#include <linux/module.h>
+#include <linux/slow-work.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/wait.h>
+
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
+ * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
+ * OOM */
+
+static void slow_work_cull_timeout(unsigned long);
+static void slow_work_oom_timeout(unsigned long);
+
+#ifdef CONFIG_SYSCTL
+static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+
+static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *,
+ void __user *, size_t *, loff_t *);
+#endif
+
+/*
+ * The pool of threads has at least min threads in it as long as someone is
+ * using the facility, and may have as many as max.
+ *
+ * A portion of the pool may be processing very slow operations.
+ */
+static unsigned slow_work_min_threads = 2;
+static unsigned slow_work_max_threads = 4;
+static unsigned vslow_work_proportion = 50; /* % of threads that may process
+ * very slow work */
+
+#ifdef CONFIG_SYSCTL
+static const int slow_work_min_min_threads = 2;
+static int slow_work_max_max_threads = 255;
+static const int slow_work_min_vslow = 1;
+static const int slow_work_max_vslow = 99;
+
+ctl_table slow_work_sysctls[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "min-threads",
+ .data = &slow_work_min_threads,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = slow_work_min_threads_sysctl,
+ .extra1 = (void *) &slow_work_min_min_threads,
+ .extra2 = &slow_work_max_threads,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "max-threads",
+ .data = &slow_work_max_threads,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = slow_work_max_threads_sysctl,
+ .extra1 = &slow_work_min_threads,
+ .extra2 = (void *) &slow_work_max_max_threads,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "vslow-percentage",
+ .data = &vslow_work_proportion,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *) &slow_work_min_vslow,
+ .extra2 = (void *) &slow_work_max_vslow,
+ },
+ { .ctl_name = 0 }
+};
+#endif
+
+/*
+ * The active state of the thread pool
+ */
+static atomic_t slow_work_thread_count;
+static atomic_t vslow_work_executing_count;
+
+static bool slow_work_may_not_start_new_thread;
+static bool slow_work_cull; /* cull a thread due to lack of activity */
+static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
+static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
+static struct slow_work slow_work_new_thread; /* new thread starter */
+
+/*
+ * The queues of work items and the lock governing access to them. These are
+ * shared between all the CPUs. It doesn't make sense to have per-CPU queues
+ * as the number of threads bears no relation to the number of CPUs.
+ *
+ * There are two queues of work items: one for slow work items, and one for
+ * very slow work items.
+ */
+static LIST_HEAD(slow_work_queue);
+static LIST_HEAD(vslow_work_queue);
+static DEFINE_SPINLOCK(slow_work_queue_lock);
+
+/*
+ * The thread controls. A variable used to signal to the threads that they
+ * should exit when the queue is empty, a waitqueue used by the threads to wait
+ * for signals, and a completion set by the last thread to exit.
+ */
+static bool slow_work_threads_should_exit;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
+static DECLARE_COMPLETION(slow_work_last_thread_exited);
+
+/*
+ * The number of users of the thread pool and its lock. Whilst this is zero we
+ * have no threads hanging around, and when this reaches zero, we wait for all
+ * active or queued work items to complete and kill all the threads we do have.
+ */
+static int slow_work_user_count;
+static DEFINE_MUTEX(slow_work_user_lock);
+
+/*
+ * Calculate the maximum number of active threads in the pool that are
+ * permitted to process very slow work items.
+ *
+ * The answer is rounded up to at least 1, but may not equal or exceed the
+ * maximum number of the threads in the pool. This means we always have at
+ * least one thread that can process slow work items, and we always have at
+ * least one thread that won't get tied up doing so.
+ */
+static unsigned slow_work_calc_vsmax(void)
+{
+ unsigned vsmax;
+
+ vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
+ vsmax /= 100;
+ vsmax = max(vsmax, 1U);
+ return min(vsmax, slow_work_max_threads - 1);
+}
+
+/*
+ * Attempt to execute stuff queued on a slow thread. Return true if we managed
+ * it, false if there was nothing to do.
+ */
+static bool slow_work_execute(void)
+{
+ struct slow_work *work = NULL;
+ unsigned vsmax;
+ bool very_slow;
+
+ vsmax = slow_work_calc_vsmax();
+
+ /* see if we can schedule a new thread to be started if we're not
+ * keeping up with the work */
+ if (!waitqueue_active(&slow_work_thread_wq) &&
+ (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
+ atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
+ !slow_work_may_not_start_new_thread)
+ slow_work_enqueue(&slow_work_new_thread);
+
+ /* find something to execute */
+ spin_lock_irq(&slow_work_queue_lock);
+ if (!list_empty(&vslow_work_queue) &&
+ atomic_read(&vslow_work_executing_count) < vsmax) {
+ work = list_entry(vslow_work_queue.next,
+ struct slow_work, link);
+ if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
+ BUG();
+ list_del_init(&work->link);
+ atomic_inc(&vslow_work_executing_count);
+ very_slow = true;
+ } else if (!list_empty(&slow_work_queue)) {
+ work = list_entry(slow_work_queue.next,
+ struct slow_work, link);
+ if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
+ BUG();
+ list_del_init(&work->link);
+ very_slow = false;
+ } else {
+ very_slow = false; /* avoid the compiler warning */
+ }
+ spin_unlock_irq(&slow_work_queue_lock);
+
+ if (!work)
+ return false;
+
+ if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
+ BUG();
+
+ work->ops->execute(work);
+
+ if (very_slow)
+ atomic_dec(&vslow_work_executing_count);
+ clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
+
+ /* if someone tried to enqueue the item whilst we were executing it,
+ * then it'll be left unenqueued to avoid multiple threads trying to
+ * execute it simultaneously
+ *
+ * there is, however, a race between us testing the pending flag and
+ * getting the spinlock, and between the enqueuer setting the pending
+ * flag and getting the spinlock, so we use a deferral bit to tell us
+ * if the enqueuer got there first
+ */
+ if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
+ spin_lock_irq(&slow_work_queue_lock);
+
+ if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
+ test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
+ goto auto_requeue;
+
+ spin_unlock_irq(&slow_work_queue_lock);
+ }
+
+ work->ops->put_ref(work);
+ return true;
+
+auto_requeue:
+ /* we must complete the enqueue operation
+ * - we transfer our ref on the item back to the appropriate queue
+ * - don't wake another thread up as we're awake already
+ */
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+ list_add_tail(&work->link, &vslow_work_queue);
+ else
+ list_add_tail(&work->link, &slow_work_queue);
+ spin_unlock_irq(&slow_work_queue_lock);
+ return true;
+}
+
+/**
+ * slow_work_enqueue - Schedule a slow work item for processing
+ * @work: The work item to queue
+ *
+ * Schedule a slow work item for processing. If the item is already undergoing
+ * execution, this guarantees not to re-enter the execution routine until the
+ * first execution finishes.
+ *
+ * The item is pinned by this function as it retains a reference to it, managed
+ * through the item operations. The item is unpinned once it has been
+ * executed.
+ *
+ * An item may hog the thread that is running it for a relatively large amount
+ * of time, sufficient, for example, to perform several lookup, mkdir, create
+ * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
+ *
+ * Conversely, if a number of items are awaiting processing, it may take some
+ * time before any given item is given attention. The number of threads in the
+ * pool may be increased to deal with demand, but only up to a limit.
+ *
+ * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
+ * the very slow queue, from which only a portion of the threads will be
+ * allowed to pick items to execute. This ensures that very slow items won't
+ * overly block ones that are just ordinarily slow.
+ *
+ * Returns 0 if successful, -EAGAIN if not.
+ */
+int slow_work_enqueue(struct slow_work *work)
+{
+ unsigned long flags;
+
+ BUG_ON(slow_work_user_count <= 0);
+ BUG_ON(!work);
+ BUG_ON(!work->ops);
+ BUG_ON(!work->ops->get_ref);
+
+ /* when honouring an enqueue request, we only promise that we will run
+ * the work function in the future; we do not promise to run it once
+ * per enqueue request
+ *
+ * we use the PENDING bit to merge together repeat requests without
+ * having to disable IRQs and take the spinlock, whilst still
+ * maintaining our promise
+ */
+ if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+ /* we promise that we will not attempt to execute the work
+ * function in more than one thread simultaneously
+ *
+ * this, however, leaves us with a problem if we're asked to
+ * enqueue the work whilst someone is executing the work
+ * function as simply queueing the work immediately means that
+ * another thread may try executing it whilst it is already
+ * under execution
+ *
+ * to deal with this, we set the ENQ_DEFERRED bit instead of
+ * enqueueing, and the thread currently executing the work
+ * function will enqueue the work item when the work function
+ * returns and it has cleared the EXECUTING bit
+ */
+ if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+ set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+ } else {
+ if (work->ops->get_ref(work) < 0)
+ goto cant_get_ref;
+ if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
+ list_add_tail(&work->link, &vslow_work_queue);
+ else
+ list_add_tail(&work->link, &slow_work_queue);
+ wake_up(&slow_work_thread_wq);
+ }
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ }
+ return 0;
+
+cant_get_ref:
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ return -EAGAIN;
+}
+EXPORT_SYMBOL(slow_work_enqueue);
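The core of the enqueue path is the bit protocol spelled out in the comments: PENDING coalesces repeated enqueue requests into one, and an enqueue that races with execution sets ENQ_DEFERRED so the executing thread requeues the item itself. The coalescing half of that idea is reusable on its own; a hedged sketch with made-up names (the deferral handling on the execute side is omitted):

struct my_item {
	unsigned long		flags;
	struct list_head	link;
};

enum { MY_PENDING, MY_EXECUTING, MY_DEFERRED };

static LIST_HEAD(my_queue);
static DEFINE_SPINLOCK(my_queue_lock);

/* at most one queueing of 'item' is ever outstanding */
static void maybe_queue(struct my_item *item)
{
	unsigned long flags;

	if (test_and_set_bit_lock(MY_PENDING, &item->flags))
		return;			/* already pending: requests merged */

	spin_lock_irqsave(&my_queue_lock, flags);
	if (test_bit(MY_EXECUTING, &item->flags))
		set_bit(MY_DEFERRED, &item->flags);	/* runner will requeue */
	else
		list_add_tail(&item->link, &my_queue);
	spin_unlock_irqrestore(&my_queue_lock, flags);
}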
+
+/*
+ * Worker thread culling algorithm
+ */
+static bool slow_work_cull_thread(void)
+{
+ unsigned long flags;
+ bool do_cull = false;
+
+ spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+ if (slow_work_cull) {
+ slow_work_cull = false;
+
+ if (list_empty(&slow_work_queue) &&
+ list_empty(&vslow_work_queue) &&
+ atomic_read(&slow_work_thread_count) >
+ slow_work_min_threads) {
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ do_cull = true;
+ }
+ }
+
+ spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+ return do_cull;
+}
+
+/*
+ * Determine if there is slow work available for dispatch
+ */
+static inline bool slow_work_available(int vsmax)
+{
+ return !list_empty(&slow_work_queue) ||
+ (!list_empty(&vslow_work_queue) &&
+ atomic_read(&vslow_work_executing_count) < vsmax);
+}
+
+/*
+ * Worker thread dispatcher
+ */
+static int slow_work_thread(void *_data)
+{
+ int vsmax;
+
+ DEFINE_WAIT(wait);
+
+ set_freezable();
+ set_user_nice(current, -5);
+
+ for (;;) {
+ vsmax = vslow_work_proportion;
+ vsmax *= atomic_read(&slow_work_thread_count);
+ vsmax /= 100;
+
+ prepare_to_wait(&slow_work_thread_wq, &wait,
+ TASK_INTERRUPTIBLE);
+ if (!freezing(current) &&
+ !slow_work_threads_should_exit &&
+ !slow_work_available(vsmax) &&
+ !slow_work_cull)
+ schedule();
+ finish_wait(&slow_work_thread_wq, &wait);
+
+ try_to_freeze();
+
+ vsmax = vslow_work_proportion;
+ vsmax *= atomic_read(&slow_work_thread_count);
+ vsmax /= 100;
+
+ if (slow_work_available(vsmax) && slow_work_execute()) {
+ cond_resched();
+ if (list_empty(&slow_work_queue) &&
+ list_empty(&vslow_work_queue) &&
+ atomic_read(&slow_work_thread_count) >
+ slow_work_min_threads)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ continue;
+ }
+
+ if (slow_work_threads_should_exit)
+ break;
+
+ if (slow_work_cull && slow_work_cull_thread())
+ break;
+ }
+
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ complete_and_exit(&slow_work_last_thread_exited, 0);
+ return 0;
+}
+
+/*
+ * Handle thread cull timer expiration
+ */
+static void slow_work_cull_timeout(unsigned long data)
+{
+ slow_work_cull = true;
+ wake_up(&slow_work_thread_wq);
+}
+
+/*
+ * Get a reference on slow work thread starter
+ */
+static int slow_work_new_thread_get_ref(struct slow_work *work)
+{
+ return 0;
+}
+
+/*
+ * Drop a reference on slow work thread starter
+ */
+static void slow_work_new_thread_put_ref(struct slow_work *work)
+{
+}
+
+/*
+ * Start a new slow work thread
+ */
+static void slow_work_new_thread_execute(struct slow_work *work)
+{
+ struct task_struct *p;
+
+ if (slow_work_threads_should_exit)
+ return;
+
+ if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
+ return;
+
+ if (!mutex_trylock(&slow_work_user_lock))
+ return;
+
+ slow_work_may_not_start_new_thread = true;
+ atomic_inc(&slow_work_thread_count);
+ p = kthread_run(slow_work_thread, NULL, "kslowd");
+ if (IS_ERR(p)) {
+ printk(KERN_DEBUG "Slow work thread pool: OOM\n");
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ BUG(); /* we're running on a slow work thread... */
+ mod_timer(&slow_work_oom_timer,
+ jiffies + SLOW_WORK_OOM_TIMEOUT);
+ } else {
+ /* ratelimit the starting of new threads */
+ mod_timer(&slow_work_oom_timer, jiffies + 1);
+ }
+
+ mutex_unlock(&slow_work_user_lock);
+}
+
+static const struct slow_work_ops slow_work_new_thread_ops = {
+ .get_ref = slow_work_new_thread_get_ref,
+ .put_ref = slow_work_new_thread_put_ref,
+ .execute = slow_work_new_thread_execute,
+};
+
+/*
+ * post-OOM new thread start suppression expiration
+ */
+static void slow_work_oom_timeout(unsigned long data)
+{
+ slow_work_may_not_start_new_thread = false;
+}
+
+#ifdef CONFIG_SYSCTL
+/*
+ * Handle adjustment of the minimum number of threads
+ */
+static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int n;
+
+ if (ret == 0) {
+ mutex_lock(&slow_work_user_lock);
+ if (slow_work_user_count > 0) {
+ /* see if we need to start or stop threads */
+ n = atomic_read(&slow_work_thread_count) -
+ slow_work_min_threads;
+
+ if (n < 0 && !slow_work_may_not_start_new_thread)
+ slow_work_enqueue(&slow_work_new_thread);
+ else if (n > 0)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ }
+ mutex_unlock(&slow_work_user_lock);
+ }
+
+ return ret;
+}
+
+/*
+ * Handle adjustment of the maximum number of threads
+ */
+static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+ int n;
+
+ if (ret == 0) {
+ mutex_lock(&slow_work_user_lock);
+ if (slow_work_user_count > 0) {
+ /* see if we need to stop threads */
+ n = slow_work_max_threads -
+ atomic_read(&slow_work_thread_count);
+
+ if (n < 0)
+ mod_timer(&slow_work_cull_timer,
+ jiffies + SLOW_WORK_CULL_TIMEOUT);
+ }
+ mutex_unlock(&slow_work_user_lock);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
+/**
+ * slow_work_register_user - Register a user of the facility
+ *
+ * Register a user of the facility, starting up the initial threads if there
+ * aren't any other users at this point. This will return 0 if successful, or
+ * an error if not.
+ */
+int slow_work_register_user(void)
+{
+ struct task_struct *p;
+ int loop;
+
+ mutex_lock(&slow_work_user_lock);
+
+ if (slow_work_user_count == 0) {
+ printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
+ init_completion(&slow_work_last_thread_exited);
+
+ slow_work_threads_should_exit = false;
+ slow_work_init(&slow_work_new_thread,
+ &slow_work_new_thread_ops);
+ slow_work_may_not_start_new_thread = false;
+ slow_work_cull = false;
+
+ /* start the minimum number of threads */
+ for (loop = 0; loop < slow_work_min_threads; loop++) {
+ atomic_inc(&slow_work_thread_count);
+ p = kthread_run(slow_work_thread, NULL, "kslowd");
+ if (IS_ERR(p))
+ goto error;
+ }
+ printk(KERN_NOTICE "Slow work thread pool: Ready\n");
+ }
+
+ slow_work_user_count++;
+ mutex_unlock(&slow_work_user_lock);
+ return 0;
+
+error:
+ if (atomic_dec_and_test(&slow_work_thread_count))
+ complete(&slow_work_last_thread_exited);
+ if (loop > 0) {
+ printk(KERN_ERR "Slow work thread pool:"
+ " Aborting startup on ENOMEM\n");
+ slow_work_threads_should_exit = true;
+ wake_up_all(&slow_work_thread_wq);
+ wait_for_completion(&slow_work_last_thread_exited);
+ printk(KERN_ERR "Slow work thread pool: Aborted\n");
+ }
+ mutex_unlock(&slow_work_user_lock);
+ return PTR_ERR(p);
+}
+EXPORT_SYMBOL(slow_work_register_user);
+
+/**
+ * slow_work_unregister_user - Unregister a user of the facility
+ *
+ * Unregister a user of the facility, killing all the threads if this was the
+ * last one.
+ */
+void slow_work_unregister_user(void)
+{
+ mutex_lock(&slow_work_user_lock);
+
+ BUG_ON(slow_work_user_count <= 0);
+
+ slow_work_user_count--;
+ if (slow_work_user_count == 0) {
+ printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
+ slow_work_threads_should_exit = true;
+ del_timer_sync(&slow_work_cull_timer);
+ del_timer_sync(&slow_work_oom_timer);
+ wake_up_all(&slow_work_thread_wq);
+ wait_for_completion(&slow_work_last_thread_exited);
+ printk(KERN_NOTICE "Slow work thread pool:"
+ " Shut down complete\n");
+ }
+
+ mutex_unlock(&slow_work_user_lock);
+}
+EXPORT_SYMBOL(slow_work_unregister_user);
+
+/*
+ * Initialise the slow work facility
+ */
+static int __init init_slow_work(void)
+{
+ unsigned nr_cpus = num_possible_cpus();
+
+ if (slow_work_max_threads < nr_cpus)
+ slow_work_max_threads = nr_cpus;
+#ifdef CONFIG_SYSCTL
+ if (slow_work_max_max_threads < nr_cpus * 2)
+ slow_work_max_max_threads = nr_cpus * 2;
+#endif
+ return 0;
+}
+
+subsys_initcall(init_slow_work);
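From a user's point of view the facility is three calls: register, give each item a struct slow_work plus ops, and enqueue. A hedged usage sketch along the lines of the API above and Documentation/slow-work.txt; the object, its kref scheme and the function names are invented for the example, and teardown ordering is glossed over:

#include <linux/slow-work.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_object {
	struct kref		ref;
	struct slow_work	work;
	/* ... state the slow operation works on ... */
};

static void my_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_object, ref));
}

static int my_get_ref(struct slow_work *work)
{
	kref_get(&container_of(work, struct my_object, work)->ref);
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	kref_put(&container_of(work, struct my_object, work)->ref, my_release);
}

static void my_execute(struct slow_work *work)
{
	/* may sleep, do I/O, take mutexes: that is what the pool is for */
}

static const struct slow_work_ops my_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static struct my_object *obj;

static int __init my_user_init(void)
{
	int ret = slow_work_register_user();

	if (ret < 0)
		return ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		slow_work_unregister_user();
		return -ENOMEM;
	}
	kref_init(&obj->ref);
	slow_work_init(&obj->work, &my_ops);

	return slow_work_enqueue(&obj->work);	/* -EAGAIN if get_ref failed */
}

Items that must go on the very-slow queue have SLOW_WORK_VERY_SLOW set on them before enqueueing, as described in the slow_work_enqueue() comment.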
diff --git a/kernel/smp.c b/kernel/smp.c
index bbedbb7efe32..858baac568ee 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -2,40 +2,82 @@
* Generic helpers for smp ipi calls
*
* (C) Jens Axboe <jens.axboe@oracle.com> 2008
- *
*/
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/cpu.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
-static LIST_HEAD(call_function_queue);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+static struct {
+ struct list_head queue;
+ spinlock_t lock;
+} call_function __cacheline_aligned_in_smp =
+ {
+ .queue = LIST_HEAD_INIT(call_function.queue),
+ .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ };
enum {
- CSD_FLAG_WAIT = 0x01,
- CSD_FLAG_ALLOC = 0x02,
- CSD_FLAG_LOCK = 0x04,
+ CSD_FLAG_LOCK = 0x01,
};
struct call_function_data {
- struct call_single_data csd;
- spinlock_t lock;
- unsigned int refs;
- struct rcu_head rcu_head;
- unsigned long cpumask_bits[];
+ struct call_single_data csd;
+ spinlock_t lock;
+ unsigned int refs;
+ cpumask_var_t cpumask;
};
struct call_single_queue {
- struct list_head list;
- spinlock_t lock;
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
+ .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
+};
+
+static int
+hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return NOTIFY_BAD;
+ break;
+
+#ifdef CONFIG_CPU_HOTPLUG
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
+ break;
+#endif
+ };
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+ .notifier_call = hotplug_cfd,
};
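call_function_data itself is now static per-CPU storage; only the embedded cpumask is dynamic, sized and freed from the CPU-hotplug notifier above (allocate at CPU_UP_PREPARE, free again if bring-up is cancelled or the CPU dies). That notifier shape is a reusable pattern for any per-CPU resource; a hedged sketch for an unrelated per-CPU buffer, names invented:

static DEFINE_PER_CPU(void *, my_cpu_buf);

static int __cpuinit my_cpu_notify(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		per_cpu(my_cpu_buf, cpu) = kmalloc_node(PAGE_SIZE, GFP_KERNEL,
							cpu_to_node(cpu));
		if (!per_cpu(my_cpu_buf, cpu))
			return NOTIFY_BAD;	/* veto the CPU coming online */
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		kfree(per_cpu(my_cpu_buf, cpu));
		per_cpu(my_cpu_buf, cpu) = NULL;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata my_cpu_nb = {
	.notifier_call = my_cpu_notify,
};

static int __init my_percpu_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	/* cover the boot CPU by hand, then let the notifier handle the rest */
	my_cpu_notify(&my_cpu_nb, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&my_cpu_nb);
	return 0;
}
early_initcall(my_percpu_init);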
static int __cpuinit init_call_single_data(void)
{
+ void *cpu = (void *)(long)smp_processor_id();
int i;
for_each_possible_cpu(i) {
@@ -44,29 +86,63 @@ static int __cpuinit init_call_single_data(void)
spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
+
+ hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
+ register_cpu_notifier(&hotplug_cfd_notifier);
+
return 0;
}
early_initcall(init_call_single_data);
-static void csd_flag_wait(struct call_single_data *data)
+/*
+ * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
+ *
+ * For non-synchronous ipi calls the csd can still be in use by the
+ * previous function call. For multi-cpu calls it's even more interesting
+ * as we'll have to ensure no other cpu is observing our csd.
+ */
+static void csd_lock_wait(struct call_single_data *data)
{
- /* Wait for response */
- do {
- if (!(data->flags & CSD_FLAG_WAIT))
- break;
+ while (data->flags & CSD_FLAG_LOCK)
cpu_relax();
- } while (1);
+}
+
+static void csd_lock(struct call_single_data *data)
+{
+ csd_lock_wait(data);
+ data->flags = CSD_FLAG_LOCK;
+
+ /*
+ * prevent CPU from reordering the above assignment
+ * to ->flags with any subsequent assignments to other
+ * fields of the specified call_single_data structure:
+ */
+ smp_mb();
+}
+
+static void csd_unlock(struct call_single_data *data)
+{
+ WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+
+ /*
+ * ensure we're all done before releasing data:
+ */
+ smp_mb();
+
+ data->flags &= ~CSD_FLAG_LOCK;
}
/*
- * Insert a previously allocated call_single_data element for execution
- * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ * Insert a previously allocated call_single_data element
+ * for execution on the given CPU. data must already have
+ * ->func, ->info, and ->flags set.
*/
-static void generic_exec_single(int cpu, struct call_single_data *data)
+static
+void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
- int wait = data->flags & CSD_FLAG_WAIT, ipi;
unsigned long flags;
+ int ipi;
spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
@@ -74,24 +150,21 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
spin_unlock_irqrestore(&dst->lock, flags);
/*
- * Make the list addition visible before sending the ipi.
+ * The list addition should be visible before sending the IPI
+ * handler locks the list to pull the entry off it because of
+ * normal cache coherency rules implied by spinlocks.
+ *
+ * If IPIs can go out of order to the cache coherency protocol
+ * in an architecture, sufficient synchronisation should be added
+ * to arch code to make it appear to obey cache coherency WRT
+ * locking and barrier primitives. Generic code isn't really
+ * equipped to do the right thing...
*/
- smp_mb();
-
if (ipi)
arch_send_call_function_single_ipi(cpu);
if (wait)
- csd_flag_wait(data);
-}
-
-static void rcu_free_call_data(struct rcu_head *head)
-{
- struct call_function_data *data;
-
- data = container_of(head, struct call_function_data, rcu_head);
-
- kfree(data);
+ csd_lock_wait(data);
}
/*
@@ -104,99 +177,83 @@ void generic_smp_call_function_interrupt(void)
int cpu = get_cpu();
/*
- * It's ok to use list_for_each_rcu() here even though we may delete
- * 'pos', since list_del_rcu() doesn't clear ->next
+ * Ensure entry is visible on call_function_queue after we have
+ * entered the IPI. See comment in smp_call_function_many.
+ * If we don't have this, then we may miss an entry on the list
+ * and never get another IPI to process it.
+ */
+ smp_mb();
+
+ /*
+ * It's ok to use list_for_each_rcu() here even though we may
+ * delete 'pos', since list_del_rcu() doesn't clear ->next
*/
- rcu_read_lock();
- list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
+ list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
+ spin_lock(&data->lock);
+ if (!cpumask_test_cpu(cpu, data->cpumask)) {
+ spin_unlock(&data->lock);
continue;
+ }
+ cpumask_clear_cpu(cpu, data->cpumask);
+ spin_unlock(&data->lock);
data->csd.func(data->csd.info);
spin_lock(&data->lock);
- cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
WARN_ON(data->refs == 0);
- data->refs--;
- refs = data->refs;
+ refs = --data->refs;
+ if (!refs) {
+ spin_lock(&call_function.lock);
+ list_del_rcu(&data->csd.list);
+ spin_unlock(&call_function.lock);
+ }
spin_unlock(&data->lock);
if (refs)
continue;
- spin_lock(&call_function_lock);
- list_del_rcu(&data->csd.list);
- spin_unlock(&call_function_lock);
-
- if (data->csd.flags & CSD_FLAG_WAIT) {
- /*
- * serialize stores to data with the flag clear
- * and wakeup
- */
- smp_wmb();
- data->csd.flags &= ~CSD_FLAG_WAIT;
- }
- if (data->csd.flags & CSD_FLAG_ALLOC)
- call_rcu(&data->rcu_head, rcu_free_call_data);
+ csd_unlock(&data->csd);
}
- rcu_read_unlock();
put_cpu();
}
/*
- * Invoked by arch to handle an IPI for call function single. Must be called
- * from the arch with interrupts disabled.
+ * Invoked by arch to handle an IPI for call function single. Must be
+ * called from the arch with interrupts disabled.
*/
void generic_smp_call_function_single_interrupt(void)
{
struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+ unsigned int data_flags;
LIST_HEAD(list);
- /*
- * Need to see other stores to list head for checking whether
- * list is empty without holding q->lock
- */
- smp_read_barrier_depends();
- while (!list_empty(&q->list)) {
- unsigned int data_flags;
-
- spin_lock(&q->lock);
- list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
-
- while (!list_empty(&list)) {
- struct call_single_data *data;
-
- data = list_entry(list.next, struct call_single_data,
- list);
- list_del(&data->list);
-
- /*
- * 'data' can be invalid after this call if
- * flags == 0 (when called through
- * generic_exec_single(), so save them away before
- * making the call.
- */
- data_flags = data->flags;
-
- data->func(data->info);
-
- if (data_flags & CSD_FLAG_WAIT) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_WAIT;
- } else if (data_flags & CSD_FLAG_LOCK) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_LOCK;
- } else if (data_flags & CSD_FLAG_ALLOC)
- kfree(data);
- }
+ spin_lock(&q->lock);
+ list_replace_init(&q->list, &list);
+ spin_unlock(&q->lock);
+
+ while (!list_empty(&list)) {
+ struct call_single_data *data;
+
+ data = list_entry(list.next, struct call_single_data, list);
+ list_del(&data->list);
+
+ /*
+ * 'data' can be invalid after this call if flags == 0
+ * (when called through generic_exec_single()),
+ * so save them away before making the call:
+ */
+ data_flags = data->flags;
+
+ data->func(data->info);
+
/*
- * See comment on outer loop
+ * Unlocked CSDs are valid through generic_exec_single():
*/
- smp_read_barrier_depends();
+ if (data_flags & CSD_FLAG_LOCK)
+ csd_unlock(data);
}
}
@@ -215,65 +272,45 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
{
- struct call_single_data d;
+ struct call_single_data d = {
+ .flags = 0,
+ };
unsigned long flags;
- /* prevent preemption and reschedule on another processor,
- as well as CPU removal */
- int me = get_cpu();
+ int this_cpu;
int err = 0;
+ /*
+ * prevent preemption and reschedule on another processor,
+ * as well as CPU removal
+ */
+ this_cpu = get_cpu();
+
/* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
- if (cpu == me) {
+ if (cpu == this_cpu) {
local_irq_save(flags);
func(info);
local_irq_restore(flags);
- } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
- struct call_single_data *data;
+ } else {
+ if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
+ struct call_single_data *data = &d;
+
+ if (!wait)
+ data = &__get_cpu_var(csd_data);
- if (!wait) {
- /*
- * We are calling a function on a single CPU
- * and we are not going to wait for it to finish.
- * We first try to allocate the data, but if we
- * fail, we fall back to use a per cpu data to pass
- * the information to that CPU. Since all callers
- * of this code will use the same data, we must
- * synchronize the callers to prevent a new caller
- * from corrupting the data before the callee
- * can access it.
- *
- * The CSD_FLAG_LOCK is used to let us know when
- * the IPI handler is done with the data.
- * The first caller will set it, and the callee
- * will clear it. The next caller must wait for
- * it to clear before we set it again. This
- * will make sure the callee is done with the
- * data before a new caller will use it.
- */
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data)
- data->flags = CSD_FLAG_ALLOC;
- else {
- data = &per_cpu(csd_data, me);
- while (data->flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->flags = CSD_FLAG_LOCK;
- }
+ csd_lock(data);
+
+ data->func = func;
+ data->info = info;
+ generic_exec_single(cpu, data, wait);
} else {
- data = &d;
- data->flags = CSD_FLAG_WAIT;
+ err = -ENXIO; /* CPU not online */
}
-
- data->func = func;
- data->info = info;
- generic_exec_single(cpu, data);
- } else {
- err = -ENXIO; /* CPU not online */
}
put_cpu();
+
return err;
}
EXPORT_SYMBOL(smp_call_function_single);
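A minimal usage sketch for the reworked single-CPU call above; the bump_counter() callback and run_on_cpu() wrapper are hypothetical names, only the smp_call_function_single() signature comes from this hunk:

#include <linux/smp.h>

/* Hypothetical callback: runs in IPI context on the target CPU, must not sleep. */
static void bump_counter(void *info)
{
	int *counter = info;

	(*counter)++;
}

/* Hypothetical wrapper: run bump_counter() on @cpu and wait for completion. */
static int run_on_cpu(int cpu)
{
	int counter = 0;

	/* wait == 1: returns only after bump_counter() has run on @cpu. */
	return smp_call_function_single(cpu, bump_counter, &counter, 1);
}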
@@ -283,23 +320,26 @@ EXPORT_SYMBOL(smp_call_function_single);
* @cpu: The CPU to run on.
* @data: Pre-allocated and setup data structure
*
- * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
- * data structure. Useful for embedding @data inside other structures, for
- * instance.
- *
+ * Like smp_call_function_single(), but allow caller to pass in a
+ * pre-allocated data structure. Useful for embedding @data inside
+ * other structures, for instance.
*/
-void __smp_call_function_single(int cpu, struct call_single_data *data)
+void __smp_call_function_single(int cpu, struct call_single_data *data,
+ int wait)
{
+ csd_lock(data);
+
/* Can deadlock when called with interrupts disabled */
- WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+ WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
- generic_exec_single(cpu, data);
+ generic_exec_single(cpu, data, wait);
}
-/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
+/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
+
#ifndef arch_send_call_function_ipi_mask
-#define arch_send_call_function_ipi_mask(maskp) \
- arch_send_call_function_ipi(*(maskp))
+# define arch_send_call_function_ipi_mask(maskp) \
+ arch_send_call_function_ipi(*(maskp))
#endif
/**
@@ -307,7 +347,8 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* If @wait is true, then returns once @func has returned. Note that @wait
* will be implicitly turned on in case of allocation failures, since
@@ -318,27 +359,27 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
* must be disabled when calling this function.
*/
void smp_call_function_many(const struct cpumask *mask,
- void (*func)(void *), void *info,
- bool wait)
+ void (*func)(void *), void *info, bool wait)
{
struct call_function_data *data;
unsigned long flags;
- int cpu, next_cpu;
+ int cpu, next_cpu, this_cpu = smp_processor_id();
/* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
- /* So, what's a CPU they want? Ignoring this one. */
+ /* So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
- if (cpu == smp_processor_id())
+ if (cpu == this_cpu)
cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
+
/* No online cpus? We're done. */
if (cpu >= nr_cpu_ids)
return;
/* Do we have another CPU which isn't us? */
next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
- if (next_cpu == smp_processor_id())
+ if (next_cpu == this_cpu)
next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
/* Fastpath: do that cpu by itself. */
@@ -347,43 +388,40 @@ void smp_call_function_many(const struct cpumask *mask,
return;
}
- data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
- if (unlikely(!data)) {
- /* Slow path. */
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- if (cpumask_test_cpu(cpu, mask))
- smp_call_function_single(cpu, func, info, wait);
- }
- return;
- }
+ data = &__get_cpu_var(cfd_data);
+ csd_lock(&data->csd);
- spin_lock_init(&data->lock);
- data->csd.flags = CSD_FLAG_ALLOC;
- if (wait)
- data->csd.flags |= CSD_FLAG_WAIT;
+ spin_lock_irqsave(&data->lock, flags);
data->csd.func = func;
data->csd.info = info;
- cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
- data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
+ cpumask_and(data->cpumask, mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, data->cpumask);
+ data->refs = cpumask_weight(data->cpumask);
- spin_lock_irqsave(&call_function_lock, flags);
- list_add_tail_rcu(&data->csd.list, &call_function_queue);
- spin_unlock_irqrestore(&call_function_lock, flags);
+ spin_lock(&call_function.lock);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+ * observing the entry in generic_smp_call_function_interrupt()
+ * will not miss any other list entries:
+ */
+ list_add_rcu(&data->csd.list, &call_function.queue);
+ spin_unlock(&call_function.lock);
+
+ spin_unlock_irqrestore(&data->lock, flags);
/*
* Make the list addition visible before sending the ipi.
+ * (IPIs must obey or appear to obey normal Linux cache
+ * coherency rules -- see comment in generic_exec_single).
*/
smp_mb();
/* Send a message to all CPUs in the map */
- arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
+ arch_send_call_function_ipi_mask(data->cpumask);
- /* optionally wait for the CPUs to complete */
+ /* Optionally wait for the CPUs to complete */
if (wait)
- csd_flag_wait(&data->csd);
+ csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
@@ -391,7 +429,8 @@ EXPORT_SYMBOL(smp_call_function_many);
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @wait: If true, wait (atomically) until function has completed
+ * on other CPUs.
*
* Returns 0.
*
@@ -407,26 +446,27 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
preempt_disable();
smp_call_function_many(cpu_online_mask, func, info, wait);
preempt_enable();
+
return 0;
}
EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function_lock);
+ spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function_lock);
+ spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function_lock);
+ spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function_lock);
+ spin_unlock_irq(&call_function.lock);
}
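The __smp_call_function_single() variant above now takes an explicit wait argument and, per its kernel-doc, is meant for embedding the call_single_data inside the caller's own structure, much as kernel/softirq.c does further down in this diff. A hedged sketch of that pattern; struct my_work and its helpers are hypothetical:

#include <linux/smp.h>

/* Hypothetical object that embeds its own call_single_data. */
struct my_work {
	struct call_single_data csd;
	int payload;
};

static void my_work_fn(void *info)
{
	struct my_work *w = info;

	w->payload++;
}

/* Fire-and-forget (wait == 0): @w must stay valid until my_work_fn() has run. */
static void kick_my_work(int cpu, struct my_work *w)
{
	w->csd.flags = 0;
	w->csd.func  = my_work_fn;
	w->csd.info  = w;

	__smp_call_function_single(cpu, &w->csd, 0);
}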
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 57d3f67f6f38..b525dd348511 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,8 +21,10 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
+#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
+#include <trace/irq.h>
#include <asm/irq.h>
/*
@@ -52,13 +54,18 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+char *softirq_to_name[NR_SOFTIRQS] = {
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+};
+
/*
* we cannot loop indefinitely here to avoid userspace starvation,
* but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
* the softirq load for us.
*/
-static inline void wakeup_softirqd(void)
+void wakeup_softirqd(void)
{
/* Interrupts are disabled: no need to stop preemption */
struct task_struct *tsk = __get_cpu_var(ksoftirqd);
@@ -79,13 +86,23 @@ static void __local_bh_disable(unsigned long ip)
WARN_ON_ONCE(in_irq());
raw_local_irq_save(flags);
- add_preempt_count(SOFTIRQ_OFFSET);
+ /*
+ * The preempt tracer hooks into add_preempt_count and will break
+ * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
+ * is set and before current->softirq_enabled is cleared.
+ * We must manually increment preempt_count here and manually
+ * call the trace_preempt_off later.
+ */
+ preempt_count() += SOFTIRQ_OFFSET;
/*
* Were softirqs turned off above:
*/
if (softirq_count() == SOFTIRQ_OFFSET)
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
+
+ if (preempt_count() == SOFTIRQ_OFFSET)
+ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
@@ -169,6 +186,9 @@ EXPORT_SYMBOL(local_bh_enable_ip);
*/
#define MAX_SOFTIRQ_RESTART 10
+DEFINE_TRACE(softirq_entry);
+DEFINE_TRACE(softirq_exit);
+
asmlinkage void __do_softirq(void)
{
struct softirq_action *h;
@@ -180,7 +200,7 @@ asmlinkage void __do_softirq(void)
account_system_vtime(current);
__local_bh_disable((unsigned long)__builtin_return_address(0));
- trace_softirq_enter();
+ lockdep_softirq_enter();
cpu = smp_processor_id();
restart:
@@ -195,12 +215,14 @@ restart:
if (pending & 1) {
int prev_count = preempt_count();
+ trace_softirq_entry(h, softirq_vec);
h->action(h);
-
+ trace_softirq_exit(h, softirq_vec);
if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %td %p"
+ printk(KERN_ERR "huh, entered softirq %td %s %p "
"with preempt_count %08x,"
" exited with %08x?\n", h - softirq_vec,
+ softirq_to_name[h - softirq_vec],
h->action, prev_count, preempt_count());
preempt_count() = prev_count;
}
@@ -220,7 +242,7 @@ restart:
if (pending)
wakeup_softirqd();
- trace_softirq_exit();
+ lockdep_softirq_exit();
account_system_vtime(current);
_local_bh_enable();
@@ -450,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t)
printk("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do
+ do {
yield();
- while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
@@ -496,7 +518,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
cp->flags = 0;
cp->priv = softirq;
- __smp_call_function_single(cpu, cp);
+ __smp_call_function_single(cpu, cp, 0);
return 0;
}
return 1;
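The softirq_to_name[] table added earlier in this file maps softirq numbers to readable names; a minimal sketch of using it for diagnostics, assuming a matching extern declaration is added to <linux/interrupt.h> (the report_softirq() helper is hypothetical):

#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Hypothetical helper: report a raised softirq by name rather than by number. */
static void report_softirq(unsigned int nr)
{
	if (nr < NR_SOFTIRQS)
		printk(KERN_DEBUG "softirq %u (%s) raised\n",
		       nr, softirq_to_name[nr]);
}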
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 85d5a2455103..88796c330838 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -166,97 +166,11 @@ void softlockup_tick(void)
}
/*
- * Have a reasonable limit on the number of tasks checked:
- */
-unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
-
-/*
- * Zero means infinite timeout - no checking done:
- */
-unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
-
-unsigned long __read_mostly sysctl_hung_task_warnings = 10;
-
-/*
- * Only do the hung-tasks check on one CPU:
- */
-static int check_cpu __read_mostly = -1;
-
-static void check_hung_task(struct task_struct *t, unsigned long now)
-{
- unsigned long switch_count = t->nvcsw + t->nivcsw;
-
- if (t->flags & PF_FROZEN)
- return;
-
- if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
- t->last_switch_count = switch_count;
- t->last_switch_timestamp = now;
- return;
- }
- if ((long)(now - t->last_switch_timestamp) <
- sysctl_hung_task_timeout_secs)
- return;
- if (!sysctl_hung_task_warnings)
- return;
- sysctl_hung_task_warnings--;
-
- /*
- * Ok, the task did not get scheduled for more than 2 minutes,
- * complain:
- */
- printk(KERN_ERR "INFO: task %s:%d blocked for more than "
- "%ld seconds.\n", t->comm, t->pid,
- sysctl_hung_task_timeout_secs);
- printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
- " disables this message.\n");
- sched_show_task(t);
- __debug_show_held_locks(t);
-
- t->last_switch_timestamp = now;
- touch_nmi_watchdog();
-
- if (softlockup_panic)
- panic("softlockup: blocked tasks");
-}
-
-/*
- * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
- * a really long time (120 seconds). If that happens, print out
- * a warning.
- */
-static void check_hung_uninterruptible_tasks(int this_cpu)
-{
- int max_count = sysctl_hung_task_check_count;
- unsigned long now = get_timestamp(this_cpu);
- struct task_struct *g, *t;
-
- /*
- * If the system crashed already then all bets are off,
- * do not report extra hung tasks:
- */
- if (test_taint(TAINT_DIE) || did_panic)
- return;
-
- read_lock(&tasklist_lock);
- do_each_thread(g, t) {
- if (!--max_count)
- goto unlock;
- /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
- if (t->state == TASK_UNINTERRUPTIBLE)
- check_hung_task(t, now);
- } while_each_thread(g, t);
- unlock:
- read_unlock(&tasklist_lock);
-}
-
-/*
* The watchdog thread - runs every second and touches the timestamp.
*/
static int watchdog(void *__bind_cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- int this_cpu = (long)__bind_cpu;
sched_setscheduler(current, SCHED_FIFO, &param);
@@ -276,11 +190,6 @@ static int watchdog(void *__bind_cpu)
if (kthread_should_stop())
break;
- if (this_cpu == check_cpu) {
- if (sysctl_hung_task_timeout_secs)
- check_hung_uninterruptible_tasks(this_cpu);
- }
-
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
@@ -312,18 +221,9 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- check_cpu = cpumask_any(cpu_online_mask);
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- if (hotcpu == check_cpu) {
- /* Pick any other online cpu. */
- check_cpu = cpumask_any_but(cpu_online_mask, hotcpu);
- }
- break;
-
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
if (!per_cpu(watchdog_task, hotcpu))
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 29ab20749dd3..7932653c4ebd 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
@@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
@@ -299,16 +301,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
- _raw_spin_lock_flags(lock, &flags);
-#endif
+ LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
+ _raw_spin_lock_flags, &flags);
return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 74541ca49536..912823e2a11b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
-static const cpumask_t *active_cpus;
+static const struct cpumask *active_cpus;
static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
diff --git a/kernel/sys.c b/kernel/sys.c
index 37f458e6882a..e7998cf31498 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -34,6 +34,7 @@
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
+#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -359,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
void __user *, arg)
{
char buffer[256];
+ int ret = 0;
/* We only trust the superuser with rebooting the system. */
if (!capable(CAP_SYS_BOOT))
@@ -396,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
kernel_halt();
unlock_kernel();
do_exit(0);
- break;
+ panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
kernel_power_off();
@@ -416,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
#ifdef CONFIG_KEXEC
case LINUX_REBOOT_CMD_KEXEC:
- {
- int ret;
- ret = kernel_kexec();
- unlock_kernel();
- return ret;
- }
+ ret = kernel_kexec();
+ break;
#endif
#ifdef CONFIG_HIBERNATION
case LINUX_REBOOT_CMD_SW_SUSPEND:
- {
- int ret = hibernate();
- unlock_kernel();
- return ret;
- }
+ ret = hibernate();
+ break;
#endif
default:
- unlock_kernel();
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
unlock_kernel();
- return 0;
+ return ret;
}
static void deferred_cad(struct work_struct *dummy)
@@ -1013,10 +1008,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
if (err)
goto out;
- if (task_pgrp(p) != pgrp) {
+ if (task_pgrp(p) != pgrp)
change_pid(p, PIDTYPE_PGID, pgrp);
- set_task_pgrp(p, pid_nr(pgrp));
- }
err = 0;
out:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2fb4246d27de..98ecf35d5bf2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -48,6 +48,7 @@
#include <linux/acpi.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
+#include <linux/slow-work.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -95,14 +96,15 @@ static int sixty = 60;
static int neg_one = -1;
#endif
-#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
-static int two = 2;
-#endif
-
static int zero;
-static int one = 1;
+static int __maybe_unused one = 1;
+static int __maybe_unused two = 2;
static unsigned long one_ul = 1;
static int one_hundred = 100;
+static int one_thousand = 1000;
+
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
@@ -827,6 +829,19 @@ static struct ctl_table kern_table[] = {
.extra1 = &neg_one,
.extra2 = &sixty,
},
+#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "hung_task_panic",
+ .data = &sysctl_hung_task_panic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
{
.ctl_name = CTL_UNNUMBERED,
.procname = "hung_task_check_count",
@@ -842,7 +857,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_hung_task_timeout_secs,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = &proc_dohung_task_timeout_secs,
.strategy = &sysctl_intvec,
},
{
@@ -902,14 +917,12 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
+#ifdef CONFIG_SLOW_WORK
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "scan_unevictable_pages",
- .data = &scan_unevictable_pages,
- .maxlen = sizeof(scan_unevictable_pages),
- .mode = 0644,
- .proc_handler = &scan_unevictable_handler,
+ .procname = "slow-work",
+ .mode = 0555,
+ .child = slow_work_sysctls,
},
#endif
/*
@@ -1008,7 +1021,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &dirty_bytes_handler,
.strategy = &sysctl_intvec,
- .extra1 = &one_ul,
+ .extra1 = &dirty_bytes_min,
},
{
.procname = "dirty_writeback_centisecs",
@@ -1022,7 +1035,7 @@ static struct ctl_table vm_table[] = {
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
.mode = 0644,
- .proc_handler = &proc_dointvec_userhz_jiffies,
+ .proc_handler = &proc_dointvec,
},
{
.ctl_name = VM_NR_PDFLUSH_THREADS,
@@ -1033,6 +1046,28 @@ static struct ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_pdflush_threads_min",
+ .data = &nr_pdflush_threads_min,
+ .maxlen = sizeof nr_pdflush_threads_min,
+ .mode = 0644 /* read-write */,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &nr_pdflush_threads_max,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_pdflush_threads_max",
+ .data = &nr_pdflush_threads_max,
+ .maxlen = sizeof nr_pdflush_threads_max,
+ .mode = 0644 /* read-write */,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &nr_pdflush_threads_min,
+ .extra2 = &one_thousand,
+ },
+ {
.ctl_name = VM_SWAPPINESS,
.procname = "swappiness",
.data = &vm_swappiness,
@@ -1272,6 +1307,16 @@ static struct ctl_table vm_table[] = {
.extra2 = &one,
},
#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "scan_unevictable_pages",
+ .data = &scan_unevictable_pages,
+ .maxlen = sizeof(scan_unevictable_pages),
+ .mode = 0644,
+ .proc_handler = &scan_unevictable_handler,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1385,10 +1430,7 @@ static struct ctl_table fs_table[] = {
.data = &lease_break_time,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
- .extra1 = &zero,
- .extra2 = &two,
+ .proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_AIO
@@ -1429,7 +1471,10 @@ static struct ctl_table fs_table[] = {
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &two,
},
#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
{
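The hung_task_panic entry above shows the pattern used for new unnumbered sysctls in this file; a hedged sketch of a comparable boolean knob (example_knob and example_table are hypothetical, while zero and one are the bound helpers already defined in kernel/sysctl.c):

#include <linux/sysctl.h>

static int example_knob;

/* Registered via register_sysctl_table(); clamped to 0..1 like hung_task_panic. */
static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{ .ctl_name = 0 }
};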
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
resumed = test_and_clear_bit(0, &watchdog_resumed);
- wdnow = watchdog->read();
+ wdnow = watchdog->read(watchdog);
wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
watchdog_last = wdnow;
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
- csnow = cs->read();
+ csnow = cs->read(cs);
if (unlikely(resumed)) {
cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
list_add(&cs->wd_list, &watchdog_list);
if (!started && watchdog) {
- watchdog_last = watchdog->read();
+ watchdog_last = watchdog->read(watchdog);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
/* Start if list is not empty */
if (!list_empty(&watchdog_list)) {
- watchdog_last = watchdog->read();
+ watchdog_last = watchdog->read(watchdog);
watchdog_timer.expires =
jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
*/
#define JIFFIES_SHIFT 8
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
{
return (cycle_t) jiffies;
}
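As jiffies_read() illustrates, a clocksource ->read() hook now receives the clocksource it belongs to; a minimal sketch of a source written against the new prototype (the example_* names are hypothetical, and mult/flags are left out for brevity):

#include <linux/clocksource.h>
#include <linux/jiffies.h>

static cycle_t example_read(struct clocksource *cs)
{
	/* Stand-in for reading a real hardware counter. */
	return (cycle_t) jiffies;
}

static struct clocksource example_clocksource = {
	.name	= "example",
	.rating	= 1,
	.read	= example_read,	/* takes the clocksource argument now */
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 8,
};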
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
for (;;) {
if (!clockevents_program_event(dev, next, ktime_get()))
return;
- tick_periodic(cpu);
+ /*
+ * Have to be careful here. If we're in oneshot mode,
+ * before we call tick_periodic() in a loop, we need
+ * to be sure we're using a real hardware clocksource.
+ * Otherwise we could get trapped in an infinite
+ * loop, as the tick_periodic() increments jiffies,
+ * which then will increment time, possibly causing
+ * the loop to trigger again and again.
+ */
+ if (timekeeping_valid_for_hres())
+ tick_periodic(cpu);
next = ktime_add(next, tick_period);
}
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
*/
static void change_clocksource(void)
{
- struct clocksource *new;
+ struct clocksource *new, *old;
new = clocksource_get_next();
@@ -191,11 +191,16 @@ static void change_clocksource(void)
clocksource_forward_now();
- new->raw_time = clock->raw_time;
+ if (clocksource_enable(new))
+ return;
+ new->raw_time = clock->raw_time;
+ old = clock;
clock = new;
+ clocksource_disable(old);
+
clock->cycle_last = 0;
- clock->cycle_last = clocksource_read(new);
+ clock->cycle_last = clocksource_read(clock);
clock->error = 0;
clock->xtime_nsec = 0;
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
ntp_init();
clock = clocksource_get_next();
+ clocksource_enable(clock);
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
clock->cycle_last = clocksource_read(clock);
diff --git a/kernel/timer.c b/kernel/timer.c
index 9b77fc9a9ac8..cffffad01c31 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
debug_object_free(timer, &timer_debug_descr);
}
-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
debug_object_init_on_stack(timer, &timer_debug_descr);
- __init_timer(timer);
+ __init_timer(timer, name, key);
}
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
void destroy_timer_on_stack(struct timer_list *timer)
{
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,28 +527,36 @@ static void __init_timer(struct timer_list *timer)
timer->start_pid = -1;
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
+ lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
- * init_timer - initialize a timer.
+ * init_timer_key - initialize a timer
* @timer: the timer to be initialized
+ * @name: name of the timer
+ * @key: lockdep class key of the fake lock used for tracking timer
+ * sync lock dependencies
*
- * init_timer() must be done to a timer prior calling *any* of the
+ * init_timer_key() must be done to a timer prior to calling *any* of the
* other timer functions.
*/
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
debug_timer_init(timer);
- __init_timer(timer);
+ __init_timer(timer, name, key);
}
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);
-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
- init_timer(timer);
+ init_timer_key(timer, name, key);
timer_set_deferrable(timer);
}
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);
static inline void detach_timer(struct timer_list *timer,
int clear_pending)
@@ -826,6 +840,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
*/
int del_timer_sync(struct timer_list *timer)
{
+#ifdef CONFIG_LOCKDEP
+ unsigned long flags;
+
+ local_irq_save(flags);
+ lock_map_acquire(&timer->lockdep_map);
+ lock_map_release(&timer->lockdep_map);
+ local_irq_restore(flags);
+#endif
+
for (;;) {
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
@@ -897,10 +920,36 @@ static inline void __run_timers(struct tvec_base *base)
set_running_timer(base, timer);
detach_timer(timer, 1);
+
spin_unlock_irq(&base->lock);
{
int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * It is permissible to free the timer from
+ * inside the function that is called from
+ * it, this we need to take into account for
+ * lockdep too. To avoid bogus "held lock
+ * freed" warnings as well as problems when
+ * looking into timer->lockdep_map, make a
+ * copy and use that here.
+ */
+ struct lockdep_map lockdep_map =
+ timer->lockdep_map;
+#endif
+ /*
+ * Couple the lock chain with the lock chain at
+ * del_timer_sync() by acquiring the lock_map
+ * around the fn() call here and in
+ * del_timer_sync().
+ */
+ lock_map_acquire(&lockdep_map);
+
fn(data);
+
+ lock_map_release(&lockdep_map);
+
if (preempt_count != preempt_count()) {
printk(KERN_ERR "huh, entered %p "
"with preempt_count %08x, exited"
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 34e707e5ab87..417d1985e299 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT
config NOP_TRACER
bool
+config HAVE_FTRACE_NMI_ENTER
+ bool
+
config HAVE_FUNCTION_TRACER
bool
@@ -31,12 +34,20 @@ config HAVE_FTRACE_MCOUNT_RECORD
config HAVE_HW_BRANCH_TRACER
bool
+config HAVE_FTRACE_SYSCALLS
+ bool
+
config TRACER_MAX_TRACE
bool
config RING_BUFFER
bool
+config FTRACE_NMI_ENTER
+ bool
+ depends on HAVE_FTRACE_NMI_ENTER
+ default y
+
config TRACING
bool
select DEBUG_FS
@@ -44,13 +55,29 @@ config TRACING
select STACKTRACE if STACKTRACE_SUPPORT
select TRACEPOINTS
select NOP_TRACER
+ select BINARY_PRINTF
+
+#
+# Minimum requirements an architecture has to meet for us to
+# be able to offer generic tracing facilities:
+#
+config TRACING_SUPPORT
+ bool
+ # PPC32 has no irqflags tracing support, but it can use most of the
+ # tracers anyway; they were tested to build and work. Note that new
+ # exceptions to this list aren't welcome; better to implement
+ # irqflags tracing for your architecture.
+ depends on TRACE_IRQFLAGS_SUPPORT || PPC32
+ depends on STACKTRACE_SUPPORT
+ default y
+
+if TRACING_SUPPORT
menu "Tracers"
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
- depends on DEBUG_KERNEL
select FRAME_POINTER
select KALLSYMS
select TRACING
@@ -72,18 +99,16 @@ config FUNCTION_GRAPH_TRACER
help
Enable the kernel to trace a function at both its return
and its entry.
- It's first purpose is to trace the duration of functions and
- draw a call graph for each thread with some informations like
- the return value.
- This is done by setting the current return address on the current
- task structure into a stack of calls.
+ Its first purpose is to trace the duration of functions and
+ draw a call graph for each thread with some information like
+ the return value. This is done by setting the current return
+ address on the current task structure into a stack of calls.
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
- depends on DEBUG_KERNEL
select TRACE_IRQFLAGS
select TRACING
select TRACER_MAX_TRACE
@@ -106,7 +131,6 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
- depends on DEBUG_KERNEL
select TRACING
select TRACER_MAX_TRACE
help
@@ -127,13 +151,13 @@ config SYSPROF_TRACER
bool "Sysprof Tracer"
depends on X86
select TRACING
+ select CONTEXT_SWITCH_TRACER
help
This tracer provides the trace needed by the 'Sysprof' userspace
tool.
config SCHED_TRACER
bool "Scheduling Latency Tracer"
- depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE
@@ -143,16 +167,30 @@ config SCHED_TRACER
config CONTEXT_SWITCH_TRACER
bool "Trace process context switches"
- depends on DEBUG_KERNEL
select TRACING
select MARKERS
help
This tracer gets called from the context switch and records
all switching of tasks.
+config EVENT_TRACER
+ bool "Trace various events in the kernel"
+ select TRACING
+ help
+ This tracer hooks into various trace points in the kernel,
+ allowing the user to pick and choose which trace points they
+ want to trace.
+
+config FTRACE_SYSCALLS
+ bool "Trace syscalls"
+ depends on HAVE_FTRACE_SYSCALLS
+ select TRACING
+ select KALLSYMS
+ help
+ Basic tracer to catch the syscall entry and exit events.
+
config BOOT_TRACER
bool "Trace boot initcalls"
- depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
help
@@ -165,13 +203,11 @@ config BOOT_TRACER
representation of the delays during initcalls - but the raw
/debug/tracing/trace text output is readable too.
- ( Note that tracing self tests can't be enabled if this tracer is
- selected, because the self-tests are an initcall as well and that
- would invalidate the boot trace. )
+ You must pass in ftrace=initcall to the kernel command line
+ to enable this on bootup.
config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
- depends on DEBUG_KERNEL
select TRACING
help
This tracer profiles all the likely and unlikely macros
@@ -224,7 +260,6 @@ config BRANCH_TRACER
config POWER_TRACER
bool "Trace power consumption behavior"
- depends on DEBUG_KERNEL
depends on X86
select TRACING
help
@@ -236,7 +271,6 @@ config POWER_TRACER
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
- depends on DEBUG_KERNEL
select FUNCTION_TRACER
select STACKTRACE
select KALLSYMS
@@ -266,11 +300,66 @@ config HW_BRANCH_TRACER
This tracer records all branches on the system in a circular
buffer giving access to the last N branches for each cpu.
+config KMEMTRACE
+ bool "Trace SLAB allocations"
+ select TRACING
+ help
+ kmemtrace provides tracing for slab allocator functions, such as
+ kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
+ data is then fed to the userspace application in order to analyse
+ allocation hotspots, internal fragmentation and so on, making it
+ possible to see how well an allocator performs, as well as debug
+ and profile kernel code.
+
+ This requires a userspace application to use it. See
+ Documentation/trace/kmemtrace.txt for more information.
+
+ Saying Y will make the kernel somewhat larger and slower. However,
+ if you disable kmemtrace at run-time or boot-time, the performance
+ impact is minimal (depending on the arch the kernel is built for).
+
+ If unsure, say N.
+
+config WORKQUEUE_TRACER
+ bool "Trace workqueues"
+ select TRACING
+ help
+ The workqueue tracer provides some statistical information
+ about each cpu workqueue thread, such as the number of
+ works inserted and executed since their creation. It can help
+ to evaluate the amount of work each of them has to perform.
+ For example, it can help a developer decide whether to use
+ a per cpu workqueue instead of a singlethreaded one.
+
+config BLK_DEV_IO_TRACE
+ bool "Support for tracing block io actions"
+ depends on SYSFS
+ depends on BLOCK
+ select RELAY
+ select DEBUG_FS
+ select TRACEPOINTS
+ select TRACING
+ select STACKTRACE
+ help
+ Say Y here if you want to be able to trace the block layer actions
+ on a given queue. Tracing allows you to see any traffic happening
+ on a block device queue. For more information (and the userspace
+ support tools needed), fetch the blktrace tools from:
+
+ git://git.kernel.dk/blktrace.git
+
+ Tracing also is possible using the ftrace interface, e.g.:
+
+ echo 1 > /sys/block/sda/sda1/trace/enable
+ echo blk > /sys/kernel/debug/tracing/current_tracer
+ cat /sys/kernel/debug/tracing/trace_pipe
+
+ If unsure, say N.
+
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
- depends on DEBUG_KERNEL
default y
help
This option will modify all the calls to ftrace dynamically
@@ -296,7 +385,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
- depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
+ depends on TRACING
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
@@ -306,7 +395,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE
bool "Memory mapped IO tracing"
- depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+ depends on HAVE_MMIOTRACE_SUPPORT && PCI
select TRACING
help
Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -314,7 +403,7 @@ config MMIOTRACE
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
- See Documentation/tracers/mmiotrace.txt.
+ See Documentation/trace/mmiotrace.txt.
If you are not helping to develop drivers, say N.
config MMIOTRACE_TEST
@@ -328,3 +417,6 @@ config MMIOTRACE_TEST
Say N, unless you absolutely know what you are doing.
endmenu
+
+endif # TRACING_SUPPORT
+
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 349d5a93653f..2630f5121ec1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -19,6 +19,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
obj-$(CONFIG_TRACING) += trace.o
+obj-$(CONFIG_TRACING) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_stat.o
+obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
@@ -33,5 +37,14 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
+obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
+obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events.o
+obj-$(CONFIG_EVENT_TRACER) += events.o
+obj-$(CONFIG_EVENT_TRACER) += trace_export.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
+obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
new file mode 100644
index 000000000000..921ef5d1f0ba
--- /dev/null
+++ b/kernel/trace/blktrace.c
@@ -0,0 +1,1550 @@
+/*
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <trace/block.h>
+#include <linux/uaccess.h>
+#include "trace_output.h"
+
+static unsigned int blktrace_seq __read_mostly = 1;
+
+static struct trace_array *blk_tr;
+static bool blk_tracer_enabled __read_mostly;
+
+/* Select an alternative, minimalistic output than the original one */
+#define TRACE_BLK_OPT_CLASSIC 0x1
+
+static struct tracer_opt blk_tracer_opts[] = {
+ /* Default disable the minimalistic output */
+ { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
+ { }
+};
+
+static struct tracer_flags blk_tracer_flags = {
+ .val = 0,
+ .opts = blk_tracer_opts,
+};
+
+/* Global reference count of probes */
+static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+
+static void blk_register_tracepoints(void);
+static void blk_unregister_tracepoints(void);
+
+/*
+ * Send out a notify message.
+ */
+static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+ const void *data, size_t len)
+{
+ struct blk_io_trace *t;
+ struct ring_buffer_event *event = NULL;
+ int pc = 0;
+ int cpu = smp_processor_id();
+ bool blk_tracer = blk_tracer_enabled;
+
+ if (blk_tracer) {
+ pc = preempt_count();
+ event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ sizeof(*t) + len,
+ 0, pc);
+ if (!event)
+ return;
+ t = ring_buffer_event_data(event);
+ goto record_it;
+ }
+
+ if (!bt->rchan)
+ return;
+
+ t = relay_reserve(bt->rchan, sizeof(*t) + len);
+ if (t) {
+ t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+ t->time = ktime_to_ns(ktime_get());
+record_it:
+ t->device = bt->dev;
+ t->action = action;
+ t->pid = pid;
+ t->cpu = cpu;
+ t->pdu_len = len;
+ memcpy((void *) t + sizeof(*t), data, len);
+
+ if (blk_tracer)
+ trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ }
+}
+
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+ tsk->btrace_seq = blktrace_seq;
+ trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+}
+
+static void trace_note_time(struct blk_trace *bt)
+{
+ struct timespec now;
+ unsigned long flags;
+ u32 words[2];
+
+ getnstimeofday(&now);
+ words[0] = now.tv_sec;
+ words[1] = now.tv_nsec;
+
+ local_irq_save(flags);
+ trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+ local_irq_restore(flags);
+}
+
+void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
+{
+ int n;
+ va_list args;
+ unsigned long flags;
+ char *buf;
+
+ if (unlikely(bt->trace_state != Blktrace_running &&
+ !blk_tracer_enabled))
+ return;
+
+ local_irq_save(flags);
+ buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+ va_start(args, fmt);
+ n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
+ va_end(args);
+
+ trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__trace_note_message);
+
+static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
+ pid_t pid)
+{
+ if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+ return 1;
+ if (sector < bt->start_lba || sector > bt->end_lba)
+ return 1;
+ if (bt->pid && pid != bt->pid)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Data direction bit lookup
+ */
+static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
+ BLK_TC_ACT(BLK_TC_WRITE) };
+
+/* The ilog2() calls fall out because they're constant */
+#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
+ (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace structure and places it in a per-cpu subbuffer.
+ */
+static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+ struct task_struct *tsk = current;
+ struct ring_buffer_event *event = NULL;
+ struct blk_io_trace *t;
+ unsigned long flags = 0;
+ unsigned long *sequence;
+ pid_t pid;
+ int cpu, pc = 0;
+ bool blk_tracer = blk_tracer_enabled;
+
+ if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
+ return;
+
+ what |= ddir_act[rw & WRITE];
+ what |= MASK_TC_BIT(rw, BARRIER);
+ what |= MASK_TC_BIT(rw, SYNCIO);
+ what |= MASK_TC_BIT(rw, AHEAD);
+ what |= MASK_TC_BIT(rw, META);
+ what |= MASK_TC_BIT(rw, DISCARD);
+
+ pid = tsk->pid;
+ if (unlikely(act_log_check(bt, what, sector, pid)))
+ return;
+ cpu = raw_smp_processor_id();
+
+ if (blk_tracer) {
+ tracing_record_cmdline(current);
+
+ pc = preempt_count();
+ event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
+ sizeof(*t) + pdu_len,
+ 0, pc);
+ if (!event)
+ return;
+ t = ring_buffer_event_data(event);
+ goto record_it;
+ }
+
+ /*
+ * A word about the locking here - we disable interrupts to reserve
+ * some space in the relay per-cpu buffer, to prevent an irq
+ * from coming in and stepping on our toes.
+ */
+ local_irq_save(flags);
+
+ if (unlikely(tsk->btrace_seq != blktrace_seq))
+ trace_note_tsk(bt, tsk);
+
+ t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+ if (t) {
+ sequence = per_cpu_ptr(bt->sequence, cpu);
+
+ t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+ t->sequence = ++(*sequence);
+ t->time = ktime_to_ns(ktime_get());
+record_it:
+ /*
+ * These two are not needed in ftrace as they are in the
+ * generic trace_entry, filled by tracing_generic_entry_update,
+ * but for the trace_event->bin() synthesizer benefit we do it
+ * here too.
+ */
+ t->cpu = cpu;
+ t->pid = pid;
+
+ t->sector = sector;
+ t->bytes = bytes;
+ t->action = what;
+ t->device = bt->dev;
+ t->error = error;
+ t->pdu_len = pdu_len;
+
+ if (pdu_len)
+ memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+
+ if (blk_tracer) {
+ trace_buffer_unlock_commit(blk_tr, event, 0, pc);
+ return;
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static struct dentry *blk_tree_root;
+static DEFINE_MUTEX(blk_tree_mutex);
+
+static void blk_trace_free(struct blk_trace *bt)
+{
+ debugfs_remove(bt->msg_file);
+ debugfs_remove(bt->dropped_file);
+ relay_close(bt->rchan);
+ free_percpu(bt->sequence);
+ free_percpu(bt->msg_data);
+ kfree(bt);
+}
+
+static void blk_trace_cleanup(struct blk_trace *bt)
+{
+ blk_trace_free(bt);
+ if (atomic_dec_and_test(&blk_probes_ref))
+ blk_unregister_tracepoints();
+}
+
+int blk_trace_remove(struct request_queue *q)
+{
+ struct blk_trace *bt;
+
+ bt = xchg(&q->blk_trace, NULL);
+ if (!bt)
+ return -EINVAL;
+
+ if (bt->trace_state != Blktrace_running)
+ blk_trace_cleanup(bt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_trace_remove);
+
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct blk_trace *bt = filp->private_data;
+ char buf[16];
+
+ snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static const struct file_operations blk_dropped_fops = {
+ .owner = THIS_MODULE,
+ .open = blk_dropped_open,
+ .read = blk_dropped_read,
+};
+
+static int blk_msg_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char *msg;
+ struct blk_trace *bt;
+
+ if (count >= BLK_TN_MAX_MSG)
+ return -EINVAL;
+
+ msg = kmalloc(count + 1, GFP_KERNEL);
+ if (msg == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(msg, buffer, count)) {
+ kfree(msg);
+ return -EFAULT;
+ }
+
+ msg[count] = '\0';
+ bt = filp->private_data;
+ __trace_note_message(bt, "%s", msg);
+ kfree(msg);
+
+ return count;
+}
+
+static const struct file_operations blk_msg_fops = {
+ .owner = THIS_MODULE,
+ .open = blk_msg_open,
+ .write = blk_msg_write,
+};
+
+/*
+ * Keep track of how many times we encountered a full subbuffer, to aid
+ * the user space app in telling how many lost events there were.
+ */
+static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+ void *prev_subbuf, size_t prev_padding)
+{
+ struct blk_trace *bt;
+
+ if (!relay_buf_full(buf))
+ return 1;
+
+ bt = buf->chan->private_data;
+ atomic_inc(&bt->dropped);
+ return 0;
+}
+
+static int blk_remove_buf_file_callback(struct dentry *dentry)
+{
+ struct dentry *parent = dentry->d_parent;
+ debugfs_remove(dentry);
+
+ /*
+ * this will fail for all but the last file, but that is ok. what we
+ * care about is the top level buts->name directory going away, when
+ * the last trace file is gone. Then we don't have to rmdir() that
+ * manually on trace stop, so it nicely solves the issue with
+ * force killing of running traces.
+ */
+
+ debugfs_remove(parent);
+ return 0;
+}
+
+static struct dentry *blk_create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+static struct rchan_callbacks blk_relay_callbacks = {
+ .subbuf_start = blk_subbuf_start_callback,
+ .create_buf_file = blk_create_buf_file_callback,
+ .remove_buf_file = blk_remove_buf_file_callback,
+};
+
+/*
+ * Setup everything required to start tracing
+ */
+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct blk_user_trace_setup *buts)
+{
+ struct blk_trace *old_bt, *bt = NULL;
+ struct dentry *dir = NULL;
+ int ret, i;
+
+ if (!buts->buf_size || !buts->buf_nr)
+ return -EINVAL;
+
+ strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
+ buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
+
+ /*
+ * some device names have larger paths - convert the slashes
+ * to underscores for this to work as expected
+ */
+ for (i = 0; i < strlen(buts->name); i++)
+ if (buts->name[i] == '/')
+ buts->name[i] = '_';
+
+ bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+ if (!bt)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ bt->sequence = alloc_percpu(unsigned long);
+ if (!bt->sequence)
+ goto err;
+
+ bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
+ if (!bt->msg_data)
+ goto err;
+
+ ret = -ENOENT;
+
+ mutex_lock(&blk_tree_mutex);
+ if (!blk_tree_root) {
+ blk_tree_root = debugfs_create_dir("block", NULL);
+ if (!blk_tree_root) {
+ mutex_unlock(&blk_tree_mutex);
+ goto err;
+ }
+ }
+ mutex_unlock(&blk_tree_mutex);
+
+ dir = debugfs_create_dir(buts->name, blk_tree_root);
+
+ if (!dir)
+ goto err;
+
+ bt->dir = dir;
+ bt->dev = dev;
+ atomic_set(&bt->dropped, 0);
+
+ ret = -EIO;
+ bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
+ &blk_dropped_fops);
+ if (!bt->dropped_file)
+ goto err;
+
+ bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
+ if (!bt->msg_file)
+ goto err;
+
+ bt->rchan = relay_open("trace", dir, buts->buf_size,
+ buts->buf_nr, &blk_relay_callbacks, bt);
+ if (!bt->rchan)
+ goto err;
+
+ bt->act_mask = buts->act_mask;
+ if (!bt->act_mask)
+ bt->act_mask = (u16) -1;
+
+ bt->start_lba = buts->start_lba;
+ bt->end_lba = buts->end_lba;
+ if (!bt->end_lba)
+ bt->end_lba = -1ULL;
+
+ bt->pid = buts->pid;
+ bt->trace_state = Blktrace_setup;
+
+ ret = -EBUSY;
+ old_bt = xchg(&q->blk_trace, bt);
+ if (old_bt) {
+ (void) xchg(&q->blk_trace, old_bt);
+ goto err;
+ }
+
+ if (atomic_inc_return(&blk_probes_ref) == 1)
+ blk_register_tracepoints();
+
+ return 0;
+err:
+ blk_trace_free(bt);
+ return ret;
+}
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ char __user *arg)
+{
+ struct blk_user_trace_setup buts;
+ int ret;
+
+ ret = copy_from_user(&buts, arg, sizeof(buts));
+ if (ret)
+ return -EFAULT;
+
+ ret = do_blk_trace_setup(q, name, dev, &buts);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(arg, &buts, sizeof(buts)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_trace_setup);
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+ int ret;
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt == NULL)
+ return -EINVAL;
+
+ /*
+ * For starting a trace, we can transition from a setup or stopped
+ * trace. For stopping a trace, the state must be running
+ */
+ ret = -EINVAL;
+ if (start) {
+ if (bt->trace_state == Blktrace_setup ||
+ bt->trace_state == Blktrace_stopped) {
+ blktrace_seq++;
+ smp_mb();
+ bt->trace_state = Blktrace_running;
+
+ trace_note_time(bt);
+ ret = 0;
+ }
+ } else {
+ if (bt->trace_state == Blktrace_running) {
+ bt->trace_state = Blktrace_stopped;
+ relay_flush(bt->rchan);
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blk_trace_startstop);
+
+/**
+ * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * @bdev: the block device
+ * @cmd: the ioctl cmd
+ * @arg: the argument data, if any
+ *
+ **/
+int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+{
+ struct request_queue *q;
+ int ret, start = 0;
+ char b[BDEVNAME_SIZE];
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ mutex_lock(&bdev->bd_mutex);
+
+ switch (cmd) {
+ case BLKTRACESETUP:
+ bdevname(bdev, b);
+ ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
+ break;
+ case BLKTRACESTART:
+ start = 1;
+ case BLKTRACESTOP:
+ ret = blk_trace_startstop(q, start);
+ break;
+ case BLKTRACETEARDOWN:
+ ret = blk_trace_remove(q);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ mutex_unlock(&bdev->bd_mutex);
+ return ret;
+}
+
+/**
+ * blk_trace_shutdown: - stop and cleanup trace structures
+ * @q: the request queue associated with the device
+ *
+ **/
+void blk_trace_shutdown(struct request_queue *q)
+{
+ if (q->blk_trace) {
+ blk_trace_startstop(q, 0);
+ blk_trace_remove(q);
+ }
+}
+
+/*
+ * blktrace probes
+ */
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q: queue the io is for
+ * @rq: the source request
+ * @what: the action
+ *
+ * Description:
+ * Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+ u32 what)
+{
+ struct blk_trace *bt = q->blk_trace;
+ int rw = rq->cmd_flags & 0x03;
+
+ if (likely(!bt))
+ return;
+
+ if (blk_discard_rq(rq))
+ rw |= (1 << BIO_RW_DISCARD);
+
+ if (blk_pc_request(rq)) {
+ what |= BLK_TC_ACT(BLK_TC_PC);
+ __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
+ rq->cmd_len, rq->cmd);
+ } else {
+ what |= BLK_TC_ACT(BLK_TC_FS);
+ __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+ rw, what, rq->errors, 0, NULL);
+ }
+}
+
+static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
+{
+ blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+}
+
+static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
+{
+ blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+}
+
+static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
+{
+ blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+}
+
+static void blk_add_trace_rq_requeue(struct request_queue *q,
+ struct request *rq)
+{
+ blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+}
+
+static void blk_add_trace_rq_complete(struct request_queue *q,
+ struct request *rq)
+{
+ blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q: queue the io is for
+ * @bio: the source bio
+ * @what: the action
+ *
+ * Description:
+ * Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+ u32 what)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (likely(!bt))
+ return;
+
+ __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
+ !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
+{
+ blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+}
+
+static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
+{
+ blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+}
+
+static void blk_add_trace_bio_backmerge(struct request_queue *q,
+ struct bio *bio)
+{
+ blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+}
+
+static void blk_add_trace_bio_frontmerge(struct request_queue *q,
+ struct bio *bio)
+{
+ blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+}
+
+static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
+{
+ blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+}
+
+static void blk_add_trace_getrq(struct request_queue *q,
+ struct bio *bio, int rw)
+{
+ if (bio)
+ blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+ else {
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt)
+ __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+ }
+}
+
+
+static void blk_add_trace_sleeprq(struct request_queue *q,
+ struct bio *bio, int rw)
+{
+ if (bio)
+ blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+ else {
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt)
+ __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
+ 0, 0, NULL);
+ }
+}
+
+static void blk_add_trace_plug(struct request_queue *q)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt)
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+}
+
+static void blk_add_trace_unplug_io(struct request_queue *q)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt) {
+ unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+ __be64 rpdu = cpu_to_be64(pdu);
+
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
+ sizeof(rpdu), &rpdu);
+ }
+}
+
+static void blk_add_trace_unplug_timer(struct request_queue *q)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt) {
+ unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
+ __be64 rpdu = cpu_to_be64(pdu);
+
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
+ sizeof(rpdu), &rpdu);
+ }
+}
+
+static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
+ unsigned int pdu)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (bt) {
+ __be64 rpdu = cpu_to_be64(pdu);
+
+ __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+ BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+ sizeof(rpdu), &rpdu);
+ }
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q: queue the io is for
+ * @bio: the source bio
+ * @dev: target device
+ * @from: source sector
+ * @to: target sector
+ *
+ * Description:
+ *     Device mapper or raid targets sometimes need to split a bio because
+ * it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+ dev_t dev, sector_t from, sector_t to)
+{
+ struct blk_trace *bt = q->blk_trace;
+ struct blk_io_trace_remap r;
+
+ if (likely(!bt))
+ return;
+
+ r.device = cpu_to_be32(dev);
+ r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
+ r.sector = cpu_to_be64(to);
+
+ __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
+ !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q: queue the io is for
+ * @rq: io request
+ * @data: driver-specific data
+ * @len: length of driver-specific data
+ *
+ * Description:
+ * Some drivers might want to write driver-specific data per request.
+ *
+ **/
+void blk_add_driver_data(struct request_queue *q,
+ struct request *rq,
+ void *data, size_t len)
+{
+ struct blk_trace *bt = q->blk_trace;
+
+ if (likely(!bt))
+ return;
+
+ if (blk_pc_request(rq))
+ __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+ rq->errors, len, data);
+ else
+ __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+ 0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+EXPORT_SYMBOL_GPL(blk_add_driver_data);
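/*
 * Editor's illustrative sketch, not part of this patch: a driver that wants
 * a per-request binary payload in the trace stream could call
 * blk_add_driver_data() from its completion path.  The struct, field and
 * function names below are hypothetical.
 */
#if 0	/* example only */
struct example_drv_stats {
	__u32 retries;
	__u32 fw_status;
};

static void example_complete_rq(struct request_queue *q, struct request *rq)
{
	struct example_drv_stats stats = {
		.retries   = 0,	/* would be filled in from driver state */
		.fw_status = 0,
	};

	/* emits a BLK_TA_DRV_DATA event carrying the raw struct bytes */
	blk_add_driver_data(q, rq, &stats, sizeof(stats));
}
#endif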
+
+static void blk_register_tracepoints(void)
+{
+ int ret;
+
+ ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
+ WARN_ON(ret);
+ ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
+ WARN_ON(ret);
+ ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
+ WARN_ON(ret);
+ ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+ WARN_ON(ret);
+ ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
+ WARN_ON(ret);
+ ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+ WARN_ON(ret);
+ ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
+ WARN_ON(ret);
+ ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+ WARN_ON(ret);
+ ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+ WARN_ON(ret);
+ ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
+ WARN_ON(ret);
+ ret = register_trace_block_getrq(blk_add_trace_getrq);
+ WARN_ON(ret);
+ ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
+ WARN_ON(ret);
+ ret = register_trace_block_plug(blk_add_trace_plug);
+ WARN_ON(ret);
+ ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+ WARN_ON(ret);
+ ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
+ WARN_ON(ret);
+ ret = register_trace_block_split(blk_add_trace_split);
+ WARN_ON(ret);
+ ret = register_trace_block_remap(blk_add_trace_remap);
+ WARN_ON(ret);
+}
+
+static void blk_unregister_tracepoints(void)
+{
+ unregister_trace_block_remap(blk_add_trace_remap);
+ unregister_trace_block_split(blk_add_trace_split);
+ unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
+ unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
+ unregister_trace_block_plug(blk_add_trace_plug);
+ unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
+ unregister_trace_block_getrq(blk_add_trace_getrq);
+ unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
+ unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
+ unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
+ unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
+ unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
+ unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
+ unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
+ unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
+ unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
+ unregister_trace_block_rq_abort(blk_add_trace_rq_abort);
+
+ tracepoint_synchronize_unregister();
+}
+
+/*
+ * struct blk_io_tracer formatting routines
+ */
+
+static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
+{
+ int i = 0;
+ int tc = t->action >> BLK_TC_SHIFT;
+
+ if (t->action == BLK_TN_MESSAGE) {
+ rwbs[i++] = 'N';
+ goto out;
+ }
+
+ if (tc & BLK_TC_DISCARD)
+ rwbs[i++] = 'D';
+ else if (tc & BLK_TC_WRITE)
+ rwbs[i++] = 'W';
+ else if (t->bytes)
+ rwbs[i++] = 'R';
+ else
+ rwbs[i++] = 'N';
+
+ if (tc & BLK_TC_AHEAD)
+ rwbs[i++] = 'A';
+ if (tc & BLK_TC_BARRIER)
+ rwbs[i++] = 'B';
+ if (tc & BLK_TC_SYNC)
+ rwbs[i++] = 'S';
+ if (tc & BLK_TC_META)
+ rwbs[i++] = 'M';
+out:
+ rwbs[i] = '\0';
+}
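/*
 * Editor's note (illustrative, not part of this patch): for example, a
 * synchronous barrier write decodes to "WBS", a plain discard to "D", and a
 * BLK_TN_MESSAGE entry (or a zero-byte, non-write trace) to "N".
 */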
+
+static inline
+const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
+{
+ return (const struct blk_io_trace *)ent;
+}
+
+static inline const void *pdu_start(const struct trace_entry *ent)
+{
+ return te_blk_io_trace(ent) + 1;
+}
+
+static inline u32 t_sec(const struct trace_entry *ent)
+{
+ return te_blk_io_trace(ent)->bytes >> 9;
+}
+
+static inline unsigned long long t_sector(const struct trace_entry *ent)
+{
+ return te_blk_io_trace(ent)->sector;
+}
+
+static inline __u16 t_error(const struct trace_entry *ent)
+{
+ return te_blk_io_trace(ent)->error;
+}
+
+static __u64 get_pdu_int(const struct trace_entry *ent)
+{
+ const __u64 *val = pdu_start(ent);
+ return be64_to_cpu(*val);
+}
+
+static void get_pdu_remap(const struct trace_entry *ent,
+ struct blk_io_trace_remap *r)
+{
+ const struct blk_io_trace_remap *__r = pdu_start(ent);
+ __u64 sector = __r->sector;
+
+ r->device = be32_to_cpu(__r->device);
+ r->device_from = be32_to_cpu(__r->device_from);
+ r->sector = be64_to_cpu(sector);
+}
+
+typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
+
+static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
+{
+ char rwbs[6];
+ unsigned long long ts = iter->ts;
+ unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
+ unsigned secs = (unsigned long)ts;
+ const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
+
+ fill_rwbs(rwbs, t);
+
+ return trace_seq_printf(&iter->seq,
+ "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
+ MAJOR(t->device), MINOR(t->device), iter->cpu,
+ secs, nsec_rem, iter->ent->pid, act, rwbs);
+}
+
+static int blk_log_action(struct trace_iterator *iter, const char *act)
+{
+ char rwbs[6];
+ const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
+
+ fill_rwbs(rwbs, t);
+ return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
+ MAJOR(t->device), MINOR(t->device), act, rwbs);
+}
+
+static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
+{
+ char cmd[TASK_COMM_LEN];
+
+ trace_find_cmdline(ent->pid, cmd);
+
+ if (t_sec(ent))
+ return trace_seq_printf(s, "%llu + %u [%s]\n",
+ t_sector(ent), t_sec(ent), cmd);
+ return trace_seq_printf(s, "[%s]\n", cmd);
+}
+
+static int blk_log_with_error(struct trace_seq *s,
+ const struct trace_entry *ent)
+{
+ if (t_sec(ent))
+ return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
+ t_sec(ent), t_error(ent));
+ return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
+}
+
+static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
+{
+ struct blk_io_trace_remap r = { .device = 0, };
+
+ get_pdu_remap(ent, &r);
+ return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
+ t_sector(ent),
+ t_sec(ent), MAJOR(r.device), MINOR(r.device),
+ (unsigned long long)r.sector);
+}
+
+static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
+{
+ char cmd[TASK_COMM_LEN];
+
+ trace_find_cmdline(ent->pid, cmd);
+
+ return trace_seq_printf(s, "[%s]\n", cmd);
+}
+
+static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
+{
+ char cmd[TASK_COMM_LEN];
+
+ trace_find_cmdline(ent->pid, cmd);
+
+ return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
+}
+
+static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
+{
+ char cmd[TASK_COMM_LEN];
+
+ trace_find_cmdline(ent->pid, cmd);
+
+ return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
+ get_pdu_int(ent), cmd);
+}
+
+static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
+{
+ int ret;
+ const struct blk_io_trace *t = te_blk_io_trace(ent);
+
+ ret = trace_seq_putmem(s, t + 1, t->pdu_len);
+ if (ret)
+ return trace_seq_putc(s, '\n');
+ return ret;
+}
+
+/*
+ * struct tracer operations
+ */
+
+static void blk_tracer_print_header(struct seq_file *m)
+{
+ if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ return;
+ seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
+ "# | | | | | |\n");
+}
+
+static void blk_tracer_start(struct trace_array *tr)
+{
+ blk_tracer_enabled = true;
+ trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
+}
+
+static int blk_tracer_init(struct trace_array *tr)
+{
+ blk_tr = tr;
+ blk_tracer_start(tr);
+ return 0;
+}
+
+static void blk_tracer_stop(struct trace_array *tr)
+{
+ blk_tracer_enabled = false;
+ trace_flags |= TRACE_ITER_CONTEXT_INFO;
+}
+
+static void blk_tracer_reset(struct trace_array *tr)
+{
+ blk_tracer_stop(tr);
+}
+
+static const struct {
+ const char *act[2];
+ int (*print)(struct trace_seq *s, const struct trace_entry *ent);
+} what2act[] = {
+ [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
+ [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
+ [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
+ [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
+ [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
+ [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
+ [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
+ [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
+ [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
+ [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
+ [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
+ [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
+ [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
+ [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
+ [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
+};
+
+static enum print_line_t print_one_line(struct trace_iterator *iter,
+ bool classic)
+{
+ struct trace_seq *s = &iter->seq;
+ const struct blk_io_trace *t;
+ u16 what;
+ int ret;
+ bool long_act;
+ blk_log_action_t *log_action;
+
+ t = te_blk_io_trace(iter->ent);
+ what = t->action & ((1 << BLK_TC_SHIFT) - 1);
+ long_act = !!(trace_flags & TRACE_ITER_VERBOSE);
+ log_action = classic ? &blk_log_action_classic : &blk_log_action;
+
+ if (t->action == BLK_TN_MESSAGE) {
+ ret = log_action(iter, long_act ? "message" : "m");
+ if (ret)
+ ret = blk_log_msg(s, iter->ent);
+ goto out;
+ }
+
+ if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
+ ret = trace_seq_printf(s, "Bad pc action %x\n", what);
+ else {
+ ret = log_action(iter, what2act[what].act[long_act]);
+ if (ret)
+ ret = what2act[what].print(s, iter->ent);
+ }
+out:
+ return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
+ int flags)
+{
+ if (!trace_print_context(iter))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return print_one_line(iter, false);
+}
+
+static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
+ const int offset = offsetof(struct blk_io_trace, sector);
+ struct blk_io_trace old = {
+ .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
+ .time = iter->ts,
+ };
+
+ if (!trace_seq_putmem(s, &old, offset))
+ return 0;
+ return trace_seq_putmem(s, &t->sector,
+ sizeof(old) - offset + t->pdu_len);
+}
+
+static enum print_line_t
+blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
+{
+ return blk_trace_synthesize_old_trace(iter) ?
+ TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
+{
+ if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
+ return TRACE_TYPE_UNHANDLED;
+
+ return print_one_line(iter, true);
+}
+
+static struct tracer blk_tracer __read_mostly = {
+ .name = "blk",
+ .init = blk_tracer_init,
+ .reset = blk_tracer_reset,
+ .start = blk_tracer_start,
+ .stop = blk_tracer_stop,
+ .print_header = blk_tracer_print_header,
+ .print_line = blk_tracer_print_line,
+ .flags = &blk_tracer_flags,
+};
+
+static struct trace_event trace_blk_event = {
+ .type = TRACE_BLK,
+ .trace = blk_trace_event_print,
+ .binary = blk_trace_event_print_binary,
+};
+
+static int __init init_blk_tracer(void)
+{
+ if (!register_ftrace_event(&trace_blk_event)) {
+ pr_warning("Warning: could not register block events\n");
+ return 1;
+ }
+
+ if (register_tracer(&blk_tracer) != 0) {
+ pr_warning("Warning: could not register the block tracer\n");
+ unregister_ftrace_event(&trace_blk_event);
+ return 1;
+ }
+
+ return 0;
+}
+
+device_initcall(init_blk_tracer);
+
+static int blk_trace_remove_queue(struct request_queue *q)
+{
+ struct blk_trace *bt;
+
+ bt = xchg(&q->blk_trace, NULL);
+ if (bt == NULL)
+ return -EINVAL;
+
+ if (atomic_dec_and_test(&blk_probes_ref))
+ blk_unregister_tracepoints();
+
+ blk_trace_free(bt);
+ return 0;
+}
+
+/*
+ * Set up everything required to start tracing
+ */
+static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
+{
+ struct blk_trace *old_bt, *bt = NULL;
+ int ret = -ENOMEM;
+
+ bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+ if (!bt)
+ return -ENOMEM;
+
+ bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
+ if (!bt->msg_data)
+ goto free_bt;
+
+ bt->dev = dev;
+ bt->act_mask = (u16)-1;
+ bt->end_lba = -1ULL;
+
+ old_bt = xchg(&q->blk_trace, bt);
+ if (old_bt != NULL) {
+ (void)xchg(&q->blk_trace, old_bt);
+ ret = -EBUSY;
+ goto free_bt;
+ }
+
+ if (atomic_inc_return(&blk_probes_ref) == 1)
+ blk_register_tracepoints();
+ return 0;
+
+free_bt:
+ blk_trace_free(bt);
+ return ret;
+}
+
+/*
+ * sysfs interface to enable and configure tracing
+ */
+
+static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+#define BLK_TRACE_DEVICE_ATTR(_name) \
+ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
+ sysfs_blk_trace_attr_show, \
+ sysfs_blk_trace_attr_store)
+
+static BLK_TRACE_DEVICE_ATTR(enable);
+static BLK_TRACE_DEVICE_ATTR(act_mask);
+static BLK_TRACE_DEVICE_ATTR(pid);
+static BLK_TRACE_DEVICE_ATTR(start_lba);
+static BLK_TRACE_DEVICE_ATTR(end_lba);
+
+static struct attribute *blk_trace_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_act_mask.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_start_lba.attr,
+ &dev_attr_end_lba.attr,
+ NULL
+};
+
+struct attribute_group blk_trace_attr_group = {
+ .name = "trace",
+ .attrs = blk_trace_attrs,
+};
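/*
 * Editor's note (illustrative, not part of this patch): this "trace" group
 * is attached to the block device, so the attributes typically appear as
 * /sys/block/<disk>/trace/{enable,act_mask,pid,start_lba,end_lba}.  Writing
 * a category list such as "read,write" (or a hex mask) to act_mask and then
 * 1 to enable starts tracing for those categories.
 */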
+
+static const struct {
+ int mask;
+ const char *str;
+} mask_maps[] = {
+ { BLK_TC_READ, "read" },
+ { BLK_TC_WRITE, "write" },
+ { BLK_TC_BARRIER, "barrier" },
+ { BLK_TC_SYNC, "sync" },
+ { BLK_TC_QUEUE, "queue" },
+ { BLK_TC_REQUEUE, "requeue" },
+ { BLK_TC_ISSUE, "issue" },
+ { BLK_TC_COMPLETE, "complete" },
+ { BLK_TC_FS, "fs" },
+ { BLK_TC_PC, "pc" },
+ { BLK_TC_AHEAD, "ahead" },
+ { BLK_TC_META, "meta" },
+ { BLK_TC_DISCARD, "discard" },
+ { BLK_TC_DRV_DATA, "drv_data" },
+};
+
+static int blk_trace_str2mask(const char *str)
+{
+ int i;
+ int mask = 0;
+ char *buf, *s, *token;
+
+ buf = kstrdup(str, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ s = strstrip(buf);
+
+ while (1) {
+ token = strsep(&s, ",");
+ if (token == NULL)
+ break;
+
+ if (*token == '\0')
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
+ if (strcasecmp(token, mask_maps[i].str) == 0) {
+ mask |= mask_maps[i].mask;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(mask_maps)) {
+ mask = -EINVAL;
+ break;
+ }
+ }
+ kfree(buf);
+
+ return mask;
+}
+
+static ssize_t blk_trace_mask2str(char *buf, int mask)
+{
+ int i;
+ char *p = buf;
+
+ for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
+ if (mask & mask_maps[i].mask) {
+ p += sprintf(p, "%s%s",
+ (p == buf) ? "" : ",", mask_maps[i].str);
+ }
+ }
+ *p++ = '\n';
+
+ return p - buf;
+}
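/*
 * Editor's illustrative sketch, not part of this patch: for valid input the
 * two helpers above are inverses of each other.
 */
#if 0	/* example only */
static void example_mask_roundtrip(void)
{
	char buf[64];
	int mask = blk_trace_str2mask("read,write,sync");

	/* mask == (BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC) */
	if (mask >= 0)
		blk_trace_mask2str(buf, mask);	/* buf: "read,write,sync\n" */
	/* an unknown token, e.g. "read,bogus", yields -EINVAL instead */
}
#endif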
+
+static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
+{
+ if (bdev->bd_disk == NULL)
+ return NULL;
+
+ return bdev_get_queue(bdev);
+}
+
+static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ struct request_queue *q;
+ struct block_device *bdev;
+ ssize_t ret = -ENXIO;
+
+ lock_kernel();
+ bdev = bdget(part_devt(p));
+ if (bdev == NULL)
+ goto out_unlock_kernel;
+
+ q = blk_trace_get_queue(bdev);
+ if (q == NULL)
+ goto out_bdput;
+
+ mutex_lock(&bdev->bd_mutex);
+
+ if (attr == &dev_attr_enable) {
+ ret = sprintf(buf, "%u\n", !!q->blk_trace);
+ goto out_unlock_bdev;
+ }
+
+ if (q->blk_trace == NULL)
+ ret = sprintf(buf, "disabled\n");
+ else if (attr == &dev_attr_act_mask)
+ ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+ else if (attr == &dev_attr_pid)
+ ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+ else if (attr == &dev_attr_start_lba)
+ ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+ else if (attr == &dev_attr_end_lba)
+ ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+
+out_unlock_bdev:
+ mutex_unlock(&bdev->bd_mutex);
+out_bdput:
+ bdput(bdev);
+out_unlock_kernel:
+ unlock_kernel();
+ return ret;
+}
+
+static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct block_device *bdev;
+ struct request_queue *q;
+ struct hd_struct *p;
+ u64 value;
+ ssize_t ret = -EINVAL;
+
+ if (count == 0)
+ goto out;
+
+ if (attr == &dev_attr_act_mask) {
+ if (sscanf(buf, "%llx", &value) != 1) {
+ /* Assume it is a list of trace category names */
+ ret = blk_trace_str2mask(buf);
+ if (ret < 0)
+ goto out;
+ value = ret;
+ }
+ } else if (sscanf(buf, "%llu", &value) != 1)
+ goto out;
+
+ ret = -ENXIO;
+
+ lock_kernel();
+ p = dev_to_part(dev);
+ bdev = bdget(part_devt(p));
+ if (bdev == NULL)
+ goto out_unlock_kernel;
+
+ q = blk_trace_get_queue(bdev);
+ if (q == NULL)
+ goto out_bdput;
+
+ mutex_lock(&bdev->bd_mutex);
+
+ if (attr == &dev_attr_enable) {
+ if (value)
+ ret = blk_trace_setup_queue(q, bdev->bd_dev);
+ else
+ ret = blk_trace_remove_queue(q);
+ goto out_unlock_bdev;
+ }
+
+ ret = 0;
+ if (q->blk_trace == NULL)
+ ret = blk_trace_setup_queue(q, bdev->bd_dev);
+
+ if (ret == 0) {
+ if (attr == &dev_attr_act_mask)
+ q->blk_trace->act_mask = value;
+ else if (attr == &dev_attr_pid)
+ q->blk_trace->pid = value;
+ else if (attr == &dev_attr_start_lba)
+ q->blk_trace->start_lba = value;
+ else if (attr == &dev_attr_end_lba)
+ q->blk_trace->end_lba = value;
+ }
+
+out_unlock_bdev:
+ mutex_unlock(&bdev->bd_mutex);
+out_bdput:
+ bdput(bdev);
+out_unlock_kernel:
+ unlock_kernel();
+out:
+ return ret ? ret : count;
+}
+
diff --git a/kernel/trace/events.c b/kernel/trace/events.c
new file mode 100644
index 000000000000..246f2aa6dc46
--- /dev/null
+++ b/kernel/trace/events.c
@@ -0,0 +1,14 @@
+/*
+ * This is the place to register all trace points as events.
+ */
+
+#include <linux/stringify.h>
+
+#include <trace/trace_events.h>
+
+#include "trace_output.h"
+
+#include "trace_events_stage_1.h"
+#include "trace_events_stage_2.h"
+#include "trace_events_stage_3.h"
+
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fdf913dfc7e8..f1ed080406c3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -27,6 +27,9 @@
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
+#include <linux/hash.h>
+
+#include <trace/sched.h>
#include <asm/ftrace.h>
@@ -44,14 +47,14 @@
ftrace_kill(); \
} while (0)
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
/* Quick disabling of function tracer. */
int function_trace_stop;
@@ -61,9 +64,7 @@ int function_trace_stop;
*/
static int ftrace_disabled __read_mostly;
-static DEFINE_SPINLOCK(ftrace_lock);
-static DEFINE_MUTEX(ftrace_sysctl_lock);
-static DEFINE_MUTEX(ftrace_start_lock);
+static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
@@ -134,9 +135,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
static int __register_ftrace_function(struct ftrace_ops *ops)
{
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
ops->next = ftrace_list;
/*
* We are entering ops into the ftrace_list but another
@@ -172,18 +170,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
#endif
}
- spin_unlock(&ftrace_lock);
-
return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
struct ftrace_ops **p;
- int ret = 0;
-
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
/*
* If we are removing the last function, then simply point
@@ -192,17 +184,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_list == ops && ops->next == &ftrace_list_end) {
ftrace_trace_function = ftrace_stub;
ftrace_list = &ftrace_list_end;
- goto out;
+ return 0;
}
for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
if (*p == ops)
break;
- if (*p != ops) {
- ret = -1;
- goto out;
- }
+ if (*p != ops)
+ return -1;
*p = (*p)->next;
@@ -223,21 +213,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
}
}
- out:
- spin_unlock(&ftrace_lock);
-
- return ret;
+ return 0;
}
static void ftrace_update_pid_func(void)
{
ftrace_func_t func;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
if (ftrace_trace_function == ftrace_stub)
- goto out;
+ return;
func = ftrace_trace_function;
@@ -254,23 +238,29 @@ static void ftrace_update_pid_func(void)
#else
__ftrace_trace_function = func;
#endif
-
- out:
- spin_unlock(&ftrace_lock);
}
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
#ifdef CONFIG_DYNAMIC_FTRACE
+
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
-/*
- * Since MCOUNT_ADDR may point to mcount itself, we do not want
- * to get it confused by reading a reference in the code as we
- * are parsing on objcopy output of text. Use a variable for
- * it instead.
- */
-static unsigned long mcount_addr = MCOUNT_ADDR;
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_probe {
+ struct hlist_node node;
+ struct ftrace_probe_ops *ops;
+ unsigned long flags;
+ unsigned long ip;
+ void *data;
+ struct rcu_head rcu;
+};
+
enum {
FTRACE_ENABLE_CALLS = (1 << 0),
@@ -284,13 +274,13 @@ enum {
static int ftrace_filtered;
-static LIST_HEAD(ftrace_new_addrs);
+static struct dyn_ftrace *ftrace_new_addrs;
static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
struct ftrace_page *next;
- unsigned long index;
+ int index;
struct dyn_ftrace records[];
};
@@ -305,6 +295,19 @@ static struct ftrace_page *ftrace_pages;
static struct dyn_ftrace *ftrace_free_records;
+/*
+ * This is a double for-loop. Do not use 'break' to break out of the loop;
+ * you must use a goto.
+ */
+#define do_for_each_ftrace_rec(pg, rec) \
+ for (pg = ftrace_pages_start; pg; pg = pg->next) { \
+ int _____i; \
+ for (_____i = 0; _____i < pg->index; _____i++) { \
+ rec = &pg->records[_____i];
+
+#define while_for_each_ftrace_rec() \
+ } \
+ }
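/*
 * Editor's illustrative sketch, not part of this patch: a typical walk over
 * every dyn_ftrace record with the iterator pair above (hypothetical helper
 * name).  'continue' is fine inside the body, but leaving the walk early
 * requires a goto, per the comment above.
 */
#if 0	/* example only */
static int example_count_enabled(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FREE)
			continue;
		if (rec->flags & FTRACE_FL_ENABLED)
			count++;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return count;
}
#endif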
#ifdef CONFIG_KPROBES
@@ -338,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec)
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
- rec->ip = (unsigned long)ftrace_free_records;
+ rec->freelist = ftrace_free_records;
ftrace_free_records = rec;
rec->flags |= FTRACE_FL_FREE;
}
@@ -349,23 +352,22 @@ void ftrace_release(void *start, unsigned long size)
struct ftrace_page *pg;
unsigned long s = (unsigned long)start;
unsigned long e = s + size;
- int i;
if (ftrace_disabled || !start)
return;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
-
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
-
- if ((rec->ip >= s) && (rec->ip < e))
- ftrace_free_rec(rec);
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ if ((rec->ip >= s) && (rec->ip < e)) {
+ /*
+ * rec->ip is changed in ftrace_free_rec(), so it should not be
+ * between s and e if the record was freed.
+ */
+ FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
+ ftrace_free_rec(rec);
}
- }
- spin_unlock(&ftrace_lock);
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -382,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
return NULL;
}
- ftrace_free_records = (void *)rec->ip;
+ ftrace_free_records = rec->freelist;
memset(rec, 0, sizeof(*rec));
return rec;
}
@@ -414,8 +416,8 @@ ftrace_record_ip(unsigned long ip)
return NULL;
rec->ip = ip;
-
- list_add(&rec->list, &ftrace_new_addrs);
+ rec->newlist = ftrace_new_addrs;
+ ftrace_new_addrs = rec;
return rec;
}
@@ -461,10 +463,10 @@ static void ftrace_bug(int failed, unsigned long ip)
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
- unsigned long ip, fl;
unsigned long ftrace_addr;
+ unsigned long ip, fl;
- ftrace_addr = (unsigned long)ftrace_caller;
+ ftrace_addr = (unsigned long)FTRACE_ADDR;
ip = rec->ip;
@@ -473,7 +475,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
* it is not enabled then do nothing.
*
* If this record is not to be traced and
- * it is enabled then disabled it.
+ * it is enabled then disable it.
*
*/
if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -493,7 +495,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
return 0;
- /* Record is not filtered and is not enabled do nothing */
+ /* Record is not filtered or enabled, do nothing */
if (!fl)
return 0;
@@ -515,7 +517,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
} else {
- /* if record is not enabled do nothing */
+ /* if record is not enabled, do nothing */
if (!(rec->flags & FTRACE_FL_ENABLED))
return 0;
@@ -531,41 +533,41 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
static void ftrace_replace_code(int enable)
{
- int i, failed;
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+ int failed;
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
-
- /*
- * Skip over free records and records that have
- * failed.
- */
- if (rec->flags & FTRACE_FL_FREE ||
- rec->flags & FTRACE_FL_FAILED)
- continue;
+ do_for_each_ftrace_rec(pg, rec) {
+ /*
+ * Skip over free records, records that have failed,
+ * and records that have not been converted.
+ */
+ if (rec->flags & FTRACE_FL_FREE ||
+ rec->flags & FTRACE_FL_FAILED ||
+ !(rec->flags & FTRACE_FL_CONVERTED))
+ continue;
- /* ignore updates to this record's mcount site */
- if (get_kprobe((void *)rec->ip)) {
- freeze_record(rec);
- continue;
- } else {
- unfreeze_record(rec);
- }
+ /* ignore updates to this record's mcount site */
+ if (get_kprobe((void *)rec->ip)) {
+ freeze_record(rec);
+ continue;
+ } else {
+ unfreeze_record(rec);
+ }
- failed = __ftrace_replace_code(rec, enable);
- if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
- rec->flags |= FTRACE_FL_FAILED;
- if ((system_state == SYSTEM_BOOTING) ||
- !core_kernel_text(rec->ip)) {
- ftrace_free_rec(rec);
- } else
- ftrace_bug(failed, rec->ip);
- }
+ failed = __ftrace_replace_code(rec, enable);
+ if (failed) {
+ rec->flags |= FTRACE_FL_FAILED;
+ if ((system_state == SYSTEM_BOOTING) ||
+ !core_kernel_text(rec->ip)) {
+ ftrace_free_rec(rec);
+ } else {
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+ return;
+ }
}
- }
+ } while_for_each_ftrace_rec();
}
static int
@@ -576,7 +578,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
ip = rec->ip;
- ret = ftrace_make_nop(mod, rec, mcount_addr);
+ ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
if (ret) {
ftrace_bug(ret, ip);
rec->flags |= FTRACE_FL_FAILED;
@@ -585,6 +587,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
return 1;
}
+/*
+ * archs can override this function if they must do something
+ * before the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_prepare(void)
+{
+ return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * after the code modification is performed.
+ */
+int __weak ftrace_arch_code_modify_post_process(void)
+{
+ return 0;
+}
+
static int __ftrace_modify_code(void *data)
{
int *command = data;
@@ -607,7 +627,17 @@ static int __ftrace_modify_code(void *data)
static void ftrace_run_update_code(int command)
{
+ int ret;
+
+ ret = ftrace_arch_code_modify_prepare();
+ FTRACE_WARN_ON(ret);
+ if (ret)
+ return;
+
stop_machine(__ftrace_modify_code, &command, NULL);
+
+ ret = ftrace_arch_code_modify_post_process();
+ FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
@@ -631,13 +661,10 @@ static void ftrace_startup(int command)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
ftrace_start_up++;
command |= FTRACE_ENABLE_CALLS;
ftrace_startup_enable(command);
-
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(int command)
@@ -645,7 +672,6 @@ static void ftrace_shutdown(int command)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
ftrace_start_up--;
if (!ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
@@ -656,11 +682,9 @@ static void ftrace_shutdown(int command)
}
if (!command || !ftrace_enabled)
- goto out;
+ return;
ftrace_run_update_code(command);
- out:
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
@@ -670,7 +694,6 @@ static void ftrace_startup_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
/* Force update next time */
saved_ftrace_func = NULL;
/* ftrace_start_up is true if we want ftrace running */
@@ -678,7 +701,6 @@ static void ftrace_startup_sysctl(void)
command |= FTRACE_ENABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
@@ -688,13 +710,11 @@ static void ftrace_shutdown_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftrace_start_lock);
/* ftrace_start_up is true if ftrace is running */
if (ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftrace_start_lock);
}
static cycle_t ftrace_update_time;
@@ -703,19 +723,21 @@ unsigned long ftrace_update_tot_cnt;
static int ftrace_update_code(struct module *mod)
{
- struct dyn_ftrace *p, *t;
+ struct dyn_ftrace *p;
cycle_t start, stop;
start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
- list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
+ while (ftrace_new_addrs) {
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
- list_del_init(&p->list);
+ p = ftrace_new_addrs;
+ ftrace_new_addrs = p->newlist;
+ p->flags = 0L;
/* convert record (i.e, patch mcount-call with NOP) */
if (ftrace_code_disable(mod, p)) {
@@ -781,13 +803,16 @@ enum {
FTRACE_ITER_CONT = (1 << 1),
FTRACE_ITER_NOTRACE = (1 << 2),
FTRACE_ITER_FAILURES = (1 << 3),
+ FTRACE_ITER_PRINTALL = (1 << 4),
+ FTRACE_ITER_HASH = (1 << 5),
};
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
struct ftrace_iterator {
struct ftrace_page *pg;
- unsigned idx;
+ int hidx;
+ int idx;
unsigned flags;
unsigned char buffer[FTRACE_BUFF_MAX+1];
unsigned buffer_idx;
@@ -795,15 +820,89 @@ struct ftrace_iterator {
};
static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ struct hlist_node *hnd = v;
+ struct hlist_head *hhd;
+
+ WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+ (*pos)++;
+
+ retry:
+ if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+ return NULL;
+
+ hhd = &ftrace_func_hash[iter->hidx];
+
+ if (hlist_empty(hhd)) {
+ iter->hidx++;
+ hnd = NULL;
+ goto retry;
+ }
+
+ if (!hnd)
+ hnd = hhd->first;
+ else {
+ hnd = hnd->next;
+ if (!hnd) {
+ iter->hidx++;
+ goto retry;
+ }
+ }
+
+ return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ void *p = NULL;
+
+ iter->flags |= FTRACE_ITER_HASH;
+
+ return t_hash_next(m, p, pos);
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+ struct ftrace_func_probe *rec;
+ struct hlist_node *hnd = v;
+ char str[KSYM_SYMBOL_LEN];
+
+ rec = hlist_entry(hnd, struct ftrace_func_probe, node);
+
+ if (rec->ops->print)
+ return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
+ kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+ seq_printf(m, "%s:", str);
+
+ kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
+ seq_printf(m, "%s", str);
+
+ if (rec->data)
+ seq_printf(m, ":%p", rec->data);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = NULL;
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_next(m, v, pos);
+
(*pos)++;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ if (iter->flags & FTRACE_ITER_PRINTALL)
+ return NULL;
+
retry:
if (iter->idx >= iter->pg->index) {
if (iter->pg->next) {
@@ -832,7 +931,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
goto retry;
}
}
- spin_unlock(&ftrace_lock);
return rec;
}
@@ -842,6 +940,23 @@ static void *t_start(struct seq_file *m, loff_t *pos)
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ mutex_lock(&ftrace_lock);
+ /*
+ * For set_ftrace_filter reading, if we have the filter
+ * off, we can take a shortcut and just print out that all
+ * functions are enabled.
+ */
+ if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
+ if (*pos > 0)
+ return t_hash_start(m, pos);
+ iter->flags |= FTRACE_ITER_PRINTALL;
+ (*pos)++;
+ return iter;
+ }
+
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_start(m, pos);
+
if (*pos > 0) {
if (iter->idx < 0)
return p;
@@ -851,18 +966,31 @@ static void *t_start(struct seq_file *m, loff_t *pos)
p = t_next(m, p, pos);
+ if (!p)
+ return t_hash_start(m, pos);
+
return p;
}
static void t_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
+ struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
char str[KSYM_SYMBOL_LEN];
+ if (iter->flags & FTRACE_ITER_HASH)
+ return t_hash_show(m, v);
+
+ if (iter->flags & FTRACE_ITER_PRINTALL) {
+ seq_printf(m, "#### all functions enabled ####\n");
+ return 0;
+ }
+
if (!rec)
return 0;
@@ -941,23 +1069,16 @@ static void ftrace_filter_reset(int enable)
struct ftrace_page *pg;
struct dyn_ftrace *rec;
unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
- unsigned i;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ mutex_lock(&ftrace_lock);
if (enable)
ftrace_filtered = 0;
- pg = ftrace_pages_start;
- while (pg) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
- if (rec->flags & FTRACE_FL_FAILED)
- continue;
- rec->flags &= ~type;
- }
- pg = pg->next;
- }
- spin_unlock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+ rec->flags &= ~type;
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
}
static int
@@ -1008,16 +1129,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
return ftrace_regex_open(inode, file, 0);
}
-static ssize_t
-ftrace_regex_read(struct file *file, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- if (file->f_mode & FMODE_READ)
- return seq_read(file, ubuf, cnt, ppos);
- else
- return -EPERM;
-}
-
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
@@ -1038,86 +1149,536 @@ enum {
MATCH_END_ONLY,
};
-static void
-ftrace_match(unsigned char *buff, int len, int enable)
+/*
+ * (static function - no need for kernel doc)
+ *
+ * Pass in a buffer containing a glob and this function will
+ * set search to point to the search part of the buffer and
+ * return the type of search it is (see enum above).
+ * This does modify buff.
+ *
+ * Returns enum type.
+ * search returns the pointer to use for comparison.
+ * not returns 1 if buff started with a '!'
+ * 0 otherwise.
+ */
+static int
+ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
- char str[KSYM_SYMBOL_LEN];
- char *search = NULL;
- struct ftrace_page *pg;
- struct dyn_ftrace *rec;
int type = MATCH_FULL;
- unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
- unsigned i, match = 0, search_len = 0;
- int not = 0;
+ int i;
if (buff[0] == '!') {
- not = 1;
+ *not = 1;
buff++;
len--;
- }
+ } else
+ *not = 0;
+
+ *search = buff;
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
- search = buff + i + 1;
+ *search = buff + 1;
type = MATCH_END_ONLY;
- search_len = len - (i + 1);
} else {
- if (type == MATCH_END_ONLY) {
+ if (type == MATCH_END_ONLY)
type = MATCH_MIDDLE_ONLY;
- } else {
- match = i;
+ else
type = MATCH_FRONT_ONLY;
- }
buff[i] = 0;
break;
}
}
}
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
- if (enable)
- ftrace_filtered = 1;
- pg = ftrace_pages_start;
- while (pg) {
- for (i = 0; i < pg->index; i++) {
- int matched = 0;
- char *ptr;
-
- rec = &pg->records[i];
- if (rec->flags & FTRACE_FL_FAILED)
+ return type;
+}
+
+static int ftrace_match(char *str, char *regex, int len, int type)
+{
+ int matched = 0;
+ char *ptr;
+
+ switch (type) {
+ case MATCH_FULL:
+ if (strcmp(str, regex) == 0)
+ matched = 1;
+ break;
+ case MATCH_FRONT_ONLY:
+ if (strncmp(str, regex, len) == 0)
+ matched = 1;
+ break;
+ case MATCH_MIDDLE_ONLY:
+ if (strstr(str, regex))
+ matched = 1;
+ break;
+ case MATCH_END_ONLY:
+ ptr = strstr(str, regex);
+ if (ptr && (ptr[len] == 0))
+ matched = 1;
+ break;
+ }
+
+ return matched;
+}
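/*
 * Editor's illustrative sketch, not part of this patch: how a glob is
 * decoded by ftrace_setup_glob() and then applied with ftrace_match().
 */
#if 0	/* example only */
static void example_glob_usage(void)
{
	char buf[] = "sched_*";	/* must be writable: parsed in place */
	char *search;
	int not, type, len;

	type = ftrace_setup_glob(buf, strlen(buf), &search, &not);
	/* type == MATCH_FRONT_ONLY, search == "sched_", not == 0 */
	len = strlen(search);

	ftrace_match("sched_switch", search, len, type);	/* matches: 1 */
	ftrace_match("do_fork", search, len, type);		/* no match: 0 */
}
#endif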
+
+static int
+ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
+{
+ char str[KSYM_SYMBOL_LEN];
+
+ kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+ return ftrace_match(str, regex, len, type);
+}
+
+static void ftrace_match_records(char *buff, int len, int enable)
+{
+ unsigned int search_len;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ unsigned long flag;
+ char *search;
+ int type;
+ int not;
+
+ flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+ type = ftrace_setup_glob(buff, len, &search, &not);
+
+ search_len = strlen(search);
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (ftrace_match_record(rec, search, search_len, type)) {
+ if (not)
+ rec->flags &= ~flag;
+ else
+ rec->flags |= flag;
+ }
+ /*
+ * Only enable filtering if we have a function that
+ * is filtered on.
+ */
+ if (enable && (rec->flags & FTRACE_FL_FILTER))
+ ftrace_filtered = 1;
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+static int
+ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
+ char *regex, int len, int type)
+{
+ char str[KSYM_SYMBOL_LEN];
+ char *modname;
+
+ kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+
+ if (!modname || strcmp(modname, mod))
+ return 0;
+
+ /* blank search means to match all funcs in the mod */
+ if (len)
+ return ftrace_match(str, regex, len, type);
+ else
+ return 1;
+}
+
+static void ftrace_match_module_records(char *buff, char *mod, int enable)
+{
+ unsigned search_len = 0;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ int type = MATCH_FULL;
+ char *search = buff;
+ unsigned long flag;
+ int not = 0;
+
+ flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
+
+ /* blank or '*' mean the same */
+ if (strcmp(buff, "*") == 0)
+ buff[0] = 0;
+
+ /* handle the case of 'don't filter this module' */
+ if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
+ buff[0] = 0;
+ not = 1;
+ }
+
+ if (strlen(buff)) {
+ type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
+ search_len = strlen(search);
+ }
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (ftrace_match_module_record(rec, mod,
+ search, search_len, type)) {
+ if (not)
+ rec->flags &= ~flag;
+ else
+ rec->flags |= flag;
+ }
+ if (enable && (rec->flags & FTRACE_FL_FILTER))
+ ftrace_filtered = 1;
+
+ } while_for_each_ftrace_rec();
+ mutex_unlock(&ftrace_lock);
+}
+
+/*
+ * We register the module command as a template to show others how
+ * to register a command as well.
+ */
+
+static int
+ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+{
+ char *mod;
+
+ /*
+ * cmd == 'mod' because we only registered this func
+ * for the 'mod' ftrace_func_command.
+ * But if you register one func with multiple commands,
+ * you can tell which command was used by the cmd
+ * parameter.
+ */
+
+ /* we must have a module name */
+ if (!param)
+ return -EINVAL;
+
+ mod = strsep(&param, ":");
+ if (!strlen(mod))
+ return -EINVAL;
+
+ ftrace_match_module_records(func, mod, enable);
+ return 0;
+}
+
+static struct ftrace_func_command ftrace_mod_cmd = {
+ .name = "mod",
+ .func = ftrace_mod_callback,
+};
+
+static int __init ftrace_mod_cmd_init(void)
+{
+ return register_ftrace_command(&ftrace_mod_cmd);
+}
+device_initcall(ftrace_mod_cmd_init);
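/*
 * Editor's illustrative sketch, not part of this patch, following the "mod"
 * template above: registering a hypothetical "dump" command that would be
 * triggered by writing e.g. "spin_lock:dump" to set_ftrace_filter.
 */
#if 0	/* example only */
static int example_dump_callback(char *func, char *cmd, char *param, int enable)
{
	pr_info("ftrace cmd '%s' on glob '%s' (param '%s')\n",
		cmd, func, param ? param : "");
	return 0;
}

static struct ftrace_func_command example_dump_cmd = {
	.name	= "dump",
	.func	= example_dump_callback,
};

static int __init example_dump_cmd_init(void)
{
	return register_ftrace_command(&example_dump_cmd);
}
device_initcall(example_dump_cmd_init);
#endif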
+
+static void
+function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct ftrace_func_probe *entry;
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+ unsigned long key;
+ int resched;
+
+ key = hash_long(ip, FTRACE_HASH_BITS);
+
+ hhd = &ftrace_func_hash[key];
+
+ if (hlist_empty(hhd))
+ return;
+
+ /*
+ * Disable preemption for these calls to prevent an RCU grace
+ * period. This syncs the hash iteration and freeing of items
+ * on the hash. rcu_read_lock is too dangerous here.
+ */
+ resched = ftrace_preempt_disable();
+ hlist_for_each_entry_rcu(entry, n, hhd, node) {
+ if (entry->ip == ip)
+ entry->ops->func(ip, parent_ip, &entry->data);
+ }
+ ftrace_preempt_enable(resched);
+}
+
+static struct ftrace_ops trace_probe_ops __read_mostly =
+{
+ .func = function_trace_probe_call,
+};
+
+static int ftrace_probe_registered;
+
+static void __enable_ftrace_function_probe(void)
+{
+ int i;
+
+ if (ftrace_probe_registered)
+ return;
+
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+ if (hhd->first)
+ break;
+ }
+ /* Nothing registered? */
+ if (i == FTRACE_FUNC_HASHSIZE)
+ return;
+
+ __register_ftrace_function(&trace_probe_ops);
+ ftrace_startup(0);
+ ftrace_probe_registered = 1;
+}
+
+static void __disable_ftrace_function_probe(void)
+{
+ int i;
+
+ if (!ftrace_probe_registered)
+ return;
+
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+ if (hhd->first)
+ return;
+ }
+
+ /* no more funcs left */
+ __unregister_ftrace_function(&trace_probe_ops);
+ ftrace_shutdown(0);
+ ftrace_probe_registered = 0;
+}
+
+
+static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+{
+ struct ftrace_func_probe *entry =
+ container_of(rhp, struct ftrace_func_probe, rcu);
+
+ if (entry->ops->free)
+ entry->ops->free(&entry->data);
+ kfree(entry);
+}
+
+
+int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data)
+{
+ struct ftrace_func_probe *entry;
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+ int type, len, not;
+ unsigned long key;
+ int count = 0;
+ char *search;
+
+ type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ len = strlen(search);
+
+ /* we do not support '!' for function probes */
+ if (WARN_ON(not))
+ return -EINVAL;
+
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
+
+ if (rec->flags & FTRACE_FL_FAILED)
+ continue;
+
+ if (!ftrace_match_record(rec, search, len, type))
+ continue;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ /* If we did not process any, then return error */
+ if (!count)
+ count = -ENOMEM;
+ goto out_unlock;
+ }
+
+ count++;
+
+ entry->data = data;
+
+ /*
+ * The caller might want to do something special
+ * for each function we find. We call the callback
+ * to give the caller an opportunity to do so.
+ */
+ if (ops->callback) {
+ if (ops->callback(rec->ip, &entry->data) < 0) {
+ /* caller does not like this func */
+ kfree(entry);
continue;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- switch (type) {
- case MATCH_FULL:
- if (strcmp(str, buff) == 0)
- matched = 1;
- break;
- case MATCH_FRONT_ONLY:
- if (memcmp(str, buff, match) == 0)
- matched = 1;
- break;
- case MATCH_MIDDLE_ONLY:
- if (strstr(str, search))
- matched = 1;
- break;
- case MATCH_END_ONLY:
- ptr = strstr(str, search);
- if (ptr && (ptr[search_len] == 0))
- matched = 1;
- break;
}
- if (matched) {
- if (not)
- rec->flags &= ~flag;
- else
- rec->flags |= flag;
+ }
+
+ entry->ops = ops;
+ entry->ip = rec->ip;
+
+ key = hash_long(entry->ip, FTRACE_HASH_BITS);
+ hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
+
+ } while_for_each_ftrace_rec();
+ __enable_ftrace_function_probe();
+
+ out_unlock:
+ mutex_unlock(&ftrace_lock);
+
+ return count;
+}
+
+enum {
+ PROBE_TEST_FUNC = 1,
+ PROBE_TEST_DATA = 2
+};
+
+static void
+__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data, int flags)
+{
+ struct ftrace_func_probe *entry;
+ struct hlist_node *n, *tmp;
+ char str[KSYM_SYMBOL_LEN];
+ int type = MATCH_FULL;
+ int i, len = 0;
+ char *search;
+
+ if (glob && (strcmp(glob, "*") || !strlen(glob)))
+ glob = NULL;
+ else {
+ int not;
+
+ type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
+ len = strlen(search);
+
+ /* we do not support '!' for function probes */
+ if (WARN_ON(not))
+ return;
+ }
+
+ mutex_lock(&ftrace_lock);
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+
+ hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+
+ /* break up if statements for readability */
+ if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
+ continue;
+
+ if ((flags & PROBE_TEST_DATA) && entry->data != data)
+ continue;
+
+ /* do this last, since it is the most expensive */
+ if (glob) {
+ kallsyms_lookup(entry->ip, NULL, NULL,
+ NULL, str);
+ if (!ftrace_match(str, glob, len, type))
+ continue;
}
+
+ hlist_del(&entry->node);
+ call_rcu(&entry->rcu, ftrace_free_entry_rcu);
}
- pg = pg->next;
}
- spin_unlock(&ftrace_lock);
+ __disable_ftrace_function_probe();
+ mutex_unlock(&ftrace_lock);
+}
+
+void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data)
+{
+ __unregister_ftrace_function_probe(glob, ops, data,
+ PROBE_TEST_FUNC | PROBE_TEST_DATA);
+}
+
+void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
+{
+ __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
+}
+
+void unregister_ftrace_function_probe_all(char *glob)
+{
+ __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
+}
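/*
 * Editor's illustrative sketch, not part of this patch: a minimal function
 * probe that counts hits on every function matching "sched_*", storing the
 * counter in the per-entry data pointer slot.  All names are hypothetical.
 */
#if 0	/* example only */
static void example_probe_func(unsigned long ip, unsigned long parent_ip,
			       void **data)
{
	unsigned long *hits = (unsigned long *)data;

	(*hits)++;	/* runs each time a matched function is called */
}

static struct ftrace_probe_ops example_probe_ops = {
	.func = example_probe_func,
};

static int example_attach(void)
{
	char glob[] = "sched_*";	/* writable: the glob is parsed in place */

	/* returns the number of functions the glob matched */
	return register_ftrace_function_probe(glob, &example_probe_ops, NULL);
}

static void example_detach(void)
{
	char glob[] = "sched_*";

	unregister_ftrace_function_probe_func(glob, &example_probe_ops);
}
#endif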
+
+static LIST_HEAD(ftrace_commands);
+static DEFINE_MUTEX(ftrace_cmd_mutex);
+
+int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+ struct ftrace_func_command *p;
+ int ret = 0;
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry(p, &ftrace_commands, list) {
+ if (strcmp(cmd->name, p->name) == 0) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ }
+ list_add(&cmd->list, &ftrace_commands);
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
+}
+
+int unregister_ftrace_command(struct ftrace_func_command *cmd)
+{
+ struct ftrace_func_command *p, *n;
+ int ret = -ENODEV;
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry_safe(p, n, &ftrace_commands, list) {
+ if (strcmp(cmd->name, p->name) == 0) {
+ ret = 0;
+ list_del_init(&p->list);
+ goto out_unlock;
+ }
+ }
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
+}
+
+static int ftrace_process_regex(char *buff, int len, int enable)
+{
+ char *func, *command, *next = buff;
+ struct ftrace_func_command *p;
+ int ret = -EINVAL;
+
+ func = strsep(&next, ":");
+
+ if (!next) {
+ ftrace_match_records(func, len, enable);
+ return 0;
+ }
+
+ /* command found */
+
+ command = strsep(&next, ":");
+
+ mutex_lock(&ftrace_cmd_mutex);
+ list_for_each_entry(p, &ftrace_commands, list) {
+ if (strcmp(p->name, command) == 0) {
+ ret = p->func(func, command, next, enable);
+ goto out_unlock;
+ }
+ }
+ out_unlock:
+ mutex_unlock(&ftrace_cmd_mutex);
+
+ return ret;
}
static ssize_t
@@ -1187,7 +1748,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
if (isspace(ch)) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx, enable);
+ ret = ftrace_process_regex(iter->buffer,
+ iter->buffer_idx, enable);
+ if (ret)
+ goto out;
iter->buffer_idx = 0;
} else
iter->flags |= FTRACE_ITER_CONT;
@@ -1226,7 +1790,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
if (reset)
ftrace_filter_reset(enable);
if (buf)
- ftrace_match(buf, len, enable);
+ ftrace_match_records(buf, len, enable);
mutex_unlock(&ftrace_regex_lock);
}
@@ -1276,15 +1840,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
if (iter->buffer_idx) {
iter->filtered++;
iter->buffer[iter->buffer_idx] = 0;
- ftrace_match(iter->buffer, iter->buffer_idx, enable);
+ ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
}
- mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
if (ftrace_start_up && ftrace_enabled)
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- mutex_unlock(&ftrace_start_lock);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
kfree(iter);
mutex_unlock(&ftrace_regex_lock);
@@ -1303,31 +1865,31 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
return ftrace_regex_release(inode, file, 0);
}
-static struct file_operations ftrace_avail_fops = {
+static const struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = ftrace_avail_release,
};
-static struct file_operations ftrace_failures_fops = {
+static const struct file_operations ftrace_failures_fops = {
.open = ftrace_failures_open,
.read = seq_read,
.llseek = seq_lseek,
.release = ftrace_avail_release,
};
-static struct file_operations ftrace_filter_fops = {
+static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
- .read = ftrace_regex_read,
+ .read = seq_read,
.write = ftrace_filter_write,
.llseek = ftrace_regex_lseek,
.release = ftrace_filter_release,
};
-static struct file_operations ftrace_notrace_fops = {
+static const struct file_operations ftrace_notrace_fops = {
.open = ftrace_notrace_open,
- .read = ftrace_regex_read,
+ .read = seq_read,
.write = ftrace_notrace_write,
.llseek = ftrace_regex_lseek,
.release = ftrace_notrace_release,
@@ -1360,6 +1922,10 @@ static void *g_start(struct seq_file *m, loff_t *pos)
mutex_lock(&graph_lock);
+ /* Nothing filtered; tell g_show to print that all functions are enabled */
+ if (!ftrace_graph_count && !*pos)
+ return (void *)1;
+
p = g_next(m, p, pos);
return p;
@@ -1378,6 +1944,11 @@ static int g_show(struct seq_file *m, void *v)
if (!ptr)
return 0;
+ if (ptr == (unsigned long *)1) {
+ seq_printf(m, "#### all functions enabled ####\n");
+ return 0;
+ }
+
kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
seq_printf(m, "%s\n", str);
@@ -1420,53 +1991,53 @@ ftrace_graph_open(struct inode *inode, struct file *file)
return ret;
}
-static ssize_t
-ftrace_graph_read(struct file *file, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- if (file->f_mode & FMODE_READ)
- return seq_read(file, ubuf, cnt, ppos);
- else
- return -EPERM;
-}
-
static int
-ftrace_set_func(unsigned long *array, int idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
- char str[KSYM_SYMBOL_LEN];
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+ int search_len;
int found = 0;
- int i, j;
+ int type, not;
+ char *search;
+ bool exists;
+ int i;
if (ftrace_disabled)
return -ENODEV;
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
+ /* decode regex */
+ type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
+ if (not)
+ return -EINVAL;
+
+ search_len = strlen(search);
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- for (i = 0; i < pg->index; i++) {
- rec = &pg->records[i];
+ mutex_lock(&ftrace_lock);
+ do_for_each_ftrace_rec(pg, rec) {
- if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
- continue;
+ if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+ break;
+
+ if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
+ continue;
- kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- if (strcmp(str, buffer) == 0) {
+ if (ftrace_match_record(rec, search, search_len, type)) {
+ /* ensure it is not already in the array */
+ exists = false;
+ for (i = 0; i < *idx; i++)
+ if (array[i] == rec->ip) {
+ exists = true;
+ break;
+ }
+ if (!exists) {
+ array[(*idx)++] = rec->ip;
found = 1;
- for (j = 0; j < idx; j++)
- if (array[j] == rec->ip) {
- found = 0;
- break;
- }
- if (found)
- array[idx] = rec->ip;
- break;
}
}
- }
- spin_unlock(&ftrace_lock);
+ } while_for_each_ftrace_rec();
+
+ mutex_unlock(&ftrace_lock);
return found ? 0 : -EINVAL;
}
@@ -1534,13 +2105,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
}
buffer[index] = 0;
- /* we allow only one at a time */
- ret = ftrace_set_func(array, ftrace_graph_count, buffer);
+ /* we allow only one expression at a time */
+ ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
if (ret)
goto out;
- ftrace_graph_count++;
-
file->f_pos += read;
ret = read;
@@ -1552,7 +2121,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
static const struct file_operations ftrace_graph_fops = {
.open = ftrace_graph_open,
- .read = ftrace_graph_read,
+ .read = seq_read,
.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -1604,7 +2173,7 @@ static int ftrace_convert_nops(struct module *mod,
unsigned long addr;
unsigned long flags;
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
p = start;
while (p < end) {
addr = ftrace_call_adjust(*p++);
@@ -1623,7 +2192,7 @@ static int ftrace_convert_nops(struct module *mod,
local_irq_save(flags);
ftrace_update_code(mod);
local_irq_restore(flags);
- mutex_unlock(&ftrace_start_lock);
+ mutex_unlock(&ftrace_lock);
return 0;
}
@@ -1700,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
if (ftrace_pid_trace == ftrace_swapper_pid)
r = sprintf(buf, "swapper tasks\n");
else if (ftrace_pid_trace)
- r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+ r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
else
r = sprintf(buf, "no pid\n");
@@ -1796,7 +2365,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;
- mutex_lock(&ftrace_start_lock);
+ mutex_lock(&ftrace_lock);
if (val < 0) {
/* disable pid tracing */
if (!ftrace_pid_trace)
@@ -1835,12 +2404,12 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
ftrace_startup_enable(0);
out:
- mutex_unlock(&ftrace_start_lock);
+ mutex_unlock(&ftrace_lock);
return cnt;
}
-static struct file_operations ftrace_pid_fops = {
+static const struct file_operations ftrace_pid_fops = {
.read = ftrace_pid_read,
.write = ftrace_pid_write,
};
@@ -1863,7 +2432,6 @@ static __init int ftrace_init_debugfs(void)
"'set_ftrace_pid' entry\n");
return 0;
}
-
fs_initcall(ftrace_init_debugfs);
/**
@@ -1898,17 +2466,17 @@ int register_ftrace_function(struct ftrace_ops *ops)
if (unlikely(ftrace_disabled))
return -1;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = __register_ftrace_function(ops);
ftrace_startup(0);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
/**
- * unregister_ftrace_function - unresgister a function for profiling.
+ * unregister_ftrace_function - unregister a function for profiling.
* @ops - ops structure that holds the function to unregister
*
* Unregister a function that was added to be called by ftrace profiling.
@@ -1917,10 +2485,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = __unregister_ftrace_function(ops);
ftrace_shutdown(0);
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
@@ -1935,7 +2503,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (unlikely(ftrace_disabled))
return -ENODEV;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
@@ -1964,7 +2532,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
}
out:
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
@@ -2029,6 +2597,38 @@ free:
return ret;
}
+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ unsigned long long timestamp;
+ int index;
+
+ /*
+ * Does the user want to count the time a function was asleep?
+ * If so, do not update the time stamps.
+ */
+ if (trace_flags & TRACE_ITER_SLEEP_TIME)
+ return;
+
+ timestamp = trace_clock_local();
+
+ prev->ftrace_timestamp = timestamp;
+
+ /* only process tasks that we timestamped */
+ if (!next->ftrace_timestamp)
+ return;
+
+ /*
+ * Update all the counters in next to make up for the
+ * time next was sleeping.
+ */
+ timestamp -= next->ftrace_timestamp;
+
+ for (index = next->curr_ret_stack; index >= 0; index--)
+ next->ret_stack[index].calltime += timestamp;
+}
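
To make the arithmetic of the adjustment above concrete, here is a small standalone model (plain userspace C, hypothetical timestamps, a one-entry stand-in for the ret_stack): pushing each recorded calltime forward by the sleep delta removes the sleep period from the duration that the graph tracer later reports as rettime - calltime.

#include <stdio.h>

/* Toy stand-in for one function-graph stack frame; not the kernel layout. */
struct toy_frame {
	unsigned long long calltime;	/* timestamp taken at function entry */
};

int main(void)
{
	struct toy_frame frame = { .calltime = 1000 };	/* entered at t=1000 */
	unsigned long long sleep_start = 1400;	/* task scheduled out */
	unsigned long long sleep_end = 2400;	/* task scheduled back in */
	unsigned long long rettime = 2600;	/* function returns */

	/* sleep time counted: the duration spans the whole interval (1600) */
	printf("with sleep time:    %llu\n", rettime - frame.calltime);

	/* the probe's adjustment: push calltime forward by the sleep delta */
	frame.calltime += sleep_end - sleep_start;

	/* sleep time excluded: only the 600 units actually executed remain */
	printf("without sleep time: %llu\n", rettime - frame.calltime);
	return 0;
}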
+
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
@@ -2050,6 +2650,13 @@ static int start_graph_tracing(void)
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
+ if (!ret) {
+ ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+ if (ret)
+ pr_info("ftrace_graph: Couldn't activate tracepoint"
+ " probe to kernel_sched_switch\n");
+ }
+
kfree(ret_stack_list);
return ret;
}
@@ -2080,7 +2687,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
{
int ret = 0;
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
+
+ /* we currently allow only one tracer registered at a time */
+ if (atomic_read(&ftrace_graph_active)) {
+ ret = -EBUSY;
+ goto out;
+ }
ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
register_pm_notifier(&ftrace_suspend_notifier);
@@ -2098,21 +2711,26 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_startup(FTRACE_START_FUNC_RET);
out:
- mutex_unlock(&ftrace_sysctl_lock);
+ mutex_unlock(&ftrace_lock);
return ret;
}
void unregister_ftrace_graph(void)
{
- mutex_lock(&ftrace_sysctl_lock);
+ mutex_lock(&ftrace_lock);
+
+ if (!unlikely(atomic_read(&ftrace_graph_active)))
+ goto out;
atomic_dec(&ftrace_graph_active);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
- mutex_unlock(&ftrace_sysctl_lock);
+ out:
+ mutex_unlock(&ftrace_lock);
}
/* Allocate a return stack for newly created task */
@@ -2127,6 +2745,7 @@ void ftrace_graph_init_task(struct task_struct *t)
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
} else
t->ret_stack = NULL;
}
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644
index 000000000000..5011f4d91e37
--- /dev/null
+++ b/kernel/trace/kmemtrace.c
@@ -0,0 +1,464 @@
+/*
+ * Memory allocator tracing
+ *
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/tracepoint.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+#include <trace/kmemtrace.h>
+
+#include "trace_output.h"
+#include "trace.h"
+
+/* Select an alternative, minimalistic output rather than the original one */
+#define TRACE_KMEM_OPT_MINIMAL 0x1
+
+static struct tracer_opt kmem_opts[] = {
+ /* Disable the minimalistic output by default */
+ { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
+ { }
+};
+
+static struct tracer_flags kmem_tracer_flags = {
+ .val = 0,
+ .opts = kmem_opts
+};
+
+static struct trace_array *kmemtrace_array;
+
+/* Trace allocations */
+static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ struct trace_array *tr = kmemtrace_array;
+ struct kmemtrace_alloc_entry *entry;
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, 0, 0);
+
+ entry->ent.type = TRACE_KMEM_ALLOC;
+ entry->type_id = type_id;
+ entry->call_site = call_site;
+ entry->ptr = ptr;
+ entry->bytes_req = bytes_req;
+ entry->bytes_alloc = bytes_alloc;
+ entry->gfp_flags = gfp_flags;
+ entry->node = node;
+
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ trace_wake_up();
+}
+
+static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
+ unsigned long call_site,
+ const void *ptr)
+{
+ struct trace_array *tr = kmemtrace_array;
+ struct kmemtrace_free_entry *entry;
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ tracing_generic_entry_update(&entry->ent, 0, 0);
+
+ entry->ent.type = TRACE_KMEM_FREE;
+ entry->type_id = type_id;
+ entry->call_site = call_site;
+ entry->ptr = ptr;
+
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ trace_wake_up();
+}
+
+static void kmemtrace_kmalloc(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmalloc_node(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node)
+{
+ kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+ bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
+{
+ kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
+}
+
+static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
+{
+ kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
+}
+
+static int kmemtrace_start_probes(void)
+{
+ int err;
+
+ err = register_trace_kmalloc(kmemtrace_kmalloc);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+ if (err)
+ return err;
+ err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+ if (err)
+ return err;
+ err = register_trace_kfree(kmemtrace_kfree);
+ if (err)
+ return err;
+ err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+
+ return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+ unregister_trace_kmalloc(kmemtrace_kmalloc);
+ unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+ unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
+ unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+ unregister_trace_kfree(kmemtrace_kfree);
+ unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+}
+
+static int kmem_trace_init(struct trace_array *tr)
+{
+ int cpu;
+ kmemtrace_array = tr;
+
+ for_each_cpu_mask(cpu, cpu_possible_map)
+ tracing_reset(tr, cpu);
+
+ kmemtrace_start_probes();
+
+ return 0;
+}
+
+static void kmem_trace_reset(struct trace_array *tr)
+{
+ kmemtrace_stop_probes();
+}
+
+static void kmemtrace_headers(struct seq_file *s)
+{
+ /* Don't need headers for the original kmemtrace output */
+ if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+ return;
+
+ seq_printf(s, "#\n");
+ seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
+ " POINTER NODE CALLER\n");
+ seq_printf(s, "# FREE | | | | "
+ " | | | |\n");
+ seq_printf(s, "# |\n\n");
+}
+
+/*
+ * The following functions give the original output from kmemtrace,
+ * plus the origin CPU, since reordering occurs in-kernel now.
+ */
+
+#define KMEMTRACE_USER_ALLOC 0
+#define KMEMTRACE_USER_FREE 1
+
+struct kmemtrace_user_event {
+ u8 event_id;
+ u8 type_id;
+ u16 event_size;
+ u32 cpu;
+ u64 timestamp;
+ unsigned long call_site;
+ unsigned long ptr;
+};
+
+struct kmemtrace_user_event_alloc {
+ size_t bytes_req;
+ size_t bytes_alloc;
+ unsigned gfp_flags;
+ int node;
+};
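
Since the two structures above define the raw byte stream handed to userspace, a reader on the other side only has to walk it by event_size. The sketch below is a hypothetical userspace parser, not part of the patch: it assumes a 64-bit kernel, matching endianness and no surprise padding, and the names (uev, uev_alloc, parse) are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace mirror of the headers above; field sizes assume a 64-bit kernel
 * with matching endianness and no extra struct padding (assumptions made for
 * this sketch, not guaranteed by the patch). */
struct uev {
	uint8_t  event_id;	/* 0 = ALLOC, 1 = FREE */
	uint8_t  type_id;
	uint16_t event_size;	/* header plus optional alloc payload */
	uint32_t cpu;
	uint64_t timestamp;
	uint64_t call_site;
	uint64_t ptr;
};

struct uev_alloc {		/* present only after ALLOC headers */
	uint64_t bytes_req;
	uint64_t bytes_alloc;
	uint32_t gfp_flags;
	int32_t  node;
};

/* Walk a byte stream of concatenated events, advancing by event_size. */
static void parse(const uint8_t *buf, size_t len)
{
	for (size_t pos = 0; pos + sizeof(struct uev) <= len; ) {
		struct uev ev;

		memcpy(&ev, buf + pos, sizeof(ev));
		if (ev.event_size < sizeof(ev) || pos + ev.event_size > len)
			break;	/* truncated or corrupt stream */

		if (ev.event_id == 0 &&
		    ev.event_size >= sizeof(ev) + sizeof(struct uev_alloc)) {
			struct uev_alloc a;

			memcpy(&a, buf + pos + sizeof(ev), sizeof(a));
			printf("cpu%u alloc %llu/%llu bytes -> %#llx\n",
			       (unsigned)ev.cpu,
			       (unsigned long long)a.bytes_req,
			       (unsigned long long)a.bytes_alloc,
			       (unsigned long long)ev.ptr);
		} else {
			printf("cpu%u free %#llx\n", (unsigned)ev.cpu,
			       (unsigned long long)ev.ptr);
		}
		pos += ev.event_size;	/* event_size covers header + payload */
	}
}

int main(void)
{
	/* one synthetic FREE event, just to exercise the parser */
	struct uev ev = { .event_id = 1, .type_id = 0,
			  .event_size = sizeof(ev), .cpu = 0,
			  .timestamp = 42, .call_site = 0, .ptr = 0xdeadbeef };

	parse((const uint8_t *)&ev, sizeof(ev));
	return 0;
}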
+
+static enum print_line_t
+kmemtrace_print_alloc_user(struct trace_iterator *iter,
+ struct kmemtrace_alloc_entry *entry)
+{
+ struct kmemtrace_user_event_alloc *ev_alloc;
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_user_event *ev;
+
+ ev = trace_seq_reserve(s, sizeof(*ev));
+ if (!ev)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ev->event_id = KMEMTRACE_USER_ALLOC;
+ ev->type_id = entry->type_id;
+ ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
+ ev->cpu = iter->cpu;
+ ev->timestamp = iter->ts;
+ ev->call_site = entry->call_site;
+ ev->ptr = (unsigned long)entry->ptr;
+
+ ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
+ if (!ev_alloc)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ev_alloc->bytes_req = entry->bytes_req;
+ ev_alloc->bytes_alloc = entry->bytes_alloc;
+ ev_alloc->gfp_flags = entry->gfp_flags;
+ ev_alloc->node = entry->node;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_user(struct trace_iterator *iter,
+ struct kmemtrace_free_entry *entry)
+{
+ struct trace_seq *s = &iter->seq;
+ struct kmemtrace_user_event *ev;
+
+ ev = trace_seq_reserve(s, sizeof(*ev));
+ if (!ev)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ev->event_id = KMEMTRACE_USER_FREE;
+ ev->type_id = entry->type_id;
+ ev->event_size = sizeof(*ev);
+ ev->cpu = iter->cpu;
+ ev->timestamp = iter->ts;
+ ev->call_site = entry->call_site;
+ ev->ptr = (unsigned long)entry->ptr;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+/* The following two functions provide a more minimalistic output */
+static enum print_line_t
+kmemtrace_print_alloc_compress(struct trace_iterator *iter,
+ struct kmemtrace_alloc_entry *entry)
+{
+ struct trace_seq *s = &iter->seq;
+ int ret;
+
+ /* Alloc entry */
+ ret = trace_seq_printf(s, " + ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Type */
+ switch (entry->type_id) {
+ case KMEMTRACE_TYPE_KMALLOC:
+ ret = trace_seq_printf(s, "K ");
+ break;
+ case KMEMTRACE_TYPE_CACHE:
+ ret = trace_seq_printf(s, "C ");
+ break;
+ case KMEMTRACE_TYPE_PAGES:
+ ret = trace_seq_printf(s, "P ");
+ break;
+ default:
+ ret = trace_seq_printf(s, "? ");
+ }
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Requested */
+ ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Allocated */
+ ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Flags
+ * TODO: would be better to print the names of the GFP flags
+ */
+ ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Pointer to allocated */
+ ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Node */
+ ret = trace_seq_printf(s, "%4d ", entry->node);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Call site */
+ ret = seq_print_ip_sym(s, entry->call_site, 0);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ if (!trace_seq_printf(s, "\n"))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_compress(struct trace_iterator *iter,
+ struct kmemtrace_free_entry *entry)
+{
+ struct trace_seq *s = &iter->seq;
+ int ret;
+
+ /* Free entry */
+ ret = trace_seq_printf(s, " - ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Type */
+ switch (entry->type_id) {
+ case KMEMTRACE_TYPE_KMALLOC:
+ ret = trace_seq_printf(s, "K ");
+ break;
+ case KMEMTRACE_TYPE_CACHE:
+ ret = trace_seq_printf(s, "C ");
+ break;
+ case KMEMTRACE_TYPE_PAGES:
+ ret = trace_seq_printf(s, "P ");
+ break;
+ default:
+ ret = trace_seq_printf(s, "? ");
+ }
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Skip requested/allocated/flags */
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Pointer to allocated */
+ ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Skip node */
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* Call site */
+ ret = seq_print_ip_sym(s, entry->call_site, 0);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ if (!trace_seq_printf(s, "\n"))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
+{
+ struct trace_entry *entry = iter->ent;
+
+ switch (entry->type) {
+ case TRACE_KMEM_ALLOC: {
+ struct kmemtrace_alloc_entry *field;
+
+ trace_assign_type(field, entry);
+ if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+ return kmemtrace_print_alloc_compress(iter, field);
+ else
+ return kmemtrace_print_alloc_user(iter, field);
+ }
+
+ case TRACE_KMEM_FREE: {
+ struct kmemtrace_free_entry *field;
+
+ trace_assign_type(field, entry);
+ if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+ return kmemtrace_print_free_compress(iter, field);
+ else
+ return kmemtrace_print_free_user(iter, field);
+ }
+
+ default:
+ return TRACE_TYPE_UNHANDLED;
+ }
+}
+
+static struct tracer kmem_tracer __read_mostly = {
+ .name = "kmemtrace",
+ .init = kmem_trace_init,
+ .reset = kmem_trace_reset,
+ .print_line = kmemtrace_print_line,
+ .print_header = kmemtrace_headers,
+ .flags = &kmem_tracer_flags
+};
+
+void kmemtrace_init(void)
+{
+ /* earliest opportunity to start kmem tracing */
+}
+
+static int __init init_kmem_tracer(void)
+{
+ return register_tracer(&kmem_tracer);
+}
+device_initcall(init_kmem_tracer);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bd38c5cfd8ad..960cbf44c844 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,21 +4,92 @@
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/ring_buffer.h>
+#include <linux/trace_clock.h>
+#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
-#include <linux/sched.h> /* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
+#include <linux/cpu.h>
#include <linux/fs.h>
#include "trace.h"
/*
+ * The ring buffer is made up of a list of pages. A separate list of pages is
+ * allocated for each CPU. A writer may only write to a buffer that is
+ * associated with the CPU it is currently executing on. A reader may read
+ * from any per cpu buffer.
+ *
+ * The reader is special. For each per cpu buffer, the reader has its own
+ * reader page. When a reader has read the entire reader page, this reader
+ * page is swapped with another page in the ring buffer.
+ *
+ * Now, as long as the writer is off the reader page, the reader can do
+ * whatever it wants with that page. The writer will never write to that page
+ * again (as long as it is out of the ring buffer).
+ *
+ * Here's some silly ASCII art.
+ *
+ * +------+
+ * |reader| RING BUFFER
+ * |page |
+ * +------+ +---+ +---+ +---+
+ * | |-->| |-->| |
+ * +---+ +---+ +---+
+ * ^ |
+ * | |
+ * +---------------+
+ *
+ *
+ * +------+
+ * |reader| RING BUFFER
+ * |page |------------------v
+ * +------+ +---+ +---+ +---+
+ * | |-->| |-->| |
+ * +---+ +---+ +---+
+ * ^ |
+ * | |
+ * +---------------+
+ *
+ *
+ * +------+
+ * |reader| RING BUFFER
+ * |page |------------------v
+ * +------+ +---+ +---+ +---+
+ * ^ | |-->| |-->| |
+ * | +---+ +---+ +---+
+ * | |
+ * | |
+ * +------------------------------+
+ *
+ *
+ * +------+
+ * |buffer| RING BUFFER
+ * |page |------------------v
+ * +------+ +---+ +---+ +---+
+ * ^ | | | |-->| |
+ * | New +---+ +---+ +---+
+ * | Reader------^ |
+ * | page |
+ * +------------------------------+
+ *
+ *
+ * After we make this swap, the reader can hand this page off to the splice
+ * code and be done with it. It can even allocate a new page if it needs to
+ * and swap that into the ring buffer.
+ *
+ * We will be using cmpxchg soon to make all this lockless.
+ *
+ */
+
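The reader-page scheme sketched in the comment above can be modelled in a few lines of ordinary C. This toy (names and sizes invented, no locking, no commit bookkeeping) only illustrates the central trick: the reader owns one spare page and trades it for the writer's head page, so data is handed over by swapping pointers rather than copying.

#include <stdio.h>

#define NR_PAGES 4

/* Toy "page": just a small array plus a fill count. */
struct toy_page {
	int data[8];
	int len;
};

struct toy_ring {
	struct toy_page *ring[NR_PAGES];	/* writer's circular list of pages */
	struct toy_page *reader;		/* page currently owned by the reader */
	int head;				/* next ring slot the reader will take */
};

/*
 * Trade the reader's (already consumed) page for the current head page.
 * Afterwards the reader holds a page the writer will not touch again, and
 * the writer got an empty page back in its place.
 */
static void toy_swap_reader_page(struct toy_ring *r)
{
	struct toy_page *consumed = r->reader;

	consumed->len = 0;			/* recycle the drained page */
	r->reader = r->ring[r->head];		/* reader takes the head page */
	r->ring[r->head] = consumed;		/* writer gets the empty one */
	r->head = (r->head + 1) % NR_PAGES;
}

int main(void)
{
	static struct toy_page pages[NR_PAGES + 1];	/* ring pages + one spare */
	struct toy_ring r = { .reader = &pages[NR_PAGES], .head = 0 };
	int i, j;

	for (i = 0; i < NR_PAGES; i++) {
		r.ring[i] = &pages[i];
		r.ring[i]->data[0] = 100 + i;	/* pretend the writer filled it */
		r.ring[i]->len = 1;
	}

	for (i = 0; i < NR_PAGES; i++) {	/* reader drains page by page */
		toy_swap_reader_page(&r);
		for (j = 0; j < r.reader->len; j++)
			printf("read %d\n", r.reader->data[j]);
	}
	return 0;
}
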
+/*
* A fast way to enable or disable all ring buffers is to
* call tracing_on or tracing_off. Turning off the ring buffers
* prevents all ring buffers from being recorded to.
@@ -57,7 +128,9 @@ enum {
RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
};
-static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
* tracing_on - enable all tracing buffers
@@ -89,59 +162,92 @@ EXPORT_SYMBOL_GPL(tracing_off);
* tracing_off_permanent - permanently disable ring buffers
*
* This function, once called, will disable all ring buffers
- * permanenty.
+ * permanently.
*/
void tracing_off_permanent(void)
{
set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
+/**
+ * tracing_is_on - show the state of the ring buffers (enabled or disabled)
+ */
+int tracing_is_on(void)
+{
+ return ring_buffer_flags == RB_BUFFERS_ON;
+}
+EXPORT_SYMBOL_GPL(tracing_is_on);
+
#include "trace.h"
-/* Up this if you want to test the TIME_EXTENTS and normalization */
-#define DEBUG_SHIFT 0
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
+#define RB_ALIGNMENT 4U
+#define RB_MAX_SMALL_DATA 28
+
+enum {
+ RB_LEN_TIME_EXTEND = 8,
+ RB_LEN_TIME_STAMP = 16,
+};
-/* FIXME!!! */
-u64 ring_buffer_time_stamp(int cpu)
+static inline int rb_null_event(struct ring_buffer_event *event)
{
- u64 time;
+ return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
+}
- preempt_disable_notrace();
- /* shift to debug/test normalization and TIME_EXTENTS */
- time = sched_clock() << DEBUG_SHIFT;
- preempt_enable_no_resched_notrace();
+static inline int rb_discarded_event(struct ring_buffer_event *event)
+{
+ return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
+}
- return time;
+static void rb_event_set_padding(struct ring_buffer_event *event)
+{
+ event->type = RINGBUF_TYPE_PADDING;
+ event->time_delta = 0;
}
-EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
+/**
+ * ring_buffer_event_discard - discard an event in the ring buffer
+ * @buffer: the ring buffer
+ * @event: the event to discard
+ *
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * Note, it is up to the user to be careful with this, and protect
+ * against races. If the user discards an event that has been consumed
+ * it is possible that it could corrupt the ring buffer.
+ */
+void ring_buffer_event_discard(struct ring_buffer_event *event)
{
- /* Just stupid testing the normalize function and deltas */
- *ts >>= DEBUG_SHIFT;
+ event->type = RINGBUF_TYPE_PADDING;
+ /* time delta must be non zero */
+ if (!event->time_delta)
+ event->time_delta = 1;
}
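
A caller-side sketch may help picture how the kernel-doc above is meant to be used. Nothing below comes from the patch itself: struct my_entry and event_is_unwanted() are hypothetical, and the sketch assumes the reserved event is still committed after being discarded, so the reserved space stays accounted for and readers simply skip it as padding.

#include <linux/ring_buffer.h>

struct my_entry {			/* hypothetical payload layout */
	int value;
};

static int event_is_unwanted(struct my_entry *entry)
{
	return entry->value < 0;	/* hypothetical filtering condition */
}

static void example_write(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;		/* buffer full or recording disabled */

	entry = ring_buffer_event_data(event);
	entry->value = value;

	if (event_is_unwanted(entry))
		ring_buffer_event_discard(event);	/* readers will never see it */

	ring_buffer_unlock_commit(buffer, event);
}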
-EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
-#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
-#define RB_ALIGNMENT_SHIFT 2
-#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
-#define RB_MAX_SMALL_DATA 28
+static unsigned
+rb_event_data_length(struct ring_buffer_event *event)
+{
+ unsigned length;
-enum {
- RB_LEN_TIME_EXTEND = 8,
- RB_LEN_TIME_STAMP = 16,
-};
+ if (event->len)
+ length = event->len * RB_ALIGNMENT;
+ else
+ length = event->array[0];
+ return length + RB_EVNT_HDR_SIZE;
+}
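
Two concrete numbers make the encoding that rb_event_data_length() decodes above easier to follow: small payloads are stored as a count of RB_ALIGNMENT-sized words in event->len, while larger ones leave len at 0 and keep the byte length in array[0]. The snippet below is a userspace restatement of that decode step with made-up values; HDR_SIZE stands in for RB_EVNT_HDR_SIZE and is assumed here to be 4 bytes.

#include <stdio.h>

#define RB_ALIGNMENT	4U	/* mirrors the definition in the patch */
#define HDR_SIZE	4U	/* stand-in for RB_EVNT_HDR_SIZE (assumed value) */

/* Same rule as rb_event_data_length(): len counts 4-byte words unless it is
 * zero, in which case array[0] carries the byte length directly. */
static unsigned decode_length(unsigned len_field, unsigned array0)
{
	unsigned length = len_field ? len_field * RB_ALIGNMENT : array0;

	return length + HDR_SIZE;
}

int main(void)
{
	/* a 10-byte payload rounds up to 3 words, so len_field is 3 -> 16 */
	printf("small event: %u bytes total\n", decode_length(3, 0));

	/* a payload above RB_MAX_SMALL_DATA keeps its byte length (here 100)
	 * in array[0] and sets len_field to 0 -> 104 */
	printf("large event: %u bytes total\n", decode_length(0, 100));
	return 0;
}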
/* inline for ring buffer fast paths */
-static inline unsigned
+static unsigned
rb_event_length(struct ring_buffer_event *event)
{
- unsigned length;
-
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- /* undefined */
- return -1;
+ if (rb_null_event(event))
+ /* undefined */
+ return -1;
+ return rb_event_data_length(event);
case RINGBUF_TYPE_TIME_EXTEND:
return RB_LEN_TIME_EXTEND;
@@ -150,11 +256,7 @@ rb_event_length(struct ring_buffer_event *event)
return RB_LEN_TIME_STAMP;
case RINGBUF_TYPE_DATA:
- if (event->len)
- length = event->len << RB_ALIGNMENT_SHIFT;
- else
- length = event->array[0];
- return length + RB_EVNT_HDR_SIZE;
+ return rb_event_data_length(event);
default:
BUG();
}
@@ -179,7 +281,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
-static inline void *
+static void *
rb_event_data(struct ring_buffer_event *event)
{
BUG_ON(event->type != RINGBUF_TYPE_DATA);
@@ -209,7 +311,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
struct buffer_data_page {
u64 time_stamp; /* page time stamp */
- local_t commit; /* write commited index */
+ local_t commit; /* write committed index */
unsigned char data[]; /* data of buffer page */
};
@@ -225,14 +327,25 @@ static void rb_init_page(struct buffer_data_page *bpage)
local_set(&bpage->commit, 0);
}
+/**
+ * ring_buffer_page_len - the size of data on the page.
+ * @page: The page to read
+ *
+ * Returns the amount of data on the page, including buffer page header.
+ */
+size_t ring_buffer_page_len(void *page)
+{
+ return local_read(&((struct buffer_data_page *)page)->commit)
+ + BUF_PAGE_HDR_SIZE;
+}
+
/*
* Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
* this issue out.
*/
-static inline void free_buffer_page(struct buffer_page *bpage)
+static void free_buffer_page(struct buffer_page *bpage)
{
- if (bpage->page)
- free_page((unsigned long)bpage->page);
+ free_page((unsigned long)bpage->page);
kfree(bpage);
}
@@ -246,7 +359,7 @@ static inline int test_time_stamp(u64 delta)
return 0;
}
-#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
+#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
/*
* head_page == tail_page && head == tail then buffer is empty.
@@ -260,7 +373,7 @@ struct ring_buffer_per_cpu {
struct list_head pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
- struct buffer_page *commit_page; /* commited pages */
+ struct buffer_page *commit_page; /* committed pages */
struct buffer_page *reader_page;
unsigned long overrun;
unsigned long entries;
@@ -273,12 +386,17 @@ struct ring_buffer {
unsigned pages;
unsigned flags;
int cpus;
- cpumask_var_t cpumask;
atomic_t record_disabled;
+ cpumask_var_t cpumask;
struct mutex mutex;
struct ring_buffer_per_cpu **buffers;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ struct notifier_block cpu_notify;
+#endif
+ u64 (*clock)(void);
};
struct ring_buffer_iter {
@@ -299,11 +417,35 @@ struct ring_buffer_iter {
_____ret; \
})
+/* Up this if you want to test the TIME_EXTENTS and normalization */
+#define DEBUG_SHIFT 0
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+{
+ u64 time;
+
+ preempt_disable_notrace();
+ /* shift to debug/test normalization and TIME_EXTENTS */
+ time = buffer->clock() << DEBUG_SHIFT;
+ preempt_enable_no_resched_notrace();
+
+ return time;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
+
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+ int cpu, u64 *ts)
+{
+ /* Just stupid testing the normalize function and deltas */
+ *ts >>= DEBUG_SHIFT;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
+
/**
* check_pages - integrity check of buffer pages
* @cpu_buffer: CPU buffer with pages to test
*
- * As a safty measure we check to make sure the data pages have not
+ * As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
@@ -421,7 +563,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head = &cpu_buffer->pages;
struct buffer_page *bpage, *tmp;
- list_del_init(&cpu_buffer->reader_page->list);
free_buffer_page(cpu_buffer->reader_page);
list_for_each_entry_safe(bpage, tmp, head, list) {
@@ -437,6 +578,11 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
*/
extern int ring_buffer_page_too_big(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
+#endif
+
/**
* ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
@@ -469,12 +615,23 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
+ buffer->clock = trace_clock_local;
/* need at least two pages */
if (buffer->pages == 1)
buffer->pages++;
+ /*
+ * If CPU hotplug is not configured and the ring buffer is allocated
+ * in an early initcall, it will not be notified of secondary cpus.
+ * In that case, we need to allocate for all possible cpus.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ get_online_cpus();
+ cpumask_copy(buffer->cpumask, cpu_online_mask);
+#else
cpumask_copy(buffer->cpumask, cpu_possible_mask);
+#endif
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
@@ -490,6 +647,13 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
goto fail_free_buffers;
}
+#ifdef CONFIG_HOTPLUG_CPU
+ buffer->cpu_notify.notifier_call = rb_cpu_notify;
+ buffer->cpu_notify.priority = 0;
+ register_cpu_notifier(&buffer->cpu_notify);
+#endif
+
+ put_online_cpus();
mutex_init(&buffer->mutex);
return buffer;
@@ -503,6 +667,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
fail_free_cpumask:
free_cpumask_var(buffer->cpumask);
+ put_online_cpus();
fail_free_buffer:
kfree(buffer);
@@ -519,15 +684,29 @@ ring_buffer_free(struct ring_buffer *buffer)
{
int cpu;
+ get_online_cpus();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&buffer->cpu_notify);
+#endif
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
+ put_online_cpus();
+
free_cpumask_var(buffer->cpumask);
kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+ u64 (*clock)(void))
+{
+ buffer->clock = clock;
+}
+
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void
@@ -627,16 +806,15 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
return size;
mutex_lock(&buffer->mutex);
+ get_online_cpus();
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
if (size < buffer_size) {
/* easy case, just free pages */
- if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
+ goto out_fail;
rm_pages = buffer->pages - nr_pages;
@@ -655,10 +833,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
* add these pages to the cpu_buffers. Otherwise we just free
* them all and return -ENOMEM;
*/
- if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
+ goto out_fail;
new_pages = nr_pages - buffer->pages;
@@ -683,13 +859,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
rb_insert_pages(cpu_buffer, &pages, new_pages);
}
- if (RB_WARN_ON(buffer, !list_empty(&pages))) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, !list_empty(&pages)))
+ goto out_fail;
out:
buffer->pages = nr_pages;
+ put_online_cpus();
mutex_unlock(&buffer->mutex);
return size;
@@ -699,15 +874,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
+ put_online_cpus();
mutex_unlock(&buffer->mutex);
return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(ring_buffer_resize);
-static inline int rb_null_event(struct ring_buffer_event *event)
-{
- return event->type == RINGBUF_TYPE_PADDING;
+ /*
+ * Something went totally wrong, and we are too paranoid
+ * to even clean up the mess.
+ */
+ out_fail:
+ put_online_cpus();
+ mutex_unlock(&buffer->mutex);
+ return -1;
}
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
@@ -811,7 +991,7 @@ rb_event_index(struct ring_buffer_event *event)
return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}
-static inline int
+static int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
@@ -825,7 +1005,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
rb_commit_index(cpu_buffer) == index;
}
-static inline void
+static void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
@@ -850,7 +1030,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
local_set(&cpu_buffer->commit_page->page->commit, index);
}
-static inline void
+static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
/*
@@ -896,7 +1076,7 @@ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read = 0;
}
-static inline void rb_inc_iter(struct ring_buffer_iter *iter)
+static void rb_inc_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -926,7 +1106,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter)
* and with this, we can determine what to place into the
* data field.
*/
-static inline void
+static void
rb_update_event(struct ring_buffer_event *event,
unsigned type, unsigned length)
{
@@ -938,15 +1118,11 @@ rb_update_event(struct ring_buffer_event *event,
break;
case RINGBUF_TYPE_TIME_EXTEND:
- event->len =
- (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
- >> RB_ALIGNMENT_SHIFT;
+ event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
break;
case RINGBUF_TYPE_TIME_STAMP:
- event->len =
- (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
- >> RB_ALIGNMENT_SHIFT;
+ event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
break;
case RINGBUF_TYPE_DATA:
@@ -955,16 +1131,14 @@ rb_update_event(struct ring_buffer_event *event,
event->len = 0;
event->array[0] = length;
} else
- event->len =
- (length + (RB_ALIGNMENT-1))
- >> RB_ALIGNMENT_SHIFT;
+ event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
break;
default:
BUG();
}
}
-static inline unsigned rb_calculate_event_length(unsigned length)
+static unsigned rb_calculate_event_length(unsigned length)
{
struct ring_buffer_event event; /* Used only for sizeof array */
@@ -990,6 +1164,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer *buffer = cpu_buffer->buffer;
struct ring_buffer_event *event;
unsigned long flags;
+ bool lock_taken = false;
commit_page = cpu_buffer->commit_page;
/* we just need to protect against interrupts */
@@ -1003,7 +1178,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *next_page = tail_page;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ /*
+ * Since the write to the buffer is still not
+ * fully lockless, we must be careful with NMIs.
+ * The locks in the writers are taken when a write
+ * crosses to a new page. The locks protect against
+ * races with the readers (this will soon be fixed
+ * with a lockless solution).
+ *
+ * Because we can not protect against NMIs, and we
+ * want to keep traces reentrant, we need to manage
+ * what happens when we are in an NMI.
+ *
+ * NMIs can happen after we take the lock.
+ * If we are in an NMI, only take the lock
+ * if it is not already taken. Otherwise
+ * simply fail.
+ */
+ if (unlikely(in_nmi())) {
+ if (!__raw_spin_trylock(&cpu_buffer->lock))
+ goto out_reset;
+ } else
+ __raw_spin_lock(&cpu_buffer->lock);
+
+ lock_taken = true;
rb_inc_page(cpu_buffer, &next_page);
@@ -1012,7 +1210,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* we grabbed the lock before incrementing */
if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
- goto out_unlock;
+ goto out_reset;
/*
* If for some reason, we had an interrupt storm that made
@@ -1021,12 +1219,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
*/
if (unlikely(next_page == commit_page)) {
WARN_ON_ONCE(1);
- goto out_unlock;
+ goto out_reset;
}
if (next_page == head_page) {
if (!(buffer->flags & RB_FL_OVERWRITE))
- goto out_unlock;
+ goto out_reset;
/* tail_page has not moved yet? */
if (tail_page == cpu_buffer->tail_page) {
@@ -1050,7 +1248,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page = next_page;
/* reread the time stamp */
- *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+ *ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
cpu_buffer->tail_page->page->time_stamp = *ts;
}
@@ -1060,7 +1258,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
- event->type = RINGBUF_TYPE_PADDING;
+ rb_event_set_padding(event);
}
if (tail <= BUF_PAGE_SIZE)
@@ -1100,12 +1298,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return event;
- out_unlock:
+ out_reset:
/* reset write */
if (tail <= BUF_PAGE_SIZE)
local_set(&tail_page->write, tail);
- __raw_spin_unlock(&cpu_buffer->lock);
+ if (likely(lock_taken))
+ __raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return NULL;
}
@@ -1192,7 +1391,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
return NULL;
- ts = ring_buffer_time_stamp(cpu_buffer->cpu);
+ ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
/*
* Only the first commit can update the timestamp.
@@ -1265,7 +1464,6 @@ static DEFINE_PER_CPU(int, rb_need_resched);
* ring_buffer_lock_reserve - reserve a part of the buffer
* @buffer: the ring buffer to reserve from
* @length: the length of the data to reserve (excluding event header)
- * @flags: a pointer to save the interrupt flags
*
 * Returns a reserved event on the ring buffer to copy directly to.
* The user of this interface will need to get the body to write into
@@ -1278,9 +1476,7 @@ static DEFINE_PER_CPU(int, rb_need_resched);
* If NULL is returned, then nothing has been allocated or locked.
*/
struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
- unsigned long length,
- unsigned long *flags)
+ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -1347,15 +1543,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 * ring_buffer_unlock_commit - commit a reserved event
* @buffer: The buffer to commit to
* @event: The event pointer to commit.
- * @flags: the interrupt flags received from ring_buffer_lock_reserve.
*
* This commits the data to the ring buffer, and releases any locks held.
*
* Must be paired with ring_buffer_lock_reserve.
*/
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags)
+ struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
int cpu = raw_smp_processor_id();
@@ -1438,7 +1632,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
-static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
+static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = cpu_buffer->reader_page;
struct buffer_page *head = cpu_buffer->head_page;
@@ -1528,12 +1722,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
- return cpu_buffer->entries;
+ ret = cpu_buffer->entries;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
@@ -1545,12 +1742,15 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
- return cpu_buffer->overrun;
+ ret = cpu_buffer->overrun;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
@@ -1627,9 +1827,14 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
*/
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
- struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ if (!iter)
+ return;
+
+ cpu_buffer = iter->cpu_buffer;
+
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
@@ -1803,7 +2008,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
event = rb_reader_event(cpu_buffer);
- if (event->type == RINGBUF_TYPE_DATA)
+ if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
cpu_buffer->entries--;
rb_update_read_stamp(cpu_buffer, event);
@@ -1864,9 +2069,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
struct buffer_page *reader;
int nr_loops = 0;
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
-
cpu_buffer = buffer->buffers[cpu];
again:
@@ -1889,9 +2091,18 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- RB_WARN_ON(cpu_buffer, 1);
+ if (rb_null_event(event))
+ RB_WARN_ON(cpu_buffer, 1);
+ /*
+ * Because the writer could be discarding every
+ * event it creates (which would probably be bad),
+ * going back to "again" here could mean we never
+ * catch up, triggering the warn on or locking up
+ * the box. Return the padding instead; the caller
+ * will release the current locks and try again.
+ */
rb_advance_reader(cpu_buffer);
- return NULL;
+ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
@@ -1906,7 +2117,8 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = cpu_buffer->read_stamp + event->time_delta;
- ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+ ring_buffer_normalize_time_stamp(buffer,
+ cpu_buffer->cpu, ts);
}
return event;
@@ -1951,8 +2163,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
switch (event->type) {
case RINGBUF_TYPE_PADDING:
- rb_inc_iter(iter);
- goto again;
+ if (rb_null_event(event)) {
+ rb_inc_iter(iter);
+ goto again;
+ }
+ rb_advance_iter(iter);
+ return event;
case RINGBUF_TYPE_TIME_EXTEND:
/* Internal data, OK to advance */
@@ -1967,7 +2183,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
case RINGBUF_TYPE_DATA:
if (ts) {
*ts = iter->read_stamp + event->time_delta;
- ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
+ ring_buffer_normalize_time_stamp(buffer,
+ cpu_buffer->cpu, ts);
}
return event;
@@ -1995,10 +2212,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
struct ring_buffer_event *event;
unsigned long flags;
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return NULL;
+
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
@@ -2017,10 +2243,16 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *event;
unsigned long flags;
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
@@ -2035,24 +2267,37 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct ring_buffer_event *event;
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_event *event = NULL;
unsigned long flags;
+ again:
+ /* might be called in atomic */
+ preempt_disable();
+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
+ goto out;
+ cpu_buffer = buffer->buffers[cpu];
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
if (!event)
- goto out;
+ goto out_unlock;
rb_advance_reader(cpu_buffer);
- out:
+ out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ preempt_enable();
+
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
@@ -2131,6 +2376,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
+ again:
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_iter_peek(iter, ts);
if (!event)
@@ -2140,6 +2386,11 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (event && event->type == RINGBUF_TYPE_PADDING) {
+ cpu_relax();
+ goto again;
+ }
+
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -2232,6 +2483,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
if (!rb_per_cpu_empty(cpu_buffer))
return 0;
}
+
return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
@@ -2244,12 +2496,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
cpu_buffer = buffer->buffers[cpu];
- return rb_per_cpu_empty(cpu_buffer);
+ ret = rb_per_cpu_empty(cpu_buffer);
+
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
@@ -2268,18 +2524,36 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
{
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
+ int ret = -EINVAL;
if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
!cpumask_test_cpu(cpu, buffer_b->cpumask))
- return -EINVAL;
+ goto out;
/* At least make sure the two buffers are somewhat the same */
if (buffer_a->pages != buffer_b->pages)
- return -EINVAL;
+ goto out;
+
+ ret = -EAGAIN;
+
+ if (ring_buffer_flags != RB_BUFFERS_ON)
+ goto out;
+
+ if (atomic_read(&buffer_a->record_disabled))
+ goto out;
+
+ if (atomic_read(&buffer_b->record_disabled))
+ goto out;
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
+ if (atomic_read(&cpu_buffer_a->record_disabled))
+ goto out;
+
+ if (atomic_read(&cpu_buffer_b->record_disabled))
+ goto out;
+
/*
* We can't do a synchronize_sched here because this
* function can be called in atomic context.
@@ -2298,18 +2572,21 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
atomic_dec(&cpu_buffer_a->record_disabled);
atomic_dec(&cpu_buffer_b->record_disabled);
- return 0;
+ ret = 0;
+out:
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
- struct buffer_data_page *bpage)
+ struct buffer_data_page *bpage,
+ unsigned int offset)
{
struct ring_buffer_event *event;
unsigned long head;
__raw_spin_lock(&cpu_buffer->lock);
- for (head = 0; head < local_read(&bpage->commit);
+ for (head = offset; head < local_read(&bpage->commit);
head += rb_event_length(event)) {
event = __rb_data_page_index(bpage, head);
@@ -2340,8 +2617,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
*/
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
- unsigned long addr;
struct buffer_data_page *bpage;
+ unsigned long addr;
addr = __get_free_page(GFP_KERNEL);
if (!addr)
@@ -2349,6 +2626,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
bpage = (void *)addr;
+ rb_init_page(bpage);
+
return bpage;
}
@@ -2368,6 +2647,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* ring_buffer_read_page - extract a page from the ring buffer
* @buffer: buffer to extract from
* @data_page: the page to use allocated from ring_buffer_alloc_read_page
+ * @len: amount to extract
* @cpu: the cpu of the buffer to extract
* @full: should the extraction only happen when the page is full.
*
@@ -2377,12 +2657,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* to swap with a page in the ring buffer.
*
* for example:
- * rpage = ring_buffer_alloc_page(buffer);
+ * rpage = ring_buffer_alloc_read_page(buffer);
* if (!rpage)
* return error;
- * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
- * if (ret)
- * process_page(rpage);
+ * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+ * if (ret >= 0)
+ * process_page(rpage, ret);
*
* When @full is set, the function will not return true unless
* the writer is off the reader page.
@@ -2393,72 +2673,118 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
* responsible for that.
*
* Returns:
- * 1 if data has been transferred
- * 0 if no data has been transferred.
+ * >=0 if data has been transferred, returns the offset of consumed data.
+ * <0 if no data has been transferred.
*/
int ring_buffer_read_page(struct ring_buffer *buffer,
- void **data_page, int cpu, int full)
+ void **data_page, size_t len, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
struct buffer_data_page *bpage;
+ struct buffer_page *reader;
unsigned long flags;
- int ret = 0;
+ unsigned int commit;
+ unsigned int read;
+ u64 save_timestamp;
+ int ret = -1;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ goto out;
+
+ /*
+ * If len is not big enough to hold the page header, then
+ * we can not copy anything.
+ */
+ if (len <= BUF_PAGE_HDR_SIZE)
+ goto out;
+
+ len -= BUF_PAGE_HDR_SIZE;
if (!data_page)
- return 0;
+ goto out;
bpage = *data_page;
if (!bpage)
- return 0;
+ goto out;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- /*
- * rb_buffer_peek will get the next ring buffer if
- * the current reader page is empty.
- */
- event = rb_buffer_peek(buffer, cpu, NULL);
- if (!event)
- goto out;
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
+ goto out_unlock;
+
+ event = rb_reader_event(cpu_buffer);
+
+ read = reader->read;
+ commit = rb_page_commit(reader);
- /* check for data */
- if (!local_read(&cpu_buffer->reader_page->page->commit))
- goto out;
/*
- * If the writer is already off of the read page, then simply
- * switch the read page with the given page. Otherwise
- * we need to copy the data from the reader to the writer.
+ * If this page has been partially read or
+ * if len is not big enough to read the rest of the page or
+ * a writer is still on the page, then
+ * we must copy the data from the page to the buffer.
+ * Otherwise, we can simply swap the page with the one passed in.
*/
- if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
- unsigned int read = cpu_buffer->reader_page->read;
+ if (read || (len < (commit - read)) ||
+ cpu_buffer->reader_page == cpu_buffer->commit_page) {
+ struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
+ unsigned int rpos = read;
+ unsigned int pos = 0;
+ unsigned int size;
if (full)
- goto out;
- /* The writer is still on the reader page, we must copy */
- bpage = cpu_buffer->reader_page->page;
- memcpy(bpage->data,
- cpu_buffer->reader_page->page->data + read,
- local_read(&bpage->commit) - read);
+ goto out_unlock;
+
+ if (len > (commit - read))
+ len = (commit - read);
+
+ size = rb_event_length(event);
+
+ if (len < size)
+ goto out_unlock;
- /* consume what was read */
- cpu_buffer->reader_page += read;
+ /* save the current timestamp, since the user will need it */
+ save_timestamp = cpu_buffer->read_stamp;
+ /* Need to copy one event at a time */
+ do {
+ memcpy(bpage->data + pos, rpage->data + rpos, size);
+
+ len -= size;
+
+ rb_advance_reader(cpu_buffer);
+ rpos = reader->read;
+ pos += size;
+
+ event = rb_reader_event(cpu_buffer);
+ size = rb_event_length(event);
+ } while (len > size);
+
+ /* update bpage */
+ local_set(&bpage->commit, pos);
+ bpage->time_stamp = save_timestamp;
+
+ /* we copied everything to the beginning */
+ read = 0;
} else {
/* swap the pages */
rb_init_page(bpage);
- bpage = cpu_buffer->reader_page->page;
- cpu_buffer->reader_page->page = *data_page;
- cpu_buffer->reader_page->read = 0;
+ bpage = reader->page;
+ reader->page = *data_page;
+ local_set(&reader->write, 0);
+ reader->read = 0;
*data_page = bpage;
+
+ /* update the entry counter */
+ rb_remove_entries(cpu_buffer, bpage, read);
}
- ret = 1;
+ ret = read;
- /* update the entry counter */
- rb_remove_entries(cpu_buffer, bpage);
- out:
+ out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
return ret;
}
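
Putting the pieces of the new ring_buffer_read_page() interface together, a per-cpu drain loop could look roughly like the sketch below. It is not from the patch: process_page() is a hypothetical consumer, and the loop simply relies on the documented return convention (>= 0 gives the offset of the consumed data, < 0 means nothing was transferred).

#include <linux/ring_buffer.h>
#include <linux/mm.h>

/* Hypothetical consumer of one page worth of events. */
static void process_page(void *page, int offset, size_t len)
{
	/* hand the data between @offset and @len to splice, a file, etc. */
}

static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer);
	if (!rpage)
		return;

	for (;;) {
		ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
		if (ret < 0)
			break;		/* no data was transferred */

		/* ret is where the consumed data starts inside the page;
		 * ring_buffer_page_len() gives its total size including the
		 * page header. */
		process_page(rpage, ret, ring_buffer_page_len(rpage));
	}

	ring_buffer_free_read_page(buffer, rpage);
}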
@@ -2466,7 +2792,7 @@ static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- long *p = filp->private_data;
+ unsigned long *p = filp->private_data;
char buf[64];
int r;
@@ -2482,9 +2808,9 @@ static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- long *p = filp->private_data;
+ unsigned long *p = filp->private_data;
char buf[64];
- long val;
+ unsigned long val;
int ret;
if (cnt >= sizeof(buf))
@@ -2509,7 +2835,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static struct file_operations rb_simple_fops = {
+static const struct file_operations rb_simple_fops = {
.open = tracing_open_generic,
.read = rb_simple_read,
.write = rb_simple_write,
@@ -2532,3 +2858,42 @@ static __init int rb_init_debugfs(void)
}
fs_initcall(rb_init_debugfs);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct ring_buffer *buffer =
+ container_of(self, struct ring_buffer, cpu_notify);
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ if (cpu_isset(cpu, *buffer->cpumask))
+ return NOTIFY_OK;
+
+ buffer->buffers[cpu] =
+ rb_allocate_cpu_buffer(buffer, cpu);
+ if (!buffer->buffers[cpu]) {
+ WARN(1, "failed to allocate ring buffer on CPU %ld\n",
+ cpu);
+ return NOTIFY_OK;
+ }
+ smp_wmb();
+ cpu_set(cpu, *buffer->cpumask);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ /*
+ * Do nothing.
+ * If we were to free the buffer, then the user would
+ * lose any trace that was in the buffer.
+ */
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 17bb88d86ac2..a884c09006c4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -11,32 +11,34 @@
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 William Lee Irwin III
*/
+#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
+#include <linux/stacktrace.h>
+#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
+#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
+#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <linux/splice.h>
#include <linux/kdebug.h>
+#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
-#include <linux/kprobes.h>
-#include <linux/writeback.h>
-
-#include <linux/stacktrace.h>
-#include <linux/ring_buffer.h>
-#include <linux/irqflags.h>
#include "trace.h"
+#include "trace_output.h"
#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
@@ -44,14 +46,25 @@ unsigned long __read_mostly tracing_max_latency;
unsigned long __read_mostly tracing_thresh;
/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
+/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occur
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
+/*
+ * If a tracer is running, we do not want to run SELFTEST.
+ */
+static bool __read_mostly tracing_selftest_disabled;
+
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
@@ -73,7 +86,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
-int tracing_disabled = 1;
+static int tracing_disabled = 1;
static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
@@ -91,6 +104,9 @@ static inline void ftrace_enable_cpu(void)
static cpumask_var_t __read_mostly tracing_buffer_mask;
+/* Define which cpu buffers are currently read in trace_pipe */
+static cpumask_var_t tracing_reader_cpumask;
+
#define for_each_tracing_cpu(cpu) \
for_each_cpu(cpu, tracing_buffer_mask)
@@ -109,14 +125,21 @@ static cpumask_var_t __read_mostly tracing_buffer_mask;
*/
int ftrace_dump_on_oops;
-static int tracing_set_tracer(char *buf);
+static int tracing_set_tracer(const char *buf);
+
+#define BOOTUP_TRACER_SIZE 100
+static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+static char *default_bootup_tracer;
static int __init set_ftrace(char *str)
{
- tracing_set_tracer(str);
+ strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+ default_bootup_tracer = bootup_tracer_buf;
+ /* We are using ftrace early, expand it */
+ ring_buffer_expanded = 1;
return 1;
}
-__setup("ftrace", set_ftrace);
+__setup("ftrace=", set_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
@@ -125,21 +148,13 @@ static int __init set_ftrace_dump_on_oops(char *str)
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
-long
-ns2usecs(cycle_t nsec)
+unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
do_div(nsec, 1000);
return nsec;
}
-cycle_t ftrace_now(int cpu)
-{
- u64 ts = ring_buffer_time_stamp(cpu);
- ring_buffer_normalize_time_stamp(cpu, &ts);
- return ts;
-}
-
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
@@ -156,6 +171,20 @@ static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
+cycle_t ftrace_now(int cpu)
+{
+ u64 ts;
+
+ /* Early boot up does not have a buffer yet */
+ if (!global_trace.buffer)
+ return trace_clock_local();
+
+ ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
+ ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+
+ return ts;
+}
+
/*
* The max_tr is used to snapshot the global_trace when a maximum
* latency is reached. Some tracers will use this to store a maximum
@@ -186,9 +215,6 @@ int tracing_is_enabled(void)
return tracer_enabled;
}
-/* function tracing enabled */
-int ftrace_function_enabled;
-
/*
* trace_buf_size is the size in bytes that is allocated
* for a buffer. Note, the number of bytes is always rounded
@@ -229,7 +255,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
- TRACE_ITER_ANNOTATE;
+ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;
/**
* trace_wake_up - wake up tasks waiting for trace input
@@ -280,13 +306,17 @@ static const char *trace_options[] = {
"block",
"stacktrace",
"sched-tree",
- "ftrace_printk",
+ "trace_printk",
"ftrace_preempt",
"branch",
"annotate",
"userstacktrace",
"sym-userobj",
"printk-msg-only",
+ "context-info",
+ "latency-format",
+ "global-clock",
+ "sleep-time",
NULL
};
@@ -326,146 +356,37 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
data->rt_priority = tsk->rt_priority;
/* record this tasks comm */
- tracing_record_cmdline(current);
+ tracing_record_cmdline(tsk);
}
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
- int len = (PAGE_SIZE - 1) - s->len;
- va_list ap;
+ int len;
int ret;
- if (!len)
- return 0;
-
- va_start(ap, fmt);
- ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
- va_end(ap);
-
- /* If we can't write it all, don't bother writing anything */
- if (ret >= len)
- return 0;
-
- s->len += ret;
-
- return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-static int
-trace_seq_puts(struct trace_seq *s, const char *str)
-{
- int len = strlen(str);
-
- if (len > ((PAGE_SIZE - 1) - s->len))
- return 0;
-
- memcpy(s->buffer + s->len, str, len);
- s->len += len;
-
- return len;
-}
-
-static int
-trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
- if (s->len >= (PAGE_SIZE - 1))
- return 0;
-
- s->buffer[s->len++] = c;
-
- return 1;
-}
-
-static int
-trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
-{
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (!cnt)
return 0;
- memcpy(s->buffer + s->len, mem, len);
- s->len += len;
-
- return len;
-}
-
-#define MAX_MEMHEX_BYTES 8
-#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
-
-static int
-trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
-{
- unsigned char hex[HEX_CHARS];
- unsigned char *data = mem;
- int i, j;
-
-#ifdef __BIG_ENDIAN
- for (i = 0, j = 0; i < len; i++) {
-#else
- for (i = len-1, j = 0; i >= 0; i--) {
-#endif
- hex[j++] = hex_asc_hi(data[i]);
- hex[j++] = hex_asc_lo(data[i]);
- }
- hex[j++] = ' ';
-
- return trace_seq_putmem(s, hex, j);
-}
-
-static int
-trace_seq_path(struct trace_seq *s, struct path *path)
-{
- unsigned char *p;
+ if (s->len <= s->readpos)
+ return -EBUSY;
- if (s->len >= (PAGE_SIZE - 1))
- return 0;
- p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
- if (!IS_ERR(p)) {
- p = mangle_path(s->buffer + s->len, p, "\n");
- if (p) {
- s->len = p - s->buffer;
- return 1;
- }
- } else {
- s->buffer[s->len++] = '?';
- return 1;
- }
+ len = s->len - s->readpos;
+ if (cnt > len)
+ cnt = len;
+ ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+ if (ret == cnt)
+ return -EFAULT;
- return 0;
-}
+ cnt -= ret;
-static void
-trace_seq_reset(struct trace_seq *s)
-{
- s->len = 0;
- s->readpos = 0;
+ s->readpos += cnt;
+ return cnt;
}
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
- int ret;
+ void *ret;
if (s->len <= s->readpos)
return -EBUSY;
@@ -473,11 +394,11 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
len = s->len - s->readpos;
if (cnt > len)
cnt = len;
- ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
- if (ret)
+ ret = memcpy(buf, s->buffer + s->readpos, cnt);
+ if (!ret)
return -EFAULT;
- s->readpos += len;
+ s->readpos += cnt;
return cnt;
}
@@ -489,7 +410,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
s->buffer[len] = 0;
seq_puts(m, s->buffer);
- trace_seq_reset(s);
+ trace_seq_init(s);
}
/**
@@ -543,7 +464,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
ftrace_enable_cpu();
- WARN_ON_ONCE(ret);
+ WARN_ON_ONCE(ret && ret != -EAGAIN);
__update_max_tr(tr, tsk, cpu);
__raw_spin_unlock(&ftrace_max_lock);
@@ -556,6 +477,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
* Register a new plugin tracer.
*/
int register_tracer(struct tracer *type)
+__releases(kernel_lock)
+__acquires(kernel_lock)
{
struct tracer *t;
int len;
@@ -594,9 +517,12 @@ int register_tracer(struct tracer *type)
else
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
+ if (!type->wait_pipe)
+ type->wait_pipe = default_wait_pipe;
+
#ifdef CONFIG_FTRACE_STARTUP_TEST
- if (type->selftest) {
+ if (type->selftest && !tracing_selftest_disabled) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
int i;
@@ -638,8 +564,26 @@ int register_tracer(struct tracer *type)
out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
- lock_kernel();
+ if (ret || !default_bootup_tracer)
+ goto out_unlock;
+
+ if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+ goto out_unlock;
+
+ printk(KERN_INFO "Starting tracer '%s'\n", type->name);
+ /* Do we want this tracer to start on bootup? */
+ tracing_set_tracer(type->name);
+ default_bootup_tracer = NULL;
+ /* disable other selftests, since this will break it. */
+ tracing_selftest_disabled = 1;
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+ printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
+ type->name);
+#endif
+
+ out_unlock:
+ lock_kernel();
return ret;
}
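
The bootup path above only stashes the name passed via "ftrace=" and lets register_tracer() start the matching tracer once it shows up. A minimal userspace sketch of that deferred-selection pattern follows; all names in it are hypothetical, not kernel API:

#include <stdio.h>
#include <string.h>

#define NAME_SIZE 100
static char pending_name[NAME_SIZE];   /* plays the role of bootup_tracer_buf */
static int have_pending;               /* plays the role of default_bootup_tracer != NULL */

static void boot_param(const char *str)        /* stands in for set_ftrace() */
{
        strncpy(pending_name, str, NAME_SIZE - 1);
        have_pending = 1;
}

static void register_plugin(const char *name)  /* stands in for register_tracer() */
{
        if (have_pending && strncmp(pending_name, name, NAME_SIZE) == 0) {
                printf("Starting tracer '%s'\n", name);
                have_pending = 0;              /* the request is consumed once */
        }
}

int main(void)
{
        boot_param("function_graph");          /* i.e. booting with ftrace=function_graph */
        register_plugin("nop");                /* registers, but was not requested */
        register_plugin("function_graph");     /* matches: started at registration time */
        return 0;
}
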
@@ -658,6 +602,15 @@ void unregister_tracer(struct tracer *type)
found:
*t = (*t)->next;
+
+ if (type == current_trace && tracer_enabled) {
+ tracer_enabled = 0;
+ tracing_stop();
+ if (current_trace->stop)
+ current_trace->stop(&global_trace);
+ current_trace = &nop_trace;
+ }
+
if (strlen(type->name) != max_tracer_type_len)
goto out;
@@ -689,19 +642,20 @@ void tracing_reset_online_cpus(struct trace_array *tr)
}
#define SAVED_CMDLINES 128
+#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static DEFINE_SPINLOCK(trace_cmdline_lock);
+static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
-atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
- memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
- memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+ memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
+ memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
cmdline_idx = 0;
}
@@ -738,13 +692,12 @@ void tracing_start(void)
return;
spin_lock_irqsave(&tracing_start_lock, flags);
- if (--trace_stop_count)
- goto out;
-
- if (trace_stop_count < 0) {
- /* Someone screwed up their debugging */
- WARN_ON_ONCE(1);
- trace_stop_count = 0;
+ if (--trace_stop_count) {
+ if (trace_stop_count < 0) {
+ /* Someone screwed up their debugging */
+ WARN_ON_ONCE(1);
+ trace_stop_count = 0;
+ }
goto out;
}
@@ -794,8 +747,7 @@ void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
{
- unsigned map;
- unsigned idx;
+ unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return;
@@ -806,17 +758,24 @@ static void trace_save_cmdline(struct task_struct *tsk)
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!spin_trylock(&trace_cmdline_lock))
+ if (!__raw_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
- if (idx >= SAVED_CMDLINES) {
+ if (idx == NO_CMDLINE_MAP) {
idx = (cmdline_idx + 1) % SAVED_CMDLINES;
- map = map_cmdline_to_pid[idx];
- if (map <= PID_MAX_DEFAULT)
- map_pid_to_cmdline[map] = (unsigned)-1;
+ /*
+ * Check whether the cmdline buffer at idx has a pid
+ * mapped. We are going to overwrite that entry so we
+ * need to clear the map_pid_to_cmdline. Otherwise we
+ * would read the new comm for the old pid.
+ */
+ pid = map_cmdline_to_pid[idx];
+ if (pid != NO_CMDLINE_MAP)
+ map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
+ map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
cmdline_idx = idx;
@@ -824,33 +783,37 @@ static void trace_save_cmdline(struct task_struct *tsk)
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- spin_unlock(&trace_cmdline_lock);
+ __raw_spin_unlock(&trace_cmdline_lock);
}
-char *trace_find_cmdline(int pid)
+void trace_find_cmdline(int pid, char comm[])
{
- char *cmdline = "<...>";
unsigned map;
- if (!pid)
- return "<idle>";
+ if (!pid) {
+ strcpy(comm, "<idle>");
+ return;
+ }
- if (pid > PID_MAX_DEFAULT)
- goto out;
+ if (pid > PID_MAX_DEFAULT) {
+ strcpy(comm, "<...>");
+ return;
+ }
+ __raw_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
- if (map >= SAVED_CMDLINES)
- goto out;
-
- cmdline = saved_cmdlines[map];
+ if (map != NO_CMDLINE_MAP)
+ strcpy(comm, saved_cmdlines[map]);
+ else
+ strcpy(comm, "<...>");
- out:
- return cmdline;
+ __raw_spin_unlock(&trace_cmdline_lock);
}
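
The cmdline bookkeeping above keeps a forward map (pid -> slot) and a reverse map (slot -> pid), so a recycled slot can first invalidate the stale forward entry before it is overwritten. A self-contained userspace sketch of the same two-array cache; the names and sizes are illustrative only:

#include <stdio.h>
#include <string.h>

#define MAX_PID   32768
#define SLOTS     128
#define NO_MAP    (~0u)
#define COMM_LEN  16

static unsigned pid_to_slot[MAX_PID];          /* like map_pid_to_cmdline */
static unsigned slot_to_pid[SLOTS];            /* like map_cmdline_to_pid */
static char comms[SLOTS][COMM_LEN];            /* like saved_cmdlines */
static unsigned next_slot;

static void cache_init(void)
{
        memset(pid_to_slot, 0xff, sizeof(pid_to_slot));   /* every entry becomes NO_MAP */
        memset(slot_to_pid, 0xff, sizeof(slot_to_pid));
}

static void cache_save(unsigned pid, const char *comm)
{
        unsigned slot = pid_to_slot[pid];

        if (slot == NO_MAP) {
                slot = next_slot = (next_slot + 1) % SLOTS;
                /* the slot may hold an older pid: drop its forward mapping first */
                if (slot_to_pid[slot] != NO_MAP)
                        pid_to_slot[slot_to_pid[slot]] = NO_MAP;
                slot_to_pid[slot] = pid;
                pid_to_slot[pid] = slot;
        }
        strncpy(comms[slot], comm, COMM_LEN - 1);
        comms[slot][COMM_LEN - 1] = '\0';
}

static const char *cache_find(unsigned pid)
{
        unsigned slot = pid_to_slot[pid];

        return slot == NO_MAP ? "<...>" : comms[slot];
}

int main(void)
{
        cache_init();
        cache_save(1234, "bash");
        printf("pid 1234 -> %s\n", cache_find(1234));
        printf("pid 4321 -> %s\n", cache_find(4321));
        return 0;
}
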
void tracing_record_cmdline(struct task_struct *tsk)
{
- if (atomic_read(&trace_record_cmdline_disabled))
+ if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
+ !tracing_is_on())
return;
trace_save_cmdline(tsk);
@@ -864,7 +827,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
- entry->tgid = (tsk) ? tsk->tgid : 0;
+ entry->tgid = (tsk) ? tsk->tgid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -876,78 +839,132 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+ unsigned char type,
+ unsigned long len,
+ unsigned long flags, int pc)
+{
+ struct ring_buffer_event *event;
+
+ event = ring_buffer_lock_reserve(tr->buffer, len);
+ if (event != NULL) {
+ struct trace_entry *ent = ring_buffer_event_data(event);
+
+ tracing_generic_entry_update(ent, flags, pc);
+ ent->type = type;
+ }
+
+ return event;
+}
+static void ftrace_trace_stack(struct trace_array *tr,
+ unsigned long flags, int skip, int pc);
+static void ftrace_trace_userstack(struct trace_array *tr,
+ unsigned long flags, int pc);
+
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ int wake)
+{
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ ftrace_trace_stack(tr, flags, 6, pc);
+ ftrace_trace_userstack(tr, flags, pc);
+
+ if (wake)
+ trace_wake_up();
+}
+
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ __trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+}
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+ unsigned long flags, int pc)
+{
+ return trace_buffer_lock_reserve(&global_trace,
+ type, len, flags, pc);
+}
+
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc)
+{
+ return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
+}
+
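
The helpers above centralize the reserve/fill/commit discipline: common header fields are written when the event is reserved, the caller fills only its payload, and readers see nothing until the commit. A rough userspace sketch of that flow over a flat byte buffer (illustrative names, not the ring-buffer API):

#include <stdio.h>
#include <string.h>

struct entry_hdr { unsigned char type; int pid; };

static unsigned char buf[4096];
static size_t reserved, committed;

static void *buffer_reserve(unsigned char type, size_t len)
{
        struct entry_hdr *hdr;

        if (reserved + sizeof(*hdr) + len > sizeof(buf))
                return NULL;                     /* like a failed ring-buffer reserve */
        hdr = (struct entry_hdr *)(buf + reserved);
        hdr->type = type;                        /* common fields filled here ... */
        hdr->pid = 42;                           /* ... as tracing_generic_entry_update() does */
        reserved += sizeof(*hdr) + len;
        return hdr + 1;                          /* payload area handed back to the caller */
}

static void buffer_commit(void)
{
        committed = reserved;                    /* readers only ever see committed data */
}

int main(void)
{
        struct { unsigned long ip, parent_ip; } *payload;

        payload = buffer_reserve(1 /* TRACE_FN-like */, sizeof(*payload));
        if (payload) {
                payload->ip = 0xc0ffee;
                payload->parent_ip = 0xbeef;
                buffer_commit();
        }
        printf("committed %zu bytes\n", committed);
        return 0;
}
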
void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_entry *entry;
- unsigned long irq_flags;
/* If we are reading the ring buffer, don't trace */
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
+ flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_FN;
entry->ip = ip;
entry->parent_ip = parent_ip;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
- struct trace_array_cpu *data,
+static int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_graph_ent_entry *entry;
- unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return;
+ return 0;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
+ sizeof(*entry), flags, pc);
if (!event)
- return;
+ return 0;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_ENT;
entry->graph_ent = *trace;
- ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+ ring_buffer_unlock_commit(global_trace.buffer, event);
+
+ return 1;
}
static void __trace_graph_return(struct trace_array *tr,
- struct trace_array_cpu *data,
struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *entry;
- unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
- event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_GRAPH_RET;
entry->ret = *trace;
- ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+ ring_buffer_unlock_commit(global_trace.buffer, event);
}
#endif
@@ -957,31 +974,23 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
int pc)
{
if (likely(!atomic_read(&data->disabled)))
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
}
-static void ftrace_trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
- unsigned long flags,
- int skip, int pc)
+static void __ftrace_trace_stack(struct trace_array *tr,
+ unsigned long flags,
+ int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
- unsigned long irq_flags;
- if (!(trace_flags & TRACE_ITER_STACKTRACE))
- return;
-
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_STACK;
-
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
@@ -990,38 +999,43 @@ static void ftrace_trace_stack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace(&trace);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
+static void ftrace_trace_stack(struct trace_array *tr,
+ unsigned long flags,
+ int skip, int pc)
+{
+ if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ return;
+
+ __ftrace_trace_stack(tr, flags, skip, pc);
+}
+
void __trace_stack(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long flags,
- int skip)
+ int skip, int pc)
{
- ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+ __ftrace_trace_stack(tr, flags, skip, pc);
}
static void ftrace_trace_userstack(struct trace_array *tr,
- struct trace_array_cpu *data,
- unsigned long flags, int pc)
+ unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
- unsigned long irq_flags;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_USER_STACK;
memset(&entry->caller, 0, sizeof(entry->caller));
@@ -1031,70 +1045,58 @@ static void ftrace_trace_userstack(struct trace_array *tr,
trace.entries = entry->caller;
save_stack_trace_user(&trace);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
#endif
}
-void __trace_userstack(struct trace_array *tr,
- struct trace_array_cpu *data,
- unsigned long flags)
+#ifdef UNUSED
+static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
- ftrace_trace_userstack(tr, data, flags, preempt_count());
+ ftrace_trace_userstack(tr, flags, preempt_count());
}
+#endif /* UNUSED */
static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
unsigned long arg1, unsigned long arg2, unsigned long arg3,
int pc)
{
struct ring_buffer_event *event;
- struct trace_array_cpu *data = __data;
struct trace_array *tr = __tr;
struct special_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL,
+ sizeof(*entry), 0, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, pc);
- entry->ent.type = TRACE_SPECIAL;
entry->arg1 = arg1;
entry->arg2 = arg2;
entry->arg3 = arg3;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, irq_flags, 4, pc);
- ftrace_trace_userstack(tr, data, irq_flags, pc);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void
__trace_special(void *__tr, void *__data,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
- ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+ ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}
void
tracing_sched_switch_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_CTX;
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
entry->prev_state = prev->state;
@@ -1102,29 +1104,23 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry->next_prio = next->prio;
entry->next_state = next->state;
entry->next_cpu = task_cpu(next);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, flags, 5, pc);
- ftrace_trace_userstack(tr, data, flags, pc);
+ trace_buffer_unlock_commit(tr, event, flags, pc);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *wakee,
struct task_struct *curr,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- unsigned long irq_flags;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+ sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_WAKE;
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
entry->prev_state = curr->state;
@@ -1132,11 +1128,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_prio = wakee->prio;
entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee);
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
- ftrace_trace_stack(tr, data, flags, 6, pc);
- ftrace_trace_userstack(tr, data, flags, pc);
- trace_wake_up();
+ ring_buffer_unlock_commit(tr->buffer, event);
+ ftrace_trace_stack(tr, flags, 6, pc);
+ ftrace_trace_userstack(tr, flags, pc);
}
void
@@ -1157,66 +1152,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
data = tr->data[cpu];
if (likely(atomic_inc_return(&data->disabled) == 1))
- ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
-
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-}
-
-#ifdef CONFIG_FUNCTION_TRACER
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu, resched;
- int pc;
-
- if (unlikely(!ftrace_function_enabled))
- return;
-
- pc = preempt_count();
- resched = ftrace_preempt_disable();
- local_save_flags(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1))
- trace_function(tr, data, ip, parent_ip, flags, pc);
-
- atomic_dec(&data->disabled);
- ftrace_preempt_enable(resched);
-}
-
-static void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu;
- int pc;
-
- if (unlikely(!ftrace_function_enabled))
- return;
-
- /*
- * Need to use raw, since this must be called before the
- * recursive protection is performed.
- */
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1)) {
- pc = preempt_count();
- trace_function(tr, data, ip, parent_ip, flags, pc);
- }
+ ftrace_trace_special(tr, arg1, arg2, arg3, pc);
atomic_dec(&data->disabled);
local_irq_restore(flags);
@@ -1229,6 +1165,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
struct trace_array_cpu *data;
unsigned long flags;
long disabled;
+ int ret;
int cpu;
int pc;
@@ -1244,15 +1181,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_graph_entry(tr, data, trace, flags, pc);
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else {
+ ret = 0;
}
/* Only do the atomic if it is not already set */
if (!test_tsk_trace_graph(current))
set_tsk_trace_graph(current);
+
atomic_dec(&data->disabled);
local_irq_restore(flags);
- return 1;
+ return ret;
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1270,7 +1210,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_graph_return(tr, data, trace, flags, pc);
+ __trace_graph_return(tr, trace, flags, pc);
}
if (!trace->depth)
clear_tsk_trace_graph(current);
@@ -1279,30 +1219,122 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-static struct ftrace_ops trace_ops __read_mostly =
-{
- .func = function_trace_call,
-};
-void tracing_start_function_trace(void)
+/**
+ * trace_vbprintk - write a binary message to the tracing buffer
+ *
+ */
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
- ftrace_function_enabled = 0;
+ static raw_spinlock_t trace_buf_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static u32 trace_buf[TRACE_BUF_SIZE];
- if (trace_flags & TRACE_ITER_PREEMPTONLY)
- trace_ops.func = function_trace_call_preempt_only;
- else
- trace_ops.func = function_trace_call;
+ struct ring_buffer_event *event;
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ struct bprint_entry *entry;
+ unsigned long flags;
+ int resched;
+ int cpu, len = 0, size, pc;
+
+ if (unlikely(tracing_selftest_running || tracing_disabled))
+ return 0;
+
+ /* Don't pollute graph traces with trace_vprintk internals */
+ pause_graph_tracing();
+
+ pc = preempt_count();
+ resched = ftrace_preempt_disable();
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+
+ if (unlikely(atomic_read(&data->disabled)))
+ goto out;
+
+ /* Lockdep uses trace_printk for lock tracing */
+ local_irq_save(flags);
+ __raw_spin_lock(&trace_buf_lock);
+ len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+ if (len > TRACE_BUF_SIZE || len < 0)
+ goto out_unlock;
+
+ size = sizeof(*entry) + sizeof(u32) * len;
+ event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
+ if (!event)
+ goto out_unlock;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->fmt = fmt;
+
+ memcpy(entry->buf, trace_buf, sizeof(u32) * len);
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+out_unlock:
+ __raw_spin_unlock(&trace_buf_lock);
+ local_irq_restore(flags);
+
+out:
+ ftrace_preempt_enable(resched);
+ unpause_graph_tracing();
- register_ftrace_function(&trace_ops);
- ftrace_function_enabled = 1;
+ return len;
}
+EXPORT_SYMBOL_GPL(trace_vbprintk);
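
trace_vbprintk() above records just the format pointer plus the vbin_printf()-packed arguments and defers the expensive string formatting to read time. A rough userspace illustration of that split, handling ints only and using made-up names throughout:

#include <stdarg.h>
#include <stdio.h>

struct bprint_rec {
        const char *fmt;   /* must point to a literal that outlives the record */
        int args[8];
        int nargs;
};

static void record_bprintf(struct bprint_rec *rec, const char *fmt, int nargs, ...)
{
        va_list ap;
        int i;

        rec->fmt = fmt;
        rec->nargs = nargs;
        va_start(ap, nargs);
        for (i = 0; i < nargs && i < 8; i++)
                rec->args[i] = va_arg(ap, int);   /* cheap at trace time: no formatting */
        va_end(ap);
}

static void expand_bprintf(const struct bprint_rec *rec)
{
        /* formatting deferred to read time; two-int case only, for brevity */
        if (rec->nargs == 2)
                printf(rec->fmt, rec->args[0], rec->args[1]);
}

int main(void)
{
        struct bprint_rec rec;

        record_bprintf(&rec, "irq %d raised %d times\n", 2, 19, 7);
        expand_bprintf(&rec);
        return 0;
}
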
-void tracing_stop_function_trace(void)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
- ftrace_function_enabled = 0;
- unregister_ftrace_function(&trace_ops);
+ static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static char trace_buf[TRACE_BUF_SIZE];
+
+ struct ring_buffer_event *event;
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ int cpu, len = 0, size, pc;
+ struct print_entry *entry;
+ unsigned long irq_flags;
+
+ if (tracing_disabled || tracing_selftest_running)
+ return 0;
+
+ pc = preempt_count();
+ preempt_disable_notrace();
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+
+ if (unlikely(atomic_read(&data->disabled)))
+ goto out;
+
+ pause_graph_tracing();
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&trace_buf_lock);
+ len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+ len = min(len, TRACE_BUF_SIZE-1);
+ trace_buf[len] = 0;
+
+ size = sizeof(*entry) + len + 1;
+ event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+ if (!event)
+ goto out_unlock;
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+
+ memcpy(&entry->buf, trace_buf, len);
+ entry->buf[len] = 0;
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ out_unlock:
+ __raw_spin_unlock(&trace_buf_lock);
+ raw_local_irq_restore(irq_flags);
+ unpause_graph_tracing();
+ out:
+ preempt_enable_notrace();
+
+ return len;
}
-#endif
+EXPORT_SYMBOL_GPL(trace_vprintk);
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
@@ -1345,10 +1377,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
struct ring_buffer *buffer = iter->tr->buffer;
struct trace_entry *ent, *next = NULL;
+ int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
int cpu;
+ /*
+	 * If we are in a per_cpu trace file, don't bother iterating over
+	 * all cpus; peek at that one directly.
+ */
+ if (cpu_file > TRACE_PIPE_ALL_CPU) {
+ if (ring_buffer_empty_cpu(buffer, cpu_file))
+ return NULL;
+ ent = peek_next_entry(iter, cpu_file, ent_ts);
+ if (ent_cpu)
+ *ent_cpu = cpu_file;
+
+ return ent;
+ }
+
for_each_tracing_cpu(cpu) {
if (ring_buffer_empty_cpu(buffer, cpu))
@@ -1376,8 +1423,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
}
/* Find the next real entry, without updating the iterator itself */
-static struct trace_entry *
-find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+ int *ent_cpu, u64 *ent_ts)
{
return __find_next_entry(iter, ent_cpu, ent_ts);
}
@@ -1426,19 +1473,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
return ent;
}
+/*
+ * No locking is necessary here. The worst that can
+ * happen is losing events consumed at the same time
+ * by a trace_pipe reader.
+ * Other than that, we don't risk crashing the ring buffer
+ * because it serializes the readers.
+ *
+ * The current tracer is copied to avoid taking a global
+ * lock all around.
+ */
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
+ static struct tracer *old_tracer;
+ int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;
+ /* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
-
- if (!current_trace || current_trace != iter->trace) {
- mutex_unlock(&trace_types_lock);
- return NULL;
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
}
+ mutex_unlock(&trace_types_lock);
atomic_inc(&trace_record_cmdline_disabled);
@@ -1449,9 +1509,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
ftrace_disable_cpu();
- for_each_tracing_cpu(cpu) {
- ring_buffer_iter_reset(iter->buffer_iter[cpu]);
- }
+ if (cpu_file == TRACE_PIPE_ALL_CPU) {
+ for_each_tracing_cpu(cpu)
+ ring_buffer_iter_reset(iter->buffer_iter[cpu]);
+ } else
+ ring_buffer_iter_reset(iter->buffer_iter[cpu_file]);
+
ftrace_enable_cpu();
@@ -1469,155 +1532,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
static void s_stop(struct seq_file *m, void *p)
{
atomic_dec(&trace_record_cmdline_disabled);
- mutex_unlock(&trace_types_lock);
-}
-
-#ifdef CONFIG_KRETPROBES
-static inline const char *kretprobed(const char *name)
-{
- static const char tramp_name[] = "kretprobe_trampoline";
- int size = sizeof(tramp_name);
-
- if (strncmp(tramp_name, name, size) == 0)
- return "[unknown/kretprobe'd]";
- return name;
-}
-#else
-static inline const char *kretprobed(const char *name)
-{
- return name;
-}
-#endif /* CONFIG_KRETPROBES */
-
-static int
-seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
- char str[KSYM_SYMBOL_LEN];
- const char *name;
-
- kallsyms_lookup(address, NULL, NULL, NULL, str);
-
- name = kretprobed(str);
-
- return trace_seq_printf(s, fmt, name);
-#endif
- return 1;
-}
-
-static int
-seq_print_sym_offset(struct trace_seq *s, const char *fmt,
- unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
- char str[KSYM_SYMBOL_LEN];
- const char *name;
-
- sprint_symbol(str, address);
- name = kretprobed(str);
-
- return trace_seq_printf(s, fmt, name);
-#endif
- return 1;
-}
-
-#ifndef CONFIG_64BIT
-# define IP_FMT "%08lx"
-#else
-# define IP_FMT "%016lx"
-#endif
-
-int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
-{
- int ret;
-
- if (!ip)
- return trace_seq_printf(s, "0");
-
- if (sym_flags & TRACE_ITER_SYM_OFFSET)
- ret = seq_print_sym_offset(s, "%s", ip);
- else
- ret = seq_print_sym_short(s, "%s", ip);
-
- if (!ret)
- return 0;
-
- if (sym_flags & TRACE_ITER_SYM_ADDR)
- ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
- return ret;
-}
-
-static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
- unsigned long ip, unsigned long sym_flags)
-{
- struct file *file = NULL;
- unsigned long vmstart = 0;
- int ret = 1;
-
- if (mm) {
- const struct vm_area_struct *vma;
-
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, ip);
- if (vma) {
- file = vma->vm_file;
- vmstart = vma->vm_start;
- }
- if (file) {
- ret = trace_seq_path(s, &file->f_path);
- if (ret)
- ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
- }
- up_read(&mm->mmap_sem);
- }
- if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
- ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
- return ret;
-}
-
-static int
-seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
- unsigned long sym_flags)
-{
- struct mm_struct *mm = NULL;
- int ret = 1;
- unsigned int i;
-
- if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
- struct task_struct *task;
- /*
- * we do the lookup on the thread group leader,
- * since individual threads might have already quit!
- */
- rcu_read_lock();
- task = find_task_by_vpid(entry->ent.tgid);
- if (task)
- mm = get_task_mm(task);
- rcu_read_unlock();
- }
-
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- unsigned long ip = entry->caller[i];
-
- if (ip == ULONG_MAX || !ret)
- break;
- if (i && ret)
- ret = trace_seq_puts(s, " <- ");
- if (!ip) {
- if (ret)
- ret = trace_seq_puts(s, "??");
- continue;
- }
- if (!ret)
- break;
- if (ret)
- ret = seq_print_user_ip(s, mm, ip, sym_flags);
- }
-
- if (mm)
- mmput(mm);
- return ret;
}
static void print_lat_help_header(struct seq_file *m)
@@ -1658,11 +1572,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
total = entries +
ring_buffer_overruns(iter->tr->buffer);
- seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+ seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
- seq_puts(m, "-----------------------------------"
+ seq_puts(m, "# -----------------------------------"
"---------------------------------\n");
- seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+ seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
nsecs_to_usecs(data->saved_latency),
entries,
@@ -1684,121 +1598,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
#else
seq_puts(m, ")\n");
#endif
- seq_puts(m, " -----------------\n");
- seq_printf(m, " | task: %.16s-%d "
+ seq_puts(m, "# -----------------\n");
+ seq_printf(m, "# | task: %.16s-%d "
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
data->comm, data->pid, data->uid, data->nice,
data->policy, data->rt_priority);
- seq_puts(m, " -----------------\n");
+ seq_puts(m, "# -----------------\n");
if (data->critical_start) {
- seq_puts(m, " => started at: ");
+ seq_puts(m, "# => started at: ");
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n => ended at: ");
+ seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
- seq_puts(m, "\n");
- }
-
- seq_puts(m, "\n");
-}
-
-static void
-lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
-{
- int hardirq, softirq;
- char *comm;
-
- comm = trace_find_cmdline(entry->pid);
-
- trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
- trace_seq_printf(s, "%3d", cpu);
- trace_seq_printf(s, "%c%c",
- (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
- (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
- ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
-
- hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
- softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
- if (hardirq && softirq) {
- trace_seq_putc(s, 'H');
- } else {
- if (hardirq) {
- trace_seq_putc(s, 'h');
- } else {
- if (softirq)
- trace_seq_putc(s, 's');
- else
- trace_seq_putc(s, '.');
- }
- }
-
- if (entry->preempt_count)
- trace_seq_printf(s, "%x", entry->preempt_count);
- else
- trace_seq_puts(s, ".");
-}
-
-unsigned long preempt_mark_thresh = 100;
-
-static void
-lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
- unsigned long rel_usecs)
-{
- trace_seq_printf(s, " %4lldus", abs_usecs);
- if (rel_usecs > preempt_mark_thresh)
- trace_seq_puts(s, "!: ");
- else if (rel_usecs > 1)
- trace_seq_puts(s, "+: ");
- else
- trace_seq_puts(s, " : ");
-}
-
-static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-
-static int task_state_char(unsigned long state)
-{
- int bit = state ? __ffs(state) + 1 : 0;
-
- return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
-/*
- * The message is supposed to contain an ending newline.
- * If the printing stops prematurely, try to add a newline of our own.
- */
-void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
-{
- struct trace_entry *ent;
- struct trace_field_cont *cont;
- bool ok = true;
-
- ent = peek_next_entry(iter, iter->cpu, NULL);
- if (!ent || ent->type != TRACE_CONT) {
- trace_seq_putc(s, '\n');
- return;
+ seq_puts(m, "#\n");
}
- do {
- cont = (struct trace_field_cont *)ent;
- if (ok)
- ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
-
- ftrace_disable_cpu();
-
- if (iter->buffer_iter[iter->cpu])
- ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
- else
- ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-
- ftrace_enable_cpu();
-
- ent = peek_next_entry(iter, iter->cpu, NULL);
- } while (ent && ent->type == TRACE_CONT);
-
- if (!ok)
- trace_seq_putc(s, '\n');
+ seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
@@ -1815,141 +1632,11 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
return;
cpumask_set_cpu(iter->cpu, iter->started);
- trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
-}
-
-static enum print_line_t
-print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
-{
- struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_entry *next_entry;
- unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
- struct trace_entry *entry = iter->ent;
- unsigned long abs_usecs;
- unsigned long rel_usecs;
- u64 next_ts;
- char *comm;
- int S, T;
- int i;
-
- if (entry->type == TRACE_CONT)
- return TRACE_TYPE_HANDLED;
-
- test_cpu_buff_start(iter);
-
- next_entry = find_next_entry(iter, NULL, &next_ts);
- if (!next_entry)
- next_ts = iter->ts;
- rel_usecs = ns2usecs(next_ts - iter->ts);
- abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
-
- if (verbose) {
- comm = trace_find_cmdline(entry->pid);
- trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
- " %ld.%03ldms (+%ld.%03ldms): ",
- comm,
- entry->pid, cpu, entry->flags,
- entry->preempt_count, trace_idx,
- ns2usecs(iter->ts),
- abs_usecs/1000,
- abs_usecs % 1000, rel_usecs/1000,
- rel_usecs % 1000);
- } else {
- lat_print_generic(s, entry, cpu);
- lat_print_timestamp(s, abs_usecs, rel_usecs);
- }
- switch (entry->type) {
- case TRACE_FN: {
- struct ftrace_entry *field;
-
- trace_assign_type(field, entry);
-
- seq_print_ip_sym(s, field->ip, sym_flags);
- trace_seq_puts(s, " (");
- seq_print_ip_sym(s, field->parent_ip, sym_flags);
- trace_seq_puts(s, ")\n");
- break;
- }
- case TRACE_CTX:
- case TRACE_WAKE: {
- struct ctx_switch_entry *field;
-
- trace_assign_type(field, entry);
-
- T = task_state_char(field->next_state);
- S = task_state_char(field->prev_state);
- comm = trace_find_cmdline(field->next_pid);
- trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
- field->prev_pid,
- field->prev_prio,
- S, entry->type == TRACE_CTX ? "==>" : " +",
- field->next_cpu,
- field->next_pid,
- field->next_prio,
- T, comm);
- break;
- }
- case TRACE_SPECIAL: {
- struct special_entry *field;
-
- trace_assign_type(field, entry);
-
- trace_seq_printf(s, "# %ld %ld %ld\n",
- field->arg1,
- field->arg2,
- field->arg3);
- break;
- }
- case TRACE_STACK: {
- struct stack_entry *field;
-
- trace_assign_type(field, entry);
-
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- if (i)
- trace_seq_puts(s, " <= ");
- seq_print_ip_sym(s, field->caller[i], sym_flags);
- }
- trace_seq_puts(s, "\n");
- break;
- }
- case TRACE_PRINT: {
- struct print_entry *field;
-
- trace_assign_type(field, entry);
-
- seq_print_ip_sym(s, field->ip, sym_flags);
- trace_seq_printf(s, ": %s", field->buf);
- if (entry->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
- break;
- }
- case TRACE_BRANCH: {
- struct trace_branch *field;
-
- trace_assign_type(field, entry);
-
- trace_seq_printf(s, "[%s] %s:%s:%d\n",
- field->correct ? " ok " : " MISS ",
- field->func,
- field->file,
- field->line);
- break;
- }
- case TRACE_USER_STACK: {
- struct userstack_entry *field;
-
- trace_assign_type(field, entry);
- seq_print_userip_objs(field, s, sym_flags);
- trace_seq_putc(s, '\n');
- break;
- }
- default:
- trace_seq_printf(s, "Unknown type %d\n", entry->type);
- }
- return TRACE_TYPE_HANDLED;
+	/* Don't print the "CPU buffer started" banner for the first entry of the trace */
+ if (iter->idx > 1)
+ trace_seq_printf(s, "##### CPU %u buffer started ####\n",
+ iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
@@ -1957,333 +1644,84 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
struct trace_seq *s = &iter->seq;
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_entry *entry;
- unsigned long usec_rem;
- unsigned long long t;
- unsigned long secs;
- char *comm;
- int ret;
- int S, T;
- int i;
+ struct trace_event *event;
entry = iter->ent;
- if (entry->type == TRACE_CONT)
- return TRACE_TYPE_HANDLED;
-
test_cpu_buff_start(iter);
- comm = trace_find_cmdline(iter->ent->pid);
-
- t = ns2usecs(iter->ts);
- usec_rem = do_div(t, 1000000ULL);
- secs = (unsigned long)t;
-
- ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- switch (entry->type) {
- case TRACE_FN: {
- struct ftrace_entry *field;
-
- trace_assign_type(field, entry);
-
- ret = seq_print_ip_sym(s, field->ip, sym_flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
- field->parent_ip) {
- ret = trace_seq_printf(s, " <-");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- ret = seq_print_ip_sym(s,
- field->parent_ip,
- sym_flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
- ret = trace_seq_printf(s, "\n");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_CTX:
- case TRACE_WAKE: {
- struct ctx_switch_entry *field;
-
- trace_assign_type(field, entry);
-
- T = task_state_char(field->next_state);
- S = task_state_char(field->prev_state);
- ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
- field->prev_pid,
- field->prev_prio,
- S,
- entry->type == TRACE_CTX ? "==>" : " +",
- field->next_cpu,
- field->next_pid,
- field->next_prio,
- T);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_SPECIAL: {
- struct special_entry *field;
+ event = ftrace_find_event(entry->type);
- trace_assign_type(field, entry);
-
- ret = trace_seq_printf(s, "# %ld %ld %ld\n",
- field->arg1,
- field->arg2,
- field->arg3);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_STACK: {
- struct stack_entry *field;
-
- trace_assign_type(field, entry);
-
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- if (i) {
- ret = trace_seq_puts(s, " <= ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
- ret = seq_print_ip_sym(s, field->caller[i],
- sym_flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ if (!trace_print_lat_context(iter))
+ goto partial;
+ } else {
+ if (!trace_print_context(iter))
+ goto partial;
}
- ret = trace_seq_puts(s, "\n");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_PRINT: {
- struct print_entry *field;
-
- trace_assign_type(field, entry);
-
- seq_print_ip_sym(s, field->ip, sym_flags);
- trace_seq_printf(s, ": %s", field->buf);
- if (entry->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
- break;
- }
- case TRACE_GRAPH_RET: {
- return print_graph_function(iter);
}
- case TRACE_GRAPH_ENT: {
- return print_graph_function(iter);
- }
- case TRACE_BRANCH: {
- struct trace_branch *field;
-
- trace_assign_type(field, entry);
- trace_seq_printf(s, "[%s] %s:%s:%d\n",
- field->correct ? " ok " : " MISS ",
- field->func,
- field->file,
- field->line);
- break;
- }
- case TRACE_USER_STACK: {
- struct userstack_entry *field;
+ if (event)
+ return event->trace(iter, sym_flags);
- trace_assign_type(field, entry);
+ if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
+ goto partial;
- ret = seq_print_userip_objs(field, s, sym_flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_putc(s, '\n');
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- }
return TRACE_TYPE_HANDLED;
+partial:
+ return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
- int ret;
- int S, T;
+ struct trace_event *event;
entry = iter->ent;
- if (entry->type == TRACE_CONT)
- return TRACE_TYPE_HANDLED;
-
- ret = trace_seq_printf(s, "%d %d %llu ",
- entry->pid, iter->cpu, iter->ts);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- switch (entry->type) {
- case TRACE_FN: {
- struct ftrace_entry *field;
-
- trace_assign_type(field, entry);
-
- ret = trace_seq_printf(s, "%x %x\n",
- field->ip,
- field->parent_ip);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
+ if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ if (!trace_seq_printf(s, "%d %d %llu ",
+ entry->pid, iter->cpu, iter->ts))
+ goto partial;
}
- case TRACE_CTX:
- case TRACE_WAKE: {
- struct ctx_switch_entry *field;
-
- trace_assign_type(field, entry);
-
- T = task_state_char(field->next_state);
- S = entry->type == TRACE_WAKE ? '+' :
- task_state_char(field->prev_state);
- ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
- field->prev_pid,
- field->prev_prio,
- S,
- field->next_cpu,
- field->next_pid,
- field->next_prio,
- T);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_SPECIAL:
- case TRACE_USER_STACK:
- case TRACE_STACK: {
- struct special_entry *field;
-
- trace_assign_type(field, entry);
- ret = trace_seq_printf(s, "# %ld %ld %ld\n",
- field->arg1,
- field->arg2,
- field->arg3);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- break;
- }
- case TRACE_PRINT: {
- struct print_entry *field;
+ event = ftrace_find_event(entry->type);
+ if (event)
+ return event->raw(iter, 0);
- trace_assign_type(field, entry);
+ if (!trace_seq_printf(s, "%d ?\n", entry->type))
+ goto partial;
- trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
- if (entry->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
- break;
- }
- }
return TRACE_TYPE_HANDLED;
+partial:
+ return TRACE_TYPE_PARTIAL_LINE;
}
-#define SEQ_PUT_FIELD_RET(s, x) \
-do { \
- if (!trace_seq_putmem(s, &(x), sizeof(x))) \
- return 0; \
-} while (0)
-
-#define SEQ_PUT_HEX_FIELD_RET(s, x) \
-do { \
- BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
- if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
- return 0; \
-} while (0)
-
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
- int S, T;
+ struct trace_event *event;
entry = iter->ent;
- if (entry->type == TRACE_CONT)
- return TRACE_TYPE_HANDLED;
-
- SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
- SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
- SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
-
- switch (entry->type) {
- case TRACE_FN: {
- struct ftrace_entry *field;
-
- trace_assign_type(field, entry);
-
- SEQ_PUT_HEX_FIELD_RET(s, field->ip);
- SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
- break;
+ if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
+ SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
+ SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
}
- case TRACE_CTX:
- case TRACE_WAKE: {
- struct ctx_switch_entry *field;
-
- trace_assign_type(field, entry);
-
- T = task_state_char(field->next_state);
- S = entry->type == TRACE_WAKE ? '+' :
- task_state_char(field->prev_state);
- SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
- SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
- SEQ_PUT_HEX_FIELD_RET(s, S);
- SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
- SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
- SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
- SEQ_PUT_HEX_FIELD_RET(s, T);
- break;
- }
- case TRACE_SPECIAL:
- case TRACE_USER_STACK:
- case TRACE_STACK: {
- struct special_entry *field;
-
- trace_assign_type(field, entry);
- SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
- SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
- SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
- break;
- }
+ event = ftrace_find_event(entry->type);
+ if (event) {
+ enum print_line_t ret = event->hex(iter, 0);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
}
- SEQ_PUT_FIELD_RET(s, newline);
-
- return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- struct trace_entry *entry = iter->ent;
- struct print_entry *field;
- int ret;
-
- trace_assign_type(field, entry);
- ret = trace_seq_printf(s, field->buf);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
- if (entry->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
+ SEQ_PUT_FIELD_RET(s, newline);
return TRACE_TYPE_HANDLED;
}
@@ -2292,59 +1730,37 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
+ struct trace_event *event;
entry = iter->ent;
- if (entry->type == TRACE_CONT)
- return TRACE_TYPE_HANDLED;
-
- SEQ_PUT_FIELD_RET(s, entry->pid);
- SEQ_PUT_FIELD_RET(s, entry->cpu);
- SEQ_PUT_FIELD_RET(s, iter->ts);
-
- switch (entry->type) {
- case TRACE_FN: {
- struct ftrace_entry *field;
-
- trace_assign_type(field, entry);
-
- SEQ_PUT_FIELD_RET(s, field->ip);
- SEQ_PUT_FIELD_RET(s, field->parent_ip);
- break;
- }
- case TRACE_CTX: {
- struct ctx_switch_entry *field;
-
- trace_assign_type(field, entry);
-
- SEQ_PUT_FIELD_RET(s, field->prev_pid);
- SEQ_PUT_FIELD_RET(s, field->prev_prio);
- SEQ_PUT_FIELD_RET(s, field->prev_state);
- SEQ_PUT_FIELD_RET(s, field->next_pid);
- SEQ_PUT_FIELD_RET(s, field->next_prio);
- SEQ_PUT_FIELD_RET(s, field->next_state);
- break;
+ if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ SEQ_PUT_FIELD_RET(s, entry->pid);
+ SEQ_PUT_FIELD_RET(s, iter->cpu);
+ SEQ_PUT_FIELD_RET(s, iter->ts);
}
- case TRACE_SPECIAL:
- case TRACE_USER_STACK:
- case TRACE_STACK: {
- struct special_entry *field;
-
- trace_assign_type(field, entry);
- SEQ_PUT_FIELD_RET(s, field->arg1);
- SEQ_PUT_FIELD_RET(s, field->arg2);
- SEQ_PUT_FIELD_RET(s, field->arg3);
- break;
- }
- }
- return 1;
+ event = ftrace_find_event(entry->type);
+ return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}
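
The print_*_fmt() routines now look the entry type up via ftrace_find_event() and call a per-format handler instead of carrying their own switch statements. A standalone sketch of such a type-to-handler dispatch table; the real registration lives in trace_output.c, and everything below is illustrative:

#include <stdio.h>

enum line_ret { HANDLED, UNHANDLED };

struct out_event {
        int type;
        enum line_ret (*text)(const void *entry);   /* one hook per output format */
};

static enum line_ret fn_text(const void *entry)
{
        printf("function entry at %p\n", entry);
        return HANDLED;
}

static struct out_event fn_event = { .type = 1, .text = fn_text };
static struct out_event *events[] = { &fn_event };

static struct out_event *find_event(int type)       /* like ftrace_find_event() */
{
        size_t i;

        for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
                if (events[i]->type == type)
                        return events[i];
        return NULL;
}

int main(void)
{
        struct out_event *ev = find_event(1);

        if (ev)
                ev->text("payload");
        else
                printf("Unknown type\n");
        return 0;
}
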
static int trace_empty(struct trace_iterator *iter)
{
int cpu;
+ /* If we are looking at one CPU buffer, only check that one */
+ if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+ cpu = iter->cpu_file;
+ if (iter->buffer_iter[cpu]) {
+ if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+ return 0;
+ } else {
+ if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+ return 0;
+ }
+ return 1;
+ }
+
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
@@ -2368,10 +1784,15 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
return ret;
}
+ if (iter->ent->type == TRACE_BPRINT &&
+ trace_flags & TRACE_ITER_PRINTK &&
+ trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+ return trace_print_bprintk_msg_only(iter);
+
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
- return print_printk_msg_only(iter);
+ return trace_print_printk_msg_only(iter);
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
@@ -2382,9 +1803,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
- if (iter->iter_flags & TRACE_FILE_LAT_FMT)
- return print_lat_fmt(iter, iter->idx, iter->cpu);
-
return print_trace_fmt(iter);
}
@@ -2426,30 +1844,45 @@ static struct seq_operations tracer_seq_ops = {
};
static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file, int *ret)
+__tracing_open(struct inode *inode, struct file *file)
{
+ long cpu_file = (long) inode->i_private;
+ void *fail_ret = ERR_PTR(-ENOMEM);
struct trace_iterator *iter;
struct seq_file *m;
- int cpu;
+ int cpu, ret;
- if (tracing_disabled) {
- *ret = -ENODEV;
- return NULL;
- }
+ if (tracing_disabled)
+ return ERR_PTR(-ENODEV);
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter) {
- *ret = -ENOMEM;
- goto out;
- }
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ /*
+ * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+ */
mutex_lock(&trace_types_lock);
+ iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
+ if (!iter->trace)
+ goto fail;
+
+ if (current_trace)
+ *iter->trace = *current_trace;
+
+ if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+ goto fail;
+
+ cpumask_clear(iter->started);
+
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
- iter->tr = inode->i_private;
- iter->trace = current_trace;
+ iter->tr = &global_trace;
iter->pos = -1;
+ mutex_init(&iter->mutex);
+ iter->cpu_file = cpu_file;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
@@ -2459,20 +1892,24 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
+ if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
+ for_each_tracing_cpu(cpu) {
- for_each_tracing_cpu(cpu) {
-
+ iter->buffer_iter[cpu] =
+ ring_buffer_read_start(iter->tr->buffer, cpu);
+ }
+ } else {
+ cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_start(iter->tr->buffer, cpu);
-
- if (!iter->buffer_iter[cpu])
- goto fail_buffer;
+ ring_buffer_read_start(iter->tr->buffer, cpu);
}
/* TODO stop tracer */
- *ret = seq_open(file, &tracer_seq_ops);
- if (*ret)
+ ret = seq_open(file, &tracer_seq_ops);
+ if (ret < 0) {
+ fail_ret = ERR_PTR(ret);
goto fail_buffer;
+ }
m = file->private_data;
m->private = iter;
@@ -2482,7 +1919,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
mutex_unlock(&trace_types_lock);
- out:
return iter;
fail_buffer:
@@ -2490,10 +1926,13 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
+ free_cpumask_var(iter->started);
+ fail:
mutex_unlock(&trace_types_lock);
+ kfree(iter->trace);
kfree(iter);
- return ERR_PTR(-ENOMEM);
+ return fail_ret;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -2505,12 +1944,17 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
return 0;
}
-int tracing_release(struct inode *inode, struct file *file)
+static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
- struct trace_iterator *iter = m->private;
+ struct trace_iterator *iter;
int cpu;
+ if (!(file->f_mode & FMODE_READ))
+ return 0;
+
+ iter = m->private;
+
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
@@ -2525,33 +1969,39 @@ int tracing_release(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
seq_release(inode, file);
+ mutex_destroy(&iter->mutex);
+ free_cpumask_var(iter->started);
+ kfree(iter->trace);
kfree(iter);
return 0;
}
static int tracing_open(struct inode *inode, struct file *file)
{
- int ret;
-
- __tracing_open(inode, file, &ret);
-
- return ret;
-}
-
-static int tracing_lt_open(struct inode *inode, struct file *file)
-{
struct trace_iterator *iter;
- int ret;
+ int ret = 0;
- iter = __tracing_open(inode, file, &ret);
+ /* If this file was open for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) &&
+ !(file->f_flags & O_APPEND)) {
+ long cpu = (long) inode->i_private;
- if (!ret)
- iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ if (cpu == TRACE_PIPE_ALL_CPU)
+ tracing_reset_online_cpus(&global_trace);
+ else
+ tracing_reset(&global_trace, cpu);
+ }
+ if (file->f_mode & FMODE_READ) {
+ iter = __tracing_open(inode, file);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ }
return ret;
}
-
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
@@ -2623,21 +2073,22 @@ static int show_traces_open(struct inode *inode, struct file *file)
return ret;
}
-static struct file_operations tracing_fops = {
- .open = tracing_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tracing_release,
-};
+static ssize_t
+tracing_write_stub(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ return count;
+}
-static struct file_operations tracing_lt_fops = {
- .open = tracing_lt_open,
+static const struct file_operations tracing_fops = {
+ .open = tracing_open,
.read = seq_read,
+ .write = tracing_write_stub,
.llseek = seq_lseek,
.release = tracing_release,
};
-static struct file_operations show_traces_fops = {
+static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.release = seq_release,
@@ -2730,7 +2181,7 @@ err_unlock:
return err;
}
-static struct file_operations tracing_cpumask_fops = {
+static const struct file_operations tracing_cpumask_fops = {
.open = tracing_open_generic,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
@@ -2740,57 +2191,62 @@ static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- int i;
+ struct tracer_opt *trace_opts;
+ u32 tracer_flags;
+ int len = 0;
char *buf;
int r = 0;
- int len = 0;
- u32 tracer_flags = current_trace->flags->val;
- struct tracer_opt *trace_opts = current_trace->flags->opts;
+ int i;
- /* calulate max size */
+ /* calculate max size */
for (i = 0; trace_options[i]; i++) {
len += strlen(trace_options[i]);
- len += 3; /* "no" and space */
+ len += 3; /* "no" and newline */
}
+ mutex_lock(&trace_types_lock);
+ tracer_flags = current_trace->flags->val;
+ trace_opts = current_trace->flags->opts;
+
/*
* Increase the size with names of options specific
* of the current tracer.
*/
for (i = 0; trace_opts[i].name; i++) {
len += strlen(trace_opts[i].name);
- len += 3; /* "no" and space */
+ len += 3; /* "no" and newline */
}
/* +2 for \n and \0 */
buf = kmalloc(len + 2, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ mutex_unlock(&trace_types_lock);
return -ENOMEM;
+ }
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
- r += sprintf(buf + r, "%s ", trace_options[i]);
+ r += sprintf(buf + r, "%s\n", trace_options[i]);
else
- r += sprintf(buf + r, "no%s ", trace_options[i]);
+ r += sprintf(buf + r, "no%s\n", trace_options[i]);
}
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
- r += sprintf(buf + r, "%s ",
+ r += sprintf(buf + r, "%s\n",
trace_opts[i].name);
else
- r += sprintf(buf + r, "no%s ",
+ r += sprintf(buf + r, "no%s\n",
trace_opts[i].name);
}
+ mutex_unlock(&trace_types_lock);
- r += sprintf(buf + r, "\n");
WARN_ON(r >= len + 2);
r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
kfree(buf);
-
return r;
}
@@ -2828,6 +2284,34 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return 0;
}
+static void set_tracer_flags(unsigned int mask, int enabled)
+{
+ /* do nothing if flag is already set */
+ if (!!(trace_flags & mask) == !!enabled)
+ return;
+
+ if (enabled)
+ trace_flags |= mask;
+ else
+ trace_flags &= ~mask;
+
+ if (mask == TRACE_ITER_GLOBAL_CLK) {
+ u64 (*func)(void);
+
+ if (enabled)
+ func = trace_clock_global;
+ else
+ func = trace_clock_local;
+
+ mutex_lock(&trace_types_lock);
+ ring_buffer_set_clock(global_trace.buffer, func);
+
+ if (max_tr.buffer)
+ ring_buffer_set_clock(max_tr.buffer, func);
+ mutex_unlock(&trace_types_lock);
+ }
+}
+
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -2855,17 +2339,16 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
int len = strlen(trace_options[i]);
if (strncmp(cmp, trace_options[i], len) == 0) {
- if (neg)
- trace_flags &= ~(1 << i);
- else
- trace_flags |= (1 << i);
+ set_tracer_flags(1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
if (!trace_options[i]) {
+ mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
+ mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
@@ -2875,7 +2358,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static struct file_operations tracing_iter_fops = {
+static const struct file_operations tracing_iter_fops = {
.open = tracing_open_generic,
.read = tracing_trace_options_read,
.write = tracing_trace_options_write,
@@ -2886,9 +2369,9 @@ static const char readme_msg[] =
"# mkdir /debug\n"
"# mount -t debugfs nodev /debug\n\n"
"# cat /debug/tracing/available_tracers\n"
- "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
+ "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
"# cat /debug/tracing/current_tracer\n"
- "none\n"
+ "nop\n"
"# echo sched_switch > /debug/tracing/current_tracer\n"
"# cat /debug/tracing/current_tracer\n"
"sched_switch\n"
@@ -2908,7 +2391,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
readme_msg, strlen(readme_msg));
}
-static struct file_operations tracing_readme_fops = {
+static const struct file_operations tracing_readme_fops = {
.open = tracing_open_generic,
.read = tracing_readme_read,
};
@@ -2930,7 +2413,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
{
struct trace_array *tr = filp->private_data;
char buf[64];
- long val;
+ unsigned long val;
int ret;
if (cnt >= sizeof(buf))
@@ -2985,13 +2468,105 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
-static int tracing_set_tracer(char *buf)
+int tracer_init(struct tracer *t, struct trace_array *tr)
+{
+ tracing_reset_online_cpus(tr);
+ return t->init(tr);
+}
+
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+ int ret;
+
+ /*
+ * If kernel or user changes the size of the ring buffer
+ * we use the size that was given, and we can forget about
+ * expanding it later.
+ */
+ ring_buffer_expanded = 1;
+
+ ret = ring_buffer_resize(global_trace.buffer, size);
+ if (ret < 0)
+ return ret;
+
+ ret = ring_buffer_resize(max_tr.buffer, size);
+ if (ret < 0) {
+ int r;
+
+ r = ring_buffer_resize(global_trace.buffer,
+ global_trace.entries);
+ if (r < 0) {
+ /*
+ * AARGH! We are left with different
+ * size max buffer!!!!
+ * The max buffer is our "snapshot" buffer.
+ * When a tracer needs a snapshot (one of the
+ * latency tracers), it swaps the max buffer
+	 * with the saved snapshot. We succeeded in
+	 * updating the size of the main buffer, but failed to
+	 * update the size of the max buffer. But when we tried
+ * to reset the main buffer to the original size, we
+ * failed there too. This is very unlikely to
+ * happen, but if it does, warn and kill all
+ * tracing.
+ */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ }
+ return ret;
+ }
+
+ global_trace.entries = size;
+
+ return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save memory when tracing is configured in but never used, the
+ * ring buffers are set to a minimum size. Once a user starts to use
+ * the tracing facility, they need to grow to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+ int ret = 0;
+
+ mutex_lock(&trace_types_lock);
+ if (!ring_buffer_expanded)
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+}
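
A minimal sketch (not part of this patch) of the kind of caller the helper above is meant for: code that is about to start producing events calls tracing_update_buffers() first so the boot-time minimum-sized ring buffer is expanded on first real use. The function my_event_enable() is invented for this example.

static int my_event_enable(void)
{
	int ret;

	/* Grow the ring buffers to their default size if still minimal. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;	/* resize failed, do not enable the event */

	/* ... register callbacks / start emitting events here ... */
	return 0;
}
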
+
+struct trace_option_dentry;
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer);
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts);
+
+static int tracing_set_tracer(const char *buf)
{
+ static struct trace_option_dentry *topts;
struct trace_array *tr = &global_trace;
struct tracer *t;
int ret = 0;
mutex_lock(&trace_types_lock);
+
+ if (!ring_buffer_expanded) {
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+ }
+
for (t = trace_types; t; t = t->next) {
if (strcmp(t->name, buf) == 0)
break;
@@ -3007,9 +2582,14 @@ static int tracing_set_tracer(char *buf)
if (current_trace && current_trace->reset)
current_trace->reset(tr);
+ destroy_trace_option_files(topts);
+
current_trace = t;
+
+ topts = create_trace_option_files(current_trace);
+
if (t->init) {
- ret = t->init(tr);
+ ret = tracer_init(t, tr);
if (ret)
goto out;
}
@@ -3072,9 +2652,9 @@ static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- long *ptr = filp->private_data;
+ unsigned long *ptr = filp->private_data;
char buf[64];
- long val;
+ unsigned long val;
int ret;
if (cnt >= sizeof(buf))
@@ -3094,54 +2674,96 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static atomic_t tracing_reader;
-
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
+ long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
+ int ret = 0;
if (tracing_disabled)
return -ENODEV;
- /* We only allow for reader of the pipe */
- if (atomic_inc_return(&tracing_reader) != 1) {
- atomic_dec(&tracing_reader);
- return -EBUSY;
+ mutex_lock(&trace_types_lock);
+
+ /* We only allow one reader per cpu */
+ if (cpu_file == TRACE_PIPE_ALL_CPU) {
+ if (!cpumask_empty(tracing_reader_cpumask)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ cpumask_setall(tracing_reader_cpumask);
+ } else {
+ if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
+ cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
+ else {
+ ret = -EBUSY;
+ goto out;
+ }
}
/* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return -ENOMEM;
+ if (!iter) {
+ ret = -ENOMEM;
+ goto out;
+ }
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
- kfree(iter);
- return -ENOMEM;
+ /*
+ * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+ */
+ iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
+ if (!iter->trace) {
+ ret = -ENOMEM;
+ goto fail;
}
+ if (current_trace)
+ *iter->trace = *current_trace;
- mutex_lock(&trace_types_lock);
+ if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
+ iter->cpu_file = cpu_file;
iter->tr = &global_trace;
- iter->trace = current_trace;
+ mutex_init(&iter->mutex);
filp->private_data = iter;
if (iter->trace->pipe_open)
iter->trace->pipe_open(iter);
+
+out:
mutex_unlock(&trace_types_lock);
+ return ret;
- return 0;
+fail:
+ kfree(iter->trace);
+ kfree(iter);
+ mutex_unlock(&trace_types_lock);
+ return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
+ mutex_lock(&trace_types_lock);
+
+ if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
+ cpumask_clear(tracing_reader_cpumask);
+ else
+ cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
+
+ mutex_unlock(&trace_types_lock);
+
free_cpumask_var(iter->started);
+ mutex_destroy(&iter->mutex);
+ kfree(iter->trace);
kfree(iter);
- atomic_dec(&tracing_reader);
return 0;
}
@@ -3167,67 +2789,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
-/*
- * Consumer reader.
- */
-static ssize_t
-tracing_read_pipe(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+
+void default_wait_pipe(struct trace_iterator *iter)
{
- struct trace_iterator *iter = filp->private_data;
- ssize_t sret;
+ DEFINE_WAIT(wait);
- /* return any leftover data */
- sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (sret != -EBUSY)
- return sret;
+ prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+ if (trace_empty(iter))
+ schedule();
- trace_seq_reset(&iter->seq);
+ finish_wait(&trace_wait, &wait);
+}
- mutex_lock(&trace_types_lock);
- if (iter->trace->read) {
- sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
- if (sret)
- goto out;
- }
+/*
+ * This is a make-shift waitqueue.
+ * A tracer might use this callback in some rare cases:
+ *
+ * 1) the current tracer might hold the runqueue lock when it wakes up
+ * a reader, hence a deadlock (sched, function, and function graph tracers)
+ * 2) the function tracers trace all functions; we don't want
+ * the overhead of calling wake_up and friends
+ * (and tracing them too)
+ *
+ * Anyway, this is really a very primitive wakeup.
+ */
+void poll_wait_pipe(struct trace_iterator *iter)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* sleep for 100 msecs, and try again. */
+ schedule_timeout(HZ / 10);
+}
+
+/* Must be called with iter->mutex held. */
+static int tracing_wait_pipe(struct file *filp)
+{
+ struct trace_iterator *iter = filp->private_data;
-waitagain:
- sret = 0;
while (trace_empty(iter)) {
if ((filp->f_flags & O_NONBLOCK)) {
- sret = -EAGAIN;
- goto out;
+ return -EAGAIN;
}
- /*
- * This is a make-shift waitqueue. The reason we don't use
- * an actual wait queue is because:
- * 1) we only ever have one waiter
- * 2) the tracing, traces all functions, we don't want
- * the overhead of calling wake_up and friends
- * (and tracing them too)
- * Anyway, this is really very primitive wakeup.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- iter->tr->waiter = current;
-
- mutex_unlock(&trace_types_lock);
-
- /* sleep for 100 msecs, and try again. */
- schedule_timeout(HZ/10);
-
- mutex_lock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
- iter->tr->waiter = NULL;
+ iter->trace->wait_pipe(iter);
- if (signal_pending(current)) {
- sret = -EINTR;
- goto out;
- }
+ mutex_lock(&iter->mutex);
- if (iter->trace != current_trace)
- goto out;
+ if (signal_pending(current))
+ return -EINTR;
/*
* We block until we read something and tracing is disabled.
@@ -3240,13 +2852,59 @@ waitagain:
*/
if (!tracer_enabled && iter->pos)
break;
+ }
+
+ return 1;
+}
+
+/*
+ * Consumer reader.
+ */
+static ssize_t
+tracing_read_pipe(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_iterator *iter = filp->private_data;
+ static struct tracer *old_tracer;
+ ssize_t sret;
+
+ /* return any leftover data */
+ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+ if (sret != -EBUSY)
+ return sret;
+
+ trace_seq_init(&iter->seq);
+
+ /* copy the tracer to avoid using a global lock all around */
+ mutex_lock(&trace_types_lock);
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
+ }
+ mutex_unlock(&trace_types_lock);
- continue;
+ /*
+ * Avoid more than one consumer on a single file descriptor
+	 * This is just a matter of trace coherency; the ring buffer itself
+ * is protected.
+ */
+ mutex_lock(&iter->mutex);
+ if (iter->trace->read) {
+ sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ if (sret)
+ goto out;
}
+waitagain:
+ sret = tracing_wait_pipe(filp);
+ if (sret <= 0)
+ goto out;
+
/* stop when tracing is finished */
- if (trace_empty(iter))
+ if (trace_empty(iter)) {
+ sret = 0;
goto out;
+ }
if (cnt >= PAGE_SIZE)
cnt = PAGE_SIZE - 1;
@@ -3267,8 +2925,8 @@ waitagain:
iter->seq.len = len;
break;
}
-
- trace_consume(iter);
+ if (ret != TRACE_TYPE_NO_CONSUME)
+ trace_consume(iter);
if (iter->seq.len >= cnt)
break;
@@ -3277,7 +2935,7 @@ waitagain:
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (iter->seq.readpos >= iter->seq.len)
- trace_seq_reset(&iter->seq);
+ trace_seq_init(&iter->seq);
/*
* If there was nothing to send to user, inspite of consuming trace
@@ -3287,20 +2945,165 @@ waitagain:
goto waitagain;
out:
- mutex_unlock(&trace_types_lock);
+ mutex_unlock(&iter->mutex);
return sret;
}
+static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ __free_page(buf->page);
+}
+
+static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
+ unsigned int idx)
+{
+ __free_page(spd->pages[idx]);
+}
+
+static struct pipe_buf_operations tracing_pipe_buf_ops = {
+ .can_merge = 0,
+ .map = generic_pipe_buf_map,
+ .unmap = generic_pipe_buf_unmap,
+ .confirm = generic_pipe_buf_confirm,
+ .release = tracing_pipe_buf_release,
+ .steal = generic_pipe_buf_steal,
+ .get = generic_pipe_buf_get,
+};
+
+static size_t
+tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
+{
+ size_t count;
+ int ret;
+
+ /* Seq buffer is page-sized, exactly what we need. */
+ for (;;) {
+ count = iter->seq.len;
+ ret = print_trace_line(iter);
+ count = iter->seq.len - count;
+ if (rem < count) {
+ rem = 0;
+ iter->seq.len -= count;
+ break;
+ }
+ if (ret == TRACE_TYPE_PARTIAL_LINE) {
+ iter->seq.len -= count;
+ break;
+ }
+
+ trace_consume(iter);
+ rem -= count;
+ if (!find_next_entry_inc(iter)) {
+ rem = 0;
+ iter->ent = NULL;
+ break;
+ }
+ }
+
+ return rem;
+}
+
+static ssize_t tracing_splice_read_pipe(struct file *filp,
+ loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len,
+ unsigned int flags)
+{
+ struct page *pages[PIPE_BUFFERS];
+ struct partial_page partial[PIPE_BUFFERS];
+ struct trace_iterator *iter = filp->private_data;
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .nr_pages = 0, /* This gets updated below. */
+ .flags = flags,
+ .ops = &tracing_pipe_buf_ops,
+ .spd_release = tracing_spd_release_pipe,
+ };
+ static struct tracer *old_tracer;
+ ssize_t ret;
+ size_t rem;
+ unsigned int i;
+
+ /* copy the tracer to avoid using a global lock all around */
+ mutex_lock(&trace_types_lock);
+ if (unlikely(old_tracer != current_trace && current_trace)) {
+ old_tracer = current_trace;
+ *iter->trace = *current_trace;
+ }
+ mutex_unlock(&trace_types_lock);
+
+ mutex_lock(&iter->mutex);
+
+ if (iter->trace->splice_read) {
+ ret = iter->trace->splice_read(iter, filp,
+ ppos, pipe, len, flags);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = tracing_wait_pipe(filp);
+ if (ret <= 0)
+ goto out_err;
+
+ if (!iter->ent && !find_next_entry_inc(iter)) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Fill as many pages as possible. */
+ for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ break;
+
+ rem = tracing_fill_pipe_page(rem, iter);
+
+ /* Copy the data into the page, so we can start over. */
+ ret = trace_seq_to_buffer(&iter->seq,
+ page_address(pages[i]),
+ iter->seq.len);
+ if (ret < 0) {
+ __free_page(pages[i]);
+ break;
+ }
+ partial[i].offset = 0;
+ partial[i].len = iter->seq.len;
+
+ trace_seq_init(&iter->seq);
+ }
+
+ mutex_unlock(&iter->mutex);
+
+ spd.nr_pages = i;
+
+ return splice_to_pipe(pipe, &spd);
+
+out_err:
+ mutex_unlock(&iter->mutex);
+
+ return ret;
+}
+
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- char buf[64];
+ char buf[96];
int r;
- r = sprintf(buf, "%lu\n", tr->entries >> 10);
+ mutex_lock(&trace_types_lock);
+ if (!ring_buffer_expanded)
+ r = sprintf(buf, "%lu (expanded: %lu)\n",
+ tr->entries >> 10,
+ trace_buf_size >> 10);
+ else
+ r = sprintf(buf, "%lu\n", tr->entries >> 10);
+ mutex_unlock(&trace_types_lock);
+
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -3344,28 +3147,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
val <<= 10;
if (val != global_trace.entries) {
- ret = ring_buffer_resize(global_trace.buffer, val);
- if (ret < 0) {
- cnt = ret;
- goto out;
- }
-
- ret = ring_buffer_resize(max_tr.buffer, val);
+ ret = tracing_resize_ring_buffer(val);
if (ret < 0) {
- int r;
cnt = ret;
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.entries);
- if (r < 0) {
- /* AARGH! We are left with different
- * size max buffer!!!! */
- WARN_ON(1);
- tracing_disabled = 1;
- }
goto out;
}
-
- global_trace.entries = val;
}
filp->f_pos += cnt;
@@ -3393,7 +3179,7 @@ static int mark_printk(const char *fmt, ...)
int ret;
va_list args;
va_start(args, fmt);
- ret = trace_vprintk(0, -1, fmt, args);
+ ret = trace_vprintk(0, fmt, args);
va_end(args);
return ret;
}
@@ -3433,42 +3219,295 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static struct file_operations tracing_max_lat_fops = {
+static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
};
-static struct file_operations tracing_ctrl_fops = {
+static const struct file_operations tracing_ctrl_fops = {
.open = tracing_open_generic,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
};
-static struct file_operations set_tracer_fops = {
+static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
};
-static struct file_operations tracing_pipe_fops = {
+static const struct file_operations tracing_pipe_fops = {
.open = tracing_open_pipe,
.poll = tracing_poll_pipe,
.read = tracing_read_pipe,
+ .splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
};
-static struct file_operations tracing_entries_fops = {
+static const struct file_operations tracing_entries_fops = {
.open = tracing_open_generic,
.read = tracing_entries_read,
.write = tracing_entries_write,
};
-static struct file_operations tracing_mark_fops = {
+static const struct file_operations tracing_mark_fops = {
.open = tracing_open_generic,
.write = tracing_mark_write,
};
+struct ftrace_buffer_info {
+ struct trace_array *tr;
+ void *spare;
+ int cpu;
+ unsigned int read;
+};
+
+static int tracing_buffers_open(struct inode *inode, struct file *filp)
+{
+ int cpu = (int)(long)inode->i_private;
+ struct ftrace_buffer_info *info;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->tr = &global_trace;
+ info->cpu = cpu;
+ info->spare = NULL;
+ /* Force reading ring buffer for first read */
+ info->read = (unsigned int)-1;
+
+ filp->private_data = info;
+
+ return nonseekable_open(inode, filp);
+}
+
+static ssize_t
+tracing_buffers_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ftrace_buffer_info *info = filp->private_data;
+ unsigned int pos;
+ ssize_t ret;
+ size_t size;
+
+ if (!count)
+ return 0;
+
+ if (!info->spare)
+ info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+ if (!info->spare)
+ return -ENOMEM;
+
+ /* Do we have previous read data to read? */
+ if (info->read < PAGE_SIZE)
+ goto read;
+
+ info->read = 0;
+
+ ret = ring_buffer_read_page(info->tr->buffer,
+ &info->spare,
+ count,
+ info->cpu, 0);
+ if (ret < 0)
+ return 0;
+
+ pos = ring_buffer_page_len(info->spare);
+
+ if (pos < PAGE_SIZE)
+ memset(info->spare + pos, 0, PAGE_SIZE - pos);
+
+read:
+ size = PAGE_SIZE - info->read;
+ if (size > count)
+ size = count;
+
+ ret = copy_to_user(ubuf, info->spare + info->read, size);
+ if (ret == size)
+ return -EFAULT;
+ size -= ret;
+
+ *ppos += size;
+ info->read += size;
+
+ return size;
+}
+
+static int tracing_buffers_release(struct inode *inode, struct file *file)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+
+ if (info->spare)
+ ring_buffer_free_read_page(info->tr->buffer, info->spare);
+ kfree(info);
+
+ return 0;
+}
+
+struct buffer_ref {
+ struct ring_buffer *buffer;
+ void *page;
+ int ref;
+};
+
+static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+ if (--ref->ref)
+ return;
+
+ ring_buffer_free_read_page(ref->buffer, ref->page);
+ kfree(ref);
+ buf->private = 0;
+}
+
+static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return 1;
+}
+
+static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+
+ ref->ref++;
+}
+
+/* Pipe buffer operations for a buffer. */
+static struct pipe_buf_operations buffer_pipe_buf_ops = {
+ .can_merge = 0,
+ .map = generic_pipe_buf_map,
+ .unmap = generic_pipe_buf_unmap,
+ .confirm = generic_pipe_buf_confirm,
+ .release = buffer_pipe_buf_release,
+ .steal = buffer_pipe_buf_steal,
+ .get = buffer_pipe_buf_get,
+};
+
+/*
+ * Callback from splice_to_pipe(), if we need to release some pages
+ * at the end of the spd in case we error'ed out in filling the pipe.
+ */
+static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
+{
+ struct buffer_ref *ref =
+ (struct buffer_ref *)spd->partial[i].private;
+
+ if (--ref->ref)
+ return;
+
+ ring_buffer_free_read_page(ref->buffer, ref->page);
+ kfree(ref);
+ spd->partial[i].private = 0;
+}
+
+static ssize_t
+tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+ struct partial_page partial[PIPE_BUFFERS];
+ struct page *pages[PIPE_BUFFERS];
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
+ .flags = flags,
+ .ops = &buffer_pipe_buf_ops,
+ .spd_release = buffer_spd_release,
+ };
+ struct buffer_ref *ref;
+ int size, i;
+ size_t ret;
+
+ if (*ppos & (PAGE_SIZE - 1)) {
+ WARN_ONCE(1, "Ftrace: previous read must page-align\n");
+ return -EINVAL;
+ }
+
+ if (len & (PAGE_SIZE - 1)) {
+ WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
+ if (len < PAGE_SIZE)
+ return -EINVAL;
+ len &= PAGE_MASK;
+ }
+
+ for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
+ struct page *page;
+ int r;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ break;
+
+ ref->ref = 1;
+ ref->buffer = info->tr->buffer;
+ ref->page = ring_buffer_alloc_read_page(ref->buffer);
+ if (!ref->page) {
+ kfree(ref);
+ break;
+ }
+
+ r = ring_buffer_read_page(ref->buffer, &ref->page,
+ len, info->cpu, 0);
+ if (r < 0) {
+ ring_buffer_free_read_page(ref->buffer,
+ ref->page);
+ kfree(ref);
+ break;
+ }
+
+ /*
+		 * zero out any leftover data; this is going to
+		 * userland.
+ */
+ size = ring_buffer_page_len(ref->page);
+ if (size < PAGE_SIZE)
+ memset(ref->page + size, 0, PAGE_SIZE - size);
+
+ page = virt_to_page(ref->page);
+
+ spd.pages[i] = page;
+ spd.partial[i].len = PAGE_SIZE;
+ spd.partial[i].offset = 0;
+ spd.partial[i].private = (unsigned long)ref;
+ spd.nr_pages++;
+ *ppos += PAGE_SIZE;
+ }
+
+ spd.nr_pages = i;
+
+ /* did we read anything? */
+ if (!spd.nr_pages) {
+ if (flags & SPLICE_F_NONBLOCK)
+ ret = -EAGAIN;
+ else
+ ret = 0;
+ /* TODO: block */
+ return ret;
+ }
+
+ ret = splice_to_pipe(pipe, &spd);
+
+ return ret;
+}
+
+static const struct file_operations tracing_buffers_fops = {
+ .open = tracing_buffers_open,
+ .read = tracing_buffers_read,
+ .release = tracing_buffers_release,
+ .splice_read = tracing_buffers_splice_read,
+ .llseek = no_llseek,
+};
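
A user-space sketch (not part of this patch) of how the new trace_pipe_raw file is meant to be consumed: whole ring-buffer pages are spliced out without copying them through user memory. The /debug mount point, the cpu0 path and the 4096-byte page size are assumptions for illustration.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2], trace_fd, out_fd;
	ssize_t n;

	trace_fd = open("/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
			O_RDONLY | O_NONBLOCK);
	out_fd = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (trace_fd < 0 || out_fd < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	/* splice() needs a pipe on one side, so bounce pages through pfd. */
	while ((n = splice(trace_fd, NULL, pfd[1], NULL,
			   4096, SPLICE_F_NONBLOCK)) > 0) {
		if (splice(pfd[0], NULL, out_fd, NULL, n, 0) < 0) {
			perror("splice to file");
			break;
		}
	}
	return 0;
}
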
+
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
@@ -3500,7 +3539,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
return r;
}
-static struct file_operations tracing_dyn_info_fops = {
+static const struct file_operations tracing_dyn_info_fops = {
.open = tracing_open_generic,
.read = tracing_read_dyn_info,
};
@@ -3515,6 +3554,9 @@ struct dentry *tracing_init_dentry(void)
if (d_tracer)
return d_tracer;
+ if (!debugfs_initialized())
+ return NULL;
+
d_tracer = debugfs_create_dir("tracing", NULL);
if (!d_tracer && !once) {
@@ -3526,15 +3568,350 @@ struct dentry *tracing_init_dentry(void)
return d_tracer;
}
+static struct dentry *d_percpu;
+
+struct dentry *tracing_dentry_percpu(void)
+{
+ static int once;
+ struct dentry *d_tracer;
+
+ if (d_percpu)
+ return d_percpu;
+
+ d_tracer = tracing_init_dentry();
+
+ if (!d_tracer)
+ return NULL;
+
+ d_percpu = debugfs_create_dir("per_cpu", d_tracer);
+
+ if (!d_percpu && !once) {
+ once = 1;
+ pr_warning("Could not create debugfs directory 'per_cpu'\n");
+ return NULL;
+ }
+
+ return d_percpu;
+}
+
+static void tracing_init_debugfs_percpu(long cpu)
+{
+ struct dentry *d_percpu = tracing_dentry_percpu();
+ struct dentry *entry, *d_cpu;
+ /* strlen(cpu) + MAX(log10(cpu)) + '\0' */
+ char cpu_dir[7];
+
+ if (cpu > 999 || cpu < 0)
+ return;
+
+ sprintf(cpu_dir, "cpu%ld", cpu);
+ d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
+ if (!d_cpu) {
+ pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
+ return;
+ }
+
+ /* per cpu trace_pipe */
+ entry = debugfs_create_file("trace_pipe", 0444, d_cpu,
+ (void *) cpu, &tracing_pipe_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'trace_pipe' entry\n");
+
+ /* per cpu trace */
+ entry = debugfs_create_file("trace", 0644, d_cpu,
+ (void *) cpu, &tracing_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'trace' entry\n");
+
+ entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
+ (void *) cpu, &tracing_buffers_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
+}
+
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
+struct trace_option_dentry {
+ struct tracer_opt *opt;
+ struct tracer_flags *flags;
+ struct dentry *entry;
+};
+
+static ssize_t
+trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_option_dentry *topt = filp->private_data;
+ char *buf;
+
+ if (topt->flags->val & topt->opt->bit)
+ buf = "1\n";
+ else
+ buf = "0\n";
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_option_dentry *topt = filp->private_data;
+ unsigned long val;
+ char buf[64];
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+ switch (val) {
+ case 0:
+ /* do nothing if already cleared */
+ if (!(topt->flags->val & topt->opt->bit))
+ break;
+
+ mutex_lock(&trace_types_lock);
+ if (current_trace->set_flag)
+ ret = current_trace->set_flag(topt->flags->val,
+ topt->opt->bit, 0);
+ mutex_unlock(&trace_types_lock);
+ if (ret)
+ return ret;
+ topt->flags->val &= ~topt->opt->bit;
+ break;
+ case 1:
+ /* do nothing if already set */
+ if (topt->flags->val & topt->opt->bit)
+ break;
+
+ mutex_lock(&trace_types_lock);
+ if (current_trace->set_flag)
+ ret = current_trace->set_flag(topt->flags->val,
+ topt->opt->bit, 1);
+ mutex_unlock(&trace_types_lock);
+ if (ret)
+ return ret;
+ topt->flags->val |= topt->opt->bit;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+
+static const struct file_operations trace_options_fops = {
+ .open = tracing_open_generic,
+ .read = trace_options_read,
+ .write = trace_options_write,
+};
+
+static ssize_t
+trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ long index = (long)filp->private_data;
+ char *buf;
+
+ if (trace_flags & (1 << index))
+ buf = "1\n";
+ else
+ buf = "0\n";
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ long index = (long)filp->private_data;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ trace_flags &= ~(1 << index);
+ break;
+ case 1:
+ trace_flags |= 1 << index;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static const struct file_operations trace_options_core_fops = {
+ .open = tracing_open_generic,
+ .read = trace_options_core_read,
+ .write = trace_options_core_write,
+};
+
+static struct dentry *trace_options_init_dentry(void)
+{
+ struct dentry *d_tracer;
+ static struct dentry *t_options;
+
+ if (t_options)
+ return t_options;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return NULL;
+
+ t_options = debugfs_create_dir("options", d_tracer);
+ if (!t_options) {
+ pr_warning("Could not create debugfs directory 'options'\n");
+ return NULL;
+ }
+
+ return t_options;
+}
+
+static void
+create_trace_option_file(struct trace_option_dentry *topt,
+ struct tracer_flags *flags,
+ struct tracer_opt *opt)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return;
+
+ topt->flags = flags;
+ topt->opt = opt;
+
+ entry = debugfs_create_file(opt->name, 0644, t_options, topt,
+ &trace_options_fops);
+
+ topt->entry = entry;
+
+}
+
+static struct trace_option_dentry *
+create_trace_option_files(struct tracer *tracer)
+{
+ struct trace_option_dentry *topts;
+ struct tracer_flags *flags;
+ struct tracer_opt *opts;
+ int cnt;
+
+ if (!tracer)
+ return NULL;
+
+ flags = tracer->flags;
+
+ if (!flags || !flags->opts)
+ return NULL;
+
+ opts = flags->opts;
+
+ for (cnt = 0; opts[cnt].name; cnt++)
+ ;
+
+ topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
+ if (!topts)
+ return NULL;
+
+ for (cnt = 0; opts[cnt].name; cnt++)
+ create_trace_option_file(&topts[cnt], flags,
+ &opts[cnt]);
+
+ return topts;
+}
+
+static void
+destroy_trace_option_files(struct trace_option_dentry *topts)
+{
+ int cnt;
+
+ if (!topts)
+ return;
+
+ for (cnt = 0; topts[cnt].opt; cnt++) {
+ if (topts[cnt].entry)
+ debugfs_remove(topts[cnt].entry);
+ }
+
+ kfree(topts);
+}
+
+static struct dentry *
+create_trace_option_core_file(const char *option, long index)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return NULL;
+
+ entry = debugfs_create_file(option, 0644, t_options, (void *)index,
+ &trace_options_core_fops);
+
+ return entry;
+}
+
+static __init void create_trace_options_dir(void)
+{
+ struct dentry *t_options;
+ struct dentry *entry;
+ int i;
+
+ t_options = trace_options_init_dentry();
+ if (!t_options)
+ return;
+
+ for (i = 0; trace_options[i]; i++) {
+ entry = create_trace_option_core_file(trace_options[i], i);
+ if (!entry)
+ pr_warning("Could not create debugfs %s entry\n",
+ trace_options[i]);
+ }
+}
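
With the per-option files created above, a flag can be flipped by writing "0" or "1" to options/<name> instead of echoing "name"/"noname" into trace_options. A user-space sketch, assuming the core option "print-parent" exists and debugfs is mounted on /debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/debug/tracing/options/print-parent", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "0" clears the flag, "1" sets it; anything else returns -EINVAL. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
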
+
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
struct dentry *entry;
+ int cpu;
d_tracer = tracing_init_dentry();
@@ -3548,18 +3925,15 @@ static __init int tracer_init_debugfs(void)
if (!entry)
pr_warning("Could not create debugfs 'trace_options' entry\n");
+ create_trace_options_dir();
+
entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
NULL, &tracing_cpumask_fops);
if (!entry)
pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
- entry = debugfs_create_file("latency_trace", 0444, d_tracer,
- &global_trace, &tracing_lt_fops);
- if (!entry)
- pr_warning("Could not create debugfs 'latency_trace' entry\n");
-
- entry = debugfs_create_file("trace", 0444, d_tracer,
- &global_trace, &tracing_fops);
+ entry = debugfs_create_file("trace", 0644, d_tracer,
+ (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
if (!entry)
pr_warning("Could not create debugfs 'trace' entry\n");
@@ -3590,8 +3964,8 @@ static __init int tracer_init_debugfs(void)
if (!entry)
pr_warning("Could not create debugfs 'README' entry\n");
- entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
- NULL, &tracing_pipe_fops);
+ entry = debugfs_create_file("trace_pipe", 0444, d_tracer,
+ (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'trace_pipe' entry\n");
@@ -3619,77 +3993,12 @@ static __init int tracer_init_debugfs(void)
#ifdef CONFIG_SYSPROF_TRACER
init_tracer_sysprof_debugfs(d_tracer);
#endif
- return 0;
-}
-
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
-{
- static DEFINE_SPINLOCK(trace_buf_lock);
- static char trace_buf[TRACE_BUF_SIZE];
-
- struct ring_buffer_event *event;
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- int cpu, len = 0, size, pc;
- struct print_entry *entry;
- unsigned long irq_flags;
-
- if (tracing_disabled || tracing_selftest_running)
- return 0;
-
- pc = preempt_count();
- preempt_disable_notrace();
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
-
- if (unlikely(atomic_read(&data->disabled)))
- goto out;
-
- pause_graph_tracing();
- spin_lock_irqsave(&trace_buf_lock, irq_flags);
- len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
-
- len = min(len, TRACE_BUF_SIZE-1);
- trace_buf[len] = 0;
-
- size = sizeof(*entry) + len + 1;
- event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
- if (!event)
- goto out_unlock;
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, irq_flags, pc);
- entry->ent.type = TRACE_PRINT;
- entry->ip = ip;
- entry->depth = depth;
-
- memcpy(&entry->buf, trace_buf, len);
- entry->buf[len] = 0;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- out_unlock:
- spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
- unpause_graph_tracing();
- out:
- preempt_enable_notrace();
-
- return len;
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-int __ftrace_printk(unsigned long ip, const char *fmt, ...)
-{
- int ret;
- va_list ap;
-
- if (!(trace_flags & TRACE_ITER_PRINTK))
- return 0;
+ for_each_tracing_cpu(cpu)
+ tracing_init_debugfs_percpu(cpu);
- va_start(ap, fmt);
- ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
- va_end(ap);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL_GPL(__ftrace_printk);
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
@@ -3750,14 +4059,15 @@ trace_printk_seq(struct trace_seq *s)
printk(KERN_TRACE "%s", s->buffer);
- trace_seq_reset(s);
+ trace_seq_init(s);
}
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
{
static DEFINE_SPINLOCK(ftrace_dump_lock);
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
@@ -3769,21 +4079,26 @@ void ftrace_dump(void)
dump_ran = 1;
- /* No turning back! */
tracing_off();
- ftrace_kill();
+
+ if (disable_tracing)
+ ftrace_kill();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
+ old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
/*
* We need to stop all tracing on all CPUS to read the
@@ -3819,13 +4134,30 @@ void ftrace_dump(void)
else
printk(KERN_TRACE "---------------------------------\n");
+ /* Re-enable tracing if requested */
+ if (!disable_tracing) {
+ trace_flags |= old_userobj;
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ tracing_on();
+ }
+
out:
spin_unlock_irqrestore(&ftrace_dump_lock, flags);
}
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+ __ftrace_dump(true);
+}
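
External code only reaches the dump through ftrace_dump(); __ftrace_dump() stays static, so the "keep tracing alive" variant is reserved for in-file users. A sketch of a hypothetical error path using it (my_check_status() is invented for illustration):

static void my_check_status(int status)
{
	if (status < 0) {
		printk(KERN_ERR "my_check_status: fatal status %d\n", status);
		/* Prints all ring buffers and, by default, disables tracing. */
		ftrace_dump();
	}
}
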
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
+ int ring_buf_size;
int i;
int ret = -ENOMEM;
@@ -3835,11 +4167,21 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
+ if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+ goto out_free_tracing_cpumask;
+
+ /* To save memory, keep the ring buffer size to its minimum */
+ if (ring_buffer_expanded)
+ ring_buf_size = trace_buf_size;
+ else
+ ring_buf_size = 1;
+
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
+ cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
- global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+ global_trace.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3850,7 +4192,7 @@ __init static int tracer_alloc_buffers(void)
#ifdef CONFIG_TRACER_MAX_TRACE
- max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+ max_tr.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
@@ -3871,14 +4213,10 @@ __init static int tracer_alloc_buffers(void)
trace_init_cmdlines();
register_tracer(&nop_trace);
+ current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
register_tracer(&boot_tracer);
- current_trace = &boot_tracer;
- current_trace->init(&global_trace);
-#else
- current_trace = &nop_trace;
#endif
-
/* All seems OK, enable tracing */
tracing_disabled = 0;
@@ -3886,14 +4224,38 @@ __init static int tracer_alloc_buffers(void)
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
- ret = 0;
+
+ return 0;
out_free_cpumask:
+ free_cpumask_var(tracing_reader_cpumask);
+out_free_tracing_cpumask:
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
out:
return ret;
}
+
+__init static int clear_boot_tracer(void)
+{
+ /*
+	 * The default bootup tracer's buffer is in an init section.
+	 * This function is called from a late initcall. If we did not
+	 * find the boot tracer, clear it out to prevent a later
+	 * registration from accessing the buffer that is about
+	 * to be freed.
+ */
+ if (!default_bootup_tracer)
+ return 0;
+
+ printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
+ default_bootup_tracer);
+ default_bootup_tracer = NULL;
+
+ return 0;
+}
+
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
+late_initcall(clear_boot_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4d3d381bfd95..e685ac2b2ba1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -9,6 +9,8 @@
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
+#include <trace/kmemtrace.h>
+#include <trace/power.h>
enum trace_type {
__TRACE_FIRST_TYPE = 0,
@@ -16,9 +18,9 @@ enum trace_type {
TRACE_FN,
TRACE_CTX,
TRACE_WAKE,
- TRACE_CONT,
TRACE_STACK,
TRACE_PRINT,
+ TRACE_BPRINT,
TRACE_SPECIAL,
TRACE_MMIO_RW,
TRACE_MMIO_MAP,
@@ -29,9 +31,14 @@ enum trace_type {
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
TRACE_HW_BRANCHES,
+ TRACE_SYSCALL_ENTER,
+ TRACE_SYSCALL_EXIT,
+ TRACE_KMEM_ALLOC,
+ TRACE_KMEM_FREE,
TRACE_POWER,
+ TRACE_BLK,
- __TRACE_LAST_TYPE
+ __TRACE_LAST_TYPE,
};
/*
@@ -42,7 +49,6 @@ enum trace_type {
*/
struct trace_entry {
unsigned char type;
- unsigned char cpu;
unsigned char flags;
unsigned char preempt_count;
int pid;
@@ -60,13 +66,13 @@ struct ftrace_entry {
/* Function call entry */
struct ftrace_graph_ent_entry {
- struct trace_entry ent;
+ struct trace_entry ent;
struct ftrace_graph_ent graph_ent;
};
/* Function return entry */
struct ftrace_graph_ret_entry {
- struct trace_entry ent;
+ struct trace_entry ent;
struct ftrace_graph_ret ret;
};
extern struct tracer boot_tracer;
@@ -112,12 +118,18 @@ struct userstack_entry {
};
/*
- * ftrace_printk entry:
+ * trace_printk entry:
*/
+struct bprint_entry {
+ struct trace_entry ent;
+ unsigned long ip;
+ const char *fmt;
+ u32 buf[];
+};
+
struct print_entry {
struct trace_entry ent;
unsigned long ip;
- int depth;
char buf[];
};
@@ -170,15 +182,51 @@ struct trace_power {
struct power_trace state_data;
};
+enum kmemtrace_type_id {
+ KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
+ KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
+ KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
+};
+
+struct kmemtrace_alloc_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+ size_t bytes_req;
+ size_t bytes_alloc;
+ gfp_t gfp_flags;
+ int node;
+};
+
+struct kmemtrace_free_entry {
+ struct trace_entry ent;
+ enum kmemtrace_type_id type_id;
+ unsigned long call_site;
+ const void *ptr;
+};
+
+struct syscall_trace_enter {
+ struct trace_entry ent;
+ int nr;
+ unsigned long args[];
+};
+
+struct syscall_trace_exit {
+ struct trace_entry ent;
+ int nr;
+ unsigned long ret;
+};
+
+
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
* IRQS_OFF - interrupts were disabled
- * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
+ * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
* NEED_RESCED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
- * CONT - multiple entries hold the trace item
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
@@ -186,7 +234,6 @@ enum trace_flag_type {
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_CONT = 0x20,
};
#define TRACE_BUF_SIZE 1024
@@ -198,6 +245,7 @@ enum trace_flag_type {
*/
struct trace_array_cpu {
atomic_t disabled;
+ void *buffer_page; /* ring buffer spare */
/* these fields get copied into max-trace: */
unsigned long trace_idx;
@@ -262,10 +310,10 @@ extern void __ftrace_bad_type(void);
do { \
IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
- IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
+ IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct special_entry, 0); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
@@ -279,7 +327,15 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
TRACE_GRAPH_RET); \
IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
- IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+ IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
+ TRACE_KMEM_ALLOC); \
+ IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
+ TRACE_KMEM_FREE); \
+ IF_ASSIGN(var, ent, struct syscall_trace_enter, \
+ TRACE_SYSCALL_ENTER); \
+ IF_ASSIGN(var, ent, struct syscall_trace_exit, \
+ TRACE_SYSCALL_EXIT); \
__ftrace_bad_type(); \
} while (0)
@@ -287,7 +343,8 @@ extern void __ftrace_bad_type(void);
enum print_line_t {
TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
TRACE_TYPE_HANDLED = 1,
- TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
+ TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
+ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
@@ -297,8 +354,8 @@ enum print_line_t {
* flags value in struct tracer_flags.
*/
struct tracer_opt {
- const char *name; /* Will appear on the trace_options file */
- u32 bit; /* Mask assigned in val field in tracer_flags */
+ const char *name; /* Will appear on the trace_options file */
+ u32 bit; /* Mask assigned in val field in tracer_flags */
};
/*
@@ -307,28 +364,51 @@ struct tracer_opt {
*/
struct tracer_flags {
u32 val;
- struct tracer_opt *opts;
+ struct tracer_opt *opts;
};
/* Makes more easy to define a tracer opt */
#define TRACER_OPT(s, b) .name = #s, .bit = b
-/*
- * A specific tracer, represented by methods that operate on a trace array:
+
+/**
+ * struct tracer - a specific tracer and its callbacks to interact with debugfs
+ * @name: the name chosen to select it on the available_tracers file
+ * @init: called when one switches to this tracer (echo name > current_tracer)
+ * @reset: called when one switches to another tracer
+ * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
+ * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @open: called when the trace file is opened
+ * @pipe_open: called when the trace_pipe file is opened
+ * @wait_pipe: override how the user waits for traces on trace_pipe
+ * @close: called when the trace file is released
+ * @read: override the default read callback on trace_pipe
+ * @splice_read: override the default splice_read callback on trace_pipe
+ * @selftest: selftest to run on boot (see trace_selftest.c)
+ * @print_headers: override the first lines that describe your columns
+ * @print_line: callback that prints a trace
+ * @set_flag: signals one of your private flags changed (trace_options file)
+ * @flags: your private flags
*/
struct tracer {
const char *name;
- /* Your tracer should raise a warning if init fails */
int (*init)(struct trace_array *tr);
void (*reset)(struct trace_array *tr);
void (*start)(struct trace_array *tr);
void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
+ void (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos);
+ ssize_t (*splice_read)(struct trace_iterator *iter,
+ struct file *filp,
+ loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len,
+ unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
int (*selftest)(struct tracer *trace,
struct trace_array *tr);
@@ -339,7 +419,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
int print_max;
- struct tracer_flags *flags;
+ struct tracer_flags *flags;
+ struct tracer_stat *stats;
};
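
Taken together with the kernel-doc above, a minimal tracer only has to fill in a handful of these callbacks before calling register_tracer(). A sketch under the assumption that a do-nothing tracer is acceptable; every "demo_*" name is invented for this example.

static int demo_tracer_init(struct trace_array *tr)
{
	return 0;			/* nothing to set up */
}

static void demo_tracer_reset(struct trace_array *tr)
{
}

static struct tracer demo_tracer __read_mostly = {
	.name		= "demo",
	.init		= demo_tracer_init,
	.reset		= demo_tracer_reset,
	.wait_pipe	= default_wait_pipe,	/* or poll_wait_pipe */
};

static __init int demo_tracer_register(void)
{
	return register_tracer(&demo_tracer);
}
device_initcall(demo_tracer_register);
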
struct trace_seq {
@@ -348,6 +429,16 @@ struct trace_seq {
unsigned int readpos;
};
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+ s->len = 0;
+ s->readpos = 0;
+}
+
+
+#define TRACE_PIPE_ALL_CPU -1
+
/*
* Trace iterator - used by printout routines who present trace
* results to users and which routines might sleep, etc:
@@ -356,6 +447,8 @@ struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
void *private;
+ int cpu_file;
+ struct mutex mutex;
struct ring_buffer_iter *buffer_iter[NR_CPUS];
/* The below is zeroed out in pipe_read */
@@ -371,6 +464,7 @@ struct trace_iterator {
cpumask_var_t started;
};
+int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
@@ -379,26 +473,50 @@ int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
+struct ring_buffer_event;
+
+struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
+ unsigned char type,
+ unsigned long len,
+ unsigned long flags,
+ int pc);
+void trace_buffer_unlock_commit(struct trace_array *tr,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
+ unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
+
+struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
+ int *ent_cpu, u64 *ent_ts);
+
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
+void default_wait_pipe(struct trace_iterator *iter);
+void poll_wait_pipe(struct trace_iterator *iter);
+
void ftrace(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);
void tracing_sched_wakeup_trace(struct trace_array *tr,
- struct trace_array_cpu *data,
struct task_struct *wakee,
struct task_struct *cur,
unsigned long flags, int pc);
@@ -408,14 +526,12 @@ void trace_special(struct trace_array *tr,
unsigned long arg2,
unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
- struct trace_array_cpu *data,
unsigned long ip,
unsigned long parent_ip,
unsigned long flags, int pc);
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
@@ -434,15 +550,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
-extern cycle_t ftrace_now(int cpu);
+void __trace_stack(struct trace_array *tr,
+ unsigned long flags,
+ int skip, int pc);
-#ifdef CONFIG_FUNCTION_TRACER
-void tracing_start_function_trace(void);
-void tracing_stop_function_trace(void);
-#else
-# define tracing_start_function_trace() do { } while (0)
-# define tracing_stop_function_trace() do { } while (0)
-#endif
+extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
@@ -456,10 +568,10 @@ struct tracer_switch_ops {
void *private;
struct tracer_switch_ops *next;
};
-
-char *trace_find_cmdline(int pid);
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
+extern void trace_find_cmdline(int pid, char comm[]);
+
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@ -469,6 +581,8 @@ extern int DYN_FTRACE_TEST_NAME(void);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
struct trace_array *tr);
+extern int trace_selftest_startup_function_graph(struct tracer *trace,
+ struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
@@ -488,18 +602,11 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
-extern void trace_seq_print_cont(struct trace_seq *s,
- struct trace_iterator *iter);
-
+extern unsigned long long ns2usecs(cycle_t nsec);
extern int
-seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
- unsigned long sym_flags);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
- size_t cnt);
-extern long ns2usecs(cycle_t nsec);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;
@@ -580,7 +687,11 @@ enum trace_iterator_flags {
TRACE_ITER_ANNOTATE = 0x2000,
TRACE_ITER_USERSTACKTRACE = 0x4000,
TRACE_ITER_SYM_USEROBJ = 0x8000,
- TRACE_ITER_PRINTK_MSGONLY = 0x10000
+ TRACE_ITER_PRINTK_MSGONLY = 0x10000,
+ TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+ TRACE_ITER_LATENCY_FMT = 0x40000,
+ TRACE_ITER_GLOBAL_CLK = 0x80000,
+ TRACE_ITER_SLEEP_TIME = 0x100000,
};
/*
@@ -601,12 +712,12 @@ extern struct tracer nop_trace;
* preempt_enable (after a disable), a schedule might take place
* causing an infinite recursion.
*
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
* disabling preemption. When we want to enable preemption we
* check the flag, if it is set, then we call preempt_enable_no_resched.
* Otherwise, we call preempt_enable.
*
- * The rational for doing the above is that if need resched is set
+ * The rationale for doing the above is that if need_resched is set
* and we have yet to reschedule, we are either in an atomic location
* (where we do not need to check for scheduling) or we are inside
* the scheduler and do not want to resched.
@@ -627,7 +738,7 @@ static inline int ftrace_preempt_disable(void)
*
* This is a scheduler safe way to enable preemption and not miss
* any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
* are inside the scheduler (we would have already scheduled
* otherwise). In this case, we do not want to call normal
* preempt_enable, but preempt_enable_no_resched instead.
@@ -664,4 +775,118 @@ static inline void trace_branch_disable(void)
}
#endif /* CONFIG_BRANCH_TRACER */
+/* set ring buffers to default size if not already done */
+int tracing_update_buffers(void);
+
+/* trace event type bit fields, not numeric */
+enum {
+ TRACE_EVENT_TYPE_PRINTF = 1,
+ TRACE_EVENT_TYPE_RAW = 2,
+};
+
+struct ftrace_event_field {
+ struct list_head link;
+ char *name;
+ char *type;
+ int offset;
+ int size;
+};
+
+struct ftrace_event_call {
+ char *name;
+ char *system;
+ struct dentry *dir;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+ int (*define_fields)(void);
+ struct list_head fields;
+ struct filter_pred **preds;
+
+#ifdef CONFIG_EVENT_PROFILE
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
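
For orientation, a hand-written instance of the structure above could look like the sketch below (hypothetical event name and stub callbacks; in practice the TRACE_EVENT stage macros later in this series generate these automatically):

static int example_reg(void)	{ return 0; }	/* attach the tracepoint probe */
static void example_unreg(void)	{ }		/* detach it again */

static struct ftrace_event_call event_example = {
	.name		= "example",
	.system		= "sample",
	.regfunc	= example_reg,
	.unregfunc	= example_unreg,
};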
+struct event_subsystem {
+ struct list_head list;
+ const char *name;
+ struct dentry *entry;
+ struct filter_pred **preds;
+};
+
+#define events_for_each(event) \
+ for (event = __start_ftrace_events; \
+ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ event++)
+
+#define MAX_FILTER_PRED 8
+
+struct filter_pred;
+
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+
+struct filter_pred {
+ filter_pred_fn_t fn;
+ u64 val;
+ char *str_val;
+ int str_len;
+ char *field_name;
+ int offset;
+ int not;
+ int or;
+ int compound;
+ int clear;
+};
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size);
+extern void filter_free_pred(struct filter_pred *pred);
+extern void filter_print_preds(struct filter_pred **preds,
+ struct trace_seq *s);
+extern int filter_parse(char **pbuf, struct filter_pred *pred);
+extern int filter_add_pred(struct ftrace_event_call *call,
+ struct filter_pred *pred);
+extern void filter_free_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern void filter_free_subsystem_preds(struct event_subsystem *system);
+extern int filter_add_subsystem_pred(struct event_subsystem *system,
+ struct filter_pred *pred);
+
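
A minimal sketch of how these filter helpers fit together (hypothetical function; it mirrors the debugfs write path added later in this series and assumes an expression such as "common_pid == 0"):

static int example_set_filter(struct ftrace_event_call *call, char *expr)
{
	struct filter_pred *pred;
	int err;

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&expr, pred);	/* fills field_name, val/str_val, not/or */
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {			/* the expression "0" clears the filter */
		filter_free_preds(call);
		filter_free_pred(pred);
		return 0;
	}

	err = filter_add_pred(call, pred);	/* on success the pred is owned by call */
	if (err < 0)
		filter_free_pred(pred);
	return err;
}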
+void event_trace_printk(unsigned long ip, const char *fmt, ...);
+extern struct ftrace_event_call __start_ftrace_events[];
+extern struct ftrace_event_call __stop_ftrace_events[];
+
+#define for_each_event(event) \
+ for (event = __start_ftrace_events; \
+ (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+ event++)
+
+extern const char *__start___trace_bprintk_fmt[];
+extern const char *__stop___trace_bprintk_fmt[];
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant. Even with the outer if statement optimizing out.
+ */
+#define event_trace_printk(ip, fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ tracing_record_cmdline(current); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(ip, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(ip, fmt, ##args); \
+} while (0)
+
#endif /* _LINUX_KERNEL_TRACE_H */
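
The event_trace_printk() macro above picks a path based on whether fmt is a compile-time constant: constant formats are stashed in the __trace_printk_fmt section so only a pointer plus the arguments reach the ring buffer via __trace_bprintk(), while runtime formats fall back to plain __trace_printk(). A minimal caller sketch (hypothetical function; _THIS_IP_ is the usual kernel helper for the current instruction pointer):

static void example_event_printk(const char *runtime_fmt, int val)
{
	/* constant format: compact binary record via __trace_bprintk() */
	event_trace_printk(_THIS_IP_, "val is %d\n", val);

	/* non-constant format: falls back to __trace_printk() */
	event_trace_printk(_THIS_IP_, runtime_fmt, val);
}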
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 366c8c333e13..7a30fc4c3642 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -11,6 +11,7 @@
#include <linux/kallsyms.h>
#include "trace.h"
+#include "trace_output.h"
static struct trace_array *boot_trace;
static bool pre_initcalls_finished;
@@ -27,13 +28,13 @@ void start_boot_trace(void)
void enable_boot_trace(void)
{
- if (pre_initcalls_finished)
+ if (boot_trace && pre_initcalls_finished)
tracing_start_sched_switch_record();
}
void disable_boot_trace(void)
{
- if (pre_initcalls_finished)
+ if (boot_trace && pre_initcalls_finished)
tracing_stop_sched_switch_record();
}
@@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr)
int cpu;
boot_trace = tr;
+ if (!tr)
+ return 0;
+
for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
@@ -128,10 +132,9 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot_call *entry;
- unsigned long irq_flags;
struct trace_array *tr = boot_trace;
- if (!pre_initcalls_finished)
+ if (!tr || !pre_initcalls_finished)
return;
/* Get its name now since this function could
@@ -140,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_BOOT_CALL;
entry->boot_call = *bt;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
@@ -160,27 +158,21 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
struct ring_buffer_event *event;
struct trace_boot_ret *entry;
- unsigned long irq_flags;
struct trace_array *tr = boot_trace;
- if (!pre_initcalls_finished)
+ if (!tr || !pre_initcalls_finished)
return;
sprint_symbol(bt->func, (unsigned long)fn);
preempt_disable();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
+ sizeof(*entry), 0, 0);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_BOOT_RET;
entry->boot_ret = *bt;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
+ trace_buffer_unlock_commit(tr, event, 0, 0);
out:
preempt_enable();
}
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 6c00feb3bac7..8333715e4066 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -14,12 +14,17 @@
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
+
#include "trace.h"
+#include "trace_stat.h"
+#include "trace_output.h"
#ifdef CONFIG_BRANCH_TRACER
+static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
+
static struct trace_array *branch_tracer;
static void
@@ -28,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
struct trace_array *tr = branch_tracer;
struct ring_buffer_event *event;
struct trace_branch *entry;
- unsigned long flags, irq_flags;
+ unsigned long flags;
int cpu, pc;
const char *p;
@@ -47,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
goto out;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ pc = preempt_count();
+ event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
+ sizeof(*entry), flags, pc);
if (!event)
goto out;
- pc = preempt_count();
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, flags, pc);
- entry->ent.type = TRACE_BRANCH;
/* Strip off the path, only save the file */
p = f->file + strlen(f->file);
@@ -70,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->line = f->line;
entry->correct = val == expect;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+ ring_buffer_unlock_commit(tr->buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
@@ -88,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
int enable_branch_tracing(struct trace_array *tr)
{
- int ret = 0;
-
mutex_lock(&branch_tracing_mutex);
branch_tracer = tr;
/*
@@ -100,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr)
branch_tracing_enabled++;
mutex_unlock(&branch_tracing_mutex);
- return ret;
+ return 0;
}
void disable_branch_tracing(void)
@@ -128,11 +129,6 @@ static void stop_branch_trace(struct trace_array *tr)
static int branch_trace_init(struct trace_array *tr)
{
- int cpu;
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-
start_branch_trace(tr);
return 0;
}
@@ -142,22 +138,61 @@ static void branch_trace_reset(struct trace_array *tr)
stop_branch_trace(tr);
}
-struct tracer branch_trace __read_mostly =
+static enum print_line_t trace_branch_print(struct trace_iterator *iter,
+ int flags)
+{
+ struct trace_branch *field;
+
+ trace_assign_type(field, iter->ent);
+
+ if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
+ field->correct ? " ok " : " MISS ",
+ field->func,
+ field->file,
+ field->line))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static void branch_print_header(struct seq_file *s)
+{
+ seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT"
+ " FUNC:FILE:LINE\n");
+ seq_puts(s, "# | | | | | "
+ " |\n");
+}
+
+static struct trace_event trace_branch_event = {
+ .type = TRACE_BRANCH,
+ .trace = trace_branch_print,
+};
+
+static struct tracer branch_trace __read_mostly =
{
.name = "branch",
.init = branch_trace_init,
.reset = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_branch,
-#endif
+#endif /* CONFIG_FTRACE_SELFTEST */
+ .print_header = branch_print_header,
};
-__init static int init_branch_trace(void)
+__init static int init_branch_tracer(void)
{
+ int ret;
+
+ ret = register_ftrace_event(&trace_branch_event);
+ if (!ret) {
+ printk(KERN_WARNING "Warning: could not register "
+ "branch events\n");
+ return 1;
+ }
return register_tracer(&branch_trace);
}
+device_initcall(init_branch_tracer);
-device_initcall(init_branch_trace);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
@@ -183,66 +218,39 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
}
EXPORT_SYMBOL(ftrace_likely_update);
-struct ftrace_pointer {
- void *start;
- void *stop;
- int hit;
-};
+extern unsigned long __start_annotated_branch_profile[];
+extern unsigned long __stop_annotated_branch_profile[];
-static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+static int annotated_branch_stat_headers(struct seq_file *m)
{
- const struct ftrace_pointer *f = m->private;
- struct ftrace_branch_data *p = v;
-
- (*pos)++;
-
- if (v == (void *)1)
- return f->start;
-
- ++p;
-
- if ((void *)p >= (void *)f->stop)
- return NULL;
-
- return p;
+ seq_printf(m, " correct incorrect %% ");
+ seq_printf(m, " Function "
+ " File Line\n"
+ " ------- --------- - "
+ " -------- "
+ " ---- ----\n");
+ return 0;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
- void *t = (void *)1;
- loff_t l = 0;
-
- for (; t && l < *pos; t = t_next(m, t, &l))
- ;
+ long percent;
- return t;
-}
+ if (p->correct) {
+ percent = p->incorrect * 100;
+ percent /= p->correct + p->incorrect;
+ } else
+ percent = p->incorrect ? 100 : -1;
-static void t_stop(struct seq_file *m, void *p)
-{
+ return percent;
}
-static int t_show(struct seq_file *m, void *v)
+static int branch_stat_show(struct seq_file *m, void *v)
{
- const struct ftrace_pointer *fp = m->private;
struct ftrace_branch_data *p = v;
const char *f;
long percent;
- if (v == (void *)1) {
- if (fp->hit)
- seq_printf(m, " miss hit %% ");
- else
- seq_printf(m, " correct incorrect %% ");
- seq_printf(m, " Function "
- " File Line\n"
- " ------- --------- - "
- " -------- "
- " ---- ----\n");
- return 0;
- }
-
/* Only print the file, not the path */
f = p->file + strlen(p->file);
while (f >= p->file && *f != '/')
@@ -252,11 +260,7 @@ static int t_show(struct seq_file *m, void *v)
/*
* The miss is overlayed on correct, and hit on incorrect.
*/
- if (p->correct) {
- percent = p->incorrect * 100;
- percent /= p->correct + p->incorrect;
- } else
- percent = p->incorrect ? 100 : -1;
+ percent = get_incorrect_percent(p);
seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
if (percent < 0)
@@ -267,76 +271,118 @@ static int t_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations tracing_likely_seq_ops = {
- .start = t_start,
- .next = t_next,
- .stop = t_stop,
- .show = t_show,
+static void *annotated_branch_stat_start(void)
+{
+ return __start_annotated_branch_profile;
+}
+
+static void *
+annotated_branch_stat_next(void *v, int idx)
+{
+ struct ftrace_branch_data *p = v;
+
+ ++p;
+
+ if ((void *)p >= (void *)__stop_annotated_branch_profile)
+ return NULL;
+
+ return p;
+}
+
+static int annotated_branch_stat_cmp(void *p1, void *p2)
+{
+ struct ftrace_branch_data *a = p1;
+ struct ftrace_branch_data *b = p2;
+
+ long percent_a, percent_b;
+
+ percent_a = get_incorrect_percent(a);
+ percent_b = get_incorrect_percent(b);
+
+ if (percent_a < percent_b)
+ return -1;
+ if (percent_a > percent_b)
+ return 1;
+ else
+ return 0;
+}
+
+static struct tracer_stat annotated_branch_stats = {
+ .name = "branch_annotated",
+ .stat_start = annotated_branch_stat_start,
+ .stat_next = annotated_branch_stat_next,
+ .stat_cmp = annotated_branch_stat_cmp,
+ .stat_headers = annotated_branch_stat_headers,
+ .stat_show = branch_stat_show
};
-static int tracing_branch_open(struct inode *inode, struct file *file)
+__init static int init_annotated_branch_stats(void)
{
int ret;
- ret = seq_open(file, &tracing_likely_seq_ops);
+ ret = register_stat_tracer(&annotated_branch_stats);
if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = (void *)inode->i_private;
+ printk(KERN_WARNING "Warning: could not register "
+ "annotated branches stats\n");
+ return 1;
}
-
- return ret;
+ return 0;
}
-
-static const struct file_operations tracing_branch_fops = {
- .open = tracing_branch_open,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+fs_initcall(init_annotated_branch_stats);
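
The conversion above moves the branch profiler onto the generic stat-tracing API: a provider supplies iteration, header, and show callbacks, and register_stat_tracer() exposes the (optionally sorted) result under debugfs trace_stat/. A minimal sketch of such a provider (hypothetical data and names, callback signatures inferred from the code above; stat_cmp is optional):

struct sample_stat {
	unsigned long	hits;
	const char	*name;
};

static struct sample_stat sample_stats_data[] = {
	{ 10, "foo" }, { 5, "bar" }, { 7, "baz" },
};

static void *sample_stat_start(void)
{
	return sample_stats_data;
}

static void *sample_stat_next(void *v, int idx)
{
	struct sample_stat *p = v;

	if (++p >= sample_stats_data + ARRAY_SIZE(sample_stats_data))
		return NULL;
	return p;
}

static int sample_stat_headers(struct seq_file *m)
{
	seq_printf(m, "    hits  name\n");
	return 0;
}

static int sample_stat_show(struct seq_file *m, void *v)
{
	struct sample_stat *p = v;

	seq_printf(m, "%8lu  %s\n", p->hits, p->name);
	return 0;
}

static struct tracer_stat sample_stats = {
	.name		= "sample",		/* file name under trace_stat/ */
	.stat_start	= sample_stat_start,
	.stat_next	= sample_stat_next,
	.stat_headers	= sample_stat_headers,
	.stat_show	= sample_stat_show,
};

__init static int sample_stat_init(void)
{
	return register_stat_tracer(&sample_stats);
}
fs_initcall(sample_stat_init);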
#ifdef CONFIG_PROFILE_ALL_BRANCHES
+
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];
-static const struct ftrace_pointer ftrace_branch_pos = {
- .start = __start_branch_profile,
- .stop = __stop_branch_profile,
- .hit = 1,
-};
+static int all_branch_stat_headers(struct seq_file *m)
+{
+ seq_printf(m, " miss hit %% ");
+ seq_printf(m, " Function "
+ " File Line\n"
+ " ------- --------- - "
+ " -------- "
+ " ---- ----\n");
+ return 0;
+}
-#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+static void *all_branch_stat_start(void)
+{
+ return __start_branch_profile;
+}
-extern unsigned long __start_annotated_branch_profile[];
-extern unsigned long __stop_annotated_branch_profile[];
+static void *
+all_branch_stat_next(void *v, int idx)
+{
+ struct ftrace_branch_data *p = v;
-static const struct ftrace_pointer ftrace_annotated_branch_pos = {
- .start = __start_annotated_branch_profile,
- .stop = __stop_annotated_branch_profile,
-};
+ ++p;
-static __init int ftrace_branch_init(void)
-{
- struct dentry *d_tracer;
- struct dentry *entry;
+ if ((void *)p >= (void *)__stop_branch_profile)
+ return NULL;
- d_tracer = tracing_init_dentry();
+ return p;
+}
- entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
- (void *)&ftrace_annotated_branch_pos,
- &tracing_branch_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'profile_annotatet_branch' entry\n");
+static struct tracer_stat all_branch_stats = {
+ .name = "branch_all",
+ .stat_start = all_branch_stat_start,
+ .stat_next = all_branch_stat_next,
+ .stat_headers = all_branch_stat_headers,
+ .stat_show = branch_stat_show
+};
-#ifdef CONFIG_PROFILE_ALL_BRANCHES
- entry = debugfs_create_file("profile_branch", 0444, d_tracer,
- (void *)&ftrace_branch_pos,
- &tracing_branch_fops);
- if (!entry)
- pr_warning("Could not create debugfs"
- " 'profile_branch' entry\n");
-#endif
+__init static int all_annotated_branch_stats(void)
+{
+ int ret;
+ ret = register_stat_tracer(&all_branch_stats);
+ if (!ret) {
+ printk(KERN_WARNING "Warning: could not register "
+ "all branches stats\n");
+ return 1;
+ }
return 0;
}
-
-device_initcall(ftrace_branch_init);
+fs_initcall(all_annotated_branch_stats);
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
new file mode 100644
index 000000000000..b588fd81f7f9
--- /dev/null
+++ b/kernel/trace/trace_clock.c
@@ -0,0 +1,109 @@
+/*
+ * tracing clocks
+ *
+ * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Implements 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ * - local: CPU-local trace clock
+ * - medium: scalable global clock with some jitter
+ * - global: globally monotonic, serialized clock
+ *
+ * Tracer plugins will choose a default from these clocks.
+ */
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/ktime.h>
+#include <linux/trace_clock.h>
+
+/*
+ * trace_clock_local(): the simplest and least coherent tracing clock.
+ *
+ * Useful for tracing that does not cross to other CPUs nor
+ * does it go through idle events.
+ */
+u64 notrace trace_clock_local(void)
+{
+ unsigned long flags;
+ u64 clock;
+
+ /*
+ * sched_clock() is an architecture implemented, fast, scalable,
+ * lockless clock. It is not guaranteed to be coherent across
+ * CPUs, nor across CPU idle events.
+ */
+ raw_local_irq_save(flags);
+ clock = sched_clock();
+ raw_local_irq_restore(flags);
+
+ return clock;
+}
+
+/*
+ * trace_clock(): 'inbetween' trace clock. Not completely serialized,
+ * but not completely incorrect when crossing CPUs either.
+ *
+ * This is based on cpu_clock(), which will allow at most ~1 jiffy of
+ * jitter between CPUs. So it's a pretty scalable clock, but there
+ * can be offsets in the trace data.
+ */
+u64 notrace trace_clock(void)
+{
+ return cpu_clock(raw_smp_processor_id());
+}
+
+
+/*
+ * trace_clock_global(): special globally coherent trace clock
+ *
+ * It has higher overhead than the other trace clocks but is still
+ * an order of magnitude faster than GTOD derived hardware clocks.
+ *
+ * Used by plugins that need globally coherent timestamps.
+ */
+
+static u64 prev_trace_clock_time;
+
+static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+u64 notrace trace_clock_global(void)
+{
+ unsigned long flags;
+ int this_cpu;
+ u64 now;
+
+ raw_local_irq_save(flags);
+
+ this_cpu = raw_smp_processor_id();
+ now = cpu_clock(this_cpu);
+ /*
+ * If in an NMI context then don't risk lockups and return the
+ * cpu_clock() time:
+ */
+ if (unlikely(in_nmi()))
+ goto out;
+
+ __raw_spin_lock(&trace_clock_lock);
+
+ /*
+ * TODO: if this happens often then maybe we should reset
+ * my_scd->clock to prev_trace_clock_time+1, to make sure
+ * we start ticking with the local clock from now on?
+ */
+ if ((s64)(now - prev_trace_clock_time) < 0)
+ now = prev_trace_clock_time + 1;
+
+ prev_trace_clock_time = now;
+
+ __raw_spin_unlock(&trace_clock_lock);
+
+ out:
+ raw_local_irq_restore(flags);
+
+ return now;
+}
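
The three variants trade precision for overhead: trace_clock_local() is the cheapest but only meaningful on one CPU, trace_clock() bounds cross-CPU skew to roughly a jiffy, and trace_clock_global() serializes on a spinlock to stay monotonic everywhere. A minimal timing sketch (hypothetical caller):

static u64 example_time_region(void (*fn)(void))
{
	u64 t0, t1;

	t0 = trace_clock_local();	/* same-CPU deltas only */
	fn();
	t1 = trace_clock_local();

	return t1 - t0;	/* use trace_clock_global() if fn() may migrate CPUs */
}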
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
new file mode 100644
index 000000000000..22cba9970776
--- /dev/null
+++ b/kernel/trace/trace_event_profile.c
@@ -0,0 +1,31 @@
+/*
+ * trace event based perf counter profiling
+ *
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ */
+
+#include "trace.h"
+
+int ftrace_profile_enable(int event_id)
+{
+ struct ftrace_event_call *event;
+
+ for_each_event(event) {
+ if (event->id == event_id)
+ return event->profile_enable(event);
+ }
+
+ return -EINVAL;
+}
+
+void ftrace_profile_disable(int event_id)
+{
+ struct ftrace_event_call *event;
+
+ for_each_event(event) {
+ if (event->id == event_id)
+ return event->profile_disable(event);
+ }
+}
+
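
Both helpers resolve an event by its numeric id (the value later exported through each event's debugfs "id" file) and forward to the per-event profile hooks generated by the stage-3 macros. A minimal caller sketch (hypothetical profiling backend):

static int example_attach_counter(int event_id)
{
	/* returns -EINVAL if no registered event carries this id */
	return ftrace_profile_enable(event_id);
}

static void example_detach_counter(int event_id)
{
	ftrace_profile_disable(event_id);
}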
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
new file mode 100644
index 000000000000..fd78bee71dd7
--- /dev/null
+++ b/kernel/trace/trace_event_types.h
@@ -0,0 +1,173 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ftrace
+
+/*
+ * We cheat and use the proto type field as the ID
+ * and args as the entry type (minus 'struct')
+ */
+TRACE_EVENT_FORMAT(function, TRACE_FN, ftrace_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, ip, ip)
+ TRACE_FIELD(unsigned long, parent_ip, parent_ip)
+ ),
+ TP_RAW_FMT(" %lx <-- %lx")
+);
+
+TRACE_EVENT_FORMAT(funcgraph_entry, TRACE_GRAPH_ENT,
+ ftrace_graph_ent_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, graph_ent.func, func)
+ TRACE_FIELD(int, graph_ent.depth, depth)
+ ),
+ TP_RAW_FMT("--> %lx (%d)")
+);
+
+TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
+ ftrace_graph_ret_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, ret.func, func)
+ TRACE_FIELD(int, ret.depth, depth)
+ ),
+ TP_RAW_FMT("<-- %lx (%d)")
+);
+
+TRACE_EVENT_FORMAT(wakeup, TRACE_WAKE, ctx_switch_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned int, prev_pid, prev_pid)
+ TRACE_FIELD(unsigned char, prev_prio, prev_prio)
+ TRACE_FIELD(unsigned char, prev_state, prev_state)
+ TRACE_FIELD(unsigned int, next_pid, next_pid)
+ TRACE_FIELD(unsigned char, next_prio, next_prio)
+ TRACE_FIELD(unsigned char, next_state, next_state)
+ TRACE_FIELD(unsigned int, next_cpu, next_cpu)
+ ),
+ TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]")
+);
+
+TRACE_EVENT_FORMAT(context_switch, TRACE_CTX, ctx_switch_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned int, prev_pid, prev_pid)
+ TRACE_FIELD(unsigned char, prev_prio, prev_prio)
+ TRACE_FIELD(unsigned char, prev_state, prev_state)
+ TRACE_FIELD(unsigned int, next_pid, next_pid)
+ TRACE_FIELD(unsigned char, next_prio, next_prio)
+ TRACE_FIELD(unsigned char, next_state, next_state)
+ TRACE_FIELD(unsigned int, next_cpu, next_cpu)
+ ),
+ TP_RAW_FMT("%u:%u:%u ==+ %u:%u:%u [%03u]")
+);
+
+TRACE_EVENT_FORMAT(special, TRACE_SPECIAL, special_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, arg1, arg1)
+ TRACE_FIELD(unsigned long, arg2, arg2)
+ TRACE_FIELD(unsigned long, arg3, arg3)
+ ),
+ TP_RAW_FMT("(%08lx) (%08lx) (%08lx)")
+);
+
+/*
+ * Stack-trace entry:
+ */
+
+/* #define FTRACE_STACK_ENTRIES 8 */
+
+TRACE_EVENT_FORMAT(kernel_stack, TRACE_STACK, stack_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, caller[0], stack0)
+ TRACE_FIELD(unsigned long, caller[1], stack1)
+ TRACE_FIELD(unsigned long, caller[2], stack2)
+ TRACE_FIELD(unsigned long, caller[3], stack3)
+ TRACE_FIELD(unsigned long, caller[4], stack4)
+ TRACE_FIELD(unsigned long, caller[5], stack5)
+ TRACE_FIELD(unsigned long, caller[6], stack6)
+ TRACE_FIELD(unsigned long, caller[7], stack7)
+ ),
+ TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+ "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
+);
+
+TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, caller[0], stack0)
+ TRACE_FIELD(unsigned long, caller[1], stack1)
+ TRACE_FIELD(unsigned long, caller[2], stack2)
+ TRACE_FIELD(unsigned long, caller[3], stack3)
+ TRACE_FIELD(unsigned long, caller[4], stack4)
+ TRACE_FIELD(unsigned long, caller[5], stack5)
+ TRACE_FIELD(unsigned long, caller[6], stack6)
+ TRACE_FIELD(unsigned long, caller[7], stack7)
+ ),
+ TP_RAW_FMT("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+ "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n")
+);
+
+TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, ip, ip)
+ TRACE_FIELD(char *, fmt, fmt)
+ TRACE_FIELD_ZERO_CHAR(buf)
+ ),
+ TP_RAW_FMT("%08lx (%d) fmt:%p %s")
+);
+
+TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned long, ip, ip)
+ TRACE_FIELD_ZERO_CHAR(buf)
+ ),
+ TP_RAW_FMT("%08lx (%d) fmt:%p %s")
+);
+
+TRACE_EVENT_FORMAT(branch, TRACE_BRANCH, trace_branch, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(unsigned int, line, line)
+ TRACE_FIELD_SPECIAL(char func[TRACE_FUNC_SIZE+1], func, func)
+ TRACE_FIELD_SPECIAL(char file[TRACE_FUNC_SIZE+1], file, file)
+ TRACE_FIELD(char, correct, correct)
+ ),
+ TP_RAW_FMT("%u:%s:%s (%u)")
+);
+
+TRACE_EVENT_FORMAT(hw_branch, TRACE_HW_BRANCHES, hw_branch_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(u64, from, from)
+ TRACE_FIELD(u64, to, to)
+ ),
+ TP_RAW_FMT("from: %llx to: %llx")
+);
+
+TRACE_EVENT_FORMAT(power, TRACE_POWER, trace_power, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(ktime_t, state_data.stamp, stamp)
+ TRACE_FIELD(ktime_t, state_data.end, end)
+ TRACE_FIELD(int, state_data.type, type)
+ TRACE_FIELD(int, state_data.state, state)
+ ),
+ TP_RAW_FMT("%llx->%llx type:%u state:%u")
+);
+
+TRACE_EVENT_FORMAT(kmem_alloc, TRACE_KMEM_ALLOC, kmemtrace_alloc_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
+ TRACE_FIELD(unsigned long, call_site, call_site)
+ TRACE_FIELD(const void *, ptr, ptr)
+ TRACE_FIELD(size_t, bytes_req, bytes_req)
+ TRACE_FIELD(size_t, bytes_alloc, bytes_alloc)
+ TRACE_FIELD(gfp_t, gfp_flags, gfp_flags)
+ TRACE_FIELD(int, node, node)
+ ),
+ TP_RAW_FMT("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+ " flags:%x node:%d")
+);
+
+TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore,
+ TRACE_STRUCT(
+ TRACE_FIELD(enum kmemtrace_type_id, type_id, type_id)
+ TRACE_FIELD(unsigned long, call_site, call_site)
+ TRACE_FIELD(const void *, ptr, ptr)
+ ),
+ TP_RAW_FMT("type:%u call_site:%lx ptr:%p")
+);
+
+#undef TRACE_SYSTEM
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
new file mode 100644
index 000000000000..576f4fa2af0d
--- /dev/null
+++ b/kernel/trace/trace_events.c
@@ -0,0 +1,828 @@
+/*
+ * event tracer
+ *
+ * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * - Added format output of fields of the trace point.
+ * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include "trace_output.h"
+
+#define TRACE_SYSTEM "TRACE_SYSTEM"
+
+static DEFINE_MUTEX(event_mutex);
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size)
+{
+ struct ftrace_event_field *field;
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ goto err;
+
+ field->name = kstrdup(name, GFP_KERNEL);
+ if (!field->name)
+ goto err;
+
+ field->type = kstrdup(type, GFP_KERNEL);
+ if (!field->type)
+ goto err;
+
+ field->offset = offset;
+ field->size = size;
+ list_add(&field->link, &call->fields);
+
+ return 0;
+
+err:
+ if (field) {
+ kfree(field->name);
+ kfree(field->type);
+ }
+ kfree(field);
+
+ return -ENOMEM;
+}
+
+static void ftrace_clear_events(void)
+{
+ struct ftrace_event_call *call = (void *)__start_ftrace_events;
+
+
+ while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
+
+ if (call->enabled) {
+ call->enabled = 0;
+ call->unregfunc();
+ }
+ call++;
+ }
+}
+
+static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+ int enable)
+{
+
+ switch (enable) {
+ case 0:
+ if (call->enabled) {
+ call->enabled = 0;
+ call->unregfunc();
+ }
+ break;
+ case 1:
+ if (!call->enabled) {
+ call->enabled = 1;
+ call->regfunc();
+ }
+ break;
+ }
+}
+
+static int ftrace_set_clr_event(char *buf, int set)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ char *event = NULL, *sub = NULL, *match;
+ int ret = -EINVAL;
+
+ /*
+ * The buf format can be <subsystem>:<event-name>
+ * *:<event-name> means any event by that name.
+ * :<event-name> is the same.
+ *
+ * <subsystem>:* means all events in that subsystem
+ * <subsystem>: means the same.
+ *
+ * <name> (no ':') means all events in a subsystem with
+ * the name <name> or any event that matches <name>
+ */
+
+ match = strsep(&buf, ":");
+ if (buf) {
+ sub = match;
+ event = buf;
+ match = NULL;
+
+ if (!strlen(sub) || strcmp(sub, "*") == 0)
+ sub = NULL;
+ if (!strlen(event) || strcmp(event, "*") == 0)
+ event = NULL;
+ }
+
+ mutex_lock(&event_mutex);
+ for_each_event(call) {
+
+ if (!call->name || !call->regfunc)
+ continue;
+
+ if (match &&
+ strcmp(match, call->name) != 0 &&
+ strcmp(match, call->system) != 0)
+ continue;
+
+ if (sub && strcmp(sub, call->system) != 0)
+ continue;
+
+ if (event && strcmp(event, call->name) != 0)
+ continue;
+
+ ftrace_event_enable_disable(call, set);
+
+ ret = 0;
+ }
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
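
Concrete examples of the strings accepted by the parser described in the comment above (hypothetical in-file helper; in practice the strings arrive through writes to the set_event debugfs file, where a leading '!' means disable):

static void example_select_events(void)
{
	/* buffer must be writable: the parser splits it in place with strsep() */
	char buf[] = "sched:sched_wakeup";

	ftrace_set_clr_event(buf, 1);	/* 1 = enable, 0 = disable */

	/* other accepted forms, per the comment above:
	 *   "sched:" or "sched:*"   - every event in the subsystem
	 *   ":sched_wakeup"         - any event of that name
	 *   "sched_wakeup"          - bare name: event or subsystem
	 */
}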
+
+/* 128 should be much more than enough */
+#define EVENT_BUF_SIZE 127
+
+static ssize_t
+ftrace_event_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ size_t read = 0;
+ int i, set = 1;
+ ssize_t ret;
+ char *buf;
+ char ch;
+
+ if (!cnt || cnt < 0)
+ return 0;
+
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ ret = get_user(ch, ubuf++);
+ if (ret)
+ return ret;
+ read++;
+ cnt--;
+
+ /* skip white space */
+ while (cnt && isspace(ch)) {
+ ret = get_user(ch, ubuf++);
+ if (ret)
+ return ret;
+ read++;
+ cnt--;
+ }
+
+ /* Only white space found? */
+ if (isspace(ch)) {
+ file->f_pos += read;
+ ret = read;
+ return ret;
+ }
+
+ buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (cnt > EVENT_BUF_SIZE)
+ cnt = EVENT_BUF_SIZE;
+
+ i = 0;
+ while (cnt && !isspace(ch)) {
+ if (!i && ch == '!')
+ set = 0;
+ else
+ buf[i++] = ch;
+
+ ret = get_user(ch, ubuf++);
+ if (ret)
+ goto out_free;
+ read++;
+ cnt--;
+ }
+ buf[i] = 0;
+
+ file->f_pos += read;
+
+ ret = ftrace_set_clr_event(buf, set);
+ if (ret)
+ goto out_free;
+
+ ret = read;
+
+ out_free:
+ kfree(buf);
+
+ return ret;
+}
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *next = call;
+
+ (*pos)++;
+
+ for (;;) {
+ if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+ return NULL;
+
+ /*
+ * The ftrace subsystem is for showing formats only.
+ * They can not be enabled or disabled via the event files.
+ */
+ if (call->regfunc)
+ break;
+
+ call++;
+ next = call;
+ }
+
+ m->private = ++next;
+
+ return call;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+ return t_next(m, NULL, pos);
+}
+
+static void *
+s_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *next;
+
+ (*pos)++;
+
+ retry:
+ if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
+ return NULL;
+
+ if (!call->enabled) {
+ call++;
+ goto retry;
+ }
+
+ next = call;
+ m->private = ++next;
+
+ return call;
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ return s_next(m, NULL, pos);
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+ struct ftrace_event_call *call = v;
+
+ if (strcmp(call->system, TRACE_SYSTEM) != 0)
+ seq_printf(m, "%s:", call->system);
+ seq_printf(m, "%s\n", call->name);
+
+ return 0;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int
+ftrace_event_seq_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ const struct seq_operations *seq_ops;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ !(file->f_flags & O_APPEND))
+ ftrace_clear_events();
+
+ seq_ops = inode->i_private;
+ ret = seq_open(file, seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+
+ m->private = __start_ftrace_events;
+ }
+ return ret;
+}
+
+static ssize_t
+event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ char *buf;
+
+ if (call->enabled)
+ buf = "1\n";
+ else
+ buf = "0\n";
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+}
+
+static ssize_t
+event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ case 1:
+ mutex_lock(&event_mutex);
+ ftrace_event_enable_disable(call, val);
+ mutex_unlock(&event_mutex);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+#undef FIELD
+#define FIELD(type, name) \
+ #type, "common_" #name, offsetof(typeof(field), name), \
+ sizeof(field.name)
+
+static int trace_write_header(struct trace_seq *s)
+{
+ struct trace_entry field;
+
+ /* struct trace_entry */
+ return trace_seq_printf(s,
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+ "\n",
+ FIELD(unsigned char, type),
+ FIELD(unsigned char, flags),
+ FIELD(unsigned char, preempt_count),
+ FIELD(int, pid),
+ FIELD(int, tgid));
+}
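
For reference, a sketch of what this common header contributes to an event's "format" file, given struct trace_entry's layout of one byte each for type/flags/preempt_count followed by two ints (event name, ID, and exact offsets are hypothetical/typical):

/*
 * name: example_event
 * ID: 42
 * format:
 *	field:unsigned char common_type;	offset:0;	size:1;
 *	field:unsigned char common_flags;	offset:1;	size:1;
 *	field:unsigned char common_preempt_count;	offset:2;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_tgid;	offset:8;	size:4;
 */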
+
+static ssize_t
+event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ struct trace_seq *s;
+ char *buf;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ /* If any of the first writes fail, so will the show_format. */
+
+ trace_seq_printf(s, "name: %s\n", call->name);
+ trace_seq_printf(s, "ID: %d\n", call->id);
+ trace_seq_printf(s, "format:\n");
+ trace_write_header(s);
+
+ r = call->show_format(s);
+ if (!r) {
+ /*
+ * ug! The format output is bigger than a PAGE!!
+ */
+ buf = "FORMAT TOO BIG\n";
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ buf, strlen(buf));
+ goto out;
+ }
+
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, s->len);
+ out:
+ kfree(s);
+ return r;
+}
+
+static ssize_t
+event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+ trace_seq_printf(s, "%d\n", call->id);
+
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, s->len);
+ kfree(s);
+ return r;
+}
+
+static ssize_t
+event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ filter_print_preds(call->preds, s);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+ kfree(s);
+
+ return r;
+}
+
+static ssize_t
+event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ftrace_event_call *call = filp->private_data;
+ char buf[64], *pbuf = buf;
+ struct filter_pred *pred;
+ int err;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = '\0';
+
+ pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+ if (!pred)
+ return -ENOMEM;
+
+ err = filter_parse(&pbuf, pred);
+ if (err < 0) {
+ filter_free_pred(pred);
+ return err;
+ }
+
+ if (pred->clear) {
+ filter_free_preds(call);
+ filter_free_pred(pred);
+ return cnt;
+ }
+
+ err = filter_add_pred(call, pred);
+ if (err < 0) {
+ filter_free_pred(pred);
+ return err;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static ssize_t
+subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct event_subsystem *system = filp->private_data;
+ struct trace_seq *s;
+ int r;
+
+ if (*ppos)
+ return 0;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ filter_print_preds(system->preds, s);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+ kfree(s);
+
+ return r;
+}
+
+static ssize_t
+subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct event_subsystem *system = filp->private_data;
+ char buf[64], *pbuf = buf;
+ struct filter_pred *pred;
+ int err;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = '\0';
+
+ pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+ if (!pred)
+ return -ENOMEM;
+
+ err = filter_parse(&pbuf, pred);
+ if (err < 0) {
+ filter_free_pred(pred);
+ return err;
+ }
+
+ if (pred->clear) {
+ filter_free_subsystem_preds(system);
+ filter_free_pred(pred);
+ return cnt;
+ }
+
+ err = filter_add_subsystem_pred(system, pred);
+ if (err < 0) {
+ filter_free_subsystem_preds(system);
+ filter_free_pred(pred);
+ return err;
+ }
+
+ *ppos += cnt;
+
+ return cnt;
+}
+
+static const struct seq_operations show_event_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .show = t_show,
+ .stop = t_stop,
+};
+
+static const struct seq_operations show_set_event_seq_ops = {
+ .start = s_start,
+ .next = s_next,
+ .show = t_show,
+ .stop = t_stop,
+};
+
+static const struct file_operations ftrace_avail_fops = {
+ .open = ftrace_event_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations ftrace_set_event_fops = {
+ .open = ftrace_event_seq_open,
+ .read = seq_read,
+ .write = ftrace_event_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations ftrace_enable_fops = {
+ .open = tracing_open_generic,
+ .read = event_enable_read,
+ .write = event_enable_write,
+};
+
+static const struct file_operations ftrace_event_format_fops = {
+ .open = tracing_open_generic,
+ .read = event_format_read,
+};
+
+static const struct file_operations ftrace_event_id_fops = {
+ .open = tracing_open_generic,
+ .read = event_id_read,
+};
+
+static const struct file_operations ftrace_event_filter_fops = {
+ .open = tracing_open_generic,
+ .read = event_filter_read,
+ .write = event_filter_write,
+};
+
+static const struct file_operations ftrace_subsystem_filter_fops = {
+ .open = tracing_open_generic,
+ .read = subsystem_filter_read,
+ .write = subsystem_filter_write,
+};
+
+static struct dentry *event_trace_events_dir(void)
+{
+ static struct dentry *d_tracer;
+ static struct dentry *d_events;
+
+ if (d_events)
+ return d_events;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return NULL;
+
+ d_events = debugfs_create_dir("events", d_tracer);
+ if (!d_events)
+ pr_warning("Could not create debugfs "
+ "'events' directory\n");
+
+ return d_events;
+}
+
+static LIST_HEAD(event_subsystems);
+
+static struct dentry *
+event_subsystem_dir(const char *name, struct dentry *d_events)
+{
+ struct event_subsystem *system;
+
+ /* First see if we did not already create this dir */
+ list_for_each_entry(system, &event_subsystems, list) {
+ if (strcmp(system->name, name) == 0)
+ return system->entry;
+ }
+
+ /* need to create new entry */
+ system = kmalloc(sizeof(*system), GFP_KERNEL);
+ if (!system) {
+ pr_warning("No memory to create event subsystem %s\n",
+ name);
+ return d_events;
+ }
+
+ system->entry = debugfs_create_dir(name, d_events);
+ if (!system->entry) {
+ pr_warning("Could not create event subsystem %s\n",
+ name);
+ kfree(system);
+ return d_events;
+ }
+
+ system->name = name;
+ list_add(&system->list, &event_subsystems);
+
+ system->preds = NULL;
+
+ return system->entry;
+}
+
+static int
+event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
+{
+ struct dentry *entry;
+ int ret;
+
+ /*
+ * If the trace point header did not define TRACE_SYSTEM
+ * then the system would be called "TRACE_SYSTEM".
+ */
+ if (strcmp(call->system, "TRACE_SYSTEM") != 0)
+ d_events = event_subsystem_dir(call->system, d_events);
+
+ if (call->raw_init) {
+ ret = call->raw_init();
+ if (ret < 0) {
+ pr_warning("Could not initialize trace point"
+ " events/%s\n", call->name);
+ return ret;
+ }
+ }
+
+ call->dir = debugfs_create_dir(call->name, d_events);
+ if (!call->dir) {
+ pr_warning("Could not create debugfs "
+ "'%s' directory\n", call->name);
+ return -1;
+ }
+
+ if (call->regfunc) {
+ entry = debugfs_create_file("enable", 0644, call->dir, call,
+ &ftrace_enable_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'%s/enable' entry\n", call->name);
+ }
+
+ if (call->id) {
+ entry = debugfs_create_file("id", 0444, call->dir, call,
+ &ftrace_event_id_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs '%s/id' entry\n",
+ call->name);
+ }
+
+ if (call->define_fields) {
+ ret = call->define_fields();
+ if (ret < 0) {
+ pr_warning("Could not initialize trace point"
+ " events/%s\n", call->name);
+ return ret;
+ }
+ entry = debugfs_create_file("filter", 0644, call->dir, call,
+ &ftrace_event_filter_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'%s/filter' entry\n", call->name);
+ }
+
+ /* A trace may not want to export its format */
+ if (!call->show_format)
+ return 0;
+
+ entry = debugfs_create_file("format", 0444, call->dir, call,
+ &ftrace_event_format_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'%s/format' entry\n", call->name);
+
+ return 0;
+}
+
+static __init int event_trace_init(void)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ struct dentry *d_tracer;
+ struct dentry *entry;
+ struct dentry *d_events;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
+
+ entry = debugfs_create_file("available_events", 0444, d_tracer,
+ (void *)&show_event_seq_ops,
+ &ftrace_avail_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'available_events' entry\n");
+
+ entry = debugfs_create_file("set_event", 0644, d_tracer,
+ (void *)&show_set_event_seq_ops,
+ &ftrace_set_event_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'set_event' entry\n");
+
+ d_events = event_trace_events_dir();
+ if (!d_events)
+ return 0;
+
+ for_each_event(call) {
+ /* The linker may leave blanks */
+ if (!call->name)
+ continue;
+ event_create_dir(call, d_events);
+ }
+
+ return 0;
+}
+fs_initcall(event_trace_init);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
new file mode 100644
index 000000000000..e03cbf1e38f3
--- /dev/null
+++ b/kernel/trace/trace_events_filter.c
@@ -0,0 +1,433 @@
+/*
+ * trace_events_filter - generic event filtering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+static int filter_pred_64(struct filter_pred *pred, void *event)
+{
+ u64 *addr = (u64 *)(event + pred->offset);
+ u64 val = (u64)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_32(struct filter_pred *pred, void *event)
+{
+ u32 *addr = (u32 *)(event + pred->offset);
+ u32 val = (u32)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_16(struct filter_pred *pred, void *event)
+{
+ u16 *addr = (u16 *)(event + pred->offset);
+ u16 val = (u16)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_8(struct filter_pred *pred, void *event)
+{
+ u8 *addr = (u8 *)(event + pred->offset);
+ u8 val = (u8)pred->val;
+ int match;
+
+ match = (val == *addr) ^ pred->not;
+
+ return match;
+}
+
+static int filter_pred_string(struct filter_pred *pred, void *event)
+{
+ char *addr = (char *)(event + pred->offset);
+ int cmp, match;
+
+ cmp = strncmp(addr, pred->str_val, pred->str_len);
+
+ match = (!cmp) ^ pred->not;
+
+ return match;
+}
+
+/* return 1 if event matches, 0 otherwise (discard) */
+int filter_match_preds(struct ftrace_event_call *call, void *rec)
+{
+ int i, matched, and_failed = 0;
+ struct filter_pred *pred;
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (call->preds[i]) {
+ pred = call->preds[i];
+ if (and_failed && !pred->or)
+ continue;
+ matched = pred->fn(pred, rec);
+ if (!matched && !pred->or) {
+ and_failed = 1;
+ continue;
+ } else if (matched && pred->or)
+ return 1;
+ } else
+ break;
+ }
+
+ if (and_failed)
+ return 0;
+
+ return 1;
+}
+
+void filter_print_preds(struct filter_pred **preds, struct trace_seq *s)
+{
+ char *field_name;
+ struct filter_pred *pred;
+ int i;
+
+ if (!preds) {
+ trace_seq_printf(s, "none\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (preds[i]) {
+ pred = preds[i];
+ field_name = pred->field_name;
+ if (i)
+ trace_seq_printf(s, pred->or ? "|| " : "&& ");
+ trace_seq_printf(s, "%s ", field_name);
+ trace_seq_printf(s, pred->not ? "!= " : "== ");
+ if (pred->str_val)
+ trace_seq_printf(s, "%s\n", pred->str_val);
+ else
+ trace_seq_printf(s, "%llu\n", pred->val);
+ } else
+ break;
+ }
+}
+
+static struct ftrace_event_field *
+find_event_field(struct ftrace_event_call *call, char *name)
+{
+ struct ftrace_event_field *field;
+
+ list_for_each_entry(field, &call->fields, link) {
+ if (!strcmp(field->name, name))
+ return field;
+ }
+
+ return NULL;
+}
+
+void filter_free_pred(struct filter_pred *pred)
+{
+ if (!pred)
+ return;
+
+ kfree(pred->field_name);
+ kfree(pred->str_val);
+ kfree(pred);
+}
+
+void filter_free_preds(struct ftrace_event_call *call)
+{
+ int i;
+
+ if (call->preds) {
+ for (i = 0; i < MAX_FILTER_PRED; i++)
+ filter_free_pred(call->preds[i]);
+ kfree(call->preds);
+ call->preds = NULL;
+ }
+}
+
+void filter_free_subsystem_preds(struct event_subsystem *system)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ int i;
+
+ if (system->preds) {
+ for (i = 0; i < MAX_FILTER_PRED; i++)
+ filter_free_pred(system->preds[i]);
+ kfree(system->preds);
+ system->preds = NULL;
+ }
+
+ events_for_each(call) {
+ if (!call->name || !call->regfunc)
+ continue;
+
+ if (!strcmp(call->system, system->name))
+ filter_free_preds(call);
+ }
+}
+
+static int __filter_add_pred(struct ftrace_event_call *call,
+ struct filter_pred *pred)
+{
+ int i;
+
+ if (call->preds && !pred->compound)
+ filter_free_preds(call);
+
+ if (!call->preds) {
+ call->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
+ GFP_KERNEL);
+ if (!call->preds)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (!call->preds[i]) {
+ call->preds[i] = pred;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int is_string_field(const char *type)
+{
+ if (strchr(type, '[') && strstr(type, "char"))
+ return 1;
+
+ return 0;
+}
+
+int filter_add_pred(struct ftrace_event_call *call, struct filter_pred *pred)
+{
+ struct ftrace_event_field *field;
+
+ field = find_event_field(call, pred->field_name);
+ if (!field)
+ return -EINVAL;
+
+ pred->offset = field->offset;
+
+ if (is_string_field(field->type)) {
+ if (!pred->str_val)
+ return -EINVAL;
+ pred->fn = filter_pred_string;
+ pred->str_len = field->size;
+ return __filter_add_pred(call, pred);
+ } else {
+ if (pred->str_val)
+ return -EINVAL;
+ }
+
+ switch (field->size) {
+ case 8:
+ pred->fn = filter_pred_64;
+ break;
+ case 4:
+ pred->fn = filter_pred_32;
+ break;
+ case 2:
+ pred->fn = filter_pred_16;
+ break;
+ case 1:
+ pred->fn = filter_pred_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return __filter_add_pred(call, pred);
+}
+
+static struct filter_pred *copy_pred(struct filter_pred *pred)
+{
+ struct filter_pred *new_pred = kmalloc(sizeof(*pred), GFP_KERNEL);
+ if (!new_pred)
+ return NULL;
+
+ memcpy(new_pred, pred, sizeof(*pred));
+
+ if (pred->field_name) {
+ new_pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
+ if (!new_pred->field_name) {
+ kfree(new_pred);
+ return NULL;
+ }
+ }
+
+ if (pred->str_val) {
+ new_pred->str_val = kstrdup(pred->str_val, GFP_KERNEL);
+ if (!new_pred->str_val) {
+ filter_free_pred(new_pred);
+ return NULL;
+ }
+ }
+
+ return new_pred;
+}
+
+int filter_add_subsystem_pred(struct event_subsystem *system,
+ struct filter_pred *pred)
+{
+ struct ftrace_event_call *call = __start_ftrace_events;
+ struct filter_pred *event_pred;
+ int i;
+
+ if (system->preds && !pred->compound)
+ filter_free_subsystem_preds(system);
+
+ if (!system->preds) {
+ system->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
+ GFP_KERNEL);
+ if (!system->preds)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_FILTER_PRED; i++) {
+ if (!system->preds[i]) {
+ system->preds[i] = pred;
+ break;
+ }
+ }
+
+ if (i == MAX_FILTER_PRED)
+ return -ENOSPC;
+
+ events_for_each(call) {
+ int err;
+
+ if (!call->name || !call->regfunc)
+ continue;
+
+ if (strcmp(call->system, system->name))
+ continue;
+
+ if (!find_event_field(call, pred->field_name))
+ continue;
+
+ event_pred = copy_pred(pred);
+ if (!event_pred)
+ goto oom;
+
+ err = filter_add_pred(call, event_pred);
+ if (err)
+ filter_free_pred(event_pred);
+ if (err == -ENOMEM)
+ goto oom;
+ }
+
+ return 0;
+
+oom:
+ system->preds[i] = NULL;
+ return -ENOMEM;
+}
+
+int filter_parse(char **pbuf, struct filter_pred *pred)
+{
+ char *tmp, *tok, *val_str = NULL;
+ int tok_n = 0;
+
+ /* field ==/!= number, or/and field ==/!= number, number */
+ while ((tok = strsep(pbuf, " \n"))) {
+ if (tok_n == 0) {
+ if (!strcmp(tok, "0")) {
+ pred->clear = 1;
+ return 0;
+ } else if (!strcmp(tok, "&&")) {
+ pred->or = 0;
+ pred->compound = 1;
+ } else if (!strcmp(tok, "||")) {
+ pred->or = 1;
+ pred->compound = 1;
+ } else
+ pred->field_name = tok;
+ tok_n = 1;
+ continue;
+ }
+ if (tok_n == 1) {
+ if (!pred->field_name)
+ pred->field_name = tok;
+ else if (!strcmp(tok, "!="))
+ pred->not = 1;
+ else if (!strcmp(tok, "=="))
+ pred->not = 0;
+ else {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+ tok_n = 2;
+ continue;
+ }
+ if (tok_n == 2) {
+ if (pred->compound) {
+ if (!strcmp(tok, "!="))
+ pred->not = 1;
+ else if (!strcmp(tok, "=="))
+ pred->not = 0;
+ else {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+ } else {
+ val_str = tok;
+ break; /* done */
+ }
+ tok_n = 3;
+ continue;
+ }
+ if (tok_n == 3) {
+ val_str = tok;
+ break; /* done */
+ }
+ }
+
+ if (!val_str) {
+ pred->field_name = NULL;
+ return -EINVAL;
+ }
+
+ pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
+ if (!pred->field_name)
+ return -ENOMEM;
+
+ pred->val = simple_strtoull(val_str, &tmp, 0);
+ if (tmp == val_str) {
+ pred->str_val = kstrdup(val_str, GFP_KERNEL);
+ if (!pred->str_val)
+ return -ENOMEM;
+ } else if (*tmp != '\0')
+ return -EINVAL;
+
+ return 0;
+}
+
+
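
A worked walk-through of the tokenizer above, for the three shapes of input it accepts (illustrative expressions only):

/*
 *   "pid == 0"
 *	tok 0: "pid"  -> field_name = "pid"
 *	tok 1: "=="   -> not = 0
 *	tok 2: "0"    -> not compound, so val_str = "0", done
 *
 *   "|| prio != 5"	(appended to an existing filter)
 *	tok 0: "||"   -> or = 1, compound = 1
 *	tok 1: "prio" -> field_name = "prio"
 *	tok 2: "!="   -> not = 1
 *	tok 3: "5"    -> val_str = "5", done
 *
 *   "0"
 *	tok 0: "0"    -> clear = 1, drop all predicates and return
 *
 * val_str is then parsed as a number; if no digits are consumed it is
 * kept as a string predicate (str_val), which filter_add_pred() only
 * accepts for char-array fields.
 */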
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h
new file mode 100644
index 000000000000..38985f9b379c
--- /dev/null
+++ b/kernel/trace/trace_events_stage_1.h
@@ -0,0 +1,39 @@
+/*
+ * Stage 1 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * struct ftrace_raw_<call> {
+ * struct trace_entry ent;
+ * <type> <item>;
+ * <type2> <item2>[<len>];
+ * [...]
+ * };
+ *
+ * The <type> <item> is created by the __field(type, item) macro or
+ * the __array(type2, item2, len) macro.
+ * We simply do "type item;", and that will create the fields
+ * in the structure.
+ */
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt)
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ struct ftrace_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ }; \
+ static struct ftrace_event_call event_##name
+
+#include <trace/trace_event_types.h>
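
To make the expansion concrete, a sketch of what stage 1 emits for a hypothetical event (names invented for illustration):

/*
 * TRACE_EVENT(foo_bar,
 *	TP_PROTO(int cpu, unsigned long delay),
 *	TP_ARGS(cpu, delay),
 *	TP_STRUCT__entry(
 *		__field(int, cpu)
 *		__field(unsigned long, delay)
 *	),
 *	TP_fast_assign(...), TP_printk(...));
 *
 * expands at this stage to roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		unsigned long		delay;
 *	};
 *	static struct ftrace_event_call event_foo_bar;
 */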
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
new file mode 100644
index 000000000000..d363c6672c6c
--- /dev/null
+++ b/kernel/trace/trace_events_stage_2.h
@@ -0,0 +1,176 @@
+/*
+ * Stage 2 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * enum print_line_t
+ * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * {
+ * struct trace_seq *s = &iter->seq;
+ * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_entry *entry;
+ * int ret;
+ *
+ * entry = iter->ent;
+ *
+ * if (entry->type != event_<call>.id) {
+ * WARN_ON_ONCE(1);
+ * return TRACE_TYPE_UNHANDLED;
+ * }
+ *
+ * field = (typeof(field))entry;
+ *
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * if (!ret)
+ * return TRACE_TYPE_PARTIAL_LINE;
+ *
+ * return TRACE_TYPE_HANDLED;
+ * }
+ *
+ * This is the method used to print the raw event to the trace
+ * output format. Note, this is not needed if the data is read
+ * in binary.
+ */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+enum print_line_t \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct ftrace_raw_##call *field; \
+ struct trace_entry *entry; \
+ int ret; \
+ \
+ entry = iter->ent; \
+ \
+ if (entry->type != event_##call.id) { \
+ WARN_ON_ONCE(1); \
+ return TRACE_TYPE_UNHANDLED; \
+ } \
+ \
+ field = (typeof(field))entry; \
+ \
+ ret = trace_seq_printf(s, #call ": " print); \
+ if (!ret) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+ \
+ return TRACE_TYPE_HANDLED; \
+}
+
+#include <trace/trace_event_types.h>
+
+/*
+ * Set up the format shown for the trace point.
+ *
+ * int
+ * ftrace_format_##call(struct trace_seq *s)
+ * {
+ * struct ftrace_raw_##call field;
+ * int ret;
+ *
+ * ret = trace_seq_printf(s, #type " " #item ";"
+ * " offset:%u; size:%u;\n",
+ * offsetof(struct ftrace_raw_##call, item),
+ * sizeof(field.type));
+ *
+ * }
+ */
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef __field
+#define __field(type, item) \
+ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+#undef __array
+#define __array(type, item, len) \
+ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+#undef __entry
+#define __entry REC
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+static int \
+ftrace_format_##call(struct trace_seq *s) \
+{ \
+ struct ftrace_raw_##call field; \
+ int ret; \
+ \
+ tstruct; \
+ \
+ trace_seq_printf(s, "\nprint fmt: " print); \
+ \
+ return ret; \
+}
+
+#include <trace/trace_event_types.h>
+
+#undef __field
+#define __field(type, item) \
+ ret = trace_define_field(event_call, #type, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item)); \
+ if (ret) \
+ return ret;
+
+#undef __array
+#define __array(type, item, len) \
+ ret = trace_define_field(event_call, #type "[" #len "]", #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item)); \
+ if (ret) \
+ return ret;
+
+#define __common_field(type, item) \
+ ret = trace_define_field(event_call, #type, "common_" #item, \
+ offsetof(typeof(field.ent), item), \
+ sizeof(field.ent.item)); \
+ if (ret) \
+ return ret;
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+int \
+ftrace_define_fields_##call(void) \
+{ \
+ struct ftrace_raw_##call field; \
+ struct ftrace_event_call *event_call = &event_##call; \
+ int ret; \
+ \
+ __common_field(unsigned char, type); \
+ __common_field(unsigned char, flags); \
+ __common_field(unsigned char, preempt_count); \
+ __common_field(int, pid); \
+ __common_field(int, tgid); \
+ \
+ tstruct; \
+ \
+ return ret; \
+}
+
+#include <trace/trace_event_types.h>
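
Continuing the hypothetical foo_bar event from the stage-1 sketch, the second and third includes above generate roughly the following for a single __field(int, cpu) (offset value assumes a typical layout):

/*
 * ftrace_format_foo_bar(s) prints, per field:
 *
 *	field:int cpu;	offset:12;	size:4;
 *
 * and ftrace_define_fields_foo_bar() registers it with:
 *
 *	ret = trace_define_field(event_call, "int", "cpu",
 *				 offsetof(typeof(field), cpu),
 *				 sizeof(field.cpu));
 *	if (ret)
 *		return ret;
 *
 * after first registering the five common trace_entry fields via
 * __common_field().
 */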
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
new file mode 100644
index 000000000000..9d2fa78cecca
--- /dev/null
+++ b/kernel/trace/trace_events_stage_3.h
@@ -0,0 +1,281 @@
+/*
+ * Stage 3 of the trace events.
+ *
+ * Override the macros in <trace/trace_event_types.h> to include the following:
+ *
+ * static void ftrace_event_<call>(proto)
+ * {
+ * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
+ * }
+ *
+ * static int ftrace_reg_event_<call>(void)
+ * {
+ * int ret;
+ *
+ * ret = register_trace_<call>(ftrace_event_<call>);
+ *	if (ret)
+ * pr_info("event trace: Could not activate trace point "
+ * "probe to <call>");
+ * return ret;
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ * unregister_trace_<call>(ftrace_event_<call>);
+ * }
+ *
+ * For those macros defined with TRACE_FORMAT:
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * .name = "<call>",
+ * .regfunc = ftrace_reg_event_<call>,
+ * .unregfunc = ftrace_unreg_event_<call>,
+ * }
+ *
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(proto)
+ * {
+ * struct ring_buffer_event *event;
+ * struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ * unsigned long irq_flags;
+ * int pc;
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * event = trace_current_buffer_lock_reserve(event_<call>.id,
+ * sizeof(struct ftrace_raw_<call>),
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ *	<assign>;  <-- Here we assign the entry fields declared by the
+ *		       __field and __array macros.
+ *
+ * trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ * }
+ *
+ * static int ftrace_raw_reg_event_<call>(void)
+ * {
+ * int ret;
+ *
+ * ret = register_trace_<call>(ftrace_raw_event_<call>);
+ *	if (ret)
+ * pr_info("event trace: Could not activate trace point "
+ * "probe to <call>");
+ * return ret;
+ * }
+ *
+ * static void ftrace_raw_unreg_event_<call>(void)
+ * {
+ * unregister_trace_<call>(ftrace_raw_event_<call>);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static int ftrace_raw_init_event_<call>(void)
+ * {
+ * int id;
+ *
+ * id = register_ftrace_event(&ftrace_event_type_<call>);
+ * if (!id)
+ * return -ENODEV;
+ * event_<call>.id = id;
+ * return 0;
+ * }
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * .name = "<call>",
+ * .system = "<system>",
+ * .raw_init = ftrace_raw_init_event_<call>,
+ *	.regfunc		= ftrace_raw_reg_event_<call>,
+ *	.unregfunc		= ftrace_raw_unreg_event_<call>,
+ * .show_format = ftrace_format_<call>,
+ * }
+ *
+ */
+
+#undef TP_FMT
+#define TP_FMT(fmt, args...) fmt "\n", ##args
+
+#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+ extern void perf_tpcounter_event(int); \
+ perf_tpcounter_event(event_##call.id); \
+} \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
+{ \
+ int ret = 0; \
+ \
+ if (!atomic_inc_return(&call->profile_count)) \
+ ret = register_trace_##call(ftrace_profile_##call); \
+ \
+ return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
+{ \
+ if (atomic_add_negative(-1, &call->profile_count)) \
+ unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#define _TRACE_PROFILE_INIT(call) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = ftrace_profile_enable_##call, \
+ .profile_disable = ftrace_profile_disable_##call,
+
+#else
+#define _TRACE_PROFILE(call, proto, args)
+#define _TRACE_PROFILE_INIT(call)
+#endif
+
+#define _TRACE_FORMAT(call, proto, args, fmt) \
+static void ftrace_event_##call(proto) \
+{ \
+ event_trace_printk(_RET_IP_, #call ": " fmt); \
+} \
+ \
+static int ftrace_reg_event_##call(void) \
+{ \
+ int ret; \
+ \
+ ret = register_trace_##call(ftrace_event_##call); \
+ if (ret) \
+ pr_info("event trace: Could not activate trace point " \
+ "probe to " #call "\n"); \
+ return ret; \
+} \
+ \
+static void ftrace_unreg_event_##call(void) \
+{ \
+ unregister_trace_##call(ftrace_event_##call); \
+} \
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static int ftrace_init_event_##call(void) \
+{ \
+ int id; \
+ \
+ id = register_ftrace_event(NULL); \
+ if (!id) \
+ return -ENODEV; \
+ event_##call.id = id; \
+ return 0; \
+}
+
+#undef TRACE_FORMAT
+#define TRACE_FORMAT(call, proto, args, fmt) \
+_TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+ .name = #call, \
+ .system = __stringify(TRACE_SYSTEM), \
+ .raw_init = ftrace_init_event_##call, \
+ .regfunc = ftrace_reg_event_##call, \
+ .unregfunc = ftrace_unreg_event_##call, \
+ _TRACE_PROFILE_INIT(call) \
+}
+
+#undef __entry
+#define __entry entry
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static void ftrace_raw_event_##call(proto) \
+{ \
+ struct ftrace_event_call *call = &event_##call; \
+ struct ring_buffer_event *event; \
+ struct ftrace_raw_##call *entry; \
+ unsigned long irq_flags; \
+ int pc; \
+ \
+ local_save_flags(irq_flags); \
+ pc = preempt_count(); \
+ \
+ event = trace_current_buffer_lock_reserve(event_##call.id, \
+ sizeof(struct ftrace_raw_##call), \
+ irq_flags, pc); \
+ if (!event) \
+ return; \
+ entry = ring_buffer_event_data(event); \
+ \
+ assign; \
+ \
+ if (call->preds && !filter_match_preds(call, entry)) \
+ ring_buffer_event_discard(event); \
+ \
+ trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+ \
+} \
+ \
+static int ftrace_raw_reg_event_##call(void) \
+{ \
+ int ret; \
+ \
+ ret = register_trace_##call(ftrace_raw_event_##call); \
+ if (ret) \
+ pr_info("event trace: Could not activate trace point " \
+ "probe to " #call "\n"); \
+ return ret; \
+} \
+ \
+static void ftrace_raw_unreg_event_##call(void) \
+{ \
+ unregister_trace_##call(ftrace_raw_event_##call); \
+} \
+ \
+static struct trace_event ftrace_event_type_##call = { \
+ .trace = ftrace_raw_output_##call, \
+}; \
+ \
+static int ftrace_raw_init_event_##call(void) \
+{ \
+ int id; \
+ \
+ id = register_ftrace_event(&ftrace_event_type_##call); \
+ if (!id) \
+ return -ENODEV; \
+ event_##call.id = id; \
+ INIT_LIST_HEAD(&event_##call.fields); \
+ return 0; \
+} \
+ \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+ .name = #call, \
+ .system = __stringify(TRACE_SYSTEM), \
+ .raw_init = ftrace_raw_init_event_##call, \
+ .regfunc = ftrace_raw_reg_event_##call, \
+ .unregfunc = ftrace_raw_unreg_event_##call, \
+ .show_format = ftrace_format_##call, \
+ .define_fields = ftrace_define_fields_##call, \
+ _TRACE_PROFILE_INIT(call) \
+}
+
+#include <trace/trace_event_types.h>
+
+#undef _TRACE_PROFILE
+#undef _TRACE_PROFILE_INIT
+
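To make the three stages concrete, this is the general shape of a TRACE_EVENT() definition that they consume. The event name, fields and format below are invented for illustration, and the TP_PROTO()/TP_ARGS() wrappers are assumed from the tracepoint headers this series builds on; they are not defined in this file.

	TRACE_EVENT(foo_bar,

		TP_PROTO(int foo, unsigned long bar),

		TP_ARGS(foo, bar),

		TP_STRUCT__entry(
			__field(	int,		foo	)
			__field(	unsigned long,	bar	)
		),

		TP_fast_assign(
			__entry->foo	= foo;
			__entry->bar	= bar;
		),

		TP_printk("foo=%d bar=%lu", __entry->foo, __entry->bar)
	);

Stage 1 turns TP_STRUCT__entry() into struct ftrace_raw_foo_bar, stage 2 emits ftrace_raw_output_foo_bar(), ftrace_format_foo_bar() and ftrace_define_fields_foo_bar(), and stage 3 adds the probe, the register/unregister helpers and the event_foo_bar descriptor placed in the _ftrace_events section.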
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
new file mode 100644
index 000000000000..07a22c33ebf3
--- /dev/null
+++ b/kernel/trace/trace_export.c
@@ -0,0 +1,102 @@
+/*
+ * trace_export.c - export basic ftrace utilities to user space
+ *
+ * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
+ */
+#include <linux/stringify.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include "trace_output.h"
+
+
+#undef TRACE_STRUCT
+#define TRACE_STRUCT(args...) args
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign) \
+ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+
+#undef TRACE_FIELD_SPECIAL
+#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
+ ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+#undef TRACE_FIELD_ZERO_CHAR
+#define TRACE_FIELD_ZERO_CHAR(item) \
+ ret = trace_seq_printf(s, "\tfield:char " #item ";\t" \
+ "offset:%u;\tsize:0;\n", \
+ (unsigned int)offsetof(typeof(field), item)); \
+ if (!ret) \
+ return 0;
+
+
+#undef TP_RAW_FMT
+#define TP_RAW_FMT(args...) args
+
+#undef TRACE_EVENT_FORMAT
+#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
+static int \
+ftrace_format_##call(struct trace_seq *s) \
+{ \
+ struct args field; \
+ int ret; \
+ \
+ tstruct; \
+ \
+ trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \
+ \
+ return ret; \
+}
+
+#include "trace_event_types.h"
+
+#undef TRACE_ZERO_CHAR
+#define TRACE_ZERO_CHAR(arg)
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign)\
+ entry->item = assign;
+
+#undef TRACE_FIELD
+#define TRACE_FIELD(type, item, assign)\
+ entry->item = assign;
+
+#undef TP_CMD
+#define TP_CMD(cmd...) cmd
+
+#undef TRACE_ENTRY
+#define TRACE_ENTRY entry
+
+#undef TRACE_FIELD_SPECIAL
+#define TRACE_FIELD_SPECIAL(type_item, item, cmd) \
+ cmd;
+
+#undef TRACE_EVENT_FORMAT
+#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
+ \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+ .name = #call, \
+ .id = proto, \
+ .system = __stringify(TRACE_SYSTEM), \
+ .show_format = ftrace_format_##call, \
+}
+#include "trace_event_types.h"
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 9236d7e25a16..c9a0b7df44ff 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -9,6 +9,7 @@
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 William Lee Irwin III
*/
+#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
@@ -16,52 +17,388 @@
#include "trace.h"
-static void start_function_trace(struct trace_array *tr)
+/* function tracing enabled */
+static int ftrace_function_enabled;
+
+static struct trace_array *func_trace;
+
+static void tracing_start_function_trace(void);
+static void tracing_stop_function_trace(void);
+
+static int function_trace_init(struct trace_array *tr)
{
+ func_trace = tr;
tr->cpu = get_cpu();
- tracing_reset_online_cpus(tr);
put_cpu();
tracing_start_cmdline_record();
tracing_start_function_trace();
+ return 0;
}
-static void stop_function_trace(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
{
tracing_stop_function_trace();
tracing_stop_cmdline_record();
}
-static int function_trace_init(struct trace_array *tr)
+static void function_trace_start(struct trace_array *tr)
{
- start_function_trace(tr);
- return 0;
+ tracing_reset_online_cpus(tr);
}
-static void function_trace_reset(struct trace_array *tr)
+static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = func_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu, resched;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ pc = preempt_count();
+ resched = ftrace_preempt_disable();
+ local_save_flags(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1))
+ trace_function(tr, ip, parent_ip, flags, pc);
+
+ atomic_dec(&data->disabled);
+ ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
{
- stop_function_trace(tr);
+ struct trace_array *tr = func_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ trace_function(tr, ip, parent_ip, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
}
-static void function_trace_start(struct trace_array *tr)
+static void
+function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
- tracing_reset_online_cpus(tr);
+ struct trace_array *tr = func_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ trace_function(tr, ip, parent_ip, flags, pc);
+ /*
+ * skip over 5 funcs:
+ * __ftrace_trace_stack,
+ * __trace_stack,
+ * function_stack_trace_call
+ * ftrace_list_func
+ * ftrace_call
+ */
+ __trace_stack(tr, flags, 5, pc);
+ }
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = function_trace_call,
+};
+
+static struct ftrace_ops trace_stack_ops __read_mostly =
+{
+ .func = function_stack_trace_call,
+};
+
+/* Our two options */
+enum {
+ TRACE_FUNC_OPT_STACK = 0x1,
+};
+
+static struct tracer_opt func_opts[] = {
+#ifdef CONFIG_STACKTRACE
+ { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
+#endif
+ { } /* Always set a last empty entry */
+};
+
+static struct tracer_flags func_flags = {
+ .val = 0, /* By default: all flags disabled */
+ .opts = func_opts
+};
+
+static void tracing_start_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+
+ if (trace_flags & TRACE_ITER_PREEMPTONLY)
+ trace_ops.func = function_trace_call_preempt_only;
+ else
+ trace_ops.func = function_trace_call;
+
+ if (func_flags.val & TRACE_FUNC_OPT_STACK)
+ register_ftrace_function(&trace_stack_ops);
+ else
+ register_ftrace_function(&trace_ops);
+
+ ftrace_function_enabled = 1;
+}
+
+static void tracing_stop_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+ /* OK if they are not registered */
+ unregister_ftrace_function(&trace_stack_ops);
+ unregister_ftrace_function(&trace_ops);
+}
+
+static int func_set_flag(u32 old_flags, u32 bit, int set)
+{
+ if (bit == TRACE_FUNC_OPT_STACK) {
+ /* do nothing if already set */
+ if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
+ return 0;
+
+ if (set) {
+ unregister_ftrace_function(&trace_ops);
+ register_ftrace_function(&trace_stack_ops);
+ } else {
+ unregister_ftrace_function(&trace_stack_ops);
+ register_ftrace_function(&trace_ops);
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
}
static struct tracer function_trace __read_mostly =
{
- .name = "function",
- .init = function_trace_init,
- .reset = function_trace_reset,
- .start = function_trace_start,
+ .name = "function",
+ .init = function_trace_init,
+ .reset = function_trace_reset,
+ .start = function_trace_start,
+ .wait_pipe = poll_wait_pipe,
+ .flags = &func_flags,
+ .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_function,
+ .selftest = trace_selftest_startup_function,
#endif
};
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ long *count = (long *)data;
+
+ if (tracing_is_on())
+ return;
+
+ if (!*count)
+ return;
+
+ if (*count != -1)
+ (*count)--;
+
+ tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ long *count = (long *)data;
+
+ if (!tracing_is_on())
+ return;
+
+ if (!*count)
+ return;
+
+ if (*count != -1)
+ (*count)--;
+
+ tracing_off();
+}
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data);
+
+static struct ftrace_probe_ops traceon_probe_ops = {
+ .func = ftrace_traceon,
+ .print = ftrace_trace_onoff_print,
+};
+
+static struct ftrace_probe_ops traceoff_probe_ops = {
+ .func = ftrace_traceoff,
+ .print = ftrace_trace_onoff_print,
+};
+
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ char str[KSYM_SYMBOL_LEN];
+ long count = (long)data;
+
+ kallsyms_lookup(ip, NULL, NULL, NULL, str);
+ seq_printf(m, "%s:", str);
+
+ if (ops == &traceon_probe_ops)
+ seq_printf(m, "traceon");
+ else
+ seq_printf(m, "traceoff");
+
+ if (count == -1)
+		seq_printf(m, ":unlimited");
+ else
+ seq_printf(m, ":count=%ld", count);
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static int
+ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+{
+ struct ftrace_probe_ops *ops;
+
+ /* we register both traceon and traceoff to this callback */
+ if (strcmp(cmd, "traceon") == 0)
+ ops = &traceon_probe_ops;
+ else
+ ops = &traceoff_probe_ops;
+
+ unregister_ftrace_function_probe_func(glob, ops);
+
+ return 0;
+}
+
+static int
+ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+{
+ struct ftrace_probe_ops *ops;
+ void *count = (void *)-1;
+ char *number;
+ int ret;
+
+ /* hash funcs only work with set_ftrace_filter */
+ if (!enable)
+ return -EINVAL;
+
+ if (glob[0] == '!')
+ return ftrace_trace_onoff_unreg(glob+1, cmd, param);
+
+ /* we register both traceon and traceoff to this callback */
+ if (strcmp(cmd, "traceon") == 0)
+ ops = &traceon_probe_ops;
+ else
+ ops = &traceoff_probe_ops;
+
+ if (!param)
+ goto out_reg;
+
+ number = strsep(&param, ":");
+
+ if (!strlen(number))
+ goto out_reg;
+
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = strict_strtoul(number, 0, (unsigned long *)&count);
+ if (ret)
+ return ret;
+
+ out_reg:
+ ret = register_ftrace_function_probe(glob, ops, count);
+
+ return ret;
+}
+
+static struct ftrace_func_command ftrace_traceon_cmd = {
+ .name = "traceon",
+ .func = ftrace_trace_onoff_callback,
+};
+
+static struct ftrace_func_command ftrace_traceoff_cmd = {
+ .name = "traceoff",
+ .func = ftrace_trace_onoff_callback,
+};
+
+static int __init init_func_cmd_traceon(void)
+{
+ int ret;
+
+ ret = register_ftrace_command(&ftrace_traceoff_cmd);
+ if (ret)
+ return ret;
+
+ ret = register_ftrace_command(&ftrace_traceon_cmd);
+ if (ret)
+ unregister_ftrace_command(&ftrace_traceoff_cmd);
+ return ret;
+}
+#else
+static inline int init_func_cmd_traceon(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
static __init int init_function_trace(void)
{
+ init_func_cmd_traceon();
return register_tracer(&function_trace);
}
-
device_initcall(init_function_trace);
+
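The traceon/traceoff probes added above hide one subtlety: the hit count travels in the probe's void *data slot itself rather than in allocated memory. Below is a condensed sketch of the registration path, with error handling trimmed and the function name invented; it only restates what ftrace_trace_onoff_callback() does.

	static int example_onoff_register(char *glob, char *cmd, char *param)
	{
		struct ftrace_probe_ops *ops;
		void *count = (void *)-1;		/* -1 means "fire forever" */
		char *number;

		/* both commands share one callback; cmd picks the ops */
		ops = (strcmp(cmd, "traceon") == 0) ? &traceon_probe_ops
						    : &traceoff_probe_ops;

		/* optional ":<count>" suffix, e.g. "schedule:traceoff:5" */
		if (param) {
			number = strsep(&param, ":");
			if (strlen(number) &&
			    strict_strtoul(number, 0, (unsigned long *)&count))
				return -EINVAL;
		}

		/* the count lives inside the data pointer; each hit of
		 * ftrace_traceon()/ftrace_traceoff() decrements it in place */
		return register_ftrace_function_probe(glob, ops, count);
	}

From user space the same request would be written as, for example, echo 'schedule:traceoff:5' into set_ftrace_filter in the tracing debugfs directory (path assumed here, not taken from the patch).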
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..d28687e7b3a7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1,7 +1,7 @@
/*
*
* Function graph tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
* Mostly borrowed from function tracer which
* is Copyright (c) Steven Rostedt <srostedt@redhat.com>
*
@@ -12,6 +12,12 @@
#include <linux/fs.h>
#include "trace.h"
+#include "trace_output.h"
+
+struct fgraph_data {
+ pid_t last_pid;
+ int depth;
+};
#define TRACE_GRAPH_INDENT 2
@@ -20,9 +26,11 @@
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
+#define TRACE_GRAPH_PRINT_DURATION 0x10
+#define TRACE_GRAPH_PRINT_ABS_TIME 0X20
static struct tracer_opt trace_opts[] = {
- /* Display overruns ? */
+	/* Display overruns? (for self-debugging purposes) */
{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
/* Display CPU ? */
{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
@@ -30,26 +38,103 @@ static struct tracer_opt trace_opts[] = {
{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
/* Display proc name/pid */
{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
+ /* Display duration of execution */
+ { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
+ /* Display absolute time of an entry */
+ { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns and proc by default */
- .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
+ .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
+ TRACE_GRAPH_PRINT_DURATION,
.opts = trace_opts
};
/* pid on the last trace processed */
-static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
-static int graph_trace_init(struct trace_array *tr)
+
+/* Add a function return address to the trace stack on thread info. */
+int
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
- int cpu, ret;
+ unsigned long long calltime;
+ int index;
+
+ if (!current->ret_stack)
+ return -EBUSY;
+
+ /* The return trace stack is full */
+ if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+ atomic_inc(&current->trace_overrun);
+ return -EBUSY;
+ }
+
+ calltime = trace_clock_local();
+
+ index = ++current->curr_ret_stack;
+ barrier();
+ current->ret_stack[index].ret = ret;
+ current->ret_stack[index].func = func;
+ current->ret_stack[index].calltime = calltime;
+ *depth = index;
+
+ return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+void
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+ int index;
+
+ index = current->curr_ret_stack;
+
+ if (unlikely(index < 0)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+ *ret = (unsigned long)panic;
+ return;
+ }
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
+ *ret = current->ret_stack[index].ret;
+ trace->func = current->ret_stack[index].func;
+ trace->calltime = current->ret_stack[index].calltime;
+ trace->overrun = atomic_read(&current->trace_overrun);
+ trace->depth = index;
+ barrier();
+ current->curr_ret_stack--;
- ret = register_ftrace_graph(&trace_graph_return,
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+ struct ftrace_graph_ret trace;
+ unsigned long ret;
+
+ ftrace_pop_return_trace(&trace, &ret);
+ trace.rettime = trace_clock_local();
+ ftrace_graph_return(&trace);
+
+ if (unlikely(!ret)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ /* Might as well panic. What else to do? */
+ ret = (unsigned long)panic;
+ }
+
+ return ret;
+}
+
+static int graph_trace_init(struct trace_array *tr)
+{
+ int ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry);
if (ret)
return ret;
@@ -112,15 +197,15 @@ print_graph_cpu(struct trace_seq *s, int cpu)
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
- int i;
- int ret;
- int len;
- char comm[8];
- int spaces = 0;
+ char comm[TASK_COMM_LEN];
/* sign + log10(MAX_INT) + '\0' */
char pid_str[11];
+ int spaces = 0;
+ int ret;
+ int len;
+ int i;
- strncpy(comm, trace_find_cmdline(pid), 7);
+ trace_find_cmdline(pid, comm);
comm[7] = '\0';
sprintf(pid_str, "%d", pid);
@@ -153,17 +238,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
/* If the pid changed since the last trace, output this event */
static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
pid_t prev_pid;
+ pid_t *last_pid;
int ret;
- if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
+ if (!data)
return TRACE_TYPE_HANDLED;
- prev_pid = last_pid[cpu];
- last_pid[cpu] = pid;
+ last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+ if (*last_pid == pid)
+ return TRACE_TYPE_HANDLED;
+
+ prev_pid = *last_pid;
+ *last_pid = pid;
+
+ if (prev_pid == -1)
+ return TRACE_TYPE_HANDLED;
/*
* Context-switch trace line:
@@ -175,34 +268,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu)
ret = trace_seq_printf(s,
" ------------------------------------------\n");
if (!ret)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_cpu(s, cpu);
if (ret == TRACE_TYPE_PARTIAL_LINE)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, prev_pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s, " => ");
if (!ret)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
ret = print_graph_proc(s, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
ret = trace_seq_printf(s,
"\n ------------------------------------------\n\n");
if (!ret)
- TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_PARTIAL_LINE;
- return ret;
+ return TRACE_TYPE_HANDLED;
}
-static bool
-trace_branch_is_leaf(struct trace_iterator *iter,
+static struct ftrace_graph_ret_entry *
+get_return_for_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *curr)
{
struct ring_buffer_iter *ring_iter;
@@ -211,65 +304,123 @@ trace_branch_is_leaf(struct trace_iterator *iter,
ring_iter = iter->buffer_iter[iter->cpu];
- if (!ring_iter)
- return false;
-
- event = ring_buffer_iter_peek(ring_iter, NULL);
+ /* First peek to compare current entry and the next one */
+ if (ring_iter)
+ event = ring_buffer_iter_peek(ring_iter, NULL);
+ else {
+ /* We need to consume the current entry to see the next one */
+ ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+ event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+ NULL);
+ }
if (!event)
- return false;
+ return NULL;
next = ring_buffer_event_data(event);
if (next->ent.type != TRACE_GRAPH_RET)
- return false;
+ return NULL;
if (curr->ent.pid != next->ent.pid ||
curr->graph_ent.func != next->ret.func)
- return false;
+ return NULL;
+
+ /* this is a leaf, now advance the iterator */
+ if (ring_iter)
+ ring_buffer_read(ring_iter, NULL);
+
+ return next;
+}
+
+/* Signal an execution-time overhead in the output */
+static int
+print_graph_overhead(unsigned long long duration, struct trace_seq *s)
+{
+	/* If the duration is not displayed, we don't need anything */
+ if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
+ return 1;
+
+	/* Non-nested entry or return */
+ if (duration == -1)
+ return trace_seq_printf(s, " ");
+
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		/* Duration exceeded 100 usecs */
+ if (duration > 100000ULL)
+ return trace_seq_printf(s, "! ");
- return true;
+		/* Duration exceeded 10 usecs */
+ if (duration > 10000ULL)
+ return trace_seq_printf(s, "+ ");
+ }
+
+ return trace_seq_printf(s, " ");
+}
+
+static int print_graph_abs_time(u64 t, struct trace_seq *s)
+{
+ unsigned long usecs_rem;
+
+ usecs_rem = do_div(t, NSEC_PER_SEC);
+ usecs_rem /= 1000;
+
+ return trace_seq_printf(s, "%5lu.%06lu | ",
+ (unsigned long)t, usecs_rem);
}
static enum print_line_t
-print_graph_irq(struct trace_seq *s, unsigned long addr,
- enum trace_type type, int cpu, pid_t pid)
+print_graph_irq(struct trace_iterator *iter, unsigned long addr,
+ enum trace_type type, int cpu, pid_t pid)
{
int ret;
+ struct trace_seq *s = &iter->seq;
if (addr < (unsigned long)__irqentry_text_start ||
addr >= (unsigned long)__irqentry_text_end)
return TRACE_TYPE_UNHANDLED;
- if (type == TRACE_GRAPH_ENT) {
- ret = trace_seq_printf(s, "==========> | ");
- } else {
- /* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
- /* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ /* Absolute time */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+ ret = print_graph_abs_time(iter->ts, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ /* Cpu */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+ ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+ /* Proc */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
- /* No overhead */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- ret = trace_seq_printf(s, " ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ /* No overhead */
+ ret = print_graph_overhead(-1, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ if (type == TRACE_GRAPH_ENT)
+ ret = trace_seq_printf(s, "==========>");
+ else
+ ret = trace_seq_printf(s, "<==========");
+
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+	/* Don't close the duration column if we don't have one */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ trace_seq_printf(s, " |");
+ ret = trace_seq_printf(s, "\n");
- ret = trace_seq_printf(s, "<========== |\n");
- }
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
@@ -288,7 +439,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
sprintf(msecs_str, "%lu", (unsigned long) duration);
/* Print msecs */
- ret = trace_seq_printf(s, msecs_str);
+ ret = trace_seq_printf(s, "%s", msecs_str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
@@ -321,52 +472,47 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s)
}
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s)
-{
- /* Duration exceeded 100 msecs */
- if (duration > 100000ULL)
- return trace_seq_printf(s, "! ");
-
- /* Duration exceeded 10 msecs */
- if (duration > 10000ULL)
- return trace_seq_printf(s, "+ ");
-
- return trace_seq_printf(s, " ");
-}
-
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
- struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
+ struct ftrace_graph_ent_entry *entry,
+ struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
- struct ftrace_graph_ret_entry *ret_entry;
+ struct fgraph_data *data = iter->private;
struct ftrace_graph_ret *graph_ret;
- struct ring_buffer_event *event;
struct ftrace_graph_ent *call;
unsigned long long duration;
int ret;
int i;
- event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
- ret_entry = ring_buffer_event_data(event);
graph_ret = &ret_entry->ret;
call = &entry->graph_ent;
duration = graph_ret->rettime - graph_ret->calltime;
- /* Overhead */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- ret = print_graph_overhead(duration, s);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+ /*
+		 * Comments are displayed at depth + 1. Since this
+		 * is a leaf function, keep the comments at the
+		 * same depth as the call itself.
+ */
+ *depth = call->depth - 1;
}
- /* Duration */
- ret = print_graph_duration(duration, s);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
+ /* Overhead */
+ ret = print_graph_overhead(duration, s);
+ if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
+ /* Duration */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ ret = print_graph_duration(duration, s);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
@@ -386,33 +532,34 @@ print_graph_entry_leaf(struct trace_iterator *iter,
}
static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
- struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+ struct ftrace_graph_ent_entry *entry,
+ struct trace_seq *s, int cpu)
{
- int i;
- int ret;
struct ftrace_graph_ent *call = &entry->graph_ent;
+ struct fgraph_data *data = iter->private;
+ int ret;
+ int i;
- /* No overhead */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- ret = trace_seq_printf(s, " ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+ *depth = call->depth;
}
- /* Interrupt */
- ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
- if (ret == TRACE_TYPE_UNHANDLED) {
- /* No time */
+ /* No overhead */
+ ret = print_graph_overhead(-1, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* No time */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- } else {
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
}
-
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
@@ -428,20 +575,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- return TRACE_TYPE_HANDLED;
+ /*
+ * we already consumed the current entry to check the next one
+ * and see if this is a leaf.
+ */
+ return TRACE_TYPE_NO_CONSUME;
}
static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
- struct trace_iterator *iter, int cpu)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+ int type, unsigned long addr)
{
- int ret;
+ struct fgraph_data *data = iter->private;
struct trace_entry *ent = iter->ent;
+ int cpu = iter->cpu;
+ int ret;
/* Pid */
- if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
+ if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
+ if (type) {
+ /* Interrupt */
+ ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ /* Absolute time */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
+ ret = print_graph_abs_time(iter->ts, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
/* Cpu */
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
ret = print_graph_cpu(s, cpu);
@@ -460,54 +627,65 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
return TRACE_TYPE_PARTIAL_LINE;
}
- if (trace_branch_is_leaf(iter, field))
- return print_graph_entry_leaf(iter, field, s);
+ return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+ struct trace_iterator *iter)
+{
+ int cpu = iter->cpu;
+ struct ftrace_graph_ent *call = &field->graph_ent;
+ struct ftrace_graph_ret_entry *leaf_ret;
+
+ if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ leaf_ret = get_return_for_leaf(iter, field);
+ if (leaf_ret)
+ return print_graph_entry_leaf(iter, field, leaf_ret, s);
else
- return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+ return print_graph_entry_nested(iter, field, s, cpu);
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
- struct trace_entry *ent, int cpu)
+ struct trace_entry *ent, struct trace_iterator *iter)
{
- int i;
- int ret;
unsigned long long duration = trace->rettime - trace->calltime;
+ struct fgraph_data *data = iter->private;
+ pid_t pid = ent->pid;
+ int cpu = iter->cpu;
+ int ret;
+ int i;
- /* Pid */
- if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data) {
+ int cpu = iter->cpu;
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
- /* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ /*
+		 * Comments are displayed at depth + 1. Since this is
+		 * the return from a function, we now want the comments
+		 * to display at the same level as the closing bracket.
+ */
+ *depth = trace->depth - 1;
}
- /* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, ent->pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
-
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ if (print_graph_prologue(iter, s, 0, 0))
+ return TRACE_TYPE_PARTIAL_LINE;
/* Overhead */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- ret = print_graph_overhead(duration, s);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ ret = print_graph_overhead(duration, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
/* Duration */
- ret = print_graph_duration(duration, s);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ ret = print_graph_duration(duration, s);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -528,7 +706,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
return TRACE_TYPE_PARTIAL_LINE;
}
- ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+ ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
@@ -536,61 +714,73 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
}
static enum print_line_t
-print_graph_comment(struct print_entry *trace, struct trace_seq *s,
- struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+ struct trace_iterator *iter)
{
- int i;
+ unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+ struct fgraph_data *data = iter->private;
+ struct trace_event *event;
+ int depth = 0;
int ret;
+ int i;
- /* Pid */
- if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
-
- /* Cpu */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, iter->cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
- /* Proc */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, ent->pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
+ if (data)
+ depth = per_cpu_ptr(data, iter->cpu)->depth;
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ if (print_graph_prologue(iter, s, 0, 0))
+ return TRACE_TYPE_PARTIAL_LINE;
/* No overhead */
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- ret = trace_seq_printf(s, " ");
+ ret = print_graph_overhead(-1, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ /* No time */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
+ ret = trace_seq_printf(s, " | ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
- /* No time */
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
/* Indentation */
- if (trace->depth > 0)
- for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+ if (depth > 0)
+ for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
/* The comment */
- ret = trace_seq_printf(s, "/* %s", trace->buf);
+ ret = trace_seq_printf(s, "/* ");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- if (ent->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
+ switch (iter->ent->type) {
+ case TRACE_BPRINT:
+ ret = trace_print_bprintk_msg_only(iter);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ break;
+ case TRACE_PRINT:
+ ret = trace_print_printk_msg_only(iter);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ break;
+ default:
+ event = ftrace_find_event(ent->type);
+ if (!event)
+ return TRACE_TYPE_UNHANDLED;
+
+ ret = event->trace(iter, sym_flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+ }
+
+ /* Strip ending newline */
+ if (s->buffer[s->len - 1] == '\n') {
+ s->buffer[s->len - 1] = '\0';
+ s->len--;
+ }
ret = trace_seq_printf(s, " */\n");
if (!ret)
@@ -603,62 +793,91 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s,
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
- struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
switch (entry->type) {
case TRACE_GRAPH_ENT: {
struct ftrace_graph_ent_entry *field;
trace_assign_type(field, entry);
- return print_graph_entry(field, s, iter,
- iter->cpu);
+ return print_graph_entry(field, s, iter);
}
case TRACE_GRAPH_RET: {
struct ftrace_graph_ret_entry *field;
trace_assign_type(field, entry);
- return print_graph_return(&field->ret, s, entry, iter->cpu);
- }
- case TRACE_PRINT: {
- struct print_entry *field;
- trace_assign_type(field, entry);
- return print_graph_comment(field, s, entry, iter);
+ return print_graph_return(&field->ret, s, entry, iter);
}
default:
- return TRACE_TYPE_UNHANDLED;
+ return print_graph_comment(s, entry, iter);
}
+
+ return TRACE_TYPE_HANDLED;
}
static void print_graph_headers(struct seq_file *s)
{
/* 1st line */
seq_printf(s, "# ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+ seq_printf(s, " TIME ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
- seq_printf(s, "CPU ");
+ seq_printf(s, "CPU");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
- seq_printf(s, "TASK/PID ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
- seq_printf(s, "OVERHEAD/");
- seq_printf(s, "DURATION FUNCTION CALLS\n");
+ seq_printf(s, " TASK/PID ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ seq_printf(s, " DURATION ");
+ seq_printf(s, " FUNCTION CALLS\n");
/* 2nd line */
seq_printf(s, "# ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
+ seq_printf(s, " | ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
- seq_printf(s, "| ");
+ seq_printf(s, "| ");
if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
- seq_printf(s, "| | ");
- if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
- seq_printf(s, "| ");
- seq_printf(s, "| | | | |\n");
- } else
- seq_printf(s, " | | | | |\n");
+ seq_printf(s, " | | ");
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
+ seq_printf(s, " | | ");
+ seq_printf(s, " | | | |\n");
+}
+
+static void graph_trace_open(struct trace_iterator *iter)
+{
+ /* pid and depth on the last trace processed */
+ struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+ int cpu;
+
+ if (!data)
+ pr_warning("function graph tracer: not enough memory\n");
+ else
+ for_each_possible_cpu(cpu) {
+ pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+ int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ *pid = -1;
+ *depth = 0;
+ }
+
+ iter->private = data;
}
+
+static void graph_trace_close(struct trace_iterator *iter)
+{
+ free_percpu(iter->private);
+}
+
static struct tracer graph_trace __read_mostly = {
- .name = "function_graph",
- .init = graph_trace_init,
- .reset = graph_trace_reset,
+ .name = "function_graph",
+ .open = graph_trace_open,
+ .close = graph_trace_close,
+ .wait_pipe = poll_wait_pipe,
+ .init = graph_trace_init,
+ .reset = graph_trace_reset,
.print_line = print_graph_function,
.print_header = print_graph_headers,
.flags = &tracer_flags,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_function_graph,
+#endif
};
static __init int init_graph_trace(void)
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 649df22d435f..7bfdf4c2347f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,30 +1,53 @@
/*
* h/w branch tracer for x86 based on bts
*
- * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
- *
+ * Copyright (C) 2008-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
*/
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
-#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
#include <asm/ds.h>
#include "trace.h"
+#include "trace_output.h"
#define SIZEOF_BTS (1 << 13)
+/*
+ * The tracer lock protects the below per-cpu tracer array.
+ * It needs to be held to:
+ * - start tracing on all cpus
+ * - stop tracing on all cpus
+ * - start tracing on a single hotplug cpu
+ * - stop tracing on a single hotplug cpu
+ * - read the trace from all cpus
+ * - read the trace from a single cpu
+ */
+static DEFINE_SPINLOCK(bts_tracer_lock);
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())
+static int __read_mostly trace_hw_branches_enabled;
+static struct trace_array *hw_branch_trace __read_mostly;
+
+/*
+ * Start tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_lock must be locked.
+ */
static void bts_trace_start_cpu(void *arg)
{
if (this_tracer)
@@ -42,14 +65,20 @@ static void bts_trace_start_cpu(void *arg)
static void bts_trace_start(struct trace_array *tr)
{
- int cpu;
+ spin_lock(&bts_tracer_lock);
- tracing_reset_online_cpus(tr);
+ on_each_cpu(bts_trace_start_cpu, NULL, 1);
+ trace_hw_branches_enabled = 1;
- for_each_cpu(cpu, cpu_possible_mask)
- smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+ spin_unlock(&bts_tracer_lock);
}
+/*
+ * Stop tracing on the current cpu.
+ * The argument is ignored.
+ *
+ * pre: bts_tracer_lock must be locked.
+ */
static void bts_trace_stop_cpu(void *arg)
{
if (this_tracer) {
@@ -60,26 +89,60 @@ static void bts_trace_stop_cpu(void *arg)
static void bts_trace_stop(struct trace_array *tr)
{
- int cpu;
+ spin_lock(&bts_tracer_lock);
+
+ trace_hw_branches_enabled = 0;
+ on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+
+ spin_unlock(&bts_tracer_lock);
+}
+
+static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
- for_each_cpu(cpu, cpu_possible_mask)
+ spin_lock(&bts_tracer_lock);
+
+ if (!trace_hw_branches_enabled)
+ goto out;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+ break;
+ case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
+ break;
+ }
+
+ out:
+ spin_unlock(&bts_tracer_lock);
+ return NOTIFY_DONE;
}
+static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
+ .notifier_call = bts_hotcpu_handler
+};
+
static int bts_trace_init(struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
+ hw_branch_trace = tr;
+
bts_trace_start(tr);
return 0;
}
+static void bts_trace_reset(struct trace_array *tr)
+{
+ bts_trace_stop(tr);
+}
+
static void bts_trace_print_header(struct seq_file *m)
{
- seq_puts(m,
- "# CPU# FROM TO FUNCTION\n");
- seq_puts(m,
- "# | | | |\n");
+ seq_puts(m, "# CPU# TO <- FROM\n");
}
static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
@@ -87,15 +150,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
struct trace_entry *entry = iter->ent;
struct trace_seq *seq = &iter->seq;
struct hw_branch_entry *it;
+ unsigned long symflags = TRACE_ITER_SYM_OFFSET;
trace_assign_type(it, entry);
if (entry->type == TRACE_HW_BRANCHES) {
- if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
- trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
- it->from, it->to) &&
- (!it->from ||
- seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
+ if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
+ seq_print_ip_sym(seq, it->to, symflags) &&
+ trace_seq_printf(seq, "\t <- ") &&
+ seq_print_ip_sym(seq, it->from, symflags) &&
trace_seq_printf(seq, "\n"))
return TRACE_TYPE_HANDLED;
return TRACE_TYPE_PARTIAL_LINE;;
@@ -103,26 +166,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
return TRACE_TYPE_UNHANDLED;
}
-void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
+void trace_hw_branch(u64 from, u64 to)
{
+ struct trace_array *tr = hw_branch_trace;
struct ring_buffer_event *event;
struct hw_branch_entry *entry;
- unsigned long irq;
+ unsigned long irq1;
+ int cpu;
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
- if (!event)
+ if (unlikely(!tr))
return;
+
+ if (unlikely(!trace_hw_branches_enabled))
+ return;
+
+ local_irq_save(irq1);
+ cpu = raw_smp_processor_id();
+ if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+ goto out;
+
+ event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
+ sizeof(*entry), 0, 0);
+ if (!event)
+ goto out;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, 0, from);
entry->ent.type = TRACE_HW_BRANCHES;
- entry->ent.cpu = smp_processor_id();
entry->from = from;
entry->to = to;
- ring_buffer_unlock_commit(tr->buffer, event, irq);
+ trace_buffer_unlock_commit(tr, event, 0, 0);
+
+ out:
+ atomic_dec(&tr->data[cpu]->disabled);
+ local_irq_restore(irq1);
}
-static void trace_bts_at(struct trace_array *tr,
- const struct bts_trace *trace, void *at)
+static void trace_bts_at(const struct bts_trace *trace, void *at)
{
struct bts_struct bts;
int err = 0;
@@ -137,18 +216,29 @@ static void trace_bts_at(struct trace_array *tr,
switch (bts.qualifier) {
case BTS_BRANCH:
- trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
+ trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
break;
}
}
+/*
+ * Collect the trace on the current cpu and write it into the ftrace buffer.
+ *
+ * pre: bts_tracer_lock must be locked
+ */
static void trace_bts_cpu(void *arg)
{
struct trace_array *tr = (struct trace_array *) arg;
const struct bts_trace *trace;
unsigned char *at;
- if (!this_tracer)
+ if (unlikely(!tr))
+ return;
+
+ if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
+ return;
+
+ if (unlikely(!this_tracer))
return;
ds_suspend_bts(this_tracer);
@@ -158,11 +248,11 @@ static void trace_bts_cpu(void *arg)
for (at = trace->ds.top; (void *)at < trace->ds.end;
at += trace->ds.size)
- trace_bts_at(tr, trace, at);
+ trace_bts_at(trace, at);
for (at = trace->ds.begin; (void *)at < trace->ds.top;
at += trace->ds.size)
- trace_bts_at(tr, trace, at);
+ trace_bts_at(trace, at);
out:
ds_resume_bts(this_tracer);
@@ -170,26 +260,43 @@ out:
static void trace_bts_prepare(struct trace_iterator *iter)
{
- int cpu;
+ spin_lock(&bts_tracer_lock);
+
+ on_each_cpu(trace_bts_cpu, iter->tr, 1);
+
+ spin_unlock(&bts_tracer_lock);
+}
+
+static void trace_bts_close(struct trace_iterator *iter)
+{
+ tracing_reset_online_cpus(iter->tr);
+}
+
+void trace_hw_branch_oops(void)
+{
+ spin_lock(&bts_tracer_lock);
+
+ trace_bts_cpu(hw_branch_trace);
- for_each_cpu(cpu, cpu_possible_mask)
- smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
+ spin_unlock(&bts_tracer_lock);
}
struct tracer bts_tracer __read_mostly =
{
.name = "hw-branch-tracer",
.init = bts_trace_init,
- .reset = bts_trace_stop,
+ .reset = bts_trace_reset,
.print_header = bts_trace_print_header,
.print_line = bts_trace_print_line,
.start = bts_trace_start,
.stop = bts_trace_stop,
- .open = trace_bts_prepare
+ .open = trace_bts_prepare,
+ .close = trace_bts_close
};
__init static int init_bts_trace(void)
{
+ register_hotcpu_notifier(&bts_hotcpu_notifier);
return register_tracer(&bts_tracer);
}
device_initcall(init_bts_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 62a78d943534..b923d13e2fad 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -1,5 +1,5 @@
/*
- * trace irqs off criticall timings
+ * trace irqs off critical timings
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
@@ -32,6 +32,8 @@ enum {
static int trace_type __read_mostly;
+static int save_lat_flag;
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1))
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
atomic_dec(&data->disabled);
}
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr,
if (!report_latency(delta))
goto out_unlock;
- trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+ trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
latency = nsecs_to_usecs(delta);
@@ -177,7 +179,7 @@ out:
data->critical_sequence = max_sequence;
data->preempt_timestamp = ftrace_now(cpu);
tracing_reset(tr, cpu);
- trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+ trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
static inline void
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
local_save_flags(flags);
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
per_cpu(tracing_cpu, cpu) = 1;
@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
atomic_inc(&data->disabled);
local_save_flags(flags);
- trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+ trace_function(tr, ip, parent_ip, flags, preempt_count());
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
-/*
- * save_tracer_enabled is used to save the state of the tracer_enabled
- * variable when we disable it when we open a trace output file.
- */
-static int save_tracer_enabled;
-
static void start_irqsoff_tracer(struct trace_array *tr)
{
register_ftrace_function(&trace_ops);
- if (tracing_is_enabled()) {
+ if (tracing_is_enabled())
tracer_enabled = 1;
- save_tracer_enabled = 1;
- } else {
+ else
tracer_enabled = 0;
- save_tracer_enabled = 0;
- }
}
static void stop_irqsoff_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
- save_tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
irqsoff_trace = tr;
/* make sure that the tracer is visible */
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
static void irqsoff_tracer_reset(struct trace_array *tr)
{
stop_irqsoff_tracer(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void irqsoff_tracer_start(struct trace_array *tr)
{
tracer_enabled = 1;
- save_tracer_enabled = 1;
}
static void irqsoff_tracer_stop(struct trace_array *tr)
{
tracer_enabled = 0;
- save_tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_open(struct trace_iterator *iter)
-{
- /* stop the trace while dumping */
- tracer_enabled = 0;
-}
-
-static void irqsoff_tracer_close(struct trace_iterator *iter)
-{
- /* restart tracing */
- tracer_enabled = save_tracer_enabled;
}
#ifdef CONFIG_IRQSOFF_TRACER
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .open = irqsoff_tracer_open,
- .close = irqsoff_tracer_close,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .open = irqsoff_tracer_open,
- .close = irqsoff_tracer_close,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .open = irqsoff_tracer_open,
- .close = irqsoff_tracer_close,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 80e503ef6136..8e37fcddd8b4 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -12,6 +12,7 @@
#include <asm/atomic.h>
#include "trace.h"
+#include "trace_output.h"
struct header_iter {
struct pci_dev *dev;
@@ -183,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
switch (rw->opcode) {
case MMIO_READ:
ret = trace_seq_printf(s,
- "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
+ "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
rw->value, rw->pc, 0);
break;
case MMIO_WRITE:
ret = trace_seq_printf(s,
- "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
+ "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
rw->value, rw->pc, 0);
break;
case MMIO_UNKNOWN_OP:
ret = trace_seq_printf(s,
- "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n",
+ "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
+ "%02lx 0x%lx %d\n",
secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys,
(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
@@ -229,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
switch (m->opcode) {
case MMIO_PROBE:
ret = trace_seq_printf(s,
- "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
+ "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
secs, usec_rem, m->map_id,
(unsigned long long)m->phys, m->virt, m->len,
0UL, 0);
break;
case MMIO_UNPROBE:
ret = trace_seq_printf(s,
- "UNMAP %lu.%06lu %d 0x%lx %d\n",
+ "UNMAP %u.%06lu %d 0x%lx %d\n",
secs, usec_rem, m->map_id, 0UL, 0);
break;
default:
@@ -255,18 +257,15 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
const char *msg = print->buf;
struct trace_seq *s = &iter->seq;
unsigned long long t = ns2usecs(iter->ts);
- unsigned long usec_rem = do_div(t, 1000000ULL);
+ unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t;
int ret;
/* The trailing newline must be in the message. */
- ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
+ ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- if (entry->flags & TRACE_FLAG_CONT)
- trace_seq_print_cont(s, iter);
-
return TRACE_TYPE_HANDLED;
}
@@ -308,21 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
{
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
- unsigned long irq_flags;
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_RW;
entry->rw = *rw;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -338,21 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
{
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
- unsigned long irq_flags;
+ int pc = preempt_count();
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
+ event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP,
+ sizeof(*entry), 0, pc);
if (!event) {
atomic_inc(&dropped_count);
return;
}
entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, preempt_count());
- entry->ent.type = TRACE_MMIO_MAP;
entry->map = *map;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
+ trace_buffer_unlock_commit(tr, event, 0, pc);
}
void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -368,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
int mmio_trace_printk(const char *fmt, va_list args)
{
- return trace_vprintk(0, -1, fmt, args);
+ return trace_vprintk(0, fmt, args);
}
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
index b9767acd30ac..394f94417e2f 100644
--- a/kernel/trace/trace_nop.c
+++ b/kernel/trace/trace_nop.c
@@ -47,12 +47,7 @@ static void stop_nop_trace(struct trace_array *tr)
static int nop_trace_init(struct trace_array *tr)
{
- int cpu;
ctx_trace = tr;
-
- for_each_online_cpu(cpu)
- tracing_reset(tr, cpu);
-
start_nop_trace(tr);
return 0;
}
@@ -96,6 +91,7 @@ struct tracer nop_trace __read_mostly =
.name = "nop",
.init = nop_trace_init,
.reset = nop_trace_reset,
+ .wait_pipe = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_nop,
#endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
new file mode 100644
index 000000000000..64b54a59c55b
--- /dev/null
+++ b/kernel/trace/trace_output.c
@@ -0,0 +1,1017 @@
+/*
+ * trace_output.c
+ *
+ * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+
+#include "trace_output.h"
+
+/* must be a power of 2 */
+#define EVENT_HASHSIZE 128
+
+static DEFINE_MUTEX(trace_event_mutex);
+static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
+
+static int next_event_type = __TRACE_LAST_TYPE + 1;
+
+enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct bprint_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_bprintf(s, field->fmt, field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct print_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_printf(s, "%s", field->buf);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf() is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ */
+int
+trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+ int len = (PAGE_SIZE - 1) - s->len;
+ va_list ap;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+ va_end(ap);
+
+ /* If we can't write it all, don't bother writing anything */
+ if (ret >= len)
+ return 0;
+
+ s->len += ret;
+
+ return len;
+}
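+
+/*
+ * A minimal usage sketch (comm/entry are the caller's locals): output
+ * callbacks build their line with trace_seq_printf() and treat a zero
+ * return as "line did not fit":
+ *
+ *	if (!trace_seq_printf(s, "%16s-%-5d\n", comm, entry->pid))
+ *		return TRACE_TYPE_PARTIAL_LINE;
+ */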
+
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+ int len = (PAGE_SIZE - 1) - s->len;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+ /* If we can't write it all, don't bother writing anything */
+ if (ret >= len)
+ return 0;
+
+ s->len += ret;
+
+ return len;
+}
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ */
+int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+ int len = strlen(str);
+
+ if (len > ((PAGE_SIZE - 1) - s->len))
+ return 0;
+
+ memcpy(s->buffer + s->len, str, len);
+ s->len += len;
+
+ return len;
+}
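+
+/*
+ * A minimal usage sketch: fixed separators are usually emitted with
+ * trace_seq_puts(), again treating a zero return as a full buffer:
+ *
+ *	if (!trace_seq_puts(s, " <= "))
+ *		return TRACE_TYPE_PARTIAL_LINE;
+ */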
+
+int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+ if (s->len >= (PAGE_SIZE - 1))
+ return 0;
+
+ s->buffer[s->len++] = c;
+
+ return 1;
+}
+
+int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+{
+ if (len > ((PAGE_SIZE - 1) - s->len))
+ return 0;
+
+ memcpy(s->buffer + s->len, mem, len);
+ s->len += len;
+
+ return len;
+}
+
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
+{
+ unsigned char hex[HEX_CHARS];
+ const unsigned char *data = mem;
+ int i, j;
+
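+	/* emit the bytes most-significant first, regardless of host endianness */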
+#ifdef __BIG_ENDIAN
+ for (i = 0, j = 0; i < len; i++) {
+#else
+ for (i = len-1, j = 0; i >= 0; i--) {
+#endif
+ hex[j++] = hex_asc_hi(data[i]);
+ hex[j++] = hex_asc_lo(data[i]);
+ }
+ hex[j++] = ' ';
+
+ return trace_seq_putmem(s, hex, j);
+}
+
+void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+ void *ret;
+
+ if (len > ((PAGE_SIZE - 1) - s->len))
+ return NULL;
+
+ ret = s->buffer + s->len;
+ s->len += len;
+
+ return ret;
+}
+
+int trace_seq_path(struct trace_seq *s, struct path *path)
+{
+ unsigned char *p;
+
+ if (s->len >= (PAGE_SIZE - 1))
+ return 0;
+ p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+ if (!IS_ERR(p)) {
+ p = mangle_path(s->buffer + s->len, p, "\n");
+ if (p) {
+ s->len = p - s->buffer;
+ return 1;
+ }
+ } else {
+ s->buffer[s->len++] = '?';
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_KRETPROBES
+static inline const char *kretprobed(const char *name)
+{
+ static const char tramp_name[] = "kretprobe_trampoline";
+ int size = sizeof(tramp_name);
+
+ if (strncmp(tramp_name, name, size) == 0)
+ return "[unknown/kretprobe'd]";
+ return name;
+}
+#else
+static inline const char *kretprobed(const char *name)
+{
+ return name;
+}
+#endif /* CONFIG_KRETPROBES */
+
+static int
+seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+ char str[KSYM_SYMBOL_LEN];
+ const char *name;
+
+ kallsyms_lookup(address, NULL, NULL, NULL, str);
+
+ name = kretprobed(str);
+
+ return trace_seq_printf(s, fmt, name);
+#endif
+ return 1;
+}
+
+static int
+seq_print_sym_offset(struct trace_seq *s, const char *fmt,
+ unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+ char str[KSYM_SYMBOL_LEN];
+ const char *name;
+
+ sprint_symbol(str, address);
+ name = kretprobed(str);
+
+ return trace_seq_printf(s, fmt, name);
+#endif
+ return 1;
+}
+
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
+int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+ unsigned long ip, unsigned long sym_flags)
+{
+ struct file *file = NULL;
+ unsigned long vmstart = 0;
+ int ret = 1;
+
+ if (mm) {
+ const struct vm_area_struct *vma;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, ip);
+ if (vma) {
+ file = vma->vm_file;
+ vmstart = vma->vm_start;
+ }
+ if (file) {
+ ret = trace_seq_path(s, &file->f_path);
+ if (ret)
+ ret = trace_seq_printf(s, "[+0x%lx]",
+ ip - vmstart);
+ }
+ up_read(&mm->mmap_sem);
+ }
+ if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
+ ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+ return ret;
+}
+
+int
+seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
+ unsigned long sym_flags)
+{
+ struct mm_struct *mm = NULL;
+ int ret = 1;
+ unsigned int i;
+
+ if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
+ struct task_struct *task;
+ /*
+ * we do the lookup on the thread group leader,
+ * since individual threads might have already quit!
+ */
+ rcu_read_lock();
+ task = find_task_by_vpid(entry->ent.tgid);
+ if (task)
+ mm = get_task_mm(task);
+ rcu_read_unlock();
+ }
+
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ unsigned long ip = entry->caller[i];
+
+ if (ip == ULONG_MAX || !ret)
+ break;
+ if (i && ret)
+ ret = trace_seq_puts(s, " <- ");
+ if (!ip) {
+ if (ret)
+ ret = trace_seq_puts(s, "??");
+ continue;
+ }
+ if (!ret)
+ break;
+ if (ret)
+ ret = seq_print_user_ip(s, mm, ip, sym_flags);
+ }
+
+ if (mm)
+ mmput(mm);
+ return ret;
+}
+
+int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
+{
+ int ret;
+
+ if (!ip)
+ return trace_seq_printf(s, "0");
+
+ if (sym_flags & TRACE_ITER_SYM_OFFSET)
+ ret = seq_print_sym_offset(s, "%s", ip);
+ else
+ ret = seq_print_sym_short(s, "%s", ip);
+
+ if (!ret)
+ return 0;
+
+ if (sym_flags & TRACE_ITER_SYM_ADDR)
+ ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+ return ret;
+}
+
+static int
+lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
+{
+ int hardirq, softirq;
+ char comm[TASK_COMM_LEN];
+
+ trace_find_cmdline(entry->pid, comm);
+ hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
+ softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
+
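+	/*
+	 * Latency-format flag characters: 'd' irqs off ('X' when the arch
+	 * cannot report it), 'N' need-resched, and 'h'/'s'/'H' for hardirq,
+	 * softirq or both.
+	 */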
+ if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
+ comm, entry->pid, cpu,
+ (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+ 'X' : '.',
+ (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
+ 'N' : '.',
+ (hardirq && softirq) ? 'H' :
+ hardirq ? 'h' : softirq ? 's' : '.'))
+ return 0;
+
+ if (entry->preempt_count)
+ return trace_seq_printf(s, "%x", entry->preempt_count);
+ return trace_seq_puts(s, ".");
+}
+
+static unsigned long preempt_mark_thresh = 100;
+
+static int
+lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
+ unsigned long rel_usecs)
+{
+ return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
+ rel_usecs > preempt_mark_thresh ? '!' :
+ rel_usecs > 1 ? '+' : ' ');
+}
+
+int trace_print_context(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ unsigned long long t = ns2usecs(iter->ts);
+ unsigned long usec_rem = do_div(t, USEC_PER_SEC);
+ unsigned long secs = (unsigned long)t;
+ char comm[TASK_COMM_LEN];
+
+ trace_find_cmdline(entry->pid, comm);
+
+ return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
+ comm, entry->pid, iter->cpu, secs, usec_rem);
+}
+
+int trace_print_lat_context(struct trace_iterator *iter)
+{
+ u64 next_ts;
+ int ret;
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent,
+ *next_entry = trace_find_next_entry(iter, NULL,
+ &next_ts);
+ unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
+ unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
+ unsigned long rel_usecs;
+
+ if (!next_entry)
+ next_ts = iter->ts;
+ rel_usecs = ns2usecs(next_ts - iter->ts);
+
+ if (verbose) {
+ char comm[TASK_COMM_LEN];
+
+ trace_find_cmdline(entry->pid, comm);
+
+ ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
+ " %ld.%03ldms (+%ld.%03ldms): ", comm,
+ entry->pid, iter->cpu, entry->flags,
+ entry->preempt_count, iter->idx,
+ ns2usecs(iter->ts),
+ abs_usecs / USEC_PER_MSEC,
+ abs_usecs % USEC_PER_MSEC,
+ rel_usecs / USEC_PER_MSEC,
+ rel_usecs % USEC_PER_MSEC);
+ } else {
+ ret = lat_print_generic(s, entry, iter->cpu);
+ if (ret)
+ ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
+ }
+
+ return ret;
+}
+
+static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
+
+static int task_state_char(unsigned long state)
+{
+ int bit = state ? __ffs(state) + 1 : 0;
+
+ return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
+}
+
+/**
+ * ftrace_find_event - find a registered event
+ * @type: the type of event to look for
+ *
+ * Returns the event registered for @type, or NULL if there is none.
+ */
+struct trace_event *ftrace_find_event(int type)
+{
+ struct trace_event *event;
+ struct hlist_node *n;
+ unsigned key;
+
+ key = type & (EVENT_HASHSIZE - 1);
+
+ hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
+ if (event->type == type)
+ return event;
+ }
+
+ return NULL;
+}
+
+/**
+ * register_ftrace_event - register output for an event type
+ * @event: the event type to register
+ *
+ * Event types are stored in a hash and this hash is used to
+ * find a way to print an event. If the @event->type is set
+ * then it will use that type, otherwise it will assign a
+ * type to use.
+ *
+ * If you assign your own type, please make sure it is added
+ * to the trace_type enum in trace.h, to avoid collisions
+ * with the dynamic types.
+ *
+ * Returns the event type number or zero on error.
+ */
+int register_ftrace_event(struct trace_event *event)
+{
+ unsigned key;
+ int ret = 0;
+
+ mutex_lock(&trace_event_mutex);
+
+ if (!event) {
+ ret = next_event_type++;
+ goto out;
+ }
+
+ if (!event->type)
+ event->type = next_event_type++;
+ else if (event->type > __TRACE_LAST_TYPE) {
+ printk(KERN_WARNING "Need to add type to trace.h\n");
+ WARN_ON(1);
+ }
+
+ if (ftrace_find_event(event->type))
+ goto out;
+
+ if (event->trace == NULL)
+ event->trace = trace_nop_print;
+ if (event->raw == NULL)
+ event->raw = trace_nop_print;
+ if (event->hex == NULL)
+ event->hex = trace_nop_print;
+ if (event->binary == NULL)
+ event->binary = trace_nop_print;
+
+ key = event->type & (EVENT_HASHSIZE - 1);
+
+ hlist_add_head_rcu(&event->node, &event_hash[key]);
+
+ ret = event->type;
+ out:
+ mutex_unlock(&trace_event_mutex);
+
+ return ret;
+}
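+
+/*
+ * A minimal registration sketch (the names below are made up): a tracer
+ * that defines its own entry type hooks up its output callbacks with
+ *
+ *	static struct trace_event my_event = {
+ *		.type	= TRACE_MY_ENTRY,	/* from the trace_type enum */
+ *		.trace	= my_trace_output,
+ *	};
+ *
+ *	if (!register_ftrace_event(&my_event))
+ *		pr_warning("my_event failed to register\n");
+ *
+ * Callbacks left NULL fall back to trace_nop_print().
+ */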
+
+/**
+ * unregister_ftrace_event - remove a no longer used event
+ * @event: the event to remove
+ */
+int unregister_ftrace_event(struct trace_event *event)
+{
+ mutex_lock(&trace_event_mutex);
+ hlist_del(&event->node);
+ mutex_unlock(&trace_event_mutex);
+
+ return 0;
+}
+
+/*
+ * Standard events
+ */
+
+enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
+{
+ return TRACE_TYPE_HANDLED;
+}
+
+/* TRACE_FN */
+static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
+{
+ struct ftrace_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!seq_print_ip_sym(s, field->ip, flags))
+ goto partial;
+
+ if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
+ if (!trace_seq_printf(s, " <-"))
+ goto partial;
+ if (!seq_print_ip_sym(s,
+ field->parent_ip,
+ flags))
+ goto partial;
+ }
+ if (!trace_seq_printf(s, "\n"))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
+{
+ struct ftrace_entry *field;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
+ field->ip,
+ field->parent_ip))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
+{
+ struct ftrace_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ SEQ_PUT_HEX_FIELD_RET(s, field->ip);
+ SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
+{
+ struct ftrace_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ SEQ_PUT_FIELD_RET(s, field->ip);
+ SEQ_PUT_FIELD_RET(s, field->parent_ip);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_fn_event = {
+ .type = TRACE_FN,
+ .trace = trace_fn_trace,
+ .raw = trace_fn_raw,
+ .hex = trace_fn_hex,
+ .binary = trace_fn_bin,
+};
+
+/* TRACE_CTX and TRACE_WAKE */
+static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
+ char *delim)
+{
+ struct ctx_switch_entry *field;
+ char comm[TASK_COMM_LEN];
+ int S, T;
+
+
+ trace_assign_type(field, iter->ent);
+
+ T = task_state_char(field->next_state);
+ S = task_state_char(field->prev_state);
+ trace_find_cmdline(field->next_pid, comm);
+ if (!trace_seq_printf(&iter->seq,
+ " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+ field->prev_pid,
+ field->prev_prio,
+ S, delim,
+ field->next_cpu,
+ field->next_pid,
+ field->next_prio,
+ T, comm))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
+{
+ return trace_ctxwake_print(iter, "==>");
+}
+
+static enum print_line_t trace_wake_print(struct trace_iterator *iter,
+ int flags)
+{
+ return trace_ctxwake_print(iter, " +");
+}
+
+static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
+{
+ struct ctx_switch_entry *field;
+ int T;
+
+ trace_assign_type(field, iter->ent);
+
+	if (!S)
+		S = task_state_char(field->prev_state);
+ T = task_state_char(field->next_state);
+ if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
+ field->prev_pid,
+ field->prev_prio,
+ S,
+ field->next_cpu,
+ field->next_pid,
+ field->next_prio,
+ T))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
+{
+ return trace_ctxwake_raw(iter, 0);
+}
+
+static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
+{
+ return trace_ctxwake_raw(iter, '+');
+}
+
+
+static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
+{
+ struct ctx_switch_entry *field;
+ struct trace_seq *s = &iter->seq;
+ int T;
+
+ trace_assign_type(field, iter->ent);
+
+	if (!S)
+		S = task_state_char(field->prev_state);
+ T = task_state_char(field->next_state);
+
+ SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
+ SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
+ SEQ_PUT_HEX_FIELD_RET(s, S);
+ SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
+ SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
+ SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
+ SEQ_PUT_HEX_FIELD_RET(s, T);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
+{
+ return trace_ctxwake_hex(iter, 0);
+}
+
+static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
+{
+ return trace_ctxwake_hex(iter, '+');
+}
+
+static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
+ int flags)
+{
+ struct ctx_switch_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ SEQ_PUT_FIELD_RET(s, field->prev_pid);
+ SEQ_PUT_FIELD_RET(s, field->prev_prio);
+ SEQ_PUT_FIELD_RET(s, field->prev_state);
+ SEQ_PUT_FIELD_RET(s, field->next_pid);
+ SEQ_PUT_FIELD_RET(s, field->next_prio);
+ SEQ_PUT_FIELD_RET(s, field->next_state);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_ctx_event = {
+ .type = TRACE_CTX,
+ .trace = trace_ctx_print,
+ .raw = trace_ctx_raw,
+ .hex = trace_ctx_hex,
+ .binary = trace_ctxwake_bin,
+};
+
+static struct trace_event trace_wake_event = {
+ .type = TRACE_WAKE,
+ .trace = trace_wake_print,
+ .raw = trace_wake_raw,
+ .hex = trace_wake_hex,
+ .binary = trace_ctxwake_bin,
+};
+
+/* TRACE_SPECIAL */
+static enum print_line_t trace_special_print(struct trace_iterator *iter,
+ int flags)
+{
+ struct special_entry *field;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
+ field->arg1,
+ field->arg2,
+ field->arg3))
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_special_hex(struct trace_iterator *iter,
+ int flags)
+{
+ struct special_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
+ SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
+ SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_special_bin(struct trace_iterator *iter,
+ int flags)
+{
+ struct special_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ SEQ_PUT_FIELD_RET(s, field->arg1);
+ SEQ_PUT_FIELD_RET(s, field->arg2);
+ SEQ_PUT_FIELD_RET(s, field->arg3);
+
+ return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event trace_special_event = {
+ .type = TRACE_SPECIAL,
+ .trace = trace_special_print,
+ .raw = trace_special_print,
+ .hex = trace_special_hex,
+ .binary = trace_special_bin,
+};
+
+/* TRACE_STACK */
+
+static enum print_line_t trace_stack_print(struct trace_iterator *iter,
+ int flags)
+{
+ struct stack_entry *field;
+ struct trace_seq *s = &iter->seq;
+ int i;
+
+ trace_assign_type(field, iter->ent);
+
+	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+		if (i) {
+			if (!trace_seq_puts(s, " <= "))
+				goto partial;
+		}
+
+		/* print every entry, including the first one */
+		if (!seq_print_ip_sym(s, field->caller[i], flags))
+			goto partial;
+	}
+
+	if (!trace_seq_puts(s, "\n"))
+		goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_stack_event = {
+ .type = TRACE_STACK,
+ .trace = trace_stack_print,
+ .raw = trace_special_print,
+ .hex = trace_special_hex,
+ .binary = trace_special_bin,
+};
+
+/* TRACE_USER_STACK */
+static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
+ int flags)
+{
+ struct userstack_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!seq_print_userip_objs(field, s, flags))
+ goto partial;
+
+ if (!trace_seq_putc(s, '\n'))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_user_stack_event = {
+ .type = TRACE_USER_STACK,
+ .trace = trace_user_stack_print,
+ .raw = trace_special_print,
+ .hex = trace_special_hex,
+ .binary = trace_special_bin,
+};
+
+/* TRACE_BPRINT */
+static enum print_line_t
+trace_bprint_print(struct trace_iterator *iter, int flags)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct bprint_entry *field;
+
+ trace_assign_type(field, entry);
+
+ if (!seq_print_ip_sym(s, field->ip, flags))
+ goto partial;
+
+ if (!trace_seq_puts(s, ": "))
+ goto partial;
+
+ if (!trace_seq_bprintf(s, field->fmt, field->buf))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+
+static enum print_line_t
+trace_bprint_raw(struct trace_iterator *iter, int flags)
+{
+ struct bprint_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!trace_seq_printf(s, ": %lx : ", field->ip))
+ goto partial;
+
+ if (!trace_seq_bprintf(s, field->fmt, field->buf))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+
+static struct trace_event trace_bprint_event = {
+ .type = TRACE_BPRINT,
+ .trace = trace_bprint_print,
+ .raw = trace_bprint_raw,
+};
+
+/* TRACE_PRINT */
+static enum print_line_t trace_print_print(struct trace_iterator *iter,
+ int flags)
+{
+ struct print_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!seq_print_ip_sym(s, field->ip, flags))
+ goto partial;
+
+ if (!trace_seq_printf(s, ": %s", field->buf))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
+{
+ struct print_entry *field;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event trace_print_event = {
+ .type = TRACE_PRINT,
+ .trace = trace_print_print,
+ .raw = trace_print_raw,
+};
+
+
+static struct trace_event *events[] __initdata = {
+ &trace_fn_event,
+ &trace_ctx_event,
+ &trace_wake_event,
+ &trace_special_event,
+ &trace_stack_event,
+ &trace_user_stack_event,
+ &trace_bprint_event,
+ &trace_print_event,
+ NULL
+};
+
+__init static int init_events(void)
+{
+ struct trace_event *event;
+ int i, ret;
+
+ for (i = 0; events[i]; i++) {
+ event = events[i];
+
+ ret = register_ftrace_event(event);
+ if (!ret) {
+ printk(KERN_WARNING "event %d failed to register\n",
+ event->type);
+ WARN_ON_ONCE(1);
+ }
+ }
+
+ return 0;
+}
+device_initcall(init_events);
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
new file mode 100644
index 000000000000..e0bde39c2dd9
--- /dev/null
+++ b/kernel/trace/trace_output.h
@@ -0,0 +1,71 @@
+#ifndef __TRACE_EVENTS_H
+#define __TRACE_EVENTS_H
+
+#include "trace.h"
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+ int flags);
+
+struct trace_event {
+ struct hlist_node node;
+ int type;
+ trace_print_func trace;
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+};
+
+extern enum print_line_t
+trace_print_bprintk_msg_only(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_printk_msg_only(struct trace_iterator *iter);
+
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
+ unsigned long sym_flags);
+extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ size_t cnt);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+extern int seq_print_userip_objs(const struct userstack_entry *entry,
+ struct trace_seq *s, unsigned long sym_flags);
+extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+ unsigned long ip, unsigned long sym_flags);
+
+extern int trace_print_context(struct trace_iterator *iter);
+extern int trace_print_lat_context(struct trace_iterator *iter);
+
+extern struct trace_event *ftrace_find_event(int type);
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
+ int flags);
+
+#define MAX_MEMHEX_BYTES 8
+#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
+
+#define SEQ_PUT_FIELD_RET(s, x) \
+do { \
+ if (!trace_seq_putmem(s, &(x), sizeof(x))) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+} while (0)
+
+#define SEQ_PUT_HEX_FIELD_RET(s, x) \
+do { \
+ BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
+ if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+} while (0)
+
+#endif
+
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 7bda248daf55..118439709fb7 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -11,15 +11,113 @@
#include <linux/init.h>
#include <linux/debugfs.h>
-#include <linux/ftrace.h>
+#include <trace/power.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include "trace.h"
+#include "trace_output.h"
static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;
+static void probe_power_start(struct power_trace *it, unsigned int type,
+ unsigned int level)
+{
+ if (!trace_power_enabled)
+ return;
+
+ memset(it, 0, sizeof(struct power_trace));
+ it->state = level;
+ it->type = type;
+ it->stamp = ktime_get();
+}
+
+
+static void probe_power_end(struct power_trace *it)
+{
+ struct ring_buffer_event *event;
+ struct trace_power *entry;
+ struct trace_array_cpu *data;
+ struct trace_array *tr = power_trace;
+
+ if (!trace_power_enabled)
+ return;
+
+ preempt_disable();
+ it->end = ktime_get();
+ data = tr->data[smp_processor_id()];
+
+ event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ sizeof(*entry), 0, 0);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+ entry->state_data = *it;
+ trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+ preempt_enable();
+}
+
+static void probe_power_mark(struct power_trace *it, unsigned int type,
+ unsigned int level)
+{
+ struct ring_buffer_event *event;
+ struct trace_power *entry;
+ struct trace_array_cpu *data;
+ struct trace_array *tr = power_trace;
+
+ if (!trace_power_enabled)
+ return;
+
+ memset(it, 0, sizeof(struct power_trace));
+ it->state = level;
+ it->type = type;
+ it->stamp = ktime_get();
+ preempt_disable();
+ it->end = it->stamp;
+ data = tr->data[smp_processor_id()];
+
+ event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+ sizeof(*entry), 0, 0);
+ if (!event)
+ goto out;
+ entry = ring_buffer_event_data(event);
+ entry->state_data = *it;
+ trace_buffer_unlock_commit(tr, event, 0, 0);
+ out:
+ preempt_enable();
+}
+
+static int tracing_power_register(void)
+{
+ int ret;
+
+ ret = register_trace_power_start(probe_power_start);
+ if (ret) {
+ pr_info("power trace: Couldn't activate tracepoint"
+ " probe to trace_power_start\n");
+ return ret;
+ }
+ ret = register_trace_power_end(probe_power_end);
+ if (ret) {
+ pr_info("power trace: Couldn't activate tracepoint"
+ " probe to trace_power_end\n");
+ goto fail_start;
+ }
+ ret = register_trace_power_mark(probe_power_mark);
+ if (ret) {
+ pr_info("power trace: Couldn't activate tracepoint"
+ " probe to trace_power_mark\n");
+ goto fail_end;
+ }
+ return ret;
+fail_end:
+ unregister_trace_power_end(probe_power_end);
+fail_start:
+ unregister_trace_power_start(probe_power_start);
+ return ret;
+}
static void start_power_trace(struct trace_array *tr)
{
@@ -31,6 +129,14 @@ static void stop_power_trace(struct trace_array *tr)
trace_power_enabled = 0;
}
+static void power_trace_reset(struct trace_array *tr)
+{
+ trace_power_enabled = 0;
+ unregister_trace_power_start(probe_power_start);
+ unregister_trace_power_end(probe_power_end);
+ unregister_trace_power_mark(probe_power_mark);
+}
+
static int power_trace_init(struct trace_array *tr)
{
@@ -38,6 +144,7 @@ static int power_trace_init(struct trace_array *tr)
power_trace = tr;
trace_power_enabled = 1;
+ tracing_power_register();
for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
@@ -79,14 +186,21 @@ static enum print_line_t power_print_line(struct trace_iterator *iter)
return TRACE_TYPE_UNHANDLED;
}
+static void power_print_header(struct seq_file *s)
+{
+ seq_puts(s, "# TIMESTAMP STATE EVENT\n");
+ seq_puts(s, "# | | |\n");
+}
+
static struct tracer power_tracer __read_mostly =
{
.name = "power",
.init = power_trace_init,
.start = start_power_trace,
.stop = stop_power_trace,
- .reset = stop_power_trace,
+ .reset = power_trace_reset,
.print_line = power_print_line,
+ .print_header = power_print_header,
};
static int init_power_trace(void)
@@ -94,86 +208,3 @@ static int init_power_trace(void)
return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);
-
-void trace_power_start(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- if (!trace_power_enabled)
- return;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
-}
-EXPORT_SYMBOL_GPL(trace_power_start);
-
-
-void trace_power_end(struct power_trace *it)
-{
- struct ring_buffer_event *event;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- unsigned long irq_flags;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- preempt_disable();
- it->end = ktime_get();
- data = tr->data[smp_processor_id()];
-
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_POWER;
- entry->state_data = *it;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
- out:
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_end);
-
-void trace_power_mark(struct power_trace *it, unsigned int type,
- unsigned int level)
-{
- struct ring_buffer_event *event;
- struct trace_power *entry;
- struct trace_array_cpu *data;
- unsigned long irq_flags;
- struct trace_array *tr = power_trace;
-
- if (!trace_power_enabled)
- return;
-
- memset(it, 0, sizeof(struct power_trace));
- it->state = level;
- it->type = type;
- it->stamp = ktime_get();
- preempt_disable();
- it->end = it->stamp;
- data = tr->data[smp_processor_id()];
-
- event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
- &irq_flags);
- if (!event)
- goto out;
- entry = ring_buffer_event_data(event);
- tracing_generic_entry_update(&entry->ent, 0, 0);
- entry->ent.type = TRACE_POWER;
- entry->state_data = *it;
- ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-
- trace_wake_up();
-
- out:
- preempt_enable();
-}
-EXPORT_SYMBOL_GPL(trace_power_mark);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
new file mode 100644
index 000000000000..eb81556107fe
--- /dev/null
+++ b/kernel/trace/trace_printk.c
@@ -0,0 +1,270 @@
+/*
+ * trace binary printk
+ *
+ * Copyright (C) 2008 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ */
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/mutex.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+#ifdef CONFIG_MODULES
+
+/*
+ * Format strings used by a module's trace_printk() calls are saved in
+ * struct trace_bprintk_fmt entries, which are queued on trace_bprintk_fmt_list.
+ */
+static LIST_HEAD(trace_bprintk_fmt_list);
+
+/* serialize accesses to trace_bprintk_fmt_list */
+static DEFINE_MUTEX(btrace_mutex);
+
+struct trace_bprintk_fmt {
+ struct list_head list;
+ char fmt[0];
+};
+
+static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+{
+ struct trace_bprintk_fmt *pos;
+ list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+ if (!strcmp(pos->fmt, fmt))
+ return pos;
+ }
+ return NULL;
+}
+
+static
+void hold_module_trace_bprintk_format(const char **start, const char **end)
+{
+ const char **iter;
+
+ mutex_lock(&btrace_mutex);
+ for (iter = start; iter < end; iter++) {
+ struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+ if (tb_fmt) {
+ *iter = tb_fmt->fmt;
+ continue;
+ }
+
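+		/* list head plus room for a NUL-terminated copy of the format */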
+ tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt)
+ + strlen(*iter) + 1, GFP_KERNEL);
+ if (tb_fmt) {
+ list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
+ strcpy(tb_fmt->fmt, *iter);
+ *iter = tb_fmt->fmt;
+ } else
+ *iter = NULL;
+ }
+ mutex_unlock(&btrace_mutex);
+}
+
+static int module_trace_bprintk_format_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+ if (mod->num_trace_bprintk_fmt) {
+ const char **start = mod->trace_bprintk_fmt_start;
+ const char **end = start + mod->num_trace_bprintk_fmt;
+
+ if (val == MODULE_STATE_COMING)
+ hold_module_trace_bprintk_format(start, end);
+ }
+ return 0;
+}
+
+#else /* !CONFIG_MODULES */
+__init static int
+module_trace_bprintk_format_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return 0;
+}
+#endif /* CONFIG_MODULES */
+
+
+__initdata_or_module static
+struct notifier_block module_trace_bprintk_format_nb = {
+ .notifier_call = module_trace_bprintk_format_notify,
+};
+
+int __trace_bprintk(unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (unlikely(!fmt))
+ return 0;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = trace_vbprintk(ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__trace_bprintk);
+
+int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
+{
+ if (unlikely(!fmt))
+ return 0;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ return trace_vbprintk(ip, fmt, ap);
+}
+EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
+
+int __trace_printk(unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = trace_vprintk(ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__trace_printk);
+
+int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
+{
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ return trace_vprintk(ip, fmt, ap);
+}
+EXPORT_SYMBOL_GPL(__ftrace_vprintk);
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ const char **fmt = m->private;
+ const char **next = fmt;
+
+ (*pos)++;
+
+ if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
+ return NULL;
+
+ next = fmt;
+ m->private = ++next;
+
+ return fmt;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+ return t_next(m, NULL, pos);
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+ const char **fmt = v;
+ const char *str = *fmt;
+ int i;
+
+ seq_printf(m, "0x%lx : \"", (unsigned long)fmt);
+
+ /*
+ * Tabs and new lines need to be converted.
+ */
+ for (i = 0; str[i]; i++) {
+ switch (str[i]) {
+ case '\n':
+ seq_puts(m, "\\n");
+ break;
+ case '\t':
+ seq_puts(m, "\\t");
+ break;
+ case '\\':
+ seq_puts(m, "\\");
+ break;
+ case '"':
+ seq_puts(m, "\\\"");
+ break;
+ default:
+ seq_putc(m, str[i]);
+ }
+ }
+ seq_puts(m, "\"\n");
+
+ return 0;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static const struct seq_operations show_format_seq_ops = {
+ .start = t_start,
+ .next = t_next,
+ .show = t_show,
+ .stop = t_stop,
+};
+
+static int
+ftrace_formats_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &show_format_seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+
+ m->private = __start___trace_bprintk_fmt;
+ }
+ return ret;
+}
+
+static const struct file_operations ftrace_formats_fops = {
+ .open = ftrace_formats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static __init int init_trace_printk_function_export(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *entry;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
+
+ entry = debugfs_create_file("printk_formats", 0444, d_tracer,
+ NULL, &ftrace_formats_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'printk_formats' entry\n");
+
+ return 0;
+}
+
+fs_initcall(init_trace_printk_function_export);
+
+static __init int init_trace_printk(void)
+{
+ return register_module_notifier(&module_trace_bprintk_format_nb);
+}
+
+early_initcall(init_trace_printk);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index df175cb4564f..9117cea6f1ae 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,6 +18,7 @@ static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
+static int sched_stopped;
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
@@ -28,7 +29,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
int cpu;
int pc;
- if (!sched_ref)
+ if (!sched_ref || sched_stopped)
return;
tracing_record_cmdline(prev);
@@ -43,7 +44,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
data = ctx_trace->data[cpu];
if (likely(!atomic_read(&data->disabled)))
- tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
+ tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
local_irq_restore(flags);
}
@@ -61,12 +62,15 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
pc = preempt_count();
tracing_record_cmdline(current);
+ if (sched_stopped)
+ return;
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = ctx_trace->data[cpu];
if (likely(!atomic_read(&data->disabled)))
- tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
+ tracing_sched_wakeup_trace(ctx_trace, wakee, current,
flags, pc);
local_irq_restore(flags);
@@ -93,7 +97,7 @@ static int tracing_sched_register(void)
ret = register_trace_sched_switch(probe_sched_switch);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
- " probe to kernel_sched_schedule\n");
+ " probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new;
}
@@ -185,12 +189,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
ctx_trace = tr;
}
-static void start_sched_trace(struct trace_array *tr)
-{
- tracing_reset_online_cpus(tr);
- tracing_start_sched_switch_record();
-}
-
static void stop_sched_trace(struct trace_array *tr)
{
tracing_stop_sched_switch_record();
@@ -199,7 +197,8 @@ static void stop_sched_trace(struct trace_array *tr)
static int sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
- start_sched_trace(tr);
+ tracing_reset_online_cpus(tr);
+ tracing_start_sched_switch_record();
return 0;
}
@@ -211,13 +210,12 @@ static void sched_switch_trace_reset(struct trace_array *tr)
static void sched_switch_trace_start(struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
- tracing_start_sched_switch();
+ sched_stopped = 0;
}
static void sched_switch_trace_stop(struct trace_array *tr)
{
- tracing_stop_sched_switch();
+ sched_stopped = 1;
}
static struct tracer sched_switch_trace __read_mostly =
@@ -227,6 +225,7 @@ static struct tracer sched_switch_trace __read_mostly =
.reset = sched_switch_trace_reset,
.start = sched_switch_trace_start,
.stop = sched_switch_trace_stop,
+ .wait_pipe = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_sched_switch,
#endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 42ae1e77b6b3..5bc00e8f153e 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -25,12 +25,15 @@ static int __read_mostly tracer_enabled;
static struct task_struct *wakeup_task;
static int wakeup_cpu;
static unsigned wakeup_prio = -1;
+static int wakeup_rt;
static raw_spinlock_t wakeup_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
+static int save_lat_flag;
+
#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
if (task_cpu(wakeup_task) != cpu)
goto unlock;
- trace_function(tr, data, ip, parent_ip, flags, pc);
+ trace_function(tr, ip, parent_ip, flags, pc);
unlock:
__raw_spin_unlock(&wakeup_lock);
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
if (unlikely(!tracer_enabled || next != wakeup_task))
goto out_unlock;
- trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
+ tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
/*
* usecs conversion is slow so we try to delay the conversion
@@ -182,13 +186,10 @@ out:
static void __wakeup_reset(struct trace_array *tr)
{
- struct trace_array_cpu *data;
int cpu;
- for_each_possible_cpu(cpu) {
- data = tr->data[cpu];
+ for_each_possible_cpu(cpu)
tracing_reset(tr, cpu);
- }
wakeup_cpu = -1;
wakeup_prio = -1;
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr)
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
+ struct trace_array_cpu *data;
int cpu = smp_processor_id();
unsigned long flags;
long disabled;
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
tracing_record_cmdline(p);
tracing_record_cmdline(current);
- if (likely(!rt_task(p)) ||
+ if ((wakeup_rt && !rt_task(p)) ||
p->prio >= wakeup_prio ||
p->prio >= current->prio)
return;
@@ -252,9 +254,16 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
local_save_flags(flags);
- wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
- trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
- CALLER_ADDR1, CALLER_ADDR2, flags, pc);
+ data = wakeup_trace->data[wakeup_cpu];
+ data->preempt_timestamp = ftrace_now(cpu);
+ tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
+
+ /*
+	 * We must be careful in using CALLER_ADDR2. But since wake_up
+	 * is not called from an assembly function (whereas schedule is),
+	 * it should be safe to use it here.
+ */
+ trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
__raw_spin_unlock(&wakeup_lock);
@@ -262,12 +271,6 @@ out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
-/*
- * save_tracer_enabled is used to save the state of the tracer_enabled
- * variable when we disable it when we open a trace output file.
- */
-static int save_tracer_enabled;
-
static void start_wakeup_tracer(struct trace_array *tr)
{
int ret;
@@ -289,7 +292,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
ret = register_trace_sched_switch(probe_wakeup_sched_switch);
if (ret) {
pr_info("sched trace: Couldn't activate tracepoint"
- " probe to kernel_sched_schedule\n");
+ " probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new;
}
@@ -306,13 +309,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
register_ftrace_function(&trace_ops);
- if (tracing_is_enabled()) {
+ if (tracing_is_enabled())
tracer_enabled = 1;
- save_tracer_enabled = 1;
- } else {
+ else
tracer_enabled = 0;
- save_tracer_enabled = 0;
- }
return;
fail_deprobe_wake_new:
@@ -324,54 +324,54 @@ fail_deprobe:
static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
- save_tracer_enabled = 0;
unregister_ftrace_function(&trace_ops);
unregister_trace_sched_switch(probe_wakeup_sched_switch);
unregister_trace_sched_wakeup_new(probe_wakeup);
unregister_trace_sched_wakeup(probe_wakeup);
}
-static int wakeup_tracer_init(struct trace_array *tr)
+static int __wakeup_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
wakeup_trace = tr;
start_wakeup_tracer(tr);
return 0;
}
+static int wakeup_tracer_init(struct trace_array *tr)
+{
+ wakeup_rt = 0;
+ return __wakeup_tracer_init(tr);
+}
+
+static int wakeup_rt_tracer_init(struct trace_array *tr)
+{
+ wakeup_rt = 1;
+ return __wakeup_tracer_init(tr);
+}
+
static void wakeup_tracer_reset(struct trace_array *tr)
{
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void wakeup_tracer_start(struct trace_array *tr)
{
wakeup_reset(tr);
tracer_enabled = 1;
- save_tracer_enabled = 1;
}
static void wakeup_tracer_stop(struct trace_array *tr)
{
tracer_enabled = 0;
- save_tracer_enabled = 0;
-}
-
-static void wakeup_tracer_open(struct trace_iterator *iter)
-{
- /* stop the trace while dumping */
- tracer_enabled = 0;
-}
-
-static void wakeup_tracer_close(struct trace_iterator *iter)
-{
- /* forget about any processes we were recording */
- if (save_tracer_enabled) {
- wakeup_reset(iter->tr);
- tracer_enabled = 1;
- }
}
static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +381,20 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .open = wakeup_tracer_open,
- .close = wakeup_tracer_close,
+ .print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+#endif
+};
+
+static struct tracer wakeup_rt_tracer __read_mostly =
+{
+ .name = "wakeup_rt",
+ .init = wakeup_rt_tracer_init,
+ .reset = wakeup_tracer_reset,
+ .start = wakeup_tracer_start,
+ .stop = wakeup_tracer_stop,
+ .wait_pipe = poll_wait_pipe,
.print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
@@ -397,6 +409,10 @@ __init static int init_wakeup_tracer(void)
if (ret)
return ret;
+ ret = register_tracer(&wakeup_rt_tracer);
+ if (ret)
+ return ret;
+
return 0;
}
device_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index bc8e80a86bca..08f4eb2763d1 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1,5 +1,6 @@
/* Include in trace.c */
+#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
@@ -9,11 +10,12 @@ static inline int trace_valid_entry(struct trace_entry *entry)
case TRACE_FN:
case TRACE_CTX:
case TRACE_WAKE:
- case TRACE_CONT:
case TRACE_STACK:
case TRACE_PRINT:
case TRACE_SPECIAL:
case TRACE_BRANCH:
+ case TRACE_GRAPH_ENT:
+ case TRACE_GRAPH_RET:
return 1;
}
return 0;
@@ -99,9 +101,6 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
#ifdef CONFIG_DYNAMIC_FTRACE
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
struct trace_array *tr,
@@ -125,17 +124,17 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
func();
/*
- * Some archs *cough*PowerPC*cough* add charachters to the
+ * Some archs *cough*PowerPC*cough* add characters to the
* start of the function names. We simply put a '*' to
- * accomodate them.
+ * accommodate them.
*/
- func_name = "*" STR(DYN_FTRACE_TEST_NAME);
+ func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
/* filter only on our function */
ftrace_set_filter(func_name, strlen(func_name), 1);
/* enable tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
@@ -209,7 +208,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 1;
tracer_enabled = 1;
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
goto out;
@@ -247,6 +246,90 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
}
#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST 100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+	/* This is harmlessly racy; we only want to approximately detect a hang */
+ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+ ftrace_graph_stop();
+ printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+ if (ftrace_dump_on_oops)
+ __ftrace_dump(false);
+ return 0;
+ }
+
+ return trace_graph_entry(trace);
+}
+
+/*
+ * Pretty much the same as the function tracer selftest, from which this
+ * one has been borrowed.
+ */
+int
+trace_selftest_startup_function_graph(struct tracer *trace,
+ struct trace_array *tr)
+{
+ int ret;
+ unsigned long count;
+
+ /*
+	 * Simulate the init() callback, but attach a watchdog callback
+	 * to detect and recover from possible hangs.
+ */
+ tracing_reset_online_cpus(tr);
+ ret = register_ftrace_graph(&trace_graph_return,
+ &trace_graph_entry_watchdog);
+ if (ret) {
+ warn_failed_init_tracer(trace, ret);
+ goto out;
+ }
+ tracing_start_cmdline_record();
+
+ /* Sleep for a 1/10 of a second */
+ msleep(100);
+
+ /* Have we just recovered from a hang? */
+ if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+ tracing_selftest_disabled = true;
+ ret = -1;
+ goto out;
+ }
+
+ tracing_stop();
+
+ /* check the trace buffer */
+ ret = trace_test_buffer(tr, &count);
+
+ trace->reset(tr);
+ tracing_start();
+
+ if (!ret && !count) {
+ printk(KERN_CONT ".. no entries found ..");
+ ret = -1;
+ goto out;
+ }
+
+ /* Don't test dynamic tracing, the function tracer already did */
+
+out:
+ /* Stop it if we failed */
+ if (ret)
+ ftrace_graph_stop();
+
+ return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
@@ -256,7 +339,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -268,6 +351,14 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
local_irq_disable();
udelay(100);
local_irq_enable();
+
+ /*
+	 * Stop the tracer first to avoid a warning from a subsequent
+	 * buffer-flip failure: tracing_stop() disables the tr and max
+	 * buffers, which makes the flip impossible if a maximal irqs-off
+	 * latency is being recorded in parallel.
+ */
+ trace->stop(tr);
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
@@ -310,7 +401,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
}
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -322,6 +413,14 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
preempt_disable();
udelay(100);
preempt_enable();
+
+ /*
+	 * Stop the tracer first to avoid a warning from a subsequent
+	 * buffer-flip failure: tracing_stop() disables the tr and max
+	 * buffers, which makes the flip impossible if a maximal preempt-off
+	 * latency is being recorded in parallel.
+ */
+ trace->stop(tr);
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
@@ -364,10 +463,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
}
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
- goto out;
+ goto out_no_start;
}
/* reset the max latency */
@@ -381,31 +480,35 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* reverse the order of preempt vs irqs */
local_irq_enable();
+ /*
+	 * Stop the tracer first to avoid a warning from a subsequent
+	 * buffer-flip failure: tracing_stop() disables the tr and max
+	 * buffers, which makes the flip impossible if a maximal irqs/preempt-off
+	 * latency is being recorded in parallel.
+ */
+ trace->stop(tr);
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
- if (ret) {
- tracing_start();
+ if (ret)
goto out;
- }
ret = trace_test_buffer(&max_tr, &count);
- if (ret) {
- tracing_start();
+ if (ret)
goto out;
- }
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
ret = -1;
- tracing_start();
goto out;
}
/* do the test by disabling interrupts first this time */
tracing_max_latency = 0;
tracing_start();
+ trace->start(tr);
+
preempt_disable();
local_irq_disable();
udelay(100);
@@ -413,6 +516,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* reverse the order of preempt vs irqs */
local_irq_enable();
+ trace->stop(tr);
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
@@ -428,9 +532,10 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
goto out;
}
- out:
- trace->reset(tr);
+out:
tracing_start();
+out_no_start:
+ trace->reset(tr);
tracing_max_latency = save_max;
return ret;
@@ -496,7 +601,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
wait_for_completion(&isrt);
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -557,7 +662,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -589,10 +694,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
- return 0;
+ return ret;
}
/* Sleep for a 1/10 of a second */
@@ -604,6 +709,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
trace->reset(tr);
tracing_start();
+ if (!ret && !count) {
+ printk(KERN_CONT ".. no entries found ..");
+ ret = -1;
+ }
+
return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
@@ -616,7 +726,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
int ret;
/* start the tracing */
- ret = trace->init(tr);
+ ret = tracer_init(trace, tr);
if (ret) {
warn_failed_init_tracer(trace, ret);
return ret;
@@ -631,6 +741,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
trace->reset(tr);
tracing_start();
+ if (!ret && !count) {
+ printk(KERN_CONT ".. no entries found ..");
+ ret = -1;
+ }
+
return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index d0871bc0aca5..c750f65f9661 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -245,16 +245,31 @@ static int trace_lookup_stack(struct seq_file *m, long i)
#endif
}
+static void print_disabled(struct seq_file *m)
+{
+ seq_puts(m, "#\n"
+ "# Stack tracer disabled\n"
+ "#\n"
+ "# To enable the stack tracer, either add 'stacktrace' to the\n"
+ "# kernel command line\n"
+ "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
+ "#\n");
+}
+
static int t_show(struct seq_file *m, void *v)
{
long i;
int size;
if (v == SEQ_START_TOKEN) {
- seq_printf(m, " Depth Size Location"
+ seq_printf(m, " Depth Size Location"
" (%d entries)\n"
- " ----- ---- --------\n",
+ " ----- ---- --------\n",
max_stack_trace.nr_entries);
+
+ if (!stack_tracer_enabled && !max_stack_size)
+ print_disabled(m);
+
return 0;
}
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
new file mode 100644
index 000000000000..acdebd771a93
--- /dev/null
+++ b/kernel/trace/trace_stat.c
@@ -0,0 +1,326 @@
+/*
+ * Infrastructure for statistics tracing (histogram output).
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Based on the code from trace_branch.c which is
+ * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+
+
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* List of stat entries from a tracer */
+struct trace_stat_list {
+ struct list_head list;
+ void *stat;
+};
+
+/* A stat session is the stats output in one file */
+struct tracer_stat_session {
+ struct list_head session_list;
+ struct tracer_stat *ts;
+ struct list_head stat_list;
+ struct mutex stat_mutex;
+ struct dentry *file;
+};
+
+/* All of the sessions currently in use. Each stat file embeds one session */
+static LIST_HEAD(all_stat_sessions);
+static DEFINE_MUTEX(all_stat_sessions_mutex);
+
+/* The root directory for all stat files */
+static struct dentry *stat_dir;
+
+
+static void reset_stat_session(struct tracer_stat_session *session)
+{
+ struct trace_stat_list *node, *next;
+
+ list_for_each_entry_safe(node, next, &session->stat_list, list)
+ kfree(node);
+
+ INIT_LIST_HEAD(&session->stat_list);
+}
+
+static void destroy_session(struct tracer_stat_session *session)
+{
+ debugfs_remove(session->file);
+ reset_stat_session(session);
+ mutex_destroy(&session->stat_mutex);
+ kfree(session);
+}
+
+/*
+ * For tracers that don't provide a stat_cmp callback.
+ * This default forces an immediate insertion at the tail of
+ * the list.
+ */
+static int dummy_cmp(void *p1, void *p2)
+{
+ return 1;
+}
+
+/*
+ * Initialize the stat list at each trace_stat file opening.
+ * All of this copying and sorting is required at every open,
+ * since the stats could have changed between two file sessions.
+ */
+static int stat_seq_init(struct tracer_stat_session *session)
+{
+ struct trace_stat_list *iter_entry, *new_entry;
+ struct tracer_stat *ts = session->ts;
+ void *stat;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&session->stat_mutex);
+ reset_stat_session(session);
+
+ if (!ts->stat_cmp)
+ ts->stat_cmp = dummy_cmp;
+
+ stat = ts->stat_start();
+ if (!stat)
+ goto exit;
+
+ /*
+ * The first stat entry. Strictly this is the second node: the
+ * first one (the stat_list head) carries no data.
+ */
+ new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+ if (!new_entry) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&new_entry->list);
+
+ list_add(&new_entry->list, &session->stat_list);
+
+ new_entry->stat = stat;
+
+ /*
+ * Iterate over the tracer stat entries and store them in a sorted
+ * list.
+ */
+ for (i = 1; ; i++) {
+ stat = ts->stat_next(stat, i);
+
+ /* No more entries to insert */
+ if (!stat)
+ break;
+
+ new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
+ if (!new_entry) {
+ ret = -ENOMEM;
+ goto exit_free_list;
+ }
+
+ INIT_LIST_HEAD(&new_entry->list);
+ new_entry->stat = stat;
+
+ list_for_each_entry_reverse(iter_entry, &session->stat_list,
+ list) {
+
+ /* Insert in descending order */
+ if (ts->stat_cmp(iter_entry->stat,
+ new_entry->stat) >= 0) {
+
+ list_add(&new_entry->list, &iter_entry->list);
+ break;
+ }
+ }
+
+ /* Largest value so far: it goes at the head of the list */
+ if (list_empty(&new_entry->list))
+ list_add(&new_entry->list, &session->stat_list);
+ }
+exit:
+ mutex_unlock(&session->stat_mutex);
+ return ret;
+
+exit_free_list:
+ reset_stat_session(session);
+ mutex_unlock(&session->stat_mutex);
+ return ret;
+}
+
+
+static void *stat_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct tracer_stat_session *session = s->private;
+
+ /* Prevent a tracer switch or a stat_list modification */
+ mutex_lock(&session->stat_mutex);
+
+ /* If we are at the beginning of the file, print the headers */
+ if (!*pos && session->ts->stat_headers)
+ return SEQ_START_TOKEN;
+
+ return seq_list_start(&session->stat_list, *pos);
+}
+
+static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
+{
+ struct tracer_stat_session *session = s->private;
+
+ if (p == SEQ_START_TOKEN)
+ return seq_list_start(&session->stat_list, *pos);
+
+ return seq_list_next(p, &session->stat_list, pos);
+}
+
+static void stat_seq_stop(struct seq_file *s, void *p)
+{
+ struct tracer_stat_session *session = s->private;
+ mutex_unlock(&session->stat_mutex);
+}
+
+static int stat_seq_show(struct seq_file *s, void *v)
+{
+ struct tracer_stat_session *session = s->private;
+ struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
+
+ if (v == SEQ_START_TOKEN)
+ return session->ts->stat_headers(s);
+
+ return session->ts->stat_show(s, l->stat);
+}
+
+static const struct seq_operations trace_stat_seq_ops = {
+ .start = stat_seq_start,
+ .next = stat_seq_next,
+ .stop = stat_seq_stop,
+ .show = stat_seq_show
+};
+
+/* The session stat list is refilled and re-sorted at each stat file open */
+static int tracing_stat_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ struct tracer_stat_session *session = inode->i_private;
+
+ ret = seq_open(file, &trace_stat_seq_ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = session;
+ ret = stat_seq_init(session);
+ }
+
+ return ret;
+}
+
+/*
+ * Avoid consuming memory with our now useless list.
+ */
+static int tracing_stat_release(struct inode *i, struct file *f)
+{
+ struct tracer_stat_session *session = i->i_private;
+
+ mutex_lock(&session->stat_mutex);
+ reset_stat_session(session);
+ mutex_unlock(&session->stat_mutex);
+
+ return 0;
+}
+
+static const struct file_operations tracing_stat_fops = {
+ .open = tracing_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tracing_stat_release
+};
+
+static int tracing_stat_init(void)
+{
+ struct dentry *d_tracing;
+
+ d_tracing = tracing_init_dentry();
+
+ stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ if (!stat_dir)
+ pr_warning("Could not create debugfs "
+ "'trace_stat' entry\n");
+ return 0;
+}
+
+static int init_stat_file(struct tracer_stat_session *session)
+{
+ if (!stat_dir && tracing_stat_init())
+ return -ENODEV;
+
+ session->file = debugfs_create_file(session->ts->name, 0644,
+ stat_dir,
+ session, &tracing_stat_fops);
+ if (!session->file)
+ return -ENOMEM;
+ return 0;
+}
+
+int register_stat_tracer(struct tracer_stat *trace)
+{
+ struct tracer_stat_session *session, *node, *tmp;
+ int ret;
+
+ if (!trace)
+ return -EINVAL;
+
+ if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
+ return -EINVAL;
+
+ /* Already registered? */
+ mutex_lock(&all_stat_sessions_mutex);
+ list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
+ if (node->ts == trace) {
+ mutex_unlock(&all_stat_sessions_mutex);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&all_stat_sessions_mutex);
+
+ /* Init the session */
+ session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
+ session->ts = trace;
+ INIT_LIST_HEAD(&session->session_list);
+ INIT_LIST_HEAD(&session->stat_list);
+ mutex_init(&session->stat_mutex);
+ session->file = NULL;
+
+ ret = init_stat_file(session);
+ if (ret) {
+ destroy_session(session);
+ return ret;
+ }
+
+ /* Register */
+ mutex_lock(&all_stat_sessions_mutex);
+ list_add_tail(&session->session_list, &all_stat_sessions);
+ mutex_unlock(&all_stat_sessions_mutex);
+
+ return 0;
+}
+
+void unregister_stat_tracer(struct tracer_stat *trace)
+{
+ struct tracer_stat_session *node, *tmp;
+
+ mutex_lock(&all_stat_sessions_mutex);
+ list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
+ if (node->ts == trace) {
+ list_del(&node->session_list);
+ destroy_session(node);
+ break;
+ }
+ }
+ mutex_unlock(&all_stat_sessions_mutex);
+}
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
new file mode 100644
index 000000000000..202274cf7f3d
--- /dev/null
+++ b/kernel/trace/trace_stat.h
@@ -0,0 +1,31 @@
+#ifndef __TRACE_STAT_H
+#define __TRACE_STAT_H
+
+#include <linux/seq_file.h>
+
+/*
+ * If you want to provide a stat file (one-shot statistics), fill
+ * in the stat_start/stat_next iterator callbacks and a stat_show
+ * callback. The other callbacks are optional.
+ */
+struct tracer_stat {
+ /* The name of your stat file */
+ const char *name;
+ /* Iteration over statistic entries */
+ void *(*stat_start)(void);
+ void *(*stat_next)(void *prev, int idx);
+ /* Compare two entries for stats sorting */
+ int (*stat_cmp)(void *p1, void *p2);
+ /* Print a stat entry */
+ int (*stat_show)(struct seq_file *s, void *p);
+ /* Print the headers of your stat entries */
+ int (*stat_headers)(struct seq_file *s);
+};
+
+/*
+ * Create or destroy a stat file
+ */
+extern int register_stat_tracer(struct tracer_stat *trace);
+extern void unregister_stat_tracer(struct tracer_stat *trace);
+
+#endif /* __TRACE_STAT_H */
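
For illustration only (an editor's sketch, not part of this patch): a minimal user of the interface above fills a struct tracer_stat with its iterator callbacks and registers it once at init time. All names below (my_*) and the counter array are hypothetical; the real in-tree user is the workqueue stat tracer added further down in this patch.

/* Hypothetical stat tracer -- illustration only, not part of the patch */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include "trace_stat.h"

static unsigned long my_counts[4];		/* whatever is being counted */

static void *my_stat_start(void)
{
	return &my_counts[0];			/* first entry, NULL if none */
}

static void *my_stat_next(void *prev, int idx)
{
	if (idx >= ARRAY_SIZE(my_counts))
		return NULL;			/* NULL ends the iteration */
	return &my_counts[idx];
}

static int my_stat_show(struct seq_file *s, void *p)
{
	return seq_printf(s, "%lu\n", *(unsigned long *)p);
}

static int my_stat_headers(struct seq_file *s)
{
	return seq_printf(s, "# COUNT\n");
}

static struct tracer_stat my_stats = {
	.name		= "my_stats",		/* becomes trace_stat/my_stats */
	.stat_start	= my_stat_start,
	.stat_next	= my_stat_next,
	.stat_show	= my_stat_show,
	.stat_headers	= my_stat_headers,
	/* no .stat_cmp: entries are simply appended in iteration order */
};

static int __init my_stats_init(void)
{
	return register_stat_tracer(&my_stats);
}
fs_initcall(my_stats_init);

Every open of the resulting trace_stat file re-runs stat_start/stat_next and rebuilds the list, as stat_seq_init() above does.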
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
new file mode 100644
index 000000000000..5e579645ac86
--- /dev/null
+++ b/kernel/trace/trace_syscalls.c
@@ -0,0 +1,250 @@
+#include <trace/syscall.h>
+#include <linux/kernel.h>
+#include <asm/syscall.h>
+
+#include "trace_output.h"
+#include "trace.h"
+
+/* Keep a counter of the syscall tracing users */
+static int refcount;
+
+/* Prevent races when toggling the per-thread flags */
+static DEFINE_MUTEX(syscall_trace_lock);
+
+/* Option to display the parameter types */
+enum {
+ TRACE_SYSCALLS_OPT_TYPES = 0x1,
+};
+
+static struct tracer_opt syscalls_opts[] = {
+ { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
+ { }
+};
+
+static struct tracer_flags syscalls_flags = {
+ .val = 0, /* By default: no parameter types */
+ .opts = syscalls_opts
+};
+
+enum print_line_t
+print_syscall_enter(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *ent = iter->ent;
+ struct syscall_trace_enter *trace;
+ struct syscall_metadata *entry;
+ int i, ret, syscall;
+
+ trace_assign_type(trace, ent);
+
+ syscall = trace->nr;
+
+ entry = syscall_nr_to_meta(syscall);
+ if (!entry)
+ goto end;
+
+ ret = trace_seq_printf(s, "%s(", entry->name);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ for (i = 0; i < entry->nb_args; i++) {
+ /* parameter types */
+ if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
+ ret = trace_seq_printf(s, "%s ", entry->types[i]);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+ /* parameter values */
+ ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
+ trace->args[i],
+ i == entry->nb_args - 1 ? ")" : ",");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+end:
+ trace_seq_printf(s, "\n");
+ return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_syscall_exit(struct trace_iterator *iter, int flags)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *ent = iter->ent;
+ struct syscall_trace_exit *trace;
+ int syscall;
+ struct syscall_metadata *entry;
+ int ret;
+
+ trace_assign_type(trace, ent);
+
+ syscall = trace->nr;
+
+ entry = syscall_nr_to_meta(syscall);
+ if (!entry) {
+ trace_seq_printf(s, "\n");
+ return TRACE_TYPE_HANDLED;
+ }
+
+ ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
+ trace->ret);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
+void start_ftrace_syscalls(void)
+{
+ unsigned long flags;
+ struct task_struct *g, *t;
+
+ mutex_lock(&syscall_trace_lock);
+
+ /* Don't enable the flag on the tasks twice */
+ if (++refcount != 1)
+ goto unlock;
+
+ arch_init_ftrace_syscalls();
+ read_lock_irqsave(&tasklist_lock, flags);
+
+ do_each_thread(g, t) {
+ set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
+ } while_each_thread(g, t);
+
+ read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
+}
+
+void stop_ftrace_syscalls(void)
+{
+ unsigned long flags;
+ struct task_struct *g, *t;
+
+ mutex_lock(&syscall_trace_lock);
+
+ /* There may still be other users */
+ if (--refcount)
+ goto unlock;
+
+ read_lock_irqsave(&tasklist_lock, flags);
+
+ do_each_thread(g, t) {
+ clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
+ } while_each_thread(g, t);
+
+ read_unlock_irqrestore(&tasklist_lock, flags);
+
+unlock:
+ mutex_unlock(&syscall_trace_lock);
+}
+
+void ftrace_syscall_enter(struct pt_regs *regs)
+{
+ struct syscall_trace_enter *entry;
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ int size;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+
+ sys_data = syscall_nr_to_meta(syscall_nr);
+ if (!sys_data)
+ return;
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
+ event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
+ 0, 0);
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+ entry->nr = syscall_nr;
+ syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
+
+ trace_current_buffer_unlock_commit(event, 0, 0);
+ trace_wake_up();
+}
+
+void ftrace_syscall_exit(struct pt_regs *regs)
+{
+ struct syscall_trace_exit *entry;
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+
+ sys_data = syscall_nr_to_meta(syscall_nr);
+ if (!sys_data)
+ return;
+
+ event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
+ sizeof(*entry), 0, 0);
+ if (!event)
+ return;
+
+ entry = ring_buffer_event_data(event);
+ entry->nr = syscall_nr;
+ entry->ret = syscall_get_return_value(current, regs);
+
+ trace_current_buffer_unlock_commit(event, 0, 0);
+ trace_wake_up();
+}
+
+static int init_syscall_tracer(struct trace_array *tr)
+{
+ start_ftrace_syscalls();
+
+ return 0;
+}
+
+static void reset_syscall_tracer(struct trace_array *tr)
+{
+ stop_ftrace_syscalls();
+ tracing_reset_online_cpus(tr);
+}
+
+static struct trace_event syscall_enter_event = {
+ .type = TRACE_SYSCALL_ENTER,
+ .trace = print_syscall_enter,
+};
+
+static struct trace_event syscall_exit_event = {
+ .type = TRACE_SYSCALL_EXIT,
+ .trace = print_syscall_exit,
+};
+
+static struct tracer syscall_tracer __read_mostly = {
+ .name = "syscall",
+ .init = init_syscall_tracer,
+ .reset = reset_syscall_tracer,
+ .flags = &syscalls_flags,
+};
+
+__init int register_ftrace_syscalls(void)
+{
+ int ret;
+
+ ret = register_ftrace_event(&syscall_enter_event);
+ if (!ret) {
+ printk(KERN_WARNING "event %d failed to register\n",
+ syscall_enter_event.type);
+ WARN_ON_ONCE(1);
+ }
+
+ ret = register_ftrace_event(&syscall_exit_event);
+ if (!ret) {
+ printk(KERN_WARNING "event %d failed to register\n",
+ syscall_exit_event.type);
+ WARN_ON_ONCE(1);
+ }
+
+ return register_tracer(&syscall_tracer);
+}
+device_initcall(register_ftrace_syscalls);
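
For orientation (an editor's note, not part of this patch): ftrace_syscall_enter()/ftrace_syscall_exit() above are meant to be called from the architecture's syscall slow path whenever the per-task TIF_SYSCALL_FTRACE flag set by start_ftrace_syscalls() is present. Roughly, with the entry-point names below being purely illustrative:

/* Illustrative arch-side wiring -- the real code lives under arch/, names are hypothetical */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <trace/syscall.h>

asmlinkage void my_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_FTRACE))
		ftrace_syscall_enter(regs);
	/* ... existing ptrace/audit handling ... */
}

asmlinkage void my_syscall_trace_leave(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_FTRACE))
		ftrace_syscall_exit(regs);
	/* ... existing ptrace/audit handling ... */
}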
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index eaca5ad803ff..91fd19c2149f 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
}
}
-const static struct stacktrace_ops backtrace_ops = {
+static const struct stacktrace_ops backtrace_ops = {
.warning = backtrace_warning,
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
@@ -226,15 +226,6 @@ static void stop_stack_timers(void)
stop_stack_timer(cpu);
}
-static void start_stack_trace(struct trace_array *tr)
-{
- mutex_lock(&sample_timer_lock);
- tracing_reset_online_cpus(tr);
- start_stack_timers();
- tracer_enabled = 1;
- mutex_unlock(&sample_timer_lock);
-}
-
static void stop_stack_trace(struct trace_array *tr)
{
mutex_lock(&sample_timer_lock);
@@ -247,12 +238,18 @@ static int stack_trace_init(struct trace_array *tr)
{
sysprof_trace = tr;
- start_stack_trace(tr);
+ tracing_start_cmdline_record();
+
+ mutex_lock(&sample_timer_lock);
+ start_stack_timers();
+ tracer_enabled = 1;
+ mutex_unlock(&sample_timer_lock);
return 0;
}
static void stack_trace_reset(struct trace_array *tr)
{
+ tracing_stop_cmdline_record();
stop_stack_trace(tr);
}
@@ -317,7 +314,7 @@ sysprof_sample_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static struct file_operations sysprof_sample_fops = {
+static const struct file_operations sysprof_sample_fops = {
.read = sysprof_sample_read,
.write = sysprof_sample_write,
};
@@ -330,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
d_tracer, NULL, &sysprof_sample_fops);
if (entry)
return;
- pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n");
+ pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
new file mode 100644
index 000000000000..797201e4a137
--- /dev/null
+++ b/kernel/trace/trace_workqueue.c
@@ -0,0 +1,288 @@
+/*
+ * Workqueue statistical tracer.
+ *
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ */
+
+
+#include <trace/workqueue.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* A cpu workqueue thread */
+struct cpu_workqueue_stats {
+ struct list_head list;
+/* Useful to know if we print the cpu headers */
+ bool first_entry;
+ int cpu;
+ pid_t pid;
+/* Can be inserted from interrupt or user context, so it needs to be atomic */
+ atomic_t inserted;
+/*
+ * Doesn't need to be atomic: works are serialized by a single workqueue thread
+ * on a single CPU.
+ */
+ unsigned int executed;
+};
+
+/* List of workqueue threads on one cpu */
+struct workqueue_global_stats {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+/* No global lock is needed: these are allocated before the workqueues and
+ * never freed.
+ */
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
+
+/* Insertion of a work */
+static void
+probe_workqueue_insertion(struct task_struct *wq_thread,
+ struct work_struct *work)
+{
+ int cpu = cpumask_first(&wq_thread->cpus_allowed);
+ struct cpu_workqueue_stats *node, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+ list) {
+ if (node->pid == wq_thread->pid) {
+ atomic_inc(&node->inserted);
+ goto found;
+ }
+ }
+ pr_debug("trace_workqueue: entry not found\n");
+found:
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Execution of a work */
+static void
+probe_workqueue_execution(struct task_struct *wq_thread,
+ struct work_struct *work)
+{
+ int cpu = cpumask_first(&wq_thread->cpus_allowed);
+ struct cpu_workqueue_stats *node, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+ list) {
+ if (node->pid == wq_thread->pid) {
+ node->executed++;
+ goto found;
+ }
+ }
+ pr_debug("trace_workqueue: entry not found\n");
+found:
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Creation of a cpu workqueue thread */
+static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
+{
+ struct cpu_workqueue_stats *cws;
+ unsigned long flags;
+
+ WARN_ON(cpu < 0);
+
+ /* Workqueues are sometimes created in atomic context */
+ cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
+ if (!cws) {
+ pr_warning("trace_workqueue: not enough memory\n");
+ return;
+ }
+ INIT_LIST_HEAD(&cws->list);
+ cws->cpu = cpu;
+
+ cws->pid = wq_thread->pid;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ if (list_empty(&workqueue_cpu_stat(cpu)->list))
+ cws->first_entry = true;
+ list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+}
+
+/* Destruction of a cpu workqueue thread */
+static void probe_workqueue_destruction(struct task_struct *wq_thread)
+{
+ /* A workqueue thread only executes on one cpu */
+ int cpu = cpumask_first(&wq_thread->cpus_allowed);
+ struct cpu_workqueue_stats *node, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
+ list) {
+ if (node->pid == wq_thread->pid) {
+ list_del(&node->list);
+ kfree(node);
+ goto found;
+ }
+ }
+
+ pr_debug("trace_workqueue: don't find workqueue to destroy\n");
+found:
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+}
+
+static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
+{
+ unsigned long flags;
+ struct cpu_workqueue_stats *ret = NULL;
+
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+
+ if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+ ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
+ struct cpu_workqueue_stats, list);
+
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+ return ret;
+}
+
+static void *workqueue_stat_start(void)
+{
+ int cpu;
+ void *ret = NULL;
+
+ for_each_possible_cpu(cpu) {
+ ret = workqueue_stat_start_cpu(cpu);
+ if (ret)
+ return ret;
+ }
+ return NULL;
+}
+
+static void *workqueue_stat_next(void *prev, int idx)
+{
+ struct cpu_workqueue_stats *prev_cws = prev;
+ int cpu = prev_cws->cpu;
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+ do {
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+ } while (!(ret = workqueue_stat_start_cpu(cpu)));
+ return ret;
+ }
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+ return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
+ list);
+}
+
+static int workqueue_stat_show(struct seq_file *s, void *p)
+{
+ struct cpu_workqueue_stats *cws = p;
+ unsigned long flags;
+ int cpu = cws->cpu;
+ struct pid *pid;
+ struct task_struct *tsk;
+
+ spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+ if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+ seq_printf(s, "\n");
+ spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
+ pid = find_get_pid(cws->pid);
+ if (pid) {
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (tsk) {
+ seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+ atomic_read(&cws->inserted), cws->executed,
+ tsk->comm);
+ put_task_struct(tsk);
+ }
+ put_pid(pid);
+ }
+
+ return 0;
+}
+
+static int workqueue_stat_headers(struct seq_file *s)
+{
+ seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
+ seq_printf(s, "# | | | |\n");
+ return 0;
+}
+
+struct tracer_stat workqueue_stats __read_mostly = {
+ .name = "workqueues",
+ .stat_start = workqueue_stat_start,
+ .stat_next = workqueue_stat_next,
+ .stat_show = workqueue_stat_show,
+ .stat_headers = workqueue_stat_headers
+};
+
+
+int __init stat_workqueue_init(void)
+{
+ if (register_stat_tracer(&workqueue_stats)) {
+ pr_warning("Unable to register workqueue stat tracer\n");
+ return 1;
+ }
+
+ return 0;
+}
+fs_initcall(stat_workqueue_init);
+
+/*
+ * Workqueues are created very early, just after pre-smp initcalls.
+ * So we must register our tracepoints at this stage.
+ */
+int __init trace_workqueue_early_init(void)
+{
+ int ret, cpu;
+
+ ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
+ if (ret)
+ goto out;
+
+ ret = register_trace_workqueue_execution(probe_workqueue_execution);
+ if (ret)
+ goto no_insertion;
+
+ ret = register_trace_workqueue_creation(probe_workqueue_creation);
+ if (ret)
+ goto no_execution;
+
+ ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
+ if (ret)
+ goto no_creation;
+
+ for_each_possible_cpu(cpu) {
+ spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+ INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
+ }
+
+ return 0;
+
+no_creation:
+ unregister_trace_workqueue_creation(probe_workqueue_creation);
+no_execution:
+ unregister_trace_workqueue_execution(probe_workqueue_execution);
+no_insertion:
+ unregister_trace_workqueue_insertion(probe_workqueue_insertion);
+out:
+ pr_warning("trace_workqueue: unable to trace workqueues\n");
+
+ return 1;
+}
+early_initcall(trace_workqueue_early_init);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 79602740bbb5..1ef5d3a601c7 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -272,12 +272,15 @@ static void disable_tracepoint(struct tracepoint *elem)
*
* Updates the probe callback corresponding to a range of tracepoints.
*/
-void tracepoint_update_probe_range(struct tracepoint *begin,
- struct tracepoint *end)
+void
+tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
{
struct tracepoint *iter;
struct tracepoint_entry *mark_entry;
+ if (!begin)
+ return;
+
mutex_lock(&tracepoints_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_tracepoint(iter->name);
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 3b34b3545936..92359cc747a7 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -37,7 +37,7 @@ static void put_uts(ctl_table *table, int write, void *which)
up_write(&uts_sem);
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_PROC_SYSCTL
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d3..f71fb2a08950 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,7 @@
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
+#include <trace/workqueue.h>
/*
* The per-CPU workqueue (if single thread, we always use the first
@@ -48,8 +49,6 @@ struct cpu_workqueue_struct {
struct workqueue_struct *wq;
struct task_struct *thread;
-
- int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
@@ -125,9 +124,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
+DEFINE_TRACE(workqueue_insertion);
+
static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head)
{
+ trace_workqueue_insertion(cwq->thread, work);
+
set_wq_data(work, cwq);
/*
* Ensure that we get the right work->data if we see the
@@ -259,16 +262,11 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
+DEFINE_TRACE(workqueue_execution);
+
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
spin_lock_irq(&cwq->lock);
- cwq->run_depth++;
- if (cwq->run_depth > 3) {
- /* morton gets to eat his hat */
- printk("%s: recursion depth exceeded: %d\n",
- __func__, cwq->run_depth);
- dump_stack();
- }
while (!list_empty(&cwq->worklist)) {
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
@@ -284,7 +282,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
*/
struct lockdep_map lockdep_map = work->lockdep_map;
#endif
-
+ trace_workqueue_execution(cwq->thread, work);
cwq->current_work = work;
list_del_init(cwq->worklist.next);
spin_unlock_irq(&cwq->lock);
@@ -311,7 +309,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
spin_lock_irq(&cwq->lock);
cwq->current_work = NULL;
}
- cwq->run_depth--;
spin_unlock_irq(&cwq->lock);
}
@@ -368,29 +365,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
- int active;
-
- if (cwq->thread == current) {
- /*
- * Probably keventd trying to flush its own queue. So simply run
- * it by hand rather than deadlocking.
- */
- run_workqueue(cwq);
- active = 1;
- } else {
- struct wq_barrier barr;
+ int active = 0;
+ struct wq_barrier barr;
- active = 0;
- spin_lock_irq(&cwq->lock);
- if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
- insert_wq_barrier(cwq, &barr, &cwq->worklist);
- active = 1;
- }
- spin_unlock_irq(&cwq->lock);
+ WARN_ON(cwq->thread == current);
- if (active)
- wait_for_completion(&barr.done);
+ spin_lock_irq(&cwq->lock);
+ if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+ insert_wq_barrier(cwq, &barr, &cwq->worklist);
+ active = 1;
}
+ spin_unlock_irq(&cwq->lock);
+
+ if (active)
+ wait_for_completion(&barr.done);
return active;
}
@@ -416,7 +404,7 @@ void flush_workqueue(struct workqueue_struct *wq)
might_sleep();
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +535,7 @@ static void wait_on_work(struct work_struct *work)
wq = cwq->wq;
cpu_map = wq_cpu_map(wq);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -765,6 +753,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
return cwq;
}
+DEFINE_TRACE(workqueue_creation);
+
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -787,6 +777,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
cwq->thread = p;
+ trace_workqueue_creation(cwq->thread, cpu);
+
return 0;
}
@@ -868,6 +860,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
+DEFINE_TRACE(workqueue_destruction);
+
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
/*
@@ -891,6 +885,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
* checks list_empty(), and a "normal" queue_work() can't use
* a dead CPU.
*/
+ trace_workqueue_destruction(cwq->thread);
kthread_stop(cwq->thread);
cwq->thread = NULL;
}
@@ -911,7 +906,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del(&wq->list);
spin_unlock(&workqueue_lock);
- for_each_cpu_mask_nr(cpu, *cpu_map)
+ for_each_cpu(cpu, cpu_map)
cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
cpu_maps_update_done();
@@ -971,20 +966,20 @@ undo:
}
#ifdef CONFIG_SMP
-static struct workqueue_struct *work_on_cpu_wq __read_mostly;
struct work_for_cpu {
- struct work_struct work;
+ struct completion completion;
long (*fn)(void *);
void *arg;
long ret;
};
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
{
- struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
-
+ struct work_for_cpu *wfc = _wfc;
wfc->ret = wfc->fn(wfc->arg);
+ complete(&wfc->completion);
+ return 0;
}
/**
@@ -995,17 +990,23 @@ static void do_work_for_cpu(struct work_struct *w)
*
* This will return the value @fn returns.
* It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
*/
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
- struct work_for_cpu wfc;
-
- INIT_WORK(&wfc.work, do_work_for_cpu);
- wfc.fn = fn;
- wfc.arg = arg;
- queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
- flush_work(&wfc.work);
-
+ struct task_struct *sub_thread;
+ struct work_for_cpu wfc = {
+ .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+ .fn = fn,
+ .arg = arg,
+ };
+
+ sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+ if (IS_ERR(sub_thread))
+ return PTR_ERR(sub_thread);
+ kthread_bind(sub_thread, cpu);
+ wake_up_process(sub_thread);
+ wait_for_completion(&wfc.completion);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
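
A usage note (an editor's illustration, not part of this patch): with the kthread-based implementation above, work_on_cpu() runs the callback in a freshly created kernel thread bound to the target CPU and sleeps until it completes. The callback and demo initcall below are hypothetical and assume CPU 1 is present and online.

/* Hypothetical work_on_cpu() caller -- illustration only */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static long show_cpu(void *arg)
{
	/* Runs in a kthread bound to the requested CPU; it may sleep. */
	pr_info("work_on_cpu callback on cpu %d\n", raw_smp_processor_id());
	return 0;
}

static int __init work_on_cpu_demo(void)
{
	/* Must be called from sleepable context, without locks @fn might need. */
	long ret = work_on_cpu(1, show_cpu, NULL);

	return ret < 0 ? ret : 0;		/* negative: kthread_create() failed */
}
device_initcall(work_on_cpu_demo);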
@@ -1021,8 +1022,4 @@ void __init init_workqueues(void)
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);
-#ifdef CONFIG_SMP
- work_on_cpu_wq = create_workqueue("work_on_cpu");
- BUG_ON(!work_on_cpu_wq);
-#endif
}