Diffstat (limited to 'kernel')
-rw-r--r-- kernel/Kconfig.preempt                |   3
-rw-r--r-- kernel/async.c                        |  12
-rw-r--r-- kernel/audit.c                        |  29
-rw-r--r-- kernel/audit_tree.c                   |   8
-rw-r--r-- kernel/events/Makefile                |   2
-rw-r--r-- kernel/events/core.c                  | 938
-rw-r--r-- kernel/events/hw_breakpoint.c         |  10
-rw-r--r-- kernel/events/internal.h              |  96
-rw-r--r-- kernel/events/ring_buffer.c           | 380
-rw-r--r-- kernel/exit.c                         |  91
-rw-r--r-- kernel/fork.c                         |  35
-rw-r--r-- kernel/futex.c                        |   2
-rw-r--r-- kernel/irq/generic-chip.c             |  18
-rw-r--r-- kernel/jump_label.c                   |  14
-rw-r--r-- kernel/kprobes.c                      |  33
-rw-r--r-- kernel/lockdep.c                      |  33
-rw-r--r-- kernel/pm_qos_params.c                |   6
-rw-r--r-- kernel/power/Kconfig                  |   8
-rw-r--r-- kernel/power/main.c                   |   5
-rw-r--r-- kernel/power/snapshot.c               |   6
-rw-r--r-- kernel/power/suspend.c                |  20
-rw-r--r-- kernel/power/user.c                   |   4
-rw-r--r-- kernel/printk.c                       |  24
-rw-r--r-- kernel/ptrace.c                       | 197
-rw-r--r-- kernel/rcutree.c                      |  26
-rw-r--r-- kernel/rcutree_plugin.h               |  68
-rw-r--r-- kernel/resource.c                     | 116
-rw-r--r-- kernel/rtmutex.c                      |   2
-rw-r--r-- kernel/sched.c                        | 368
-rw-r--r-- kernel/sched_autogroup.h              |   1
-rw-r--r-- kernel/sched_fair.c                   | 118
-rw-r--r-- kernel/sched_features.h               |   2
-rw-r--r-- kernel/sched_rt.c                     |  26
-rw-r--r-- kernel/signal.c                       | 444
-rw-r--r-- kernel/softirq.c                      |  12
-rw-r--r-- kernel/stacktrace.c                   |  12
-rw-r--r-- kernel/sysctl.c                       |  11
-rw-r--r-- kernel/taskstats.c                    |  15
-rw-r--r-- kernel/time/alarmtimer.c              | 158
-rw-r--r-- kernel/trace/ftrace.c                 | 157
-rw-r--r-- kernel/trace/ring_buffer.c            |  66
-rw-r--r-- kernel/trace/ring_buffer_benchmark.c  |   2
-rw-r--r-- kernel/trace/trace.c                  | 326
-rw-r--r-- kernel/trace/trace.h                  |  61
-rw-r--r-- kernel/trace/trace_entries.h          |   3
-rw-r--r-- kernel/trace/trace_events.c           | 139
-rw-r--r-- kernel/trace/trace_events_filter.c    |   6
-rw-r--r-- kernel/trace/trace_functions.c        |   3
-rw-r--r-- kernel/trace/trace_functions_graph.c  | 225
-rw-r--r-- kernel/trace/trace_irqsoff.c          |   4
-rw-r--r-- kernel/trace/trace_kprobe.c           | 324
-rw-r--r-- kernel/trace/trace_output.c           |  11
-rw-r--r-- kernel/trace/trace_sched_wakeup.c     |   4
-rw-r--r-- kernel/trace/trace_stack.c            |  13
-rw-r--r-- kernel/watchdog.c                     |   8
-rw-r--r-- kernel/workqueue.c                    |  81
56 files changed, 2955 insertions, 1831 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index bf987b95b356..24e7cb0ba26a 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
config PREEMPT
bool "Preemptible Kernel (Low-Latency Desktop)"
+ select PREEMPT_COUNT
help
This option reduces the latency of the kernel by making
all kernel code (that is not executing in a critical section)
@@ -52,3 +53,5 @@ config PREEMPT
endchoice
+config PREEMPT_COUNT
+ bool
\ No newline at end of file
diff --git a/kernel/async.c b/kernel/async.c
index cd9dbb913c77..d5fe7af0de2e 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel.
*/
#include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
static async_cookie_t next_cookie = 1;
@@ -128,7 +129,8 @@ static void async_run_entry_fn(struct work_struct *work)
/* 2) run (and print duration) */
if (initcall_debug && system_state == SYSTEM_BOOTING) {
- printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+ printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
+ (long long)entry->cookie,
entry->func, task_pid_nr(current));
calltime = ktime_get();
}
@@ -136,7 +138,7 @@ static void async_run_entry_fn(struct work_struct *work)
if (initcall_debug && system_state == SYSTEM_BOOTING) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+ printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
(long long)entry->cookie,
entry->func,
(long long)ktime_to_ns(delta) >> 10);
@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
ktime_t starttime, delta, endtime;
if (initcall_debug && system_state == SYSTEM_BOOTING) {
- printk("async_waiting @ %i\n", task_pid_nr(current));
+ printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
starttime = ktime_get();
}
@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
endtime = ktime_get();
delta = ktime_sub(endtime, starttime);
- printk("async_continuing @ %i after %lli usec\n",
+ printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
task_pid_nr(current),
(long long)ktime_to_ns(delta) >> 10);
}
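
The async.c hunks only add KERN_DEBUG levels and swap <asm/atomic.h> for the proper <linux/atomic.h>/<linux/ktime.h> headers. As a minimal sketch (not part of the patch; names invented) of the timing pattern those printk lines sit in:

#include <linux/ktime.h>
#include <linux/printk.h>

/* Sketch of the initcall_debug-style duration measurement used above. */
static void example_timed_call(void (*func)(void))
{
	ktime_t calltime, rettime, delta;

	calltime = ktime_get();
	func();
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* ns >> 10 is the same cheap "microseconds" approximation */
	printk(KERN_DEBUG "call took %lld usecs\n",
	       (long long)ktime_to_ns(delta) >> 10);
}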
diff --git a/kernel/audit.c b/kernel/audit.c
index 939500317066..52501b5d4902 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -55,6 +55,9 @@
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
+#ifdef CONFIG_SECURITY
+#include <linux/security.h>
+#endif
#include <linux/netlink.h>
#include <linux/freezer.h>
#include <linux/tty.h>
@@ -1502,6 +1505,32 @@ void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
}
}
+#ifdef CONFIG_SECURITY
+/**
+ * audit_log_secctx - Converts and logs SELinux context
+ * @ab: audit_buffer
+ * @secid: security number
+ *
+ * This is a helper function that calls security_secid_to_secctx to convert
+ * secid to secctx and then adds the (converted) SELinux context to the audit
+ * log by calling audit_log_format, thus also preventing leak of internal secid
+ * to userspace. If secid cannot be converted audit_panic is called.
+ */
+void audit_log_secctx(struct audit_buffer *ab, u32 secid)
+{
+ u32 len;
+ char *secctx;
+
+ if (security_secid_to_secctx(secid, &secctx, &len)) {
+ audit_panic("Cannot convert secid to context");
+ } else {
+ audit_log_format(ab, " obj=%s", secctx);
+ security_release_secctx(secctx, len);
+ }
+}
+EXPORT_SYMBOL(audit_log_secctx);
+#endif
+
EXPORT_SYMBOL(audit_log_start);
EXPORT_SYMBOL(audit_log_end);
EXPORT_SYMBOL(audit_log_format);
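
A hedged usage sketch for the new audit_log_secctx() helper. The caller, record type, and "op=" field below are hypothetical; only the helper itself and the audit_log_start()/audit_log_end() bracketing come from the code above.

#include <linux/audit.h>

/* Hypothetical caller: log an operation plus the object's security context. */
static void example_audit_obj(struct audit_context *actx, u32 secid)
{
	struct audit_buffer *ab;

	ab = audit_log_start(actx, GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;
	audit_log_format(ab, "op=example");
	audit_log_secctx(ab, secid);	/* appends " obj=<secctx>" itself */
	audit_log_end(ab);
}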
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e99dda04b126..5bf0790497e7 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -93,16 +93,10 @@ static inline void get_tree(struct audit_tree *tree)
atomic_inc(&tree->count);
}
-static void __put_tree(struct rcu_head *rcu)
-{
- struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
- kfree(tree);
-}
-
static inline void put_tree(struct audit_tree *tree)
{
if (atomic_dec_and_test(&tree->count))
- call_rcu(&tree->head, __put_tree);
+ kfree_rcu(tree, head);
}
/* to avoid bringing the entire thing in audit.h */
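
The audit_tree.c hunk is the standard call_rcu()-to-kfree_rcu() conversion: when the RCU callback only does container_of() plus kfree(), the open-coded wrapper can go away. A minimal sketch of the general pattern, with a hypothetical example_obj standing in for struct audit_tree:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	atomic_t	count;
	struct rcu_head	head;	/* must be embedded for kfree_rcu() */
};

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->count))
		kfree_rcu(obj, head);	/* freed after a grace period */
}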
diff --git a/kernel/events/Makefile b/kernel/events/Makefile
index 1ce23d3d8394..89e5e8aa4c36 100644
--- a/kernel/events/Makefile
+++ b/kernel/events/Makefile
@@ -2,5 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_core.o = -pg
endif
-obj-y := core.o
+obj-y := core.o ring_buffer.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9efe7108ccaf..b8785e26ee1c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,8 @@
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
+#include "internal.h"
+
#include <asm/irq_regs.h>
struct remote_function_call {
@@ -200,6 +202,22 @@ __get_cpu_context(struct perf_event_context *ctx)
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ raw_spin_lock(&cpuctx->ctx.lock);
+ if (ctx)
+ raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ if (ctx)
+ raw_spin_unlock(&ctx->lock);
+ raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
#ifdef CONFIG_CGROUP_PERF
/*
@@ -340,11 +358,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
-
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- perf_pmu_disable(cpuctx->ctx.pmu);
-
/*
* perf_cgroup_events says at least one
* context on this CPU has cgroup events.
@@ -353,6 +368,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
* events for a context.
*/
if (cpuctx->ctx.nr_cgroups > 0) {
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+ perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
@@ -372,9 +389,9 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
-
- perf_pmu_enable(cpuctx->ctx.pmu);
}
rcu_read_unlock();
@@ -731,6 +748,7 @@ static u64 perf_event_time(struct perf_event *event)
/*
* Update the total_time_enabled and total_time_running fields for a event.
+ * The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
@@ -1105,6 +1123,10 @@ static int __perf_remove_from_context(void *info)
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
list_del_event(event, ctx);
+ if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+ ctx->is_active = 0;
+ cpuctx->task_ctx = NULL;
+ }
raw_spin_unlock(&ctx->lock);
return 0;
@@ -1454,8 +1476,24 @@ static void add_event_to_ctx(struct perf_event *event,
event->tstamp_stopped = tstamp;
}
-static void perf_event_context_sched_in(struct perf_event_context *ctx,
- struct task_struct *tsk);
+static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type,
+ struct task_struct *task);
+
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx,
+ struct task_struct *task)
+{
+ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+ if (ctx)
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+ if (ctx)
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
/*
* Cross CPU call to install and enable a performance event
@@ -1466,20 +1504,37 @@ static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
- struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
- int err;
+ struct perf_event_context *task_ctx = cpuctx->task_ctx;
+ struct task_struct *task = current;
+
+ perf_ctx_lock(cpuctx, task_ctx);
+ perf_pmu_disable(cpuctx->ctx.pmu);
/*
- * In case we're installing a new context to an already running task,
- * could also happen before perf_event_task_sched_in() on architectures
- * which do context switches with IRQs enabled.
+ * If there was an active task_ctx schedule it out.
*/
- if (ctx->task && !cpuctx->task_ctx)
- perf_event_context_sched_in(ctx, ctx->task);
+ if (task_ctx)
+ task_ctx_sched_out(task_ctx);
+
+ /*
+ * If the context we're installing events in is not the
+ * active task_ctx, flip them.
+ */
+ if (ctx->task && task_ctx != ctx) {
+ if (task_ctx)
+ raw_spin_unlock(&task_ctx->lock);
+ raw_spin_lock(&ctx->lock);
+ task_ctx = ctx;
+ }
+
+ if (task_ctx) {
+ cpuctx->task_ctx = task_ctx;
+ task = task_ctx->task;
+ }
+
+ cpu_ctx_sched_out(cpuctx, EVENT_ALL);
- raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
update_context_time(ctx);
/*
* update cgrp time only if current cgrp
@@ -1490,43 +1545,13 @@ static int __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx);
- if (!event_filter_match(event))
- goto unlock;
-
- /*
- * Don't put the event on if it is disabled or if
- * it is in a group and the group isn't on.
- */
- if (event->state != PERF_EVENT_STATE_INACTIVE ||
- (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
- goto unlock;
-
/*
- * An exclusive event can't go on if there are already active
- * hardware events, and no hardware event can go on if there
- * is already an exclusive event on.
+ * Schedule everything back in
*/
- if (!group_can_go_on(event, cpuctx, 1))
- err = -EEXIST;
- else
- err = event_sched_in(event, cpuctx, ctx);
-
- if (err) {
- /*
- * This event couldn't go on. If it is in a group
- * then we have to pull the whole group off.
- * If the event group is pinned then put it in error state.
- */
- if (leader != event)
- group_sched_out(leader, cpuctx, ctx);
- if (leader->attr.pinned) {
- update_group_times(leader);
- leader->state = PERF_EVENT_STATE_ERROR;
- }
- }
+ perf_event_sched_in(cpuctx, task_ctx, task);
-unlock:
- raw_spin_unlock(&ctx->lock);
+ perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, task_ctx);
return 0;
}
@@ -1739,7 +1764,7 @@ out:
raw_spin_unlock_irq(&ctx->lock);
}
-static int perf_event_refresh(struct perf_event *event, int refresh)
+int perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
@@ -1752,36 +1777,35 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
return 0;
}
+EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
struct perf_event *event;
+ int is_active = ctx->is_active;
- raw_spin_lock(&ctx->lock);
- perf_pmu_disable(ctx->pmu);
- ctx->is_active = 0;
+ ctx->is_active &= ~event_type;
if (likely(!ctx->nr_events))
- goto out;
+ return;
+
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
-
if (!ctx->nr_active)
- goto out;
+ return;
- if (event_type & EVENT_PINNED) {
+ perf_pmu_disable(ctx->pmu);
+ if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
- if (event_type & EVENT_FLEXIBLE) {
+ if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
-out:
perf_pmu_enable(ctx->pmu);
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -1929,8 +1953,10 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
rcu_read_unlock();
if (do_switch) {
+ raw_spin_lock(&ctx->lock);
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
+ raw_spin_unlock(&ctx->lock);
}
}
@@ -1965,8 +1991,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
perf_cgroup_sched_out(task);
}
-static void task_ctx_sched_out(struct perf_event_context *ctx,
- enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
@@ -1976,7 +2001,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- ctx_sched_out(ctx, cpuctx, event_type);
+ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
@@ -2055,11 +2080,11 @@ ctx_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
u64 now;
+ int is_active = ctx->is_active;
- raw_spin_lock(&ctx->lock);
- ctx->is_active = 1;
+ ctx->is_active |= event_type;
if (likely(!ctx->nr_events))
- goto out;
+ return;
now = perf_clock();
ctx->timestamp = now;
@@ -2068,15 +2093,12 @@ ctx_sched_in(struct perf_event_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (event_type & EVENT_PINNED)
+ if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
- if (event_type & EVENT_FLEXIBLE)
+ if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
ctx_flexible_sched_in(ctx, cpuctx);
-
-out:
- raw_spin_unlock(&ctx->lock);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
@@ -2088,19 +2110,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
ctx_sched_in(ctx, cpuctx, event_type, task);
}
-static void task_ctx_sched_in(struct perf_event_context *ctx,
- enum event_type_t event_type)
-{
- struct perf_cpu_context *cpuctx;
-
- cpuctx = __get_cpu_context(ctx);
- if (cpuctx->task_ctx == ctx)
- return;
-
- ctx_sched_in(ctx, cpuctx, event_type, NULL);
- cpuctx->task_ctx = ctx;
-}
-
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
@@ -2110,6 +2119,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
if (cpuctx->task_ctx == ctx)
return;
+ perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
@@ -2118,18 +2128,18 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
- ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+ perf_event_sched_in(cpuctx, ctx, task);
cpuctx->task_ctx = ctx;
+ perf_pmu_enable(ctx->pmu);
+ perf_ctx_unlock(cpuctx, ctx);
+
/*
* Since these rotations are per-cpu, we need to ensure the
* cpu-context we got scheduled on is actually rotating.
*/
perf_pmu_rotate_start(ctx->pmu);
- perf_pmu_enable(ctx->pmu);
}
/*
@@ -2269,7 +2279,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
u64 interrupts, now;
s64 delta;
- raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
@@ -2301,7 +2310,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
if (delta > 0)
perf_adjust_period(event, period, delta);
}
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -2309,16 +2317,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
*/
static void rotate_ctx(struct perf_event_context *ctx)
{
- raw_spin_lock(&ctx->lock);
-
/*
* Rotate the first entry last of non-pinned groups. Rotation might be
* disabled by the inheritance code.
*/
if (!ctx->rotate_disable)
list_rotate_left(&ctx->flexible_groups);
-
- raw_spin_unlock(&ctx->lock);
}
/*
@@ -2345,6 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
rotate = 1;
}
+ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
perf_ctx_adjust_freq(&cpuctx->ctx, interval);
if (ctx)
@@ -2355,21 +2360,20 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx)
- task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
- if (ctx)
- task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+ perf_event_sched_in(cpuctx, ctx, current);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
perf_pmu_enable(cpuctx->ctx.pmu);
+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
void perf_event_task_tick(void)
@@ -2424,9 +2428,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
* in.
*/
perf_cgroup_sched_out(current);
- task_ctx_sched_out(ctx, EVENT_ALL);
raw_spin_lock(&ctx->lock);
+ task_ctx_sched_out(ctx);
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
ret = event_enable_on_exec(event, ctx);
@@ -2835,16 +2839,12 @@ retry:
unclone_ctx(ctx);
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- }
-
- if (!ctx) {
+ } else {
ctx = alloc_perf_context(pmu, task);
err = -ENOMEM;
if (!ctx)
goto errout;
- get_ctx(ctx);
-
err = 0;
mutex_lock(&task->perf_event_mutex);
/*
@@ -2856,14 +2856,14 @@ retry:
else if (task->perf_event_ctxp[ctxn])
err = -EAGAIN;
else {
+ get_ctx(ctx);
++ctx->pin_count;
rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
}
mutex_unlock(&task->perf_event_mutex);
if (unlikely(err)) {
- put_task_struct(task);
- kfree(ctx);
+ put_ctx(ctx);
if (err == -EAGAIN)
goto retry;
@@ -2890,7 +2890,7 @@ static void free_event_rcu(struct rcu_head *head)
kfree(event);
}
-static void perf_buffer_put(struct perf_buffer *buffer);
+static void ring_buffer_put(struct ring_buffer *rb);
static void free_event(struct perf_event *event)
{
@@ -2913,9 +2913,9 @@ static void free_event(struct perf_event *event)
}
}
- if (event->buffer) {
- perf_buffer_put(event->buffer);
- event->buffer = NULL;
+ if (event->rb) {
+ ring_buffer_put(event->rb);
+ event->rb = NULL;
}
if (is_cgroup_event(event))
@@ -2934,12 +2934,6 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
- /*
- * Remove from the PMU, can't get re-enabled since we got
- * here because the last ref went.
- */
- perf_event_disable(event);
-
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
@@ -2956,8 +2950,8 @@ int perf_event_release_kernel(struct perf_event *event)
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
- list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
+ perf_remove_from_context(event);
mutex_unlock(&ctx->mutex);
free_event(event);
@@ -3149,13 +3143,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
unsigned int events = POLL_HUP;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (buffer)
- events = atomic_xchg(&buffer->poll, 0);
+ rb = rcu_dereference(event->rb);
+ if (rb)
+ events = atomic_xchg(&rb->poll, 0);
rcu_read_unlock();
poll_wait(file, &event->waitq, wait);
@@ -3358,6 +3352,18 @@ static int perf_event_index(struct perf_event *event)
return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
+static void calc_timer_values(struct perf_event *event,
+ u64 *running,
+ u64 *enabled)
+{
+ u64 now, ctx_time;
+
+ now = perf_clock();
+ ctx_time = event->shadow_ctx_time + now;
+ *enabled = ctx_time - event->tstamp_enabled;
+ *running = ctx_time - event->tstamp_running;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
@@ -3366,14 +3372,25 @@ static int perf_event_index(struct perf_event *event)
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
+ u64 enabled, running;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (!buffer)
+ /*
+ * compute total_time_enabled, total_time_running
+ * based on snapshot values taken when the event
+ * was last scheduled in.
+ *
+ * we cannot simply called update_context_time()
+ * because of locking issue as we can be called in
+ * NMI context
+ */
+ calc_timer_values(event, &enabled, &running);
+ rb = rcu_dereference(event->rb);
+ if (!rb)
goto unlock;
- userpg = buffer->user_page;
+ userpg = rb->user_page;
/*
* Disable preemption so as to not let the corresponding user-space
@@ -3387,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event)
if (event->state == PERF_EVENT_STATE_ACTIVE)
userpg->offset -= local64_read(&event->hw.prev_count);
- userpg->time_enabled = event->total_time_enabled +
+ userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
- userpg->time_running = event->total_time_running +
+ userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
barrier();
@@ -3400,220 +3417,10 @@ unlock:
rcu_read_unlock();
}
-static unsigned long perf_data_size(struct perf_buffer *buffer);
-
-static void
-perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
-{
- long max_size = perf_data_size(buffer);
-
- if (watermark)
- buffer->watermark = min(max_size, watermark);
-
- if (!buffer->watermark)
- buffer->watermark = max_size / 2;
-
- if (flags & PERF_BUFFER_WRITABLE)
- buffer->writable = 1;
-
- atomic_set(&buffer->refcount, 1);
-}
-
-#ifndef CONFIG_PERF_USE_VMALLOC
-
-/*
- * Back perf_mmap() with regular GFP_KERNEL-0 pages.
- */
-
-static struct page *
-perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
-{
- if (pgoff > buffer->nr_pages)
- return NULL;
-
- if (pgoff == 0)
- return virt_to_page(buffer->user_page);
-
- return virt_to_page(buffer->data_pages[pgoff - 1]);
-}
-
-static void *perf_mmap_alloc_page(int cpu)
-{
- struct page *page;
- int node;
-
- node = (cpu == -1) ? cpu : cpu_to_node(cpu);
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
- if (!page)
- return NULL;
-
- return page_address(page);
-}
-
-static struct perf_buffer *
-perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
-{
- struct perf_buffer *buffer;
- unsigned long size;
- int i;
-
- size = sizeof(struct perf_buffer);
- size += nr_pages * sizeof(void *);
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto fail;
-
- buffer->user_page = perf_mmap_alloc_page(cpu);
- if (!buffer->user_page)
- goto fail_user_page;
-
- for (i = 0; i < nr_pages; i++) {
- buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
- if (!buffer->data_pages[i])
- goto fail_data_pages;
- }
-
- buffer->nr_pages = nr_pages;
-
- perf_buffer_init(buffer, watermark, flags);
-
- return buffer;
-
-fail_data_pages:
- for (i--; i >= 0; i--)
- free_page((unsigned long)buffer->data_pages[i]);
-
- free_page((unsigned long)buffer->user_page);
-
-fail_user_page:
- kfree(buffer);
-
-fail:
- return NULL;
-}
-
-static void perf_mmap_free_page(unsigned long addr)
-{
- struct page *page = virt_to_page((void *)addr);
-
- page->mapping = NULL;
- __free_page(page);
-}
-
-static void perf_buffer_free(struct perf_buffer *buffer)
-{
- int i;
-
- perf_mmap_free_page((unsigned long)buffer->user_page);
- for (i = 0; i < buffer->nr_pages; i++)
- perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
- kfree(buffer);
-}
-
-static inline int page_order(struct perf_buffer *buffer)
-{
- return 0;
-}
-
-#else
-
-/*
- * Back perf_mmap() with vmalloc memory.
- *
- * Required for architectures that have d-cache aliasing issues.
- */
-
-static inline int page_order(struct perf_buffer *buffer)
-{
- return buffer->page_order;
-}
-
-static struct page *
-perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
-{
- if (pgoff > (1UL << page_order(buffer)))
- return NULL;
-
- return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
-}
-
-static void perf_mmap_unmark_page(void *addr)
-{
- struct page *page = vmalloc_to_page(addr);
-
- page->mapping = NULL;
-}
-
-static void perf_buffer_free_work(struct work_struct *work)
-{
- struct perf_buffer *buffer;
- void *base;
- int i, nr;
-
- buffer = container_of(work, struct perf_buffer, work);
- nr = 1 << page_order(buffer);
-
- base = buffer->user_page;
- for (i = 0; i < nr + 1; i++)
- perf_mmap_unmark_page(base + (i * PAGE_SIZE));
-
- vfree(base);
- kfree(buffer);
-}
-
-static void perf_buffer_free(struct perf_buffer *buffer)
-{
- schedule_work(&buffer->work);
-}
-
-static struct perf_buffer *
-perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
-{
- struct perf_buffer *buffer;
- unsigned long size;
- void *all_buf;
-
- size = sizeof(struct perf_buffer);
- size += sizeof(void *);
-
- buffer = kzalloc(size, GFP_KERNEL);
- if (!buffer)
- goto fail;
-
- INIT_WORK(&buffer->work, perf_buffer_free_work);
-
- all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
- if (!all_buf)
- goto fail_all_buf;
-
- buffer->user_page = all_buf;
- buffer->data_pages[0] = all_buf + PAGE_SIZE;
- buffer->page_order = ilog2(nr_pages);
- buffer->nr_pages = 1;
-
- perf_buffer_init(buffer, watermark, flags);
-
- return buffer;
-
-fail_all_buf:
- kfree(buffer);
-
-fail:
- return NULL;
-}
-
-#endif
-
-static unsigned long perf_data_size(struct perf_buffer *buffer)
-{
- return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
-}
-
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
@@ -3623,14 +3430,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (!buffer)
+ rb = rcu_dereference(event->rb);
+ if (!rb)
goto unlock;
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
- vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
+ vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
@@ -3645,35 +3452,35 @@ unlock:
return ret;
}
-static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
+static void rb_free_rcu(struct rcu_head *rcu_head)
{
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
- buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
- perf_buffer_free(buffer);
+ rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+ rb_free(rb);
}
-static struct perf_buffer *perf_buffer_get(struct perf_event *event)
+static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
rcu_read_lock();
- buffer = rcu_dereference(event->buffer);
- if (buffer) {
- if (!atomic_inc_not_zero(&buffer->refcount))
- buffer = NULL;
+ rb = rcu_dereference(event->rb);
+ if (rb) {
+ if (!atomic_inc_not_zero(&rb->refcount))
+ rb = NULL;
}
rcu_read_unlock();
- return buffer;
+ return rb;
}
-static void perf_buffer_put(struct perf_buffer *buffer)
+static void ring_buffer_put(struct ring_buffer *rb)
{
- if (!atomic_dec_and_test(&buffer->refcount))
+ if (!atomic_dec_and_test(&rb->refcount))
return;
- call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
+ call_rcu(&rb->rcu_head, rb_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
@@ -3688,16 +3495,16 @@ static void perf_mmap_close(struct vm_area_struct *vma)
struct perf_event *event = vma->vm_file->private_data;
if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
- unsigned long size = perf_data_size(event->buffer);
+ unsigned long size = perf_data_size(event->rb);
struct user_struct *user = event->mmap_user;
- struct perf_buffer *buffer = event->buffer;
+ struct ring_buffer *rb = event->rb;
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->locked_vm -= event->mmap_locked;
- rcu_assign_pointer(event->buffer, NULL);
+ rcu_assign_pointer(event->rb, NULL);
mutex_unlock(&event->mmap_mutex);
- perf_buffer_put(buffer);
+ ring_buffer_put(rb);
free_uid(user);
}
}
@@ -3715,7 +3522,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
- struct perf_buffer *buffer;
+ struct ring_buffer *rb;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
@@ -3724,7 +3531,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
/*
* Don't allow mmap() of inherited per-task counters. This would
* create a performance issue due to all children writing to the
- * same buffer.
+ * same rb.
*/
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
@@ -3736,7 +3543,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
nr_pages = (vma_size / PAGE_SIZE) - 1;
/*
- * If we have buffer pages ensure they're a power-of-two number, so we
+ * If we have rb pages ensure they're a power-of-two number, so we
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
@@ -3750,9 +3557,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->mmap_mutex);
- if (event->buffer) {
- if (event->buffer->nr_pages == nr_pages)
- atomic_inc(&event->buffer->refcount);
+ if (event->rb) {
+ if (event->rb->nr_pages == nr_pages)
+ atomic_inc(&event->rb->refcount);
else
ret = -EINVAL;
goto unlock;
@@ -3782,18 +3589,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
goto unlock;
}
- WARN_ON(event->buffer);
+ WARN_ON(event->rb);
if (vma->vm_flags & VM_WRITE)
- flags |= PERF_BUFFER_WRITABLE;
+ flags |= RING_BUFFER_WRITABLE;
- buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
- event->cpu, flags);
- if (!buffer) {
+ rb = rb_alloc(nr_pages,
+ event->attr.watermark ? event->attr.wakeup_watermark : 0,
+ event->cpu, flags);
+
+ if (!rb) {
ret = -ENOMEM;
goto unlock;
}
- rcu_assign_pointer(event->buffer, buffer);
+ rcu_assign_pointer(event->rb, rb);
atomic_long_add(user_extra, &user->locked_vm);
event->mmap_locked = extra;
@@ -3892,117 +3701,6 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
-/*
- * Output
- */
-static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
- unsigned long offset, unsigned long head)
-{
- unsigned long mask;
-
- if (!buffer->writable)
- return true;
-
- mask = perf_data_size(buffer) - 1;
-
- offset = (offset - tail) & mask;
- head = (head - tail) & mask;
-
- if ((int)(head - offset) < 0)
- return false;
-
- return true;
-}
-
-static void perf_output_wakeup(struct perf_output_handle *handle)
-{
- atomic_set(&handle->buffer->poll, POLL_IN);
-
- if (handle->nmi) {
- handle->event->pending_wakeup = 1;
- irq_work_queue(&handle->event->pending);
- } else
- perf_event_wakeup(handle->event);
-}
-
-/*
- * We need to ensure a later event_id doesn't publish a head when a former
- * event isn't done writing. However since we need to deal with NMIs we
- * cannot fully serialize things.
- *
- * We only publish the head (and generate a wakeup) when the outer-most
- * event completes.
- */
-static void perf_output_get_handle(struct perf_output_handle *handle)
-{
- struct perf_buffer *buffer = handle->buffer;
-
- preempt_disable();
- local_inc(&buffer->nest);
- handle->wakeup = local_read(&buffer->wakeup);
-}
-
-static void perf_output_put_handle(struct perf_output_handle *handle)
-{
- struct perf_buffer *buffer = handle->buffer;
- unsigned long head;
-
-again:
- head = local_read(&buffer->head);
-
- /*
- * IRQ/NMI can happen here, which means we can miss a head update.
- */
-
- if (!local_dec_and_test(&buffer->nest))
- goto out;
-
- /*
- * Publish the known good head. Rely on the full barrier implied
- * by atomic_dec_and_test() order the buffer->head read and this
- * write.
- */
- buffer->user_page->data_head = head;
-
- /*
- * Now check if we missed an update, rely on the (compiler)
- * barrier in atomic_dec_and_test() to re-read buffer->head.
- */
- if (unlikely(head != local_read(&buffer->head))) {
- local_inc(&buffer->nest);
- goto again;
- }
-
- if (handle->wakeup != local_read(&buffer->wakeup))
- perf_output_wakeup(handle);
-
-out:
- preempt_enable();
-}
-
-__always_inline void perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len)
-{
- do {
- unsigned long size = min_t(unsigned long, handle->size, len);
-
- memcpy(handle->addr, buf, size);
-
- len -= size;
- handle->addr += size;
- buf += size;
- handle->size -= size;
- if (!handle->size) {
- struct perf_buffer *buffer = handle->buffer;
-
- handle->page++;
- handle->page &= buffer->nr_pages - 1;
- handle->addr = buffer->data_pages[handle->page];
- handle->size = PAGE_SIZE << page_order(buffer);
- }
- } while (len);
-}
-
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
@@ -4033,9 +3731,9 @@ static void __perf_event_header__init_id(struct perf_event_header *header,
}
}
-static void perf_event_header__init_id(struct perf_event_header *header,
- struct perf_sample_data *data,
- struct perf_event *event)
+void perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event)
{
if (event->attr.sample_id_all)
__perf_event_header__init_id(header, data, event);
@@ -4062,121 +3760,14 @@ static void __perf_event__output_id_sample(struct perf_output_handle *handle,
perf_output_put(handle, data->cpu_entry);
}
-static void perf_event__output_id_sample(struct perf_event *event,
- struct perf_output_handle *handle,
- struct perf_sample_data *sample)
+void perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
-int perf_output_begin(struct perf_output_handle *handle,
- struct perf_event *event, unsigned int size,
- int nmi, int sample)
-{
- struct perf_buffer *buffer;
- unsigned long tail, offset, head;
- int have_lost;
- struct perf_sample_data sample_data;
- struct {
- struct perf_event_header header;
- u64 id;
- u64 lost;
- } lost_event;
-
- rcu_read_lock();
- /*
- * For inherited events we send all the output towards the parent.
- */
- if (event->parent)
- event = event->parent;
-
- buffer = rcu_dereference(event->buffer);
- if (!buffer)
- goto out;
-
- handle->buffer = buffer;
- handle->event = event;
- handle->nmi = nmi;
- handle->sample = sample;
-
- if (!buffer->nr_pages)
- goto out;
-
- have_lost = local_read(&buffer->lost);
- if (have_lost) {
- lost_event.header.size = sizeof(lost_event);
- perf_event_header__init_id(&lost_event.header, &sample_data,
- event);
- size += lost_event.header.size;
- }
-
- perf_output_get_handle(handle);
-
- do {
- /*
- * Userspace could choose to issue a mb() before updating the
- * tail pointer. So that all reads will be completed before the
- * write is issued.
- */
- tail = ACCESS_ONCE(buffer->user_page->data_tail);
- smp_rmb();
- offset = head = local_read(&buffer->head);
- head += size;
- if (unlikely(!perf_output_space(buffer, tail, offset, head)))
- goto fail;
- } while (local_cmpxchg(&buffer->head, offset, head) != offset);
-
- if (head - local_read(&buffer->wakeup) > buffer->watermark)
- local_add(buffer->watermark, &buffer->wakeup);
-
- handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
- handle->page &= buffer->nr_pages - 1;
- handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
- handle->addr = buffer->data_pages[handle->page];
- handle->addr += handle->size;
- handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
-
- if (have_lost) {
- lost_event.header.type = PERF_RECORD_LOST;
- lost_event.header.misc = 0;
- lost_event.id = event->id;
- lost_event.lost = local_xchg(&buffer->lost, 0);
-
- perf_output_put(handle, lost_event);
- perf_event__output_id_sample(event, handle, &sample_data);
- }
-
- return 0;
-
-fail:
- local_inc(&buffer->lost);
- perf_output_put_handle(handle);
-out:
- rcu_read_unlock();
-
- return -ENOSPC;
-}
-
-void perf_output_end(struct perf_output_handle *handle)
-{
- struct perf_event *event = handle->event;
- struct perf_buffer *buffer = handle->buffer;
-
- int wakeup_events = event->attr.wakeup_events;
-
- if (handle->sample && wakeup_events) {
- int events = local_inc_return(&buffer->events);
- if (events >= wakeup_events) {
- local_sub(wakeup_events, &buffer->events);
- local_inc(&buffer->wakeup);
- }
- }
-
- perf_output_put_handle(handle);
- rcu_read_unlock();
-}
-
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
@@ -4197,7 +3788,7 @@ static void perf_output_read_one(struct perf_output_handle *handle,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
- perf_output_copy(handle, values, n * sizeof(u64));
+ __output_copy(handle, values, n * sizeof(u64));
}
/*
@@ -4227,7 +3818,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
- perf_output_copy(handle, values, n * sizeof(u64));
+ __output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
@@ -4239,7 +3830,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
- perf_output_copy(handle, values, n * sizeof(u64));
+ __output_copy(handle, values, n * sizeof(u64));
}
}
@@ -4249,7 +3840,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
- u64 enabled = 0, running = 0, now, ctx_time;
+ u64 enabled = 0, running = 0;
u64 read_format = event->attr.read_format;
/*
@@ -4261,12 +3852,8 @@ static void perf_output_read(struct perf_output_handle *handle,
* because of locking issue as we are called in
* NMI context
*/
- if (read_format & PERF_FORMAT_TOTAL_TIMES) {
- now = perf_clock();
- ctx_time = event->shadow_ctx_time + now;
- enabled = ctx_time - event->tstamp_enabled;
- running = ctx_time - event->tstamp_running;
- }
+ if (read_format & PERF_FORMAT_TOTAL_TIMES)
+ calc_timer_values(event, &enabled, &running);
if (event->attr.read_format & PERF_FORMAT_GROUP)
perf_output_read_group(handle, event, enabled, running);
@@ -4319,7 +3906,7 @@ void perf_output_sample(struct perf_output_handle *handle,
size *= sizeof(u64);
- perf_output_copy(handle, data->callchain, size);
+ __output_copy(handle, data->callchain, size);
} else {
u64 nr = 0;
perf_output_put(handle, nr);
@@ -4329,8 +3916,8 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_RAW) {
if (data->raw) {
perf_output_put(handle, data->raw->size);
- perf_output_copy(handle, data->raw->data,
- data->raw->size);
+ __output_copy(handle, data->raw->data,
+ data->raw->size);
} else {
struct {
u32 size;
@@ -4342,6 +3929,20 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, raw);
}
}
+
+ if (!event->attr.watermark) {
+ int wakeup_events = event->attr.wakeup_events;
+
+ if (wakeup_events) {
+ struct ring_buffer *rb = handle->rb;
+ int events = local_inc_return(&rb->events);
+
+ if (events >= wakeup_events) {
+ local_sub(wakeup_events, &rb->events);
+ local_inc(&rb->wakeup);
+ }
+ }
+ }
}
void perf_prepare_sample(struct perf_event_header *header,
@@ -4386,7 +3987,7 @@ void perf_prepare_sample(struct perf_event_header *header,
}
}
-static void perf_event_output(struct perf_event *event, int nmi,
+static void perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -4398,7 +3999,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
perf_prepare_sample(&header, data, event, regs);
- if (perf_output_begin(&handle, event, header.size, nmi, 1))
+ if (perf_output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
@@ -4438,7 +4039,7 @@ perf_event_read_event(struct perf_event *event,
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
- ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
+ ret = perf_output_begin(&handle, event, read_event.header.size);
if (ret)
return;
@@ -4481,7 +4082,7 @@ static void perf_event_task_output(struct perf_event *event,
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
- task_event->event_id.header.size, 0, 0);
+ task_event->event_id.header.size);
if (ret)
goto out;
@@ -4618,7 +4219,7 @@ static void perf_event_comm_output(struct perf_event *event,
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
- comm_event->event_id.header.size, 0, 0);
+ comm_event->event_id.header.size);
if (ret)
goto out;
@@ -4627,7 +4228,7 @@ static void perf_event_comm_output(struct perf_event *event,
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
perf_output_put(&handle, comm_event->event_id);
- perf_output_copy(&handle, comm_event->comm,
+ __output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_event__output_id_sample(event, &handle, &sample);
@@ -4765,7 +4366,7 @@ static void perf_event_mmap_output(struct perf_event *event,
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
- mmap_event->event_id.header.size, 0, 0);
+ mmap_event->event_id.header.size);
if (ret)
goto out;
@@ -4773,7 +4374,7 @@ static void perf_event_mmap_output(struct perf_event *event,
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
- perf_output_copy(&handle, mmap_event->file_name,
+ __output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
@@ -4829,7 +4430,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
if (file) {
/*
- * d_path works from the end of the buffer backwards, so we
+ * d_path works from the end of the rb backwards, so we
* need to add enough zero bytes after the string to handle
* the 64bit alignment we do later.
*/
@@ -4960,7 +4561,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
perf_event_header__init_id(&throttle_event.header, &sample, event);
ret = perf_output_begin(&handle, event,
- throttle_event.header.size, 1, 0);
+ throttle_event.header.size);
if (ret)
return;
@@ -4973,7 +4574,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
* Generic event overflow handling, sampling.
*/
-static int __perf_event_overflow(struct perf_event *event, int nmi,
+static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -5016,34 +4617,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
- if (nmi) {
- event->pending_disable = 1;
- irq_work_queue(&event->pending);
- } else
- perf_event_disable(event);
+ event->pending_disable = 1;
+ irq_work_queue(&event->pending);
}
if (event->overflow_handler)
- event->overflow_handler(event, nmi, data, regs);
+ event->overflow_handler(event, data, regs);
else
- perf_event_output(event, nmi, data, regs);
+ perf_event_output(event, data, regs);
if (event->fasync && event->pending_kill) {
- if (nmi) {
- event->pending_wakeup = 1;
- irq_work_queue(&event->pending);
- } else
- perf_event_wakeup(event);
+ event->pending_wakeup = 1;
+ irq_work_queue(&event->pending);
}
return ret;
}
-int perf_event_overflow(struct perf_event *event, int nmi,
+int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- return __perf_event_overflow(event, nmi, 1, data, regs);
+ return __perf_event_overflow(event, 1, data, regs);
}
/*
@@ -5092,7 +4687,7 @@ again:
}
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
- int nmi, struct perf_sample_data *data,
+ struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
@@ -5106,7 +4701,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
return;
for (; overflow; overflow--) {
- if (__perf_event_overflow(event, nmi, throttle,
+ if (__perf_event_overflow(event, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
@@ -5119,7 +4714,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
}
static void perf_swevent_event(struct perf_event *event, u64 nr,
- int nmi, struct perf_sample_data *data,
+ struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
@@ -5133,12 +4728,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
return;
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
- return perf_swevent_overflow(event, 1, nmi, data, regs);
+ return perf_swevent_overflow(event, 1, data, regs);
if (local64_add_negative(nr, &hwc->period_left))
return;
- perf_swevent_overflow(event, 0, nmi, data, regs);
+ perf_swevent_overflow(event, 0, data, regs);
}
static int perf_exclude_event(struct perf_event *event,
@@ -5226,7 +4821,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
}
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
- u64 nr, int nmi,
+ u64 nr,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -5242,7 +4837,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
- perf_swevent_event(event, nr, nmi, data, regs);
+ perf_swevent_event(event, nr, data, regs);
}
end:
rcu_read_unlock();
@@ -5263,8 +4858,7 @@ inline void perf_swevent_put_recursion_context(int rctx)
put_recursion_context(swhash->recursion, rctx);
}
-void __perf_sw_event(u32 event_id, u64 nr, int nmi,
- struct pt_regs *regs, u64 addr)
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
int rctx;
@@ -5276,7 +4870,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
perf_sample_data_init(&data, addr);
- do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+ do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace();
@@ -5524,7 +5118,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
- perf_swevent_event(event, count, 1, &data, regs);
+ perf_swevent_event(event, count, &data, regs);
}
perf_swevent_put_recursion_context(rctx);
@@ -5617,7 +5211,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
perf_sample_data_init(&sample, bp->attr.bp_addr);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
- perf_swevent_event(bp, 1, 1, &sample, regs);
+ perf_swevent_event(bp, 1, &sample, regs);
}
#endif
@@ -5646,7 +5240,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && current->pid == 0))
- if (perf_event_overflow(event, 0, &data, regs))
+ if (perf_event_overflow(event, &data, regs))
ret = HRTIMER_NORESTART;
}
@@ -5986,6 +5580,7 @@ free_dev:
}
static struct lock_class_key cpuctx_mutex;
+static struct lock_class_key cpuctx_lock;
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
@@ -6036,6 +5631,7 @@ skip_type:
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+ lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
cpuctx->jiffies_interval = 1;
@@ -6150,7 +5746,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
- perf_overflow_handler_t overflow_handler)
+ perf_overflow_handler_t overflow_handler,
+ void *context)
{
struct pmu *pmu;
struct perf_event *event;
@@ -6208,10 +5805,13 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
#endif
}
- if (!overflow_handler && parent_event)
+ if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
+ context = parent_event->overflow_handler_context;
+ }
event->overflow_handler = overflow_handler;
+ event->overflow_handler_context = context;
if (attr->disabled)
event->state = PERF_EVENT_STATE_OFF;
@@ -6326,13 +5926,6 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (ret)
return -EFAULT;
- /*
- * If the type exists, the corresponding creation will verify
- * the attr->config.
- */
- if (attr->type >= PERF_TYPE_MAX)
- return -EINVAL;
-
if (attr->__reserved_1)
return -EINVAL;
@@ -6354,7 +5947,7 @@ err_size:
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
- struct perf_buffer *buffer = NULL, *old_buffer = NULL;
+ struct ring_buffer *rb = NULL, *old_rb = NULL;
int ret = -EINVAL;
if (!output_event)
@@ -6371,7 +5964,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
goto out;
/*
- * If its not a per-cpu buffer, it must be the same task.
+ * If its not a per-cpu rb, it must be the same task.
*/
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
@@ -6383,20 +5976,20 @@ set:
goto unlock;
if (output_event) {
- /* get the buffer we want to redirect to */
- buffer = perf_buffer_get(output_event);
- if (!buffer)
+ /* get the rb we want to redirect to */
+ rb = ring_buffer_get(output_event);
+ if (!rb)
goto unlock;
}
- old_buffer = event->buffer;
- rcu_assign_pointer(event->buffer, buffer);
+ old_rb = event->rb;
+ rcu_assign_pointer(event->rb, rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
- if (old_buffer)
- perf_buffer_put(old_buffer);
+ if (old_rb)
+ ring_buffer_put(old_rb);
out:
return ret;
}
@@ -6478,7 +6071,8 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
- event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL);
+ event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
+ NULL, NULL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_task;
@@ -6663,7 +6257,8 @@ err_fd:
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
- perf_overflow_handler_t overflow_handler)
+ perf_overflow_handler_t overflow_handler,
+ void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
@@ -6673,7 +6268,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
* Get the target context (task or percpu):
*/
- event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler);
+ event = perf_event_alloc(attr, cpu, task, NULL, NULL,
+ overflow_handler, context);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
@@ -6780,7 +6376,6 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
* our context.
*/
child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
- task_ctx_sched_out(child_ctx, EVENT_ALL);
/*
* Take the context lock here so that if find_get_context is
@@ -6788,6 +6383,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
* incremented the context's refcount before we do put_ctx below.
*/
raw_spin_lock(&child_ctx->lock);
+ task_ctx_sched_out(child_ctx);
child->perf_event_ctxp[ctxn] = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -6957,7 +6553,7 @@ inherit_event(struct perf_event *parent_event,
parent_event->cpu,
child,
group_leader, parent_event,
- NULL);
+ NULL, NULL);
if (IS_ERR(child_event))
return child_event;
get_ctx(child_ctx);
@@ -6984,6 +6580,8 @@ inherit_event(struct perf_event *parent_event,
child_event->ctx = child_ctx;
child_event->overflow_handler = parent_event->overflow_handler;
+ child_event->overflow_handler_context
+ = parent_event->overflow_handler_context;
/*
* Precalculate sample_data sizes
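
Two interface changes in the core.c diff matter to kernel users: the 'nmi' argument is gone from the overflow path, and perf_event_alloc()/perf_event_create_kernel_counter() now take an opaque context pointer that is handed back via event->overflow_handler_context. A hedged sketch with invented names, matching only the signatures visible above:

#include <linux/perf_event.h>
#include <linux/printk.h>

/* Post-patch overflow handler: no 'nmi' parameter any more. */
static void example_overflow(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	/* the cookie passed at creation time comes back here */
	pr_debug("overflow, cookie=%p\n", event->overflow_handler_context);
}

static struct perf_event *example_counter(struct perf_event_attr *attr,
					  void *my_ctx)
{
	/* per-task counter on any cpu, carrying my_ctx as the context */
	return perf_event_create_kernel_counter(attr, -1, current,
						example_overflow, my_ctx);
}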
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 086adf25a55e..b7971d6f38bf 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -431,9 +431,11 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
+ void *context,
struct task_struct *tsk)
{
- return perf_event_create_kernel_counter(attr, -1, tsk, triggered);
+ return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
+ context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
@@ -502,7 +504,8 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
*/
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_overflow_handler_t triggered)
+ perf_overflow_handler_t triggered,
+ void *context)
{
struct perf_event * __percpu *cpu_events, **pevent, *bp;
long err;
@@ -515,7 +518,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
get_online_cpus();
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
- bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered);
+ bp = perf_event_create_kernel_counter(attr, cpu, NULL,
+ triggered, context);
*pevent = bp;
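
A hedged caller sketch for the new 'context' parameter; all example_* names are invented, and only the post-patch parameter order of register_user_hw_breakpoint() is taken from the hunk above.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/printk.h>

static void example_bp_handler(struct perf_event *bp,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	pr_debug("watchpoint hit, cookie=%p\n", bp->overflow_handler_context);
}

static struct perf_event *example_watch(struct task_struct *tsk,
					unsigned long addr, void *cookie)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, example_bp_handler,
					   cookie, tsk);
}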
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
new file mode 100644
index 000000000000..09097dd8116c
--- /dev/null
+++ b/kernel/events/internal.h
@@ -0,0 +1,96 @@
+#ifndef _KERNEL_EVENTS_INTERNAL_H
+#define _KERNEL_EVENTS_INTERNAL_H
+
+#define RING_BUFFER_WRITABLE 0x01
+
+struct ring_buffer {
+ atomic_t refcount;
+ struct rcu_head rcu_head;
+#ifdef CONFIG_PERF_USE_VMALLOC
+ struct work_struct work;
+ int page_order; /* allocation order */
+#endif
+ int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
+
+ atomic_t poll; /* POLL_ for wakeups */
+
+ local_t head; /* write position */
+ local_t nest; /* nested writers */
+ local_t events; /* event limit */
+ local_t wakeup; /* wakeup stamp */
+ local_t lost; /* nr records lost */
+
+ long watermark; /* wakeup watermark */
+
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+};
+
+extern void rb_free(struct ring_buffer *rb);
+extern struct ring_buffer *
+rb_alloc(int nr_pages, long watermark, int cpu, int flags);
+extern void perf_event_wakeup(struct perf_event *event);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+ struct perf_output_handle *handle,
+ struct perf_sample_data *sample);
+
+extern struct page *
+perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
+
+#ifdef CONFIG_PERF_USE_VMALLOC
+/*
+ * Back perf_mmap() with vmalloc memory.
+ *
+ * Required for architectures that have d-cache aliasing issues.
+ */
+
+static inline int page_order(struct ring_buffer *rb)
+{
+ return rb->page_order;
+}
+
+#else
+
+static inline int page_order(struct ring_buffer *rb)
+{
+ return 0;
+}
+#endif
+
+static unsigned long perf_data_size(struct ring_buffer *rb)
+{
+ return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
+}
+
+static inline void
+__output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len)
+{
+ do {
+ unsigned long size = min_t(unsigned long, handle->size, len);
+
+ memcpy(handle->addr, buf, size);
+
+ len -= size;
+ handle->addr += size;
+ buf += size;
+ handle->size -= size;
+ if (!handle->size) {
+ struct ring_buffer *rb = handle->rb;
+
+ handle->page++;
+ handle->page &= rb->nr_pages - 1;
+ handle->addr = rb->data_pages[handle->page];
+ handle->size = PAGE_SIZE << page_order(rb);
+ }
+ } while (len);
+}
+
+#endif /* _KERNEL_EVENTS_INTERNAL_H */
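
__output_copy() above relies on nr_pages being a power of two, so the AND with nr_pages - 1 acts as a cheap modulo when a record spills past a page boundary. A standalone userspace sketch of that wrap-around arithmetic with made-up sizes; it mirrors the indexing only, not the kernel types.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_PAGES 4      /* must be a power of two for the mask trick */
#define PAGE_SZ  16     /* tiny pages so the wrap is easy to see */

int main(void)
{
        char *pages[NR_PAGES];
        const char msg[] = "a record longer than one page, spilling over";
        const char *src = msg;
        size_t page = 0, space = PAGE_SZ, len = sizeof(msg), i;

        for (i = 0; i < NR_PAGES; i++)
                pages[i] = calloc(1, PAGE_SZ);

        /* same loop shape as __output_copy(): copy, then wrap via the mask */
        do {
                size_t chunk = len < space ? len : space;

                memcpy(pages[page] + (PAGE_SZ - space), src, chunk);
                len   -= chunk;
                src   += chunk;
                space -= chunk;
                if (!space) {
                        page  = (page + 1) & (NR_PAGES - 1);    /* cheap modulo */
                        space = PAGE_SZ;
                }
        } while (len);

        for (i = 0; i < NR_PAGES; i++) {
                printf("page %zu: %.*s\n", i, PAGE_SZ, pages[i]);
                free(pages[i]);
        }
        return 0;
}
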
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
new file mode 100644
index 000000000000..a2a29205cc0f
--- /dev/null
+++ b/kernel/events/ring_buffer.c
@@ -0,0 +1,380 @@
+/*
+ * Performance events ring-buffer code:
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_event.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
+ unsigned long offset, unsigned long head)
+{
+ unsigned long mask;
+
+ if (!rb->writable)
+ return true;
+
+ mask = perf_data_size(rb) - 1;
+
+ offset = (offset - tail) & mask;
+ head = (head - tail) & mask;
+
+ if ((int)(head - offset) < 0)
+ return false;
+
+ return true;
+}
+
+static void perf_output_wakeup(struct perf_output_handle *handle)
+{
+ atomic_set(&handle->rb->poll, POLL_IN);
+
+ handle->event->pending_wakeup = 1;
+ irq_work_queue(&handle->event->pending);
+}
+
+/*
+ * We need to ensure a later event_id doesn't publish a head when a former
+ * event isn't done writing. However since we need to deal with NMIs we
+ * cannot fully serialize things.
+ *
+ * We only publish the head (and generate a wakeup) when the outer-most
+ * event completes.
+ */
+static void perf_output_get_handle(struct perf_output_handle *handle)
+{
+ struct ring_buffer *rb = handle->rb;
+
+ preempt_disable();
+ local_inc(&rb->nest);
+ handle->wakeup = local_read(&rb->wakeup);
+}
+
+static void perf_output_put_handle(struct perf_output_handle *handle)
+{
+ struct ring_buffer *rb = handle->rb;
+ unsigned long head;
+
+again:
+ head = local_read(&rb->head);
+
+ /*
+ * IRQ/NMI can happen here, which means we can miss a head update.
+ */
+
+ if (!local_dec_and_test(&rb->nest))
+ goto out;
+
+ /*
+ * Publish the known good head. Rely on the full barrier implied
+ * by local_dec_and_test() to order the rb->head read and this
+ * write.
+ */
+ rb->user_page->data_head = head;
+
+ /*
+ * Now check if we missed an update; rely on the (compiler)
+ * barrier in local_dec_and_test() to re-read rb->head.
+ */
+ if (unlikely(head != local_read(&rb->head))) {
+ local_inc(&rb->nest);
+ goto again;
+ }
+
+ if (handle->wakeup != local_read(&rb->wakeup))
+ perf_output_wakeup(handle);
+
+out:
+ preempt_enable();
+}
+
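
perf_output_get_handle()/perf_output_put_handle() above implement the rule from the comment block: only the outer-most writer publishes data_head, and it re-reads rb->head after publishing in case a nested IRQ/NMI writer moved it in the meantime. A single-threaded sketch of that retry loop; the injected "nested write" and all names are illustrative, not kernel code.

#include <stdio.h>
#include <stdbool.h>

static long head, nest, published;      /* stand-ins for rb->head, rb->nest and
                                           the user-visible data_head */
static bool inject_nested;

static void put_handle(void);

static void get_handle(void)
{
        nest++;
}

static void write_record(long size)
{
        get_handle();
        head += size;                   /* reserve space for the record */
        put_handle();
}

static void put_handle(void)
{
        long seen;
again:
        seen = head;

        if (--nest)                     /* a writer is still nested above us */
                return;

        published = seen;               /* publish the known-good head */

        if (inject_nested) {            /* pretend an NMI writer fires here */
                inject_nested = false;
                write_record(64);
        }

        if (seen != head) {             /* missed an update: publish again */
                nest++;
                goto again;
        }
}

int main(void)
{
        inject_nested = true;
        write_record(128);
        printf("head=%ld published=%ld\n", head, published);   /* both 192 */
        return 0;
}
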
+int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event, unsigned int size)
+{
+ struct ring_buffer *rb;
+ unsigned long tail, offset, head;
+ int have_lost;
+ struct perf_sample_data sample_data;
+ struct {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+ } lost_event;
+
+ rcu_read_lock();
+ /*
+ * For inherited events we send all the output towards the parent.
+ */
+ if (event->parent)
+ event = event->parent;
+
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto out;
+
+ handle->rb = rb;
+ handle->event = event;
+
+ if (!rb->nr_pages)
+ goto out;
+
+ have_lost = local_read(&rb->lost);
+ if (have_lost) {
+ lost_event.header.size = sizeof(lost_event);
+ perf_event_header__init_id(&lost_event.header, &sample_data,
+ event);
+ size += lost_event.header.size;
+ }
+
+ perf_output_get_handle(handle);
+
+ do {
+ /*
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer, so that all reads will be completed before the
+ * write is issued.
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+ smp_rmb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))
+ goto fail;
+ } while (local_cmpxchg(&rb->head, offset, head) != offset);
+
+ if (head - local_read(&rb->wakeup) > rb->watermark)
+ local_add(rb->watermark, &rb->wakeup);
+
+ handle->page = offset >> (PAGE_SHIFT + page_order(rb));
+ handle->page &= rb->nr_pages - 1;
+ handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
+ handle->addr = rb->data_pages[handle->page];
+ handle->addr += handle->size;
+ handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;
+
+ if (have_lost) {
+ lost_event.header.type = PERF_RECORD_LOST;
+ lost_event.header.misc = 0;
+ lost_event.id = event->id;
+ lost_event.lost = local_xchg(&rb->lost, 0);
+
+ perf_output_put(handle, lost_event);
+ perf_event__output_id_sample(event, handle, &sample_data);
+ }
+
+ return 0;
+
+fail:
+ local_inc(&rb->lost);
+ perf_output_put_handle(handle);
+out:
+ rcu_read_unlock();
+
+ return -ENOSPC;
+}
+
+void perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len)
+{
+ __output_copy(handle, buf, len);
+}
+
+void perf_output_end(struct perf_output_handle *handle)
+{
+ perf_output_put_handle(handle);
+ rcu_read_unlock();
+}
+
+static void
+ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+{
+ long max_size = perf_data_size(rb);
+
+ if (watermark)
+ rb->watermark = min(max_size, watermark);
+
+ if (!rb->watermark)
+ rb->watermark = max_size / 2;
+
+ if (flags & RING_BUFFER_WRITABLE)
+ rb->writable = 1;
+
+ atomic_set(&rb->refcount, 1);
+}
+
+#ifndef CONFIG_PERF_USE_VMALLOC
+
+/*
+ * Back perf_mmap() with regular GFP_KERNEL-0 pages.
+ */
+
+struct page *
+perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+{
+ if (pgoff > rb->nr_pages)
+ return NULL;
+
+ if (pgoff == 0)
+ return virt_to_page(rb->user_page);
+
+ return virt_to_page(rb->data_pages[pgoff - 1]);
+}
+
+static void *perf_mmap_alloc_page(int cpu)
+{
+ struct page *page;
+ int node;
+
+ node = (cpu == -1) ? cpu : cpu_to_node(cpu);
+ page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
+
+struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+{
+ struct ring_buffer *rb;
+ unsigned long size;
+ int i;
+
+ size = sizeof(struct ring_buffer);
+ size += nr_pages * sizeof(void *);
+
+ rb = kzalloc(size, GFP_KERNEL);
+ if (!rb)
+ goto fail;
+
+ rb->user_page = perf_mmap_alloc_page(cpu);
+ if (!rb->user_page)
+ goto fail_user_page;
+
+ for (i = 0; i < nr_pages; i++) {
+ rb->data_pages[i] = perf_mmap_alloc_page(cpu);
+ if (!rb->data_pages[i])
+ goto fail_data_pages;
+ }
+
+ rb->nr_pages = nr_pages;
+
+ ring_buffer_init(rb, watermark, flags);
+
+ return rb;
+
+fail_data_pages:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)rb->data_pages[i]);
+
+ free_page((unsigned long)rb->user_page);
+
+fail_user_page:
+ kfree(rb);
+
+fail:
+ return NULL;
+}
+
+static void perf_mmap_free_page(unsigned long addr)
+{
+ struct page *page = virt_to_page((void *)addr);
+
+ page->mapping = NULL;
+ __free_page(page);
+}
+
+void rb_free(struct ring_buffer *rb)
+{
+ int i;
+
+ perf_mmap_free_page((unsigned long)rb->user_page);
+ for (i = 0; i < rb->nr_pages; i++)
+ perf_mmap_free_page((unsigned long)rb->data_pages[i]);
+ kfree(rb);
+}
+
+#else
+
+struct page *
+perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+{
+ if (pgoff > (1UL << page_order(rb)))
+ return NULL;
+
+ return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
+}
+
+static void perf_mmap_unmark_page(void *addr)
+{
+ struct page *page = vmalloc_to_page(addr);
+
+ page->mapping = NULL;
+}
+
+static void rb_free_work(struct work_struct *work)
+{
+ struct ring_buffer *rb;
+ void *base;
+ int i, nr;
+
+ rb = container_of(work, struct ring_buffer, work);
+ nr = 1 << page_order(rb);
+
+ base = rb->user_page;
+ for (i = 0; i < nr + 1; i++)
+ perf_mmap_unmark_page(base + (i * PAGE_SIZE));
+
+ vfree(base);
+ kfree(rb);
+}
+
+void rb_free(struct ring_buffer *rb)
+{
+ schedule_work(&rb->work);
+}
+
+struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+{
+ struct ring_buffer *rb;
+ unsigned long size;
+ void *all_buf;
+
+ size = sizeof(struct ring_buffer);
+ size += sizeof(void *);
+
+ rb = kzalloc(size, GFP_KERNEL);
+ if (!rb)
+ goto fail;
+
+ INIT_WORK(&rb->work, rb_free_work);
+
+ all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
+ if (!all_buf)
+ goto fail_all_buf;
+
+ rb->user_page = all_buf;
+ rb->data_pages[0] = all_buf + PAGE_SIZE;
+ rb->page_order = ilog2(nr_pages);
+ rb->nr_pages = 1;
+
+ ring_buffer_init(rb, watermark, flags);
+
+ return rb;
+
+fail_all_buf:
+ kfree(rb);
+
+fail:
+ return NULL;
+}
+
+#endif
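
perf_output_begin() earlier in this file reserves space by advancing rb->head in a local_cmpxchg() loop and rejects a reservation, via perf_output_space(), when it would overrun data the reader has not consumed. A small userspace sketch of that modulo-space check, assuming a power-of-two buffer size; the sizes are made up.

#include <stdio.h>
#include <stdbool.h>

#define BUF_SIZE 4096UL         /* power of two, like perf_data_size() */

/*
 * Model of perf_output_space(): taken modulo the buffer size, the new
 * record [offset, head) must not run past the reader's tail.
 */
static bool output_space(unsigned long tail, unsigned long offset,
                         unsigned long head)
{
        unsigned long mask = BUF_SIZE - 1;

        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        return (long)(head - offset) >= 0;
}

int main(void)
{
        /* tail at 0, a 512-byte record at offset 1024: plenty of room */
        printf("%d\n", output_space(0, 1024, 1024 + 512));
        /* a wrapped record that would pass tail=100 is rejected */
        printf("%d\n", output_space(100, 4000, 4000 + 512));
        return 0;
}
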
diff --git a/kernel/exit.c b/kernel/exit.c
index f2b321bae440..73bb192a3d32 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -169,7 +169,6 @@ void release_task(struct task_struct * p)
struct task_struct *leader;
int zap_leader;
repeat:
- tracehook_prepare_release_task(p);
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
@@ -179,7 +178,7 @@ repeat:
proc_flush_task(p);
write_lock_irq(&tasklist_lock);
- tracehook_finish_release_task(p);
+ ptrace_release_task(p);
__exit_signal(p);
/*
@@ -190,22 +189,12 @@ repeat:
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
- BUG_ON(task_detached(leader));
- do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
- *
- * do_notify_parent() will have marked it self-reaping in
- * that case.
- */
- zap_leader = task_detached(leader);
-
- /*
- * This maintains the invariant that release_task()
- * only runs on a task in EXIT_DEAD, just for sanity.
*/
+ zap_leader = do_notify_parent(leader, leader->exit_signal);
if (zap_leader)
leader->exit_state = EXIT_DEAD;
}
@@ -277,18 +266,16 @@ int is_current_pgrp_orphaned(void)
return retval;
}
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
{
- int retval = 0;
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
- if (!task_is_stopped(p))
- continue;
- retval = 1;
- break;
+ if (p->signal->flags & SIGNAL_STOP_STOPPED)
+ return true;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
- return retval;
+
+ return false;
}
/*
@@ -751,7 +738,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
{
list_move_tail(&p->sibling, &p->real_parent->children);
- if (task_detached(p))
+ if (p->exit_state == EXIT_DEAD)
return;
/*
* If this is a threaded reparent there is no need to
@@ -764,10 +751,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
p->exit_signal = SIGCHLD;
/* If it has exited notify the new parent about this child's death. */
- if (!task_ptrace(p) &&
+ if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
- do_notify_parent(p, p->exit_signal);
- if (task_detached(p)) {
+ if (do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_DEAD;
list_move_tail(&p->sibling, dead);
}
@@ -794,7 +780,7 @@ static void forget_original_parent(struct task_struct *father)
do {
t->real_parent = reaper;
if (t->parent == father) {
- BUG_ON(task_ptrace(t));
+ BUG_ON(t->ptrace);
t->parent = t->real_parent;
}
if (t->pdeath_signal)
@@ -819,8 +805,7 @@ static void forget_original_parent(struct task_struct *father)
*/
static void exit_notify(struct task_struct *tsk, int group_dead)
{
- int signal;
- void *cookie;
+ bool autoreap;
/*
* This does two things:
@@ -851,26 +836,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* we have changed execution domain as these two values started
* the same after a fork.
*/
- if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+ if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
tsk->self_exec_id != tsk->parent_exec_id))
tsk->exit_signal = SIGCHLD;
- signal = tracehook_notify_death(tsk, &cookie, group_dead);
- if (signal >= 0)
- signal = do_notify_parent(tsk, signal);
+ if (unlikely(tsk->ptrace)) {
+ int sig = thread_group_leader(tsk) &&
+ thread_group_empty(tsk) &&
+ !ptrace_reparented(tsk) ?
+ tsk->exit_signal : SIGCHLD;
+ autoreap = do_notify_parent(tsk, sig);
+ } else if (thread_group_leader(tsk)) {
+ autoreap = thread_group_empty(tsk) &&
+ do_notify_parent(tsk, tsk->exit_signal);
+ } else {
+ autoreap = true;
+ }
- tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+ tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock);
- tracehook_report_death(tsk, signal, cookie, group_dead);
-
/* If the process is dead, release it - nobody will wait for it */
- if (signal == DEATH_REAP)
+ if (autoreap)
release_task(tsk);
}
@@ -923,7 +915,7 @@ NORET_TYPE void do_exit(long code)
*/
set_fs(USER_DS);
- tracehook_report_exit(&code);
+ ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
@@ -1235,9 +1227,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
traced = ptrace_reparented(p);
/*
* It can be ptraced but not reparented, check
- * !task_detached() to filter out sub-threads.
+ * thread_group_leader() to filter out sub-threads.
*/
- if (likely(!traced) && likely(!task_detached(p))) {
+ if (likely(!traced) && thread_group_leader(p)) {
struct signal_struct *psig;
struct signal_struct *sig;
unsigned long maxrss;
@@ -1345,16 +1337,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
/* We dropped tasklist, ptracer could die and untrace */
ptrace_unlink(p);
/*
- * If this is not a detached task, notify the parent.
- * If it's still not detached after that, don't release
- * it now.
+ * If this is not a sub-thread, notify the parent.
+ * If parent wants a zombie, don't release it now.
*/
- if (!task_detached(p)) {
- do_notify_parent(p, p->exit_signal);
- if (!task_detached(p)) {
- p->exit_state = EXIT_ZOMBIE;
- p = NULL;
- }
+ if (thread_group_leader(p) &&
+ !do_notify_parent(p, p->exit_signal)) {
+ p->exit_state = EXIT_ZOMBIE;
+ p = NULL;
}
write_unlock_irq(&tasklist_lock);
}
@@ -1367,7 +1356,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
if (ptrace) {
- if (task_is_stopped_or_traced(p))
+ if (task_is_stopped_or_traced(p) &&
+ !(p->jobctl & JOBCTL_LISTENING))
return &p->exit_code;
} else {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1563,7 +1553,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* Notification and reaping will be cascaded to the real
* parent when the ptracer detaches.
*/
- if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+ if (likely(!ptrace) && unlikely(p->ptrace)) {
/* it will become visible, clear notask_error */
wo->notask_error = 0;
return 0;
@@ -1606,8 +1596,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* own children, it should create a separate process which
* takes the role of real parent.
*/
- if (likely(!ptrace) && task_ptrace(p) &&
- same_thread_group(p->parent, p->real_parent))
+ if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
return 0;
/*
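
The exit_notify() rework above replaces the old task_detached() bookkeeping with the return value of do_notify_parent(): a task self-reaps when nobody will wait for it. A boolean sketch of that decision, where parent_ignores stands in for do_notify_parent() returning true; the names are illustrative.

#include <stdio.h>
#include <stdbool.h>

/* Model of the autoreap choice in exit_notify() after this patch. */
static bool exit_autoreap(bool ptraced, bool group_leader,
                          bool group_empty, bool parent_ignores)
{
        if (ptraced)
                /* always notify the tracer; reap only if it ignores us */
                return parent_ignores;
        if (group_leader)
                /* a leader notifies only once its last sub-thread is gone */
                return group_empty && parent_ignores;
        /* plain sub-threads never become zombies */
        return true;
}

int main(void)
{
        printf("traced, ignoring parent: %d\n", exit_autoreap(true, true, true, true));
        printf("leader, waiting parent:  %d\n", exit_autoreap(false, true, true, false));
        printf("sub-thread:              %d\n", exit_autoreap(false, false, false, false));
        return 0;
}
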
diff --git a/kernel/fork.c b/kernel/fork.c
index 0276c30401a0..ca339c5c5819 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,7 +37,6 @@
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
-#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
@@ -1013,7 +1012,7 @@ static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
+ plist_head_init(&p->pi_waiters);
p->pi_blocked_on = NULL;
#endif
}
@@ -1340,7 +1339,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
if (likely(p->pid)) {
- tracehook_finish_clone(p, clone_flags, trace);
+ ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
if (thread_group_leader(p)) {
if (is_child_reaper(pid))
@@ -1481,10 +1480,22 @@ long do_fork(unsigned long clone_flags,
}
/*
- * When called from kernel_thread, don't do user tracing stuff.
+ * Determine whether and which event to report to ptracer. When
+ * called from kernel_thread or CLONE_UNTRACED is explicitly
+ * requested, no event is reported; otherwise, report if the event
+ * for the type of forking is enabled.
*/
- if (likely(user_mode(regs)))
- trace = tracehook_prepare_clone(clone_flags);
+ if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
+ if (clone_flags & CLONE_VFORK)
+ trace = PTRACE_EVENT_VFORK;
+ else if ((clone_flags & CSIGNAL) != SIGCHLD)
+ trace = PTRACE_EVENT_CLONE;
+ else
+ trace = PTRACE_EVENT_FORK;
+
+ if (likely(!ptrace_event_enabled(current, trace)))
+ trace = 0;
+ }
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
@@ -1508,26 +1519,26 @@ long do_fork(unsigned long clone_flags,
}
audit_finish_fork(p);
- tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
- * hasn't gotten to tracehook_report_clone() yet. Now we
- * clear it and set the child going.
+ * hasn't finished SIGSTOP raising yet. Now we clear it
+ * and set the child going.
*/
p->flags &= ~PF_STARTING;
wake_up_new_task(p);
- tracehook_report_clone_complete(trace, regs,
- clone_flags, nr, p);
+ /* forking complete and child started to run, tell ptracer */
+ if (unlikely(trace))
+ ptrace_event(trace, nr);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
- tracehook_report_vfork_done(p, nr);
+ ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
}
} else {
nr = PTR_ERR(p);
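
On the receiving side, do_fork() above only reports PTRACE_EVENT_FORK/VFORK/CLONE when the tracer has enabled the matching option. A minimal userspace observer using the long-standing PTRACE_SETOPTIONS interface (that part of the API predates this patch); error handling is omitted for brevity.

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
        int status;
        pid_t child = fork();

        if (child == 0) {
                /* tracee: stop so the tracer can set options, then fork */
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                fork();
                _exit(0);
        }

        waitpid(child, &status, 0);                     /* initial SIGSTOP */
        ptrace(PTRACE_SETOPTIONS, child, NULL,
               (void *)PTRACE_O_TRACEFORK);             /* enable fork events */
        ptrace(PTRACE_CONT, child, NULL, NULL);

        waitpid(child, &status, 0);                     /* the fork event trap */
        if (WIFSTOPPED(status) &&
            status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
                unsigned long grandchild;

                ptrace(PTRACE_GETEVENTMSG, child, NULL, &grandchild);
                printf("PTRACE_EVENT_FORK: new child %lu\n", grandchild);
        }

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, &status, 0);                     /* tracee exit */
        return 0;
}
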
diff --git a/kernel/futex.c b/kernel/futex.c
index fe28dc282eae..3fbc76cbb9aa 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2697,7 +2697,7 @@ static int __init futex_init(void)
futex_cmpxchg_enabled = 1;
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
- plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+ plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 31a9db711906..3a2cab407b93 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
}
/**
- * irq_gc_ack - Ack pending interrupt
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
* @d: irq_data
*/
-void irq_gc_ack(struct irq_data *d)
+void irq_gc_ack_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
@@ -115,6 +115,20 @@ void irq_gc_ack(struct irq_data *d)
}
/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+ irq_gc_lock(gc);
+ irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+ irq_gc_unlock(gc);
+}
+
+/**
* irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
* @d: irq_data
*/
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index fa27e750dbc0..a8ce45097f3d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
static void jump_label_update(struct jump_label_key *key, int enable)
{
- struct jump_entry *entry = key->entries;
-
- /* if there are no users, entry can be NULL */
- if (entry)
- __jump_label_update(key, entry, __stop___jump_table, enable);
+ struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
#ifdef CONFIG_MODULES
+ struct module *mod = __module_address((jump_label_t)key);
+
__jump_label_mod_update(key, enable);
+
+ if (mod)
+ stop = mod->jump_entries + mod->num_jump_entries;
#endif
+ /* if there are no users, entry can be NULL */
+ if (entry)
+ __jump_label_update(key, entry, stop, enable);
}
#endif
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 77981813a1e7..b30fd54eb985 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1255,19 +1255,29 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
/*
* If we have a symbol_name argument, look it up and add the offset field
* to it. This way, we can specify a relative address to a symbol.
+ * This returns an encoded error if it fails to look up the symbol or if
+ * the combination of parameters is invalid.
*/
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
kprobe_opcode_t *addr = p->addr;
+
+ if ((p->symbol_name && p->addr) ||
+ (!p->symbol_name && !p->addr))
+ goto invalid;
+
if (p->symbol_name) {
- if (addr)
- return NULL;
kprobe_lookup_name(p->symbol_name, addr);
+ if (!addr)
+ return ERR_PTR(-ENOENT);
}
- if (!addr)
- return NULL;
- return (kprobe_opcode_t *)(((char *)addr) + p->offset);
+ addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
+ if (addr)
+ return addr;
+
+invalid:
+ return ERR_PTR(-EINVAL);
}
/* Check passed kprobe is valid and return kprobe in kprobe_table. */
@@ -1311,8 +1321,8 @@ int __kprobes register_kprobe(struct kprobe *p)
kprobe_opcode_t *addr;
addr = kprobe_addr(p);
- if (!addr)
- return -EINVAL;
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
p->addr = addr;
ret = check_kprobe_rereg(p);
@@ -1335,6 +1345,8 @@ int __kprobes register_kprobe(struct kprobe *p)
*/
probed_mod = __module_text_address((unsigned long) p->addr);
if (probed_mod) {
+ /* Return -ENOENT if the checks below fail. */
+ ret = -ENOENT;
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
@@ -1351,6 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
module_put(probed_mod);
goto fail_with_jump_label;
}
+ /* ret will be updated by following code */
}
preempt_enable();
jump_label_unlock();
@@ -1399,7 +1412,7 @@ out:
fail_with_jump_label:
preempt_enable();
jump_label_unlock();
- return -EINVAL;
+ return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
@@ -1686,8 +1699,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
if (kretprobe_blacklist_size) {
addr = kprobe_addr(&rp->kp);
- if (!addr)
- return -EINVAL;
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
if (kretprobe_blacklist[i].addr == addr)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 298c9276dfdb..3956f5149e25 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2468,6 +2468,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+ if (hlock_class(hlock)->key == &__lockdep_no_validate__)
+ continue;
+
if (!mark_lock(curr, hlock, usage_bit))
return 0;
}
@@ -2478,15 +2481,10 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
/*
* Hardirqs will be enabled:
*/
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
{
struct task_struct *curr = current;
- time_hardirqs_on(CALLER_ADDR0, ip);
-
- if (unlikely(!debug_locks || current->lockdep_recursion))
- return;
-
if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
return;
@@ -2502,8 +2500,6 @@ void trace_hardirqs_on_caller(unsigned long ip)
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;
/*
@@ -2525,6 +2521,21 @@ void trace_hardirqs_on_caller(unsigned long ip)
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(hardirqs_on_events);
}
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+ time_hardirqs_on(CALLER_ADDR0, ip);
+
+ if (unlikely(!debug_locks || current->lockdep_recursion))
+ return;
+
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return;
+
+ current->lockdep_recursion = 1;
+ __trace_hardirqs_on_caller(ip);
+ current->lockdep_recursion = 0;
+}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
void trace_hardirqs_on(void)
@@ -2574,7 +2585,7 @@ void trace_softirqs_on(unsigned long ip)
{
struct task_struct *curr = current;
- if (unlikely(!debug_locks))
+ if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2585,6 +2596,7 @@ void trace_softirqs_on(unsigned long ip)
return;
}
+ current->lockdep_recursion = 1;
/*
* We'll do an OFF -> ON transition:
*/
@@ -2599,6 +2611,7 @@ void trace_softirqs_on(unsigned long ip)
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, SOFTIRQ);
+ current->lockdep_recursion = 0;
}
/*
@@ -2608,7 +2621,7 @@ void trace_softirqs_off(unsigned long ip)
{
struct task_struct *curr = current;
- if (unlikely(!debug_locks))
+ if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 6824ca7d4d0c..37f05d0f0793 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
- .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
.notifiers = &cpu_dma_lat_notifier,
.name = "cpu_dma_latency",
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
@@ -84,7 +84,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
.notifiers = &network_lat_notifier,
.name = "network_latency",
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
@@ -95,7 +95,7 @@ static struct pm_qos_object network_lat_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
- .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
+ .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
.notifiers = &network_throughput_notifier,
.name = "network_throughput",
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24b55b0..7b856b3458d2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -224,6 +224,10 @@ config PM_OPP
implementations a ready to use framework to manage OPPs.
For more information, read <file:Documentation/power/opp.txt>
-config PM_RUNTIME_CLK
+config PM_CLK
def_bool y
- depends on PM_RUNTIME && HAVE_CLK
+ depends on PM && HAVE_CLK
+
+config PM_GENERIC_DOMAINS
+ bool
+ depends on PM
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7cb..6c601f871964 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+
+ return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ace55889f702..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
- to_free_normal -= save - alloc_highmem;
+ save -= alloc_highmem;
+ if (to_free_normal > save)
+ to_free_normal -= save;
+ else
+ to_free_normal = 0;
}
memory_bm_position_reset(&copy_bm);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1c41ba215419..b6b71ad2208f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
suspend_ops = ops;
mutex_unlock(&pm_mutex);
}
+EXPORT_SYMBOL_GPL(suspend_set_ops);
bool valid_state(suspend_state_t state)
{
@@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
+EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static int suspend_test(int level)
{
@@ -126,12 +128,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
}
/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
+ * suspend_enter - enter the desired system sleep state.
+ * @state: State to enter
+ * @wakeup: Set if a wakeup event is pending, meaning suspend should not be re-entered.
*
- * This function should be called after devices have been suspended.
+ * This function should be called after devices have been suspended.
*/
-static int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
@@ -165,7 +168,8 @@ static int suspend_enter(suspend_state_t state)
error = syscore_suspend();
if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+ *wakeup = pm_wakeup_pending();
+ if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
@@ -199,6 +203,7 @@ static int suspend_enter(suspend_state_t state)
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
+ bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
@@ -220,7 +225,10 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- error = suspend_enter(state);
+ do {
+ error = suspend_enter(state, &wakeup);
+ } while (!error && !wakeup
+ && suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 7d02d33be699..42ddbc6f0de6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
- if (error)
+ if (error) {
+ free_basic_memory_bitmaps();
atomic_inc(&snapshot_device_available);
+ }
data->frozen = 0;
data->ready = 0;
data->platform_support = 0;
diff --git a/kernel/printk.c b/kernel/printk.c
index 35185392173f..37dff3429adb 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -782,7 +782,7 @@ static inline int can_use_console(unsigned int cpu)
static int console_trylock_for_printk(unsigned int cpu)
__releases(&logbuf_lock)
{
- int retval = 0;
+ int retval = 0, wake = 0;
if (console_trylock()) {
retval = 1;
@@ -795,12 +795,14 @@ static int console_trylock_for_printk(unsigned int cpu)
*/
if (!can_use_console(cpu)) {
console_locked = 0;
- up(&console_sem);
+ wake = 1;
retval = 0;
}
}
printk_cpu = UINT_MAX;
spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
return retval;
}
static const char recursion_bug_msg [] =
@@ -1242,7 +1244,7 @@ void console_unlock(void)
{
unsigned long flags;
unsigned _con_start, _log_end;
- unsigned wake_klogd = 0;
+ unsigned wake_klogd = 0, retry = 0;
if (console_suspended) {
up(&console_sem);
@@ -1251,6 +1253,7 @@ void console_unlock(void)
console_may_schedule = 0;
+again:
for ( ; ; ) {
spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
@@ -1271,8 +1274,23 @@ void console_unlock(void)
if (unlikely(exclusive_console))
exclusive_console = NULL;
+ spin_unlock(&logbuf_lock);
+
up(&console_sem);
+
+ /*
+ * Someone could have filled up the buffer again, so re-check if there's
+ * something to flush. In case we cannot trylock the console_sem again,
+ * there's a new owner and the console_unlock() from them will do the
+ * flush, no worries.
+ */
+ spin_lock(&logbuf_lock);
+ if (con_start != log_end)
+ retry = 1;
spin_unlock_irqrestore(&logbuf_lock, flags);
+ if (retry && console_trylock())
+ goto again;
+
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df115790cd9..9de3ecfd20f9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -23,8 +23,15 @@
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
+#include <linux/cn_proc.h>
+static int ptrace_trapping_sleep_fn(void *flags)
+{
+ schedule();
+ return 0;
+}
+
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
@@ -77,13 +84,20 @@ void __ptrace_unlink(struct task_struct *child)
spin_lock(&child->sighand->siglock);
/*
- * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+ * Clear all pending traps and TRAPPING. TRAPPING should be
+ * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
+ */
+ task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
+ task_clear_jobctl_trapping(child);
+
+ /*
+ * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
* @child isn't dead.
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
child->signal->group_stop_count))
- child->group_stop |= GROUP_STOP_PENDING;
+ child->jobctl |= JOBCTL_STOP_PENDING;
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
@@ -91,16 +105,30 @@ void __ptrace_unlink(struct task_struct *child)
* is in TASK_TRACED; otherwise, we might unduly disrupt
* TASK_KILLABLE sleeps.
*/
- if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
signal_wake_up(child, task_is_traced(child));
spin_unlock(&child->sighand->siglock);
}
-/*
- * Check that we have indeed attached to the thing..
+/**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+ * @ignore_state: don't check whether @child is currently %TASK_TRACED
+ *
+ * Check whether @child is being ptraced by %current and ready for further
+ * ptrace operations. If @ignore_state is %false, @child also should be in
+ * %TASK_TRACED state and on return the child is guaranteed to be traced
+ * and not executing. If @ignore_state is %true, @child can be in any
+ * state.
+ *
+ * CONTEXT:
+ * Grabs and releases tasklist_lock and @child->sighand->siglock.
+ *
+ * RETURNS:
+ * 0 on success, -ESRCH if %child is not ready.
*/
-int ptrace_check_attach(struct task_struct *child, int kill)
+int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
int ret = -ESRCH;
@@ -119,13 +147,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
*/
spin_lock_irq(&child->sighand->siglock);
WARN_ON_ONCE(task_is_stopped(child));
- if (task_is_traced(child) || kill)
+ if (ignore_state || (task_is_traced(child) &&
+ !(child->jobctl & JOBCTL_LISTENING)))
ret = 0;
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
- if (!ret && !kill)
+ if (!ret && !ignore_state)
ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
/* All systems go.. */
@@ -182,11 +211,28 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
return !err;
}
-static int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task, long request,
+ unsigned long flags)
{
- bool wait_trap = false;
+ bool seize = (request == PTRACE_SEIZE);
int retval;
+ /*
+ * SEIZE will enable new ptrace behaviors which will be implemented
+ * gradually. SEIZE_DEVEL is used to prevent applications
+ * expecting full SEIZE behaviors trapping on kernel commits which
+ * are still in the process of implementing them.
+ *
+ * Only test programs for new ptrace behaviors being implemented
+ * should set SEIZE_DEVEL. If unset, SEIZE will fail with -EIO.
+ *
+ * Once SEIZE behaviors are completely implemented, this flag and
+ * the following test will be removed.
+ */
+ retval = -EIO;
+ if (seize && !(flags & PTRACE_SEIZE_DEVEL))
+ goto out;
+
audit_ptrace(task);
retval = -EPERM;
@@ -218,16 +264,21 @@ static int ptrace_attach(struct task_struct *task)
goto unlock_tasklist;
task->ptrace = PT_PTRACED;
+ if (seize)
+ task->ptrace |= PT_SEIZED;
if (task_ns_capable(task, CAP_SYS_PTRACE))
task->ptrace |= PT_PTRACE_CAP;
__ptrace_link(task, current);
- send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+
+ /* SEIZE doesn't trap tracee on attach */
+ if (!seize)
+ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
spin_lock(&task->sighand->siglock);
/*
- * If the task is already STOPPED, set GROUP_STOP_PENDING and
+ * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
* TRAPPING, and kick it so that it transits to TRACED. TRAPPING
* will be cleared if the child completes the transition or any
* event which clears the group stop states happens. We'll wait
@@ -243,11 +294,9 @@ static int ptrace_attach(struct task_struct *task)
* The following task_is_stopped() test is safe as both transitions
* in and out of STOPPED are protected by siglock.
*/
- if (task_is_stopped(task)) {
- task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
signal_wake_up(task, 1);
- wait_trap = true;
- }
spin_unlock(&task->sighand->siglock);
@@ -257,9 +306,12 @@ unlock_tasklist:
unlock_creds:
mutex_unlock(&task->signal->cred_guard_mutex);
out:
- if (wait_trap)
- wait_event(current->signal->wait_chldexit,
- !(task->group_stop & GROUP_STOP_TRAPPING));
+ if (!retval) {
+ wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
+ ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+ proc_ptrace_connector(task, PTRACE_ATTACH);
+ }
+
return retval;
}
@@ -322,25 +374,27 @@ static int ignoring_children(struct sighand_struct *sigh)
*/
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
+ bool dead;
+
__ptrace_unlink(p);
- if (p->exit_state == EXIT_ZOMBIE) {
- if (!task_detached(p) && thread_group_empty(p)) {
- if (!same_thread_group(p->real_parent, tracer))
- do_notify_parent(p, p->exit_signal);
- else if (ignoring_children(tracer->sighand)) {
- __wake_up_parent(p, tracer);
- p->exit_signal = -1;
- }
- }
- if (task_detached(p)) {
- /* Mark it as in the process of being reaped. */
- p->exit_state = EXIT_DEAD;
- return true;
+ if (p->exit_state != EXIT_ZOMBIE)
+ return false;
+
+ dead = !thread_group_leader(p);
+
+ if (!dead && thread_group_empty(p)) {
+ if (!same_thread_group(p->real_parent, tracer))
+ dead = do_notify_parent(p, p->exit_signal);
+ else if (ignoring_children(tracer->sighand)) {
+ __wake_up_parent(p, tracer);
+ dead = true;
}
}
-
- return false;
+ /* Mark it as in the process of being reaped. */
+ if (dead)
+ p->exit_state = EXIT_DEAD;
+ return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -365,6 +419,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
}
write_unlock_irq(&tasklist_lock);
+ proc_ptrace_connector(child, PTRACE_DETACH);
if (unlikely(dead))
release_task(child);
@@ -611,10 +666,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
+ bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
- siginfo_t siginfo;
+ siginfo_t siginfo, *si;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
+ unsigned long flags;
switch (request) {
case PTRACE_PEEKTEXT:
@@ -647,6 +704,62 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setsiginfo(child, &siginfo);
break;
+ case PTRACE_INTERRUPT:
+ /*
+ * Stop tracee without any side-effect on signal or job
+ * control. At least one trap is guaranteed to happen
+ * after this request. If @child is already trapped, the
+ * current trap is not disturbed and another trap will
+ * happen after the current trap is ended with PTRACE_CONT.
+ *
+ * The actual trap might not be PTRACE_EVENT_STOP trap but
+ * the pending condition is cleared regardless.
+ */
+ if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+ break;
+
+ /*
+ * INTERRUPT doesn't disturb existing trap sans one
+ * exception. If ptracer issued LISTEN for the current
+ * STOP, this INTERRUPT should clear LISTEN and re-trap
+ * tracee into STOP.
+ */
+ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+ signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+ break;
+
+ case PTRACE_LISTEN:
+ /*
+ * Listen for events. Tracee must be in STOP. It's not
+ * resumed per-se but is not considered to be in TRACED by
+ * wait(2) or ptrace(2). If an async event (e.g. group
+ * stop state change) happens, tracee will enter STOP trap
+ * again. Alternatively, ptracer can issue INTERRUPT to
+ * finish listening and re-trap tracee into STOP.
+ */
+ if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+ break;
+
+ si = child->last_siginfo;
+ if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
+ break;
+
+ child->jobctl |= JOBCTL_LISTENING;
+
+ /*
+ * If NOTIFY is set, it means an event happened between the start
+ * of this trap and now. Trigger re-trap immediately.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+ signal_wake_up(child, true);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+ break;
+
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
@@ -761,8 +874,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out;
}
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
/*
* Some architectures need to do book-keeping after
* a ptrace attach.
@@ -772,7 +885,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out_put_task_struct;
}
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
if (ret < 0)
goto out_put_task_struct;
@@ -903,8 +1017,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out;
}
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
/*
* Some architectures need to do book-keeping after
* a ptrace attach.
@@ -914,7 +1028,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out_put_task_struct;
}
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
if (!ret)
ret = compat_arch_ptrace(child, request, addr, data);
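
A hedged userspace sketch of the new requests handled above. At this stage the constants are not in the libc headers and PTRACE_SEIZE refuses to work unless PTRACE_SEIZE_DEVEL is passed in the data argument, so the values below are copied by hand from the series and should be treated as assumptions about a still-developing interface; error handling is trimmed.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* not yet in <sys/ptrace.h>; request numbers as used by this series */
#define PTRACE_SEIZE            0x4206
#define PTRACE_INTERRUPT        0x4207
#define PTRACE_LISTEN           0x4208
#define PTRACE_SEIZE_DEVEL      0x80000000      /* required while in development */

int main(int argc, char **argv)
{
        pid_t pid;
        int status;

        if (argc < 2)
                return 1;
        pid = atoi(argv[1]);

        /* attach without trapping or signalling the tracee */
        if (ptrace(PTRACE_SEIZE, pid, NULL,
                   (void *)(unsigned long)PTRACE_SEIZE_DEVEL) < 0) {
                perror("PTRACE_SEIZE");
                return 1;
        }

        /* now ask for a trap; the tracee stops in PTRACE_EVENT_STOP */
        ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
        waitpid(pid, &status, 0);
        printf("tracee trapped, status 0x%x\n", status);

        /* let it run, but stay notified of async events such as group stop */
        ptrace(PTRACE_LISTEN, pid, NULL, NULL);

        /* INTERRUPT clears LISTEN and re-traps the tracee into STOP */
        ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
        waitpid(pid, &status, 0);

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
}
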
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7e59ffb3d0ba..ba06207b1dd3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -84,9 +84,32 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state;
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
#ifdef CONFIG_RCU_BOOST
/*
@@ -98,7 +121,6 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;
#endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1467,6 +1489,8 @@ static void rcu_process_callbacks(struct softirq_action *unused)
*/
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ return;
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd00902..8aafbb80b8b0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
+static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
struct rcu_data *rdp;
struct rcu_node *rnp;
- if (t->rcu_read_lock_nesting &&
+ if (t->rcu_read_lock_nesting > 0 &&
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
rnp->gp_tasks = &t->rcu_node_entry;
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ } else if (t->rcu_read_lock_nesting < 0 &&
+ t->rcu_read_unlock_special) {
+
+ /*
+ * Complete exit from RCU read-side critical section on
+ * behalf of preempted instance of __rcu_read_unlock().
+ */
+ rcu_read_unlock_special(t);
}
/*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block. */
- if (in_irq()) {
+ if (in_irq() || in_serving_softirq()) {
local_irq_restore(flags);
return;
}
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
if (&t->rcu_node_entry == rnp->boost_tasks)
rnp->boost_tasks = np;
+ /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+ if (t->rcu_boosted) {
+ special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 0;
+ }
#endif /* #ifdef CONFIG_RCU_BOOST */
t->rcu_blocked_node = NULL;
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
if (special & RCU_READ_UNLOCK_BOOSTED) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
rt_mutex_unlock(t->rcu_boost_mutex);
t->rcu_boost_mutex = NULL;
}
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- --t->rcu_read_lock_nesting;
- barrier(); /* decrement before load of ->rcu_read_unlock_special */
- if (t->rcu_read_lock_nesting == 0 &&
- unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
+ if (t->rcu_read_lock_nesting != 1)
+ --t->rcu_read_lock_nesting;
+ else {
+ t->rcu_read_lock_nesting = INT_MIN;
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ t->rcu_read_lock_nesting = 0;
+ }
#ifdef CONFIG_PROVE_LOCKING
- WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+ {
+ int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
rcu_preempt_qs(cpu);
return;
}
- if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+ if (t->rcu_read_lock_nesting > 0 &&
+ per_cpu(rcu_preempt_data, cpu).qs_pending)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
raw_spin_lock_irqsave(&rnp->lock, flags);
for (;;) {
- if (!sync_rcu_preempt_exp_done(rnp))
+ if (!sync_rcu_preempt_exp_done(rnp)) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
break;
+ }
if (rnp->parent == NULL) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
wake_up(&sync_rcu_preempt_exp_wq);
break;
}
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
raw_spin_lock(&rnp->lock); /* irqs already disabled */
rnp->expmask &= ~mask;
}
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
t->rcu_boost_mutex = &mtx;
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 1;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
@@ -1532,7 +1557,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
@@ -1639,7 +1664,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
@@ -1665,7 +1690,7 @@ static int __init rcu_spawn_kthreads(void)
int cpu;
struct rcu_node *rnp;
- rcu_kthreads_spawnable = 1;
+ rcu_scheduler_fully_active = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
@@ -1687,7 +1712,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
struct rcu_node *rnp = rdp->mynode;
/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_kthreads_spawnable) {
+ if (rcu_scheduler_fully_active) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1751,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}
+static int __init rcu_scheduler_really_started(void)
+{
+ rcu_scheduler_fully_active = 1;
+ return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}
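
The __rcu_read_unlock() change above parks rcu_read_lock_nesting at INT_MIN while the outermost unlock runs, so a preemption that lands in that window sees a negative count, knows an unlock is in flight, and performs the special processing itself (the new branch in rcu_preempt_note_context_switch()). A single-threaded sketch of that hand-off; the injected "preemption" point and the names are illustrative.

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

static int  nesting;            /* stands in for t->rcu_read_lock_nesting */
static bool special;            /* stands in for t->rcu_read_unlock_special */

static void unlock_special(const char *who)
{
        special = false;
        printf("special processing done by %s\n", who);
}

/* what the context-switch path does after this patch */
static void context_switch_hook(void)
{
        if (nesting < 0 && special)
                unlock_special("context switch");
}

static void read_unlock(bool preempt_in_window)
{
        if (nesting != 1) {
                --nesting;                      /* still nested: just count down */
                return;
        }

        nesting = INT_MIN;                      /* mark "outermost unlock running" */
        if (preempt_in_window)
                context_switch_hook();          /* the "preemption" lands here */
        if (special)                            /* already cleared if the hook ran */
                unlock_special("unlock itself");
        nesting = 0;
}

int main(void)
{
        nesting = 1;
        special = true;
        read_unlock(true);      /* preempted: the context switch finishes it */

        nesting = 2;
        special = true;
        read_unlock(false);     /* inner unlock */
        read_unlock(false);     /* outermost unlock finishes it */
        return 0;
}
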
diff --git a/kernel/resource.c b/kernel/resource.c
index 798e2fae2a06..3ff40178dce7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,6 +38,14 @@ struct resource iomem_resource = {
};
EXPORT_SYMBOL(iomem_resource);
+/* constraints to be met while allocating resources */
+struct resource_constraint {
+ resource_size_t min, max, align;
+ resource_size_t (*alignf)(void *, const struct resource *,
+ resource_size_t, resource_size_t);
+ void *alignf_data;
+};
+
static DEFINE_RWLOCK(resource_lock);
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
}
/*
- * Find empty slot in the resource tree given range and alignment.
+ * Find empty slot in the resource tree with the given range and
+ * alignment constraints
*/
-static int find_resource(struct resource *root, struct resource *new,
- resource_size_t size, resource_size_t min,
- resource_size_t max, resource_size_t align,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
- void *alignf_data)
+static int __find_resource(struct resource *root, struct resource *old,
+ struct resource *new,
+ resource_size_t size,
+ struct resource_constraint *constraint)
{
struct resource *this = root->child;
struct resource tmp = *new, avail, alloc;
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,
* Skip past an allocated resource that starts at 0, since the assignment
* of this->start - 1 to tmp->end below would cause an underflow.
*/
- if (this && this->start == 0) {
- tmp.start = this->end + 1;
+ if (this && this->start == root->start) {
+ tmp.start = (this == old) ? old->start : this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
- tmp.end = this->start - 1;
+ tmp.end = (this == old) ? this->end : this->start - 1;
else
tmp.end = root->end;
- resource_clip(&tmp, min, max);
+ resource_clip(&tmp, constraint->min, constraint->max);
arch_remove_reservations(&tmp);
/* Check for overflow after ALIGN() */
avail = *new;
- avail.start = ALIGN(tmp.start, align);
+ avail.start = ALIGN(tmp.start, constraint->align);
avail.end = tmp.end;
if (avail.start >= tmp.start) {
- alloc.start = alignf(alignf_data, &avail, size, align);
+ alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+ size, constraint->align);
alloc.end = alloc.start + size - 1;
if (resource_contains(&avail, &alloc)) {
new->start = alloc.start;
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,
}
if (!this)
break;
- tmp.start = this->end + 1;
+ if (this != old)
+ tmp.start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
}
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ */
+static int find_resource(struct resource *root, struct resource *new,
+ resource_size_t size,
+ struct resource_constraint *constraint)
+{
+ return __find_resource(root, NULL, new, size, constraint);
+}
+
/**
- * allocate_resource - allocate empty slot in the resource tree given range & alignment
+ * reallocate_resource - allocate a slot in the resource tree given range & alignment.
+ * The resource will be relocated if the new size cannot be accommodated at
+ * the current location.
+ *
+ * @root: root resource descriptor
+ * @old: resource descriptor desired by caller
+ * @newsize: new size of the resource descriptor
+ * @constraint: the size and alignment constraints to be met.
+ */
+int reallocate_resource(struct resource *root, struct resource *old,
+ resource_size_t newsize,
+ struct resource_constraint *constraint)
+{
+ int err=0;
+ struct resource new = *old;
+ struct resource *conflict;
+
+ write_lock(&resource_lock);
+
+ if ((err = __find_resource(root, old, &new, newsize, constraint)))
+ goto out;
+
+ if (resource_contains(&new, old)) {
+ old->start = new.start;
+ old->end = new.end;
+ goto out;
+ }
+
+ if (old->child) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (resource_contains(old, &new)) {
+ old->start = new.start;
+ old->end = new.end;
+ } else {
+ __release_resource(old);
+ *old = new;
+ conflict = __request_resource(root, old);
+ BUG_ON(conflict);
+ }
+out:
+ write_unlock(&resource_lock);
+ return err;
+}
+
+
+/**
+ * allocate_resource - allocate empty slot in the resource tree given range & alignment.
+ * The resource will be reallocated with a new size if it was already allocated
* @root: root resource descriptor
* @new: resource descriptor desired by caller
* @size: requested resource region size
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
int err;
+ struct resource_constraint constraint;
if (!alignf)
alignf = simple_align_resource;
+ constraint.min = min;
+ constraint.max = max;
+ constraint.align = align;
+ constraint.alignf = alignf;
+ constraint.alignf_data = alignf_data;
+
+ if ( new->parent ) {
+ /* resource is already allocated, try reallocating with
+ the new constraints */
+ return reallocate_resource(root, new, size, &constraint);
+ }
+
write_lock(&resource_lock);
- err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+ err = find_resource(root, new, size, &constraint);
if (err >= 0 && __request_resource(root, new))
err = -EBUSY;
write_unlock(&resource_lock);
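The constraint-based fit above (clip the candidate gap to [min, max], align the start, then verify the aligned window still holds the requested size) can be exercised in isolation. The following is a stand-alone user-space analogue, not the kernel code: struct window, ALIGN_UP() and fit_in_window() are illustrative stand-ins for struct resource, ALIGN() and __find_resource(), and align is assumed to be a power of two.

/* Minimal user-space sketch of the clip + align + fit check above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

struct window { uint64_t start, end; };		/* inclusive range */

/* Return 0 and fill *out on success, -1 if size cannot be placed. */
static int fit_in_window(struct window avail, uint64_t size,
			 uint64_t min, uint64_t max, uint64_t align,
			 struct window *out)
{
	/* clip to the caller's [min, max] constraint */
	if (avail.start < min)
		avail.start = min;
	if (avail.end > max)
		avail.end = max;

	uint64_t start = ALIGN_UP(avail.start, align);
	if (start < avail.start)		/* overflow after ALIGN_UP() */
		return -1;
	if (start > avail.end || avail.end - start + 1 < size)
		return -1;

	out->start = start;
	out->end = start + size - 1;
	return 0;
}

int main(void)
{
	struct window gap = { 0x1001, 0x4fff }, got;

	assert(fit_in_window(gap, 0x1000, 0, ~0ULL, 0x1000, &got) == 0);
	printf("placed at [%#llx, %#llx]\n",
	       (unsigned long long)got.start, (unsigned long long)got.end);
	return 0;
}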
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index ab449117aaf2..255e1662acdb 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -890,7 +890,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
- plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
+ plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502d609b..9aaf567c5da5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -124,7 +124,7 @@
static inline int rt_policy(int policy)
{
- if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
return 1;
return 0;
}
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
-#define MIN_SHARES 2
-#define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES (1UL << 1)
+#define MAX_SHARES (1UL << 18)
static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif
@@ -422,6 +422,7 @@ struct rt_rq {
*/
struct root_domain {
atomic_t refcount;
+ atomic_t rto_count;
struct rcu_head rcu;
cpumask_var_t span;
cpumask_var_t online;
@@ -431,7 +432,6 @@ struct root_domain {
* one runnable RT task.
*/
cpumask_var_t rto_mask;
- atomic_t rto_count;
struct cpupri cpupri;
};
@@ -1568,38 +1568,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
return rq->avg_load_per_task;
}
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
- unsigned long load;
- long cpu = (long)data;
-
- if (!tg->parent) {
- load = cpu_rq(cpu)->load.weight;
- } else {
- load = tg->parent->cfs_rq[cpu]->h_load;
- load *= tg->se[cpu]->load.weight;
- load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
- }
-
- tg->cfs_rq[cpu]->h_load = load;
-
- return 0;
-}
-
-static void update_h_load(long cpu)
-{
- walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
#ifdef CONFIG_PREEMPT
static void double_rq_lock(struct rq *rq1, struct rq *rq2);
@@ -2220,7 +2188,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_cpu(p) != new_cpu) {
p->se.nr_migrations++;
- perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
}
__set_task_cpu(p, new_cpu);
@@ -2497,7 +2465,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
- if (unlikely(rq->idle_stamp)) {
+ if (rq->idle_stamp) {
u64 delta = rq->clock - rq->idle_stamp;
u64 max = 2*sysctl_sched_migration_cost;
@@ -2544,13 +2512,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
}
#ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
{
struct rq *rq = this_rq();
- struct task_struct *list = xchg(&rq->wake_list, NULL);
-
- if (!list)
- return;
raw_spin_lock(&rq->lock);
@@ -2563,9 +2527,45 @@ static void sched_ttwu_pending(void)
raw_spin_unlock(&rq->lock);
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
void scheduler_ipi(void)
{
- sched_ttwu_pending();
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ /*
+ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+ * traditionally all their work was done from the interrupt return
+ * path. Now that we actually do some work, we need to make sure
+ * we do call them.
+ *
+ * Some archs already do call them, luckily irq_enter/exit nest
+ * properly.
+ *
+ * Arguably we should visit all archs and update all handlers,
+ * however a fair share of IPIs are still resched only so this would
+ * somewhat pessimize the simple resched case.
+ */
+ irq_enter();
+ sched_ttwu_do_pending(list);
+ irq_exit();
}
static void ttwu_queue_remote(struct task_struct *p, int cpu)
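The reworked scheduler_ipi() above relies on atomically swapping out the pending wake list and walking it locally. Below is a minimal user-space sketch of that pattern with C11 atomics; push_pending()/process_pending() are illustrative stand-ins for the remote queueing and the xchg(&rq->wake_list, NULL) grab, and the irq_enter()/irq_exit() bracketing has no user-space analogue here.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct pending {
	int data;
	struct pending *next;
};

static _Atomic(struct pending *) wake_list;

/* Producer side: push one node onto the lock-free list. */
static void push_pending(struct pending *p)
{
	p->next = atomic_load(&wake_list);
	while (!atomic_compare_exchange_weak(&wake_list, &p->next, p))
		;	/* p->next now holds the current head; retry */
}

/* Consumer side: grab the whole list at once, then walk it privately. */
static void process_pending(void)
{
	struct pending *list = atomic_exchange(&wake_list, NULL);

	if (!list)
		return;		/* nothing queued: cheap early exit */

	for (; list; list = list->next)
		printf("processing %d\n", list->data);
}

int main(void)
{
	struct pending a = { .data = 1 }, b = { .data = 2 };

	push_pending(&a);
	push_pending(&b);
	process_pending();
	return 0;
}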
@@ -2854,7 +2854,7 @@ void sched_fork(struct task_struct *p)
#if defined(CONFIG_SMP)
p->on_cpu = 0;
#endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
@@ -4306,11 +4306,8 @@ EXPORT_SYMBOL(schedule);
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
- bool ret = false;
-
- rcu_read_lock();
if (lock->owner != owner)
- goto fail;
+ return false;
/*
* Ensure we emit the owner->on_cpu, dereference _after_ checking
@@ -4320,11 +4317,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
*/
barrier();
- ret = owner->on_cpu;
-fail:
- rcu_read_unlock();
-
- return ret;
+ return owner->on_cpu;
}
/*
@@ -4336,21 +4329,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
if (!sched_feat(OWNER_SPIN))
return 0;
+ rcu_read_lock();
while (owner_running(lock, owner)) {
if (need_resched())
- return 0;
+ break;
arch_mutex_cpu_relax();
}
+ rcu_read_unlock();
/*
- * If the owner changed to another task there is likely
- * heavy contention, stop spinning.
+ * We break out the loop above on need_resched() and when the
+ * owner changed, which is a sign for heavy contention. Return
+ * success only when lock->owner is NULL.
*/
- if (lock->owner)
- return 0;
-
- return 1;
+ return lock->owner == NULL;
}
#endif
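A rough user-space sketch of the reworked spin loop above, assuming plain C11 atomics in place of RCU and the architecture relax hint: break out on an owner change or a pending reschedule, then report success only if the lock actually ended up free. The stub types are hypothetical, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { _Atomic bool on_cpu; };
struct lock { _Atomic(struct task *) owner; };

static _Atomic bool resched_requested;

static bool owner_running(struct lock *l, struct task *owner)
{
	if (atomic_load(&l->owner) != owner)
		return false;			/* ownership changed hands */
	return atomic_load(&owner->on_cpu);
}

/* Mirrors the rewritten loop: break on a resched request or owner change,
 * then succeed only when the lock is free now. */
static bool spin_on_owner(struct lock *l, struct task *owner)
{
	while (owner_running(l, owner)) {
		if (atomic_load(&resched_requested))
			break;
	}
	return atomic_load(&l->owner) == NULL;
}

int main(void)
{
	struct task t = { .on_cpu = false };	/* owner already went to sleep */
	struct lock l = { .owner = &t };

	printf("keep spinning? %s\n", spin_on_owner(&l, &t) ? "yes" : "no");

	atomic_store(&l.owner, (struct task *)NULL);	/* lock released */
	printf("keep spinning? %s\n", spin_on_owner(&l, &t) ? "yes" : "no");
	return 0;
}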
@@ -6557,7 +6550,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}
- if (!group->cpu_power) {
+ if (!group->sgp->power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
@@ -6581,9 +6574,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
- if (group->cpu_power != SCHED_POWER_SCALE) {
+ if (group->sgp->power != SCHED_POWER_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
- group->cpu_power);
+ group->sgp->power);
}
group = group->next;
@@ -6774,11 +6767,39 @@ static struct root_domain *alloc_rootdomain(void)
return rd;
}
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+ struct sched_group *tmp, *first;
+
+ if (!sg)
+ return;
+
+ first = sg;
+ do {
+ tmp = sg->next;
+
+ if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+ kfree(sg->sgp);
+
+ kfree(sg);
+ sg = tmp;
+ } while (sg != first);
+}
+
static void free_sched_domain(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
- if (atomic_dec_and_test(&sd->groups->ref))
+
+ /*
+	 * If it's an overlapping domain it has private groups, iterate and
+ * nuke them all.
+ */
+ if (sd->flags & SD_OVERLAP) {
+ free_sched_groups(sd->groups, 1);
+ } else if (atomic_dec_and_test(&sd->groups->ref)) {
+ kfree(sd->groups->sgp);
kfree(sd->groups);
+ }
kfree(sd);
}
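The new free_sched_groups() walks a circular singly linked list exactly once and drops the shared sched_group_power only on the last reference. A compact user-space analogue of that shape is sketched below; group/group_power are made-up stand-ins, and atomic_fetch_sub() stands in for atomic_dec_and_test() (they differ only in return-value convention).

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct group_power { atomic_int ref; int power; };

struct group {
	struct group *next;		/* circularly linked, as sd->groups is */
	struct group_power *gp;		/* possibly shared between groups */
};

/* Walk the circle once, freeing the shared payload only when its
 * refcount hits zero. */
static void free_groups(struct group *g, int free_gp)
{
	struct group *first = g, *tmp;

	if (!g)
		return;
	do {
		tmp = g->next;
		if (free_gp && atomic_fetch_sub(&g->gp->ref, 1) == 1)
			free(g->gp);
		free(g);
		g = tmp;
	} while (g != first);
}

int main(void)
{
	struct group_power *gp = malloc(sizeof(*gp));
	struct group *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	atomic_init(&gp->ref, 2);	/* both groups reference the same power */
	gp->power = 1024;
	a->gp = b->gp = gp;
	a->next = b;
	b->next = a;			/* close the circle */

	free_groups(a, 1);
	puts("freed both groups and the shared power struct once");
	return 0;
}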
@@ -6945,6 +6966,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
+ struct sched_group_power **__percpu sgp;
};
struct s_data {
@@ -6964,15 +6986,73 @@ struct sched_domain_topology_level;
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+#define SDTL_OVERLAP 0x01
+
struct sched_domain_topology_level {
sched_domain_init_f init;
sched_domain_mask_f mask;
+ int flags;
struct sd_data data;
};
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+ struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+ const struct cpumask *span = sched_domain_span(sd);
+ struct cpumask *covered = sched_domains_tmpmask;
+ struct sd_data *sdd = sd->private;
+ struct sched_domain *child;
+ int i;
+
+ cpumask_clear(covered);
+
+ for_each_cpu(i, span) {
+ struct cpumask *sg_span;
+
+ if (cpumask_test_cpu(i, covered))
+ continue;
+
+ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, cpu_to_node(i));
+
+ if (!sg)
+ goto fail;
+
+ sg_span = sched_group_cpus(sg);
+
+ child = *per_cpu_ptr(sdd->sd, i);
+ if (child->child) {
+ child = child->child;
+ cpumask_copy(sg_span, sched_domain_span(child));
+ } else
+ cpumask_set_cpu(i, sg_span);
+
+ cpumask_or(covered, covered, sg_span);
+
+ sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+ atomic_inc(&sg->sgp->ref);
+
+ if (cpumask_test_cpu(cpu, sg_span))
+ groups = sg;
+
+ if (!first)
+ first = sg;
+ if (last)
+ last->next = sg;
+ last = sg;
+ last->next = first;
+ }
+ sd->groups = groups;
+
+ return 0;
+
+fail:
+ free_sched_groups(first, 0);
+
+ return -ENOMEM;
+}
+
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7061,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
if (child)
cpu = cpumask_first(sched_domain_span(child));
- if (sg)
+ if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
+ (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+ atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+ }
return cpu;
}
/*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
*/
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
@@ -7006,6 +7086,12 @@ build_sched_groups(struct sched_domain *sd)
struct cpumask *covered;
int i;
+ get_group(cpu, sdd, &sd->groups);
+ atomic_inc(&sd->groups->ref);
+
+ if (cpu != cpumask_first(sched_domain_span(sd)))
+ return 0;
+
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
@@ -7020,7 +7106,7 @@ build_sched_groups(struct sched_domain *sd)
continue;
cpumask_clear(sched_group_cpus(sg));
- sg->cpu_power = 0;
+ sg->sgp->power = 0;
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7123,8 @@ build_sched_groups(struct sched_domain *sd)
last = sg;
}
last->next = first;
+
+ return 0;
}
/*
@@ -7051,12 +7139,17 @@ build_sched_groups(struct sched_domain *sd)
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
- WARN_ON(!sd || !sd->groups);
+ struct sched_group *sg = sd->groups;
- if (cpu != group_first_cpu(sd->groups))
- return;
+ WARN_ON(!sd || !sg);
- sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+ do {
+ sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+ sg = sg->next;
+ } while (sg != sd->groups);
+
+ if (cpu != group_first_cpu(sg))
+ return;
update_group_power(sd, cpu);
}
@@ -7177,15 +7270,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
- struct sched_group *sg = sd->groups;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (cpu == cpumask_first(sched_group_cpus(sg))) {
- WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+ if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
- }
+
+ if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+ *per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7303,7 @@ static struct sched_domain_topology_level default_topology[] = {
#endif
{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
- { sd_init_NODE, cpu_node_mask, },
+ { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
{ NULL, },
@@ -7234,9 +7327,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sdd->sg)
return -ENOMEM;
+ sdd->sgp = alloc_percpu(struct sched_group_power *);
+ if (!sdd->sgp)
+ return -ENOMEM;
+
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
+ struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7349,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
return -ENOMEM;
*per_cpu_ptr(sdd->sg, j) = sg;
+
+ sgp = kzalloc_node(sizeof(struct sched_group_power),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sgp)
+ return -ENOMEM;
+
+ *per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
@@ -7266,11 +7371,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
- kfree(*per_cpu_ptr(sdd->sd, j));
+ struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+ if (sd && (sd->flags & SD_OVERLAP))
+ free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sg, j));
+ kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
free_percpu(sdd->sg);
+ free_percpu(sdd->sgp);
}
}
@@ -7316,8 +7425,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_topology_level *tl;
sd = NULL;
- for (tl = sched_domain_topology; tl->init; tl++)
+ for (tl = sched_domain_topology; tl->init; tl++) {
sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+ if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+ sd->flags |= SD_OVERLAP;
+ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+ break;
+ }
while (sd->child)
sd = sd->child;
@@ -7329,13 +7443,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
- get_group(i, sd->private, &sd->groups);
- atomic_inc(&sd->groups->ref);
-
- if (i != cpumask_first(sched_domain_span(sd)))
- continue;
-
- build_sched_groups(sd);
+ if (sd->flags & SD_OVERLAP) {
+ if (build_overlap_sched_groups(sd, i))
+ goto error;
+ } else {
+ if (build_sched_groups(sd, i))
+ goto error;
+ }
}
}
@@ -7745,18 +7859,14 @@ int in_sched_functions(unsigned long addr)
&& addr < (unsigned long)__sched_text_end);
}
-static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
INIT_LIST_HEAD(&cfs_rq->tasks);
-#ifdef CONFIG_FAIR_GROUP_SCHED
- cfs_rq->rq = rq;
- /* allow initial update_cfs_load() to truncate */
-#ifdef CONFIG_SMP
- cfs_rq->load_stamp = 1;
-#endif
-#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+ cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
}
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
@@ -7772,27 +7882,18 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#ifdef CONFIG_SMP
rt_rq->highest_prio.next = MAX_RT_PRIO;
-#endif
-#endif
-#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init(&rt_rq->pushable_tasks);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-
-#ifdef CONFIG_RT_GROUP_SCHED
- rt_rq->rt_nr_boosted = 0;
- rt_rq->rq = rq;
-#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7801,11 +7902,17 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
- tg->cfs_rq[cpu] = cfs_rq;
- init_cfs_rq(cfs_rq, rq);
+
cfs_rq->tg = tg;
+ cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+ /* allow initial update_cfs_load() to truncate */
+ cfs_rq->load_stamp = 1;
+#endif
+ tg->cfs_rq[cpu] = cfs_rq;
tg->se[cpu] = se;
+
/* se could be NULL for root_task_group */
if (!se)
return;
@@ -7828,12 +7935,14 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
{
struct rq *rq = cpu_rq(cpu);
- tg->rt_rq[cpu] = rt_rq;
- init_rt_rq(rt_rq, rq);
+ rt_rq->highest_prio.curr = MAX_RT_PRIO;
+ rt_rq->rt_nr_boosted = 0;
+ rt_rq->rq = rq;
rt_rq->tg = tg;
- rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
+ tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
+
if (!rt_se)
return;
@@ -7915,7 +8024,7 @@ void __init sched_init(void)
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
- init_cfs_rq(&rq->cfs, rq);
+ init_cfs_rq(&rq->cfs);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = root_task_group_load;
@@ -7986,7 +8095,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init(&init_task.pi_waiters);
#endif
/*
@@ -8029,7 +8138,7 @@ void __init sched_init(void)
scheduler_running = 1;
}
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
@@ -8039,7 +8148,6 @@ static inline int preempt_count_equals(int preempt_offset)
void __might_sleep(const char *file, int line, int preempt_offset)
{
-#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
@@ -8061,7 +8169,6 @@ void __might_sleep(const char *file, int line, int preempt_offset)
if (irqs_disabled())
print_irqtrace_events(current);
dump_stack();
-#endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
@@ -8220,6 +8327,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
if (!se)
goto err_free_rq;
+ init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
}
@@ -8247,7 +8355,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
-#else /* !CONFG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline void free_fair_sched_group(struct task_group *tg)
{
}
@@ -8268,7 +8376,8 @@ static void free_rt_sched_group(struct task_group *tg)
{
int i;
- destroy_rt_bandwidth(&tg->rt_bandwidth);
+ if (tg->rt_se)
+ destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
@@ -8309,6 +8418,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
if (!rt_se)
goto err_free_rq;
+ init_rt_rq(rt_rq, cpu_rq(i));
+ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
@@ -8450,10 +8561,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
if (!tg->se[0])
return -EINVAL;
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- else if (shares > MAX_SHARES)
- shares = MAX_SHARES;
+ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
mutex_lock(&shares_mutex);
if (tg->shares == shares)
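A small stand-alone sketch of the clamp() rewrite above. scale_load() is emulated here with an assumed 10-bit fixed-point shift; the real resolution depends on the kernel configuration, so treat the constants as illustrative only.

#include <stdio.h>

/* Stand-ins for the kernel's fixed-point share scaling (assumed values). */
#define SHARES_RESOLUTION	10
#define scale_load(w)		((unsigned long)(w) << SHARES_RESOLUTION)
#define MIN_SHARES		(1UL << 1)
#define MAX_SHARES		(1UL << 18)

#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	unsigned long requests[] = { 0, 1024, 1UL << 30 };

	for (int i = 0; i < 3; i++) {
		unsigned long shares = clamp(requests[i],
					     scale_load(MIN_SHARES),
					     scale_load(MAX_SHARES));
		printf("request %lu -> shares %lu\n", requests[i], shares);
	}
	return 0;
}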
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 05577055cfca..c2f0e7248dca 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -13,6 +13,7 @@ struct autogroup {
int nice;
};
+static inline bool task_group_is_autogroup(struct task_group *tg);
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c2dc8f..bc8ee9993814 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -135,14 +135,6 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return grp->my_q;
}
-/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
- * another cpu ('this_cpu')
- */
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
- return cfs_rq->tg->cfs_rq[this_cpu];
-}
-
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (!cfs_rq->on_list) {
@@ -271,11 +263,6 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return NULL;
}
-static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
-{
- return &cpu_rq(this_cpu)->cfs;
-}
-
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}
@@ -334,11 +321,6 @@ static inline int entity_before(struct sched_entity *a,
return (s64)(a->vruntime - b->vruntime) < 0;
}
-static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- return se->vruntime - cfs_rq->min_vruntime;
-}
-
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
u64 vruntime = cfs_rq->min_vruntime;
@@ -372,7 +354,6 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct rb_node *parent = NULL;
struct sched_entity *entry;
- s64 key = entity_key(cfs_rq, se);
int leftmost = 1;
/*
@@ -385,7 +366,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* We dont care about collisions. Nodes with
* the same key stay together.
*/
- if (key < entity_key(cfs_rq, entry)) {
+ if (entity_before(se, entry)) {
link = &parent->rb_left;
} else {
link = &parent->rb_right;
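Switching the rbtree insert to entity_before() keeps the comparison wraparound-safe: the signed difference of two u64 vruntimes orders them correctly as long as they are within 2^63 of each other, without first subtracting min_vruntime. A minimal demonstration with a stub entity type:

#include <stdint.h>
#include <stdio.h>

struct sched_entity_stub { uint64_t vruntime; };

/* Same comparison the rbtree insert now uses. */
static int entity_before(const struct sched_entity_stub *a,
			 const struct sched_entity_stub *b)
{
	return (int64_t)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	/* b's vruntime has wrapped past 2^64, a's has not */
	struct sched_entity_stub a = { .vruntime = UINT64_MAX - 5 };
	struct sched_entity_stub b = { .vruntime = 10 };

	printf("a before b: %d\n", entity_before(&a, &b));	/* prints 1 */
	printf("b before a: %d\n", entity_before(&b, &a));	/* prints 0 */
	return 0;
}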
@@ -1336,7 +1317,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
}
for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ cfs_rq = cfs_rq_of(se);
update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
@@ -1370,13 +1351,16 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
*/
if (task_sleep && parent_entity(se))
set_next_buddy(parent_entity(se));
+
+ /* avoid re-evaluating load for this entity */
+ se = parent_entity(se);
break;
}
flags |= DEQUEUE_SLEEP;
}
for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ cfs_rq = cfs_rq_of(se);
update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
@@ -1481,7 +1465,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* effect of the currently running task from the load
* of the current CPU:
*/
- rcu_read_lock();
if (sync) {
tg = task_group(current);
weight = current->se.load.weight;
@@ -1517,7 +1500,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
balanced = this_eff_load <= prev_eff_load;
} else
balanced = true;
- rcu_read_unlock();
/*
* If the currently running task will sleep within
@@ -1585,7 +1567,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
}
/* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+ avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
if (local_group) {
this_load = avg_load;
@@ -1921,8 +1903,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (!sched_feat(WAKEUP_PREEMPT))
return;
- update_curr(cfs_rq);
find_matching_se(&se, &pse);
+ update_curr(cfs_rq_of(se));
BUG_ON(!pse);
if (wakeup_preempt_entity(se, pse) == 1) {
/*
@@ -2231,11 +2213,43 @@ static void update_shares(int cpu)
struct rq *rq = cpu_rq(cpu);
rcu_read_lock();
+ /*
+ * Iterates the task_group tree in a bottom up fashion, see
+ * list_add_leaf_cfs_rq() for details.
+ */
for_each_leaf_cfs_rq(rq, cfs_rq)
update_shares_cpu(cfs_rq->tg, cpu);
rcu_read_unlock();
}
+/*
+ * Compute the cpu's hierarchical load factor for each task group.
+ * This needs to be done in a top-down fashion because the load of a child
+ * group is a fraction of its parents load.
+ */
+static int tg_load_down(struct task_group *tg, void *data)
+{
+ unsigned long load;
+ long cpu = (long)data;
+
+ if (!tg->parent) {
+ load = cpu_rq(cpu)->load.weight;
+ } else {
+ load = tg->parent->cfs_rq[cpu]->h_load;
+ load *= tg->se[cpu]->load.weight;
+ load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+ }
+
+ tg->cfs_rq[cpu]->h_load = load;
+
+ return 0;
+}
+
+static void update_h_load(long cpu)
+{
+ walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
+}
+
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
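The arithmetic in the relocated tg_load_down() can be exercised on its own: a child's hierarchical load is its parent's h_load scaled by the child's share of the parent's queued weight, with +1 guarding against a zero denominator. The sketch below uses a made-up two-level tree and a simplified tg stub; the field names are illustrative.

#include <stdio.h>

struct tg_stub {
	struct tg_stub *parent;
	unsigned long weight;	/* this group's se load weight on the cpu */
	unsigned long queued;	/* load queued on this group's runqueue */
	unsigned long h_load;	/* computed hierarchical load */
};

/* Top-down: a child's h_load is its fraction of the parent's h_load. */
static void compute_h_load(struct tg_stub *tg, unsigned long root_load)
{
	if (!tg->parent)
		tg->h_load = root_load;
	else
		tg->h_load = tg->parent->h_load * tg->weight /
			     (tg->parent->queued + 1);	/* +1: avoid /0 */
}

int main(void)
{
	struct tg_stub root  = { .parent = NULL, .queued = 3072 };
	struct tg_stub child = { .parent = &root, .weight = 1024,
				 .queued = 512 };

	compute_h_load(&root, 3072);	/* rq load weight on this cpu */
	compute_h_load(&child, 0);

	printf("root h_load=%lu child h_load=%lu\n",
	       root.h_load, child.h_load);	/* child ~ a third of root */
	return 0;
}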
@@ -2243,14 +2257,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
int *all_pinned)
{
long rem_load_move = max_load_move;
- int busiest_cpu = cpu_of(busiest);
- struct task_group *tg;
+ struct cfs_rq *busiest_cfs_rq;
rcu_read_lock();
- update_h_load(busiest_cpu);
+ update_h_load(cpu_of(busiest));
- list_for_each_entry_rcu(tg, &task_groups, list) {
- struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
+ for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
unsigned long busiest_h_load = busiest_cfs_rq->h_load;
unsigned long busiest_weight = busiest_cfs_rq->load.weight;
u64 rem_load, moved_load;
@@ -2631,7 +2643,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power >>= SCHED_POWER_SHIFT;
}
- sdg->cpu_power_orig = power;
+ sdg->sgp->power_orig = power;
if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2659,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power = 1;
cpu_rq(cpu)->cpu_power = power;
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2677,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- power += group->cpu_power;
+ power += group->sgp->power;
group = group->next;
} while (group != child->groups);
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
/*
@@ -2691,7 +2703,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
/*
* If ~90% of the cpu_power is still there, we're good.
*/
- if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+ if (group->sgp->power * 32 > group->sgp->power_orig * 29)
return 1;
return 0;
@@ -2771,7 +2783,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
}
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
/*
* Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2800,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2889,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
return;
sds->total_load += sgs.group_load;
- sds->total_pwr += sg->cpu_power;
+ sds->total_pwr += sg->sgp->power;
/*
* In case the child domain prefers tasks go to siblings
@@ -2962,7 +2974,7 @@ static int check_asym_packing(struct sched_domain *sd,
if (this_cpu > busiest_cpu)
return 0;
- *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+ *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
SCHED_POWER_SCALE);
return 1;
}
@@ -2993,7 +3005,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
scaled_busy_load_per_task = sds->busiest_load_per_task
* SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->cpu_power;
+ scaled_busy_load_per_task /= sds->busiest->sgp->power;
if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3019,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
* moving them.
*/
- pwr_now += sds->busiest->cpu_power *
+ pwr_now += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->cpu_power *
+ pwr_now += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->cpu_power;
+ sds->busiest->sgp->power;
if (sds->max_load > tmp)
- pwr_move += sds->busiest->cpu_power *
+ pwr_move += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load - tmp);
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->cpu_power <
+ if (sds->max_load * sds->busiest->sgp->power <
sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->cpu_power) /
- sds->this->cpu_power;
+ tmp = (sds->max_load * sds->busiest->sgp->power) /
+ sds->this->sgp->power;
else
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->cpu_power;
- pwr_move += sds->this->cpu_power *
+ sds->this->sgp->power;
+ pwr_move += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
@@ -3074,7 +3086,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
- load_above_capacity /= sds->busiest->cpu_power;
+ load_above_capacity /= sds->busiest->sgp->power;
}
/*
@@ -3090,8 +3102,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds->busiest->cpu_power,
- (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+ *imbalance = min(max_pull * sds->busiest->sgp->power,
+ (sds->avg_load - sds->this_load) * sds->this->sgp->power)
/ SCHED_POWER_SCALE;
/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index be40f7371ee1..1e7066d76c26 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 10d018212bab..97540f0c9e47 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -185,11 +185,23 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
typedef struct task_group *rt_rq_iter_t;
-#define for_each_rt_rq(rt_rq, iter, rq) \
- for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
- (&iter->list != &task_groups) && \
- (rt_rq = iter->rt_rq[cpu_of(rq)]); \
- iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+ do {
+ tg = list_entry_rcu(tg->list.next,
+ typeof(struct task_group), list);
+ } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
+
+ if (&tg->list == &task_groups)
+ tg = NULL;
+
+ return tg;
+}
+
+#define for_each_rt_rq(rt_rq, iter, rq) \
+ for (iter = container_of(&task_groups, typeof(*iter), list); \
+ (iter = next_task_group(iter)) && \
+ (rt_rq = iter->rt_rq[cpu_of(rq)]);)
static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
@@ -1126,7 +1138,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
rt_rq = &rq->rt;
- if (unlikely(!rt_rq->rt_nr_running))
+ if (!rt_rq->rt_nr_running)
return NULL;
if (rt_rq_throttled(rt_rq))
@@ -1548,7 +1560,7 @@ skip:
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
- if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+ if (rq->rt.highest_prio.curr > prev->prio)
pull_rt_task(rq);
}
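The new for_each_rt_rq() shape, an iterator that skips entries a predicate rejects and stops once the walk returns to the list head, can be shown with a plain circular list in user space. is_hidden stands in for task_group_is_autogroup(); the macro and node type below are hypothetical.

#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int is_hidden;		/* stands in for task_group_is_autogroup() */
	int id;
};

/* Advance past hidden nodes; return NULL once we are back at the head. */
static struct node *next_visible(struct node *head, struct node *n)
{
	do {
		n = n->next;
	} while (n != head && n->is_hidden);

	return n == head ? NULL : n;
}

#define for_each_visible(pos, head) \
	for ((pos) = (head); ((pos) = next_visible((head), (pos))) != NULL; )

int main(void)
{
	struct node head = { 0 };
	struct node a = { .id = 1 }, b = { .id = 2, .is_hidden = 1 };
	struct node c = { .id = 3 };
	struct node *pos;

	/* circular list with a dummy head, like the task_groups list head */
	head.next = &a;
	a.next = &b;
	b.next = &c;
	c.next = &head;

	for_each_visible(pos, &head)
		printf("visiting %d\n", pos->id);	/* prints 1 then 3 */
	return 0;
}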
diff --git a/kernel/signal.c b/kernel/signal.c
index ff7678603328..d7f70aed1cc0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -87,7 +87,7 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
/*
* Tracers may want to know about even ignored signals.
*/
- return !tracehook_consider_ignored_signal(t, sig);
+ return !t->ptrace;
}
/*
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
static int recalc_sigpending_tsk(struct task_struct *t)
{
- if ((t->group_stop & GROUP_STOP_PENDING) ||
+ if ((t->jobctl & JOBCTL_PENDING_MASK) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,9 +150,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
void recalc_sigpending(void)
{
- if (unlikely(tracehook_force_sigpending()))
- set_thread_flag(TIF_SIGPENDING);
- else if (!recalc_sigpending_tsk(current) && !freezing(current))
+ if (!recalc_sigpending_tsk(current) && !freezing(current))
clear_thread_flag(TIF_SIGPENDING);
}
@@ -224,47 +222,93 @@ static inline void print_dropped_signal(int sig)
}
/**
- * task_clear_group_stop_trapping - clear group stop trapping bit
+ * task_set_jobctl_pending - set jobctl pending bits
* @task: target task
+ * @mask: pending bits to set
*
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
- * and wake up the ptracer. Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
+ * Set @mask in @task->jobctl. @mask must be subset of
+ * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+ * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
+ * cleared. If @task is already being killed or exiting, this function
+ * becomes a no-op.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if @mask is set, %false if made a no-op because @task was dying.
+ */
+bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+ BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+ JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+ BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+
+ if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+ return false;
+
+ if (mask & JOBCTL_STOP_SIGMASK)
+ task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+
+ task->jobctl |= mask;
+ return true;
+}
+
+/**
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
+ * @task: target task
+ *
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer. Note that we don't need any further
+ * locking. @task->siglock guarantees that @task->parent points to the
+ * ptracer.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
-static void task_clear_group_stop_trapping(struct task_struct *task)
+void task_clear_jobctl_trapping(struct task_struct *task)
{
- if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
- task->group_stop &= ~GROUP_STOP_TRAPPING;
- __wake_up_sync_key(&task->parent->signal->wait_chldexit,
- TASK_UNINTERRUPTIBLE, 1, task);
+ if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+ task->jobctl &= ~JOBCTL_TRAPPING;
+ wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
}
}
/**
- * task_clear_group_stop_pending - clear pending group stop
+ * task_clear_jobctl_pending - clear jobctl pending bits
* @task: target task
+ * @mask: pending bits to clear
*
- * Clear group stop states for @task.
+ * Clear @mask from @task->jobctl. @mask must be subset of
+ * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
+ * STOP bits are cleared together.
+ *
+ * If clearing of @mask leaves no stop or trap pending, this function calls
+ * task_clear_jobctl_trapping().
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
-void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
- task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
- GROUP_STOP_DEQUEUED);
+ BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+
+ if (mask & JOBCTL_STOP_PENDING)
+ mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+
+ task->jobctl &= ~mask;
+
+ if (!(task->jobctl & JOBCTL_PENDING_MASK))
+ task_clear_jobctl_trapping(task);
}
/**
* task_participate_group_stop - participate in a group stop
* @task: task participating in a group stop
*
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set. If the consumption completes the group
+ * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
* stop, the appropriate %SIGNAL_* flags are set.
*
* CONTEXT:
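A user-space sketch of the set/clear helpers' invariants above, with assert() in place of BUG_ON() and made-up bit values rather than the kernel's JOBCTL_* constants; the STOP_DEQUEUED handling is omitted for brevity.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit layout only - not the kernel's JOBCTL_* values. */
#define STOP_SIGMASK	0x00ffU		/* signr of the pending stop */
#define STOP_PENDING	0x0100U
#define STOP_CONSUME	0x0200U
#define TRAP_STOP	0x0400U
#define TRAPPING	0x0800U
#define PENDING_MASK	(STOP_PENDING | TRAP_STOP)

static unsigned int jobctl;

static bool set_jobctl_pending(unsigned int mask, bool dying)
{
	assert(!(mask & ~(PENDING_MASK | STOP_CONSUME |
			  STOP_SIGMASK | TRAPPING)));
	if (dying)
		return false;			/* becomes a no-op */
	if (mask & STOP_SIGMASK)
		jobctl &= ~STOP_SIGMASK;	/* replace the stop signo */
	jobctl |= mask;
	return true;
}

static void clear_jobctl_pending(unsigned int mask)
{
	assert(!(mask & ~PENDING_MASK));
	if (mask & STOP_PENDING)
		mask |= STOP_CONSUME;		/* STOP bits travel together */
	jobctl &= ~mask;
	if (!(jobctl & PENDING_MASK))
		jobctl &= ~TRAPPING;		/* nothing left to trap for */
}

int main(void)
{
	set_jobctl_pending(19 | STOP_PENDING | STOP_CONSUME, false);
	clear_jobctl_pending(STOP_PENDING);
	printf("jobctl=%#x\n", jobctl);		/* only the signo remains */
	return 0;
}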
@@ -277,11 +321,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
static bool task_participate_group_stop(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
- bool consume = task->group_stop & GROUP_STOP_CONSUME;
+ bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
- WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+ WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
- task_clear_group_stop_pending(task);
+ task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
if (!consume)
return false;
@@ -449,7 +493,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
return 0;
- return !tracehook_consider_fatal_signal(tsk, sig);
+ /* if ptraced, let the tracer determine */
+ return !tsk->ptrace;
}
/*
@@ -604,7 +649,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
- current->group_stop |= GROUP_STOP_DEQUEUED;
+ current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
/*
@@ -773,6 +818,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
return security_task_kill(t, info, sig, 0);
}
+/**
+ * ptrace_trap_notify - schedule trap to notify ptracer
+ * @t: tracee wanting to notify tracer
+ *
+ * This function schedules sticky ptrace trap which is cleared on the next
+ * TRAP_STOP to notify ptracer of an event. @t must have been seized by
+ * ptracer.
+ *
+ * If @t is running, STOP trap will be taken. If trapped for STOP and
+ * ptracer is listening for events, tracee is woken up so that it can
+ * re-trap for the new event. If trapped otherwise, STOP trap will be
+ * eventually taken without returning to userland after the existing traps
+ * are finished by PTRACE_CONT.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static void ptrace_trap_notify(struct task_struct *t)
+{
+ WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+ assert_spin_locked(&t->sighand->siglock);
+
+ task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+ signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+}
+
/*
* Handle magic process-wide effects of stop/continue signals. Unlike
* the signal actions, these happen immediately at signal-generation
@@ -809,9 +880,12 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
t = p;
do {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- wake_up_state(t, __TASK_STOPPED);
+ if (likely(!(t->ptrace & PT_SEIZED)))
+ wake_up_state(t, __TASK_STOPPED);
+ else
+ ptrace_trap_notify(t);
} while_each_thread(p, t);
/*
@@ -908,8 +982,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
if (sig_fatal(p, sig) &&
!(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
- (sig == SIGKILL ||
- !tracehook_consider_fatal_signal(t, sig))) {
+ (sig == SIGKILL || !t->ptrace)) {
/*
* This signal will be fatal to the whole group.
*/
@@ -925,7 +998,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
signal->group_stop_count = 0;
t = p;
do {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
} while_each_thread(p, t);
@@ -1160,7 +1233,7 @@ int zap_other_threads(struct task_struct *p)
p->signal->group_stop_count = 0;
while_each_thread(p, t) {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
count++;
/* Don't bother with already dead threads */
@@ -1178,18 +1251,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
{
struct sighand_struct *sighand;
- rcu_read_lock();
for (;;) {
+ local_irq_save(*flags);
+ rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL))
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+ local_irq_restore(*flags);
break;
+ }
- spin_lock_irqsave(&sighand->siglock, *flags);
- if (likely(sighand == tsk->sighand))
+ spin_lock(&sighand->siglock);
+ if (likely(sighand == tsk->sighand)) {
+ rcu_read_unlock();
break;
- spin_unlock_irqrestore(&sighand->siglock, *flags);
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+ local_irq_restore(*flags);
}
- rcu_read_unlock();
return sighand;
}
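The reshaped __lock_task_sighand() loop is a lock-then-revalidate pattern: lock whatever the racy pointer currently names, then confirm the pointer still points there, otherwise unlock and retry. A pthread sketch follows (build with -pthread); RCU and IRQ disabling have no direct user-space analogue here, so an atomic pointer and a mutex stand in.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct sighand_stub {
	pthread_mutex_t siglock;
};

static struct sighand_stub handler = { PTHREAD_MUTEX_INITIALIZER };
static _Atomic(struct sighand_stub *) task_sighand = &handler;

/* Lock whatever ->sighand points at, then confirm the pointer did not
 * change while we were acquiring the lock; otherwise unlock and retry. */
static struct sighand_stub *lock_task_sighand(void)
{
	struct sighand_stub *sighand;

	for (;;) {
		sighand = atomic_load(&task_sighand);
		if (!sighand)
			return NULL;		/* task already released it */

		pthread_mutex_lock(&sighand->siglock);
		if (sighand == atomic_load(&task_sighand))
			return sighand;		/* still valid: keep the lock */

		pthread_mutex_unlock(&sighand->siglock);
	}
}

int main(void)
{
	struct sighand_stub *sh = lock_task_sighand();

	if (sh) {
		puts("sighand locked and validated");
		pthread_mutex_unlock(&sh->siglock);
	}
	return 0;
}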
@@ -1504,22 +1584,22 @@ ret:
* Let a parent know about the death of a child.
* For a stopped/continued status change, use do_notify_parent_cldstop instead.
*
- * Returns -1 if our parent ignored us and so we've switched to
- * self-reaping, or else @sig.
+ * Returns true if our parent ignored us and so we've switched to
+ * self-reaping.
*/
-int do_notify_parent(struct task_struct *tsk, int sig)
+bool do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
unsigned long flags;
struct sighand_struct *psig;
- int ret = sig;
+ bool autoreap = false;
BUG_ON(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
BUG_ON(task_is_stopped_or_traced(tsk));
- BUG_ON(!task_ptrace(tsk) &&
+ BUG_ON(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
info.si_signo = sig;
@@ -1558,7 +1638,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
- if (!task_ptrace(tsk) && sig == SIGCHLD &&
+ if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
@@ -1576,16 +1656,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
* is implementation-defined: we do (if you don't want
* it, just use SIG_IGN instead).
*/
- ret = tsk->exit_signal = -1;
+ autoreap = true;
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
- sig = -1;
+ sig = 0;
}
- if (valid_signal(sig) && sig > 0)
+ if (valid_signal(sig) && sig)
__group_send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
- return ret;
+ return autoreap;
}
/**
@@ -1658,7 +1738,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
static inline int may_ptrace_stop(void)
{
- if (!likely(task_ptrace(current)))
+ if (!likely(current->ptrace))
return 0;
/*
* Are we in the middle of do_coredump?
@@ -1687,15 +1767,6 @@ static int sigkill_pending(struct task_struct *tsk)
}
/*
- * Test whether the target task of the usual cldstop notification - the
- * real_parent of @child - is in the same group as the ptracer.
- */
-static bool real_parent_is_ptracer(struct task_struct *child)
-{
- return same_thread_group(child->parent, child->real_parent);
-}
-
-/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1732,31 +1803,34 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
}
/*
- * If @why is CLD_STOPPED, we're trapping to participate in a group
- * stop. Do the bookkeeping. Note that if SIGCONT was delievered
- * while siglock was released for the arch hook, PENDING could be
- * clear now. We act as if SIGCONT is received after TASK_TRACED
- * is entered - ignore it.
+ * We're committing to trapping. TRACED should be visible before
+ * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+ * Also, transition to TRACED and updates to ->jobctl should be
+ * atomic with respect to siglock and should be done after the arch
+ * hook as siglock is released and regrabbed across it.
*/
- if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
- gstop_done = task_participate_group_stop(current);
+ set_current_state(TASK_TRACED);
current->last_siginfo = info;
current->exit_code = exit_code;
/*
- * TRACED should be visible before TRAPPING is cleared; otherwise,
- * the tracer might fail do_wait().
+ * If @why is CLD_STOPPED, we're trapping to participate in a group
+	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
+ * across siglock relocks since INTERRUPT was scheduled, PENDING
+ * could be clear now. We act as if SIGCONT is received after
+ * TASK_TRACED is entered - ignore it.
*/
- set_current_state(TASK_TRACED);
+ if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+ gstop_done = task_participate_group_stop(current);
- /*
- * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
- * transition to TASK_TRACED should be atomic with respect to
- * siglock. This hsould be done after the arch hook as siglock is
- * released and regrabbed across it.
- */
- task_clear_group_stop_trapping(current);
+ /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+ task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+ if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+ task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+
+ /* entering a trap, clear TRAPPING */
+ task_clear_jobctl_trapping(current);
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
@@ -1772,7 +1846,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
* separately unless they're gonna be duplicates.
*/
do_notify_parent_cldstop(current, true, why);
- if (gstop_done && !real_parent_is_ptracer(current))
+ if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
/*
@@ -1792,9 +1866,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
*
* If @gstop_done, the ptracer went away between group stop
* completion and here. During detach, it would have set
- * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
- * in do_signal_stop() on return, so notifying the real
- * parent of the group stop completion is enough.
+ * JOBCTL_STOP_PENDING on us and we'll re-enter
+ * TASK_STOPPED in do_signal_stop() on return, so notifying
+ * the real parent of the group stop completion is enough.
*/
if (gstop_done)
do_notify_parent_cldstop(current, false, why);
@@ -1820,6 +1894,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
spin_lock_irq(&current->sighand->siglock);
current->last_siginfo = NULL;
+ /* LISTENING can be set only during STOP traps, clear it */
+ current->jobctl &= ~JOBCTL_LISTENING;
+
/*
* Queued signals ignored us while we were stopped for tracing.
* So check for any that we should take before resuming user mode.
@@ -1828,44 +1905,66 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
recalc_sigpending_tsk(current);
}
-void ptrace_notify(int exit_code)
+static void ptrace_do_notify(int signr, int exit_code, int why)
{
siginfo_t info;
- BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
memset(&info, 0, sizeof info);
- info.si_signo = SIGTRAP;
+ info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
info.si_uid = current_uid();
/* Let the debugger run. */
+ ptrace_stop(exit_code, why, 1, &info);
+}
+
+void ptrace_notify(int exit_code)
+{
+ BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+
spin_lock_irq(&current->sighand->siglock);
- ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+ ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
spin_unlock_irq(&current->sighand->siglock);
}
-/*
- * This performs the stopping for SIGSTOP and other stop signals.
- * We have to stop all threads in the thread group.
- * Returns non-zero if we've actually stopped and released the siglock.
- * Returns zero if we didn't stop and still hold the siglock.
+/**
+ * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+ * @signr: signr causing group stop if initiating
+ *
+ * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+ * and participate in it. If already set, participate in the existing
+ * group stop. If participated in a group stop (and thus slept), %true is
+ * returned with siglock released.
+ *
+ * If ptraced, this function doesn't handle stop itself. Instead,
+ * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+ * untouched. The caller must ensure that INTERRUPT trap handling takes
+ * place afterwards.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which is released
+ * on %true return.
+ *
+ * RETURNS:
+ * %false if group stop is already cancelled or ptrace trap is scheduled.
+ * %true if participated in group stop.
*/
-static int do_signal_stop(int signr)
+static bool do_signal_stop(int signr)
+ __releases(&current->sighand->siglock)
{
struct signal_struct *sig = current->signal;
- if (!(current->group_stop & GROUP_STOP_PENDING)) {
- unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+ if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+ unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
struct task_struct *t;
- /* signr will be recorded in task->group_stop for retries */
- WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+ /* signr will be recorded in task->jobctl for retries */
+ WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
- if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+ if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
unlikely(signal_group_exit(sig)))
- return 0;
+ return false;
/*
* There is no group stop already in progress. We must
* initiate one now.
@@ -1888,28 +1987,32 @@ static int do_signal_stop(int signr)
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
else
- WARN_ON_ONCE(!task_ptrace(current));
+ WARN_ON_ONCE(!current->ptrace);
+
+ sig->group_stop_count = 0;
+
+ if (task_set_jobctl_pending(current, signr | gstop))
+ sig->group_stop_count++;
- current->group_stop &= ~GROUP_STOP_SIGMASK;
- current->group_stop |= signr | gstop;
- sig->group_stop_count = 1;
for (t = next_thread(current); t != current;
t = next_thread(t)) {
- t->group_stop &= ~GROUP_STOP_SIGMASK;
/*
* Setting state to TASK_STOPPED for a group
* stop is always done with the siglock held,
* so this check has no races.
*/
- if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
- t->group_stop |= signr | gstop;
+ if (!task_is_stopped(t) &&
+ task_set_jobctl_pending(t, signr | gstop)) {
sig->group_stop_count++;
- signal_wake_up(t, 0);
+ if (likely(!(t->ptrace & PT_SEIZED)))
+ signal_wake_up(t, 0);
+ else
+ ptrace_trap_notify(t);
}
}
}
-retry:
- if (likely(!task_ptrace(current))) {
+
+ if (likely(!current->ptrace)) {
int notify = 0;
/*
@@ -1940,43 +2043,65 @@ retry:
/* Now we don't run again until woken by SIGCONT or SIGKILL */
schedule();
-
- spin_lock_irq(&current->sighand->siglock);
+ return true;
} else {
- ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
- CLD_STOPPED, 0, NULL);
- current->exit_code = 0;
+ /*
+ * While ptraced, group stop is handled by STOP trap.
+ * Schedule it and let the caller deal with it.
+ */
+ task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+ return false;
}
+}
- /*
- * GROUP_STOP_PENDING could be set if another group stop has
- * started since being woken up or ptrace wants us to transit
- * between TASK_STOPPED and TRACED. Retry group stop.
- */
- if (current->group_stop & GROUP_STOP_PENDING) {
- WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
- goto retry;
+/**
+ * do_jobctl_trap - take care of ptrace jobctl traps
+ *
+ * When PT_SEIZED, it's used for both group stop and explicit
+ * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
+ * accompanying siginfo. If stopped, lower eight bits of exit_code contain
+ * the stop signal; otherwise, %SIGTRAP.
+ *
+ * When !PT_SEIZED, it's used only for group stop trap with stop signal
+ * number as exit_code and no siginfo.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which may be
+ * released and re-acquired before returning with intervening sleep.
+ */
+static void do_jobctl_trap(void)
+{
+ struct signal_struct *signal = current->signal;
+ int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+ if (current->ptrace & PT_SEIZED) {
+ if (!signal->group_stop_count &&
+ !(signal->flags & SIGNAL_STOP_STOPPED))
+ signr = SIGTRAP;
+ WARN_ON_ONCE(!signr);
+ ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+ CLD_STOPPED);
+ } else {
+ WARN_ON_ONCE(!signr);
+ ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+ current->exit_code = 0;
}
-
- /* PTRACE_ATTACH might have raced with task killing, clear trapping */
- task_clear_group_stop_trapping(current);
-
- spin_unlock_irq(&current->sighand->siglock);
-
- tracehook_finish_jctl();
-
- return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
struct pt_regs *regs, void *cookie)
{
- if (!task_ptrace(current))
- return signr;
-
ptrace_signal_deliver(regs, cookie);
-
- /* Let the debugger run. */
+ /*
+ * We do not check sig_kernel_stop(signr) but set this marker
+ * unconditionally because we do not know whether debugger will
+ * change signr. This flag has no meaning unless we are going
+ * to stop after return from ptrace_stop(). In this case it will
+ * be checked in do_signal_stop(), we should only stop if it was
+ * not cleared by SIGCONT while we were sleeping. See also the
+ * comment in dequeue_signal().
+ */
+ current->jobctl |= JOBCTL_STOP_DEQUEUED;
ptrace_stop(signr, CLD_TRAPPED, 0, info);
/* We're back. Did the debugger cancel the sig? */
@@ -2032,7 +2157,6 @@ relock:
* the CLD_ si_code into SIGNAL_CLD_MASK bits.
*/
if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
- struct task_struct *leader;
int why;
if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2053,13 +2177,11 @@ relock:
* a duplicate.
*/
read_lock(&tasklist_lock);
-
do_notify_parent_cldstop(current, false, why);
- leader = current->group_leader;
- if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
- do_notify_parent_cldstop(leader, true, why);
-
+ if (ptrace_reparented(current->group_leader))
+ do_notify_parent_cldstop(current->group_leader,
+ true, why);
read_unlock(&tasklist_lock);
goto relock;
@@ -2067,37 +2189,31 @@ relock:
for (;;) {
struct k_sigaction *ka;
- /*
- * Tracing can induce an artificial signal and choose sigaction.
- * The return value in @signr determines the default action,
- * but @info->si_signo is the signal number we will report.
- */
- signr = tracehook_get_signal(current, regs, info, return_ka);
- if (unlikely(signr < 0))
+
+ if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+ do_signal_stop(0))
goto relock;
- if (unlikely(signr != 0))
- ka = return_ka;
- else {
- if (unlikely(current->group_stop &
- GROUP_STOP_PENDING) && do_signal_stop(0))
- goto relock;
- signr = dequeue_signal(current, &current->blocked,
- info);
+ if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+ do_jobctl_trap();
+ spin_unlock_irq(&sighand->siglock);
+ goto relock;
+ }
- if (!signr)
- break; /* will return 0 */
+ signr = dequeue_signal(current, &current->blocked, info);
- if (signr != SIGKILL) {
- signr = ptrace_signal(signr, info,
- regs, cookie);
- if (!signr)
- continue;
- }
+ if (!signr)
+ break; /* will return 0 */
- ka = &sighand->action[signr-1];
+ if (unlikely(current->ptrace) && signr != SIGKILL) {
+ signr = ptrace_signal(signr, info,
+ regs, cookie);
+ if (!signr)
+ continue;
}
+ ka = &sighand->action[signr-1];
+
/* Trace actually delivered signals. */
trace_signal_deliver(signr, info, ka);
@@ -2253,7 +2369,7 @@ void exit_signals(struct task_struct *tsk)
signotset(&unblocked);
retarget_shared_pending(tsk, &unblocked);
- if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+ if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
task_participate_group_stop(tsk))
group_stop = CLD_STOPPED;
out:
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 40cf63ddd4b3..fca82c32042b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
{
if (!force_irqthreads)
__do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#else
static inline void invoke_softirq(void)
{
if (!force_irqthreads)
do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#endif
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index eb212f8f8bc8..d20c6983aad9 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
EXPORT_SYMBOL_GPL(print_stack_trace);
/*
- * Architectures that do not implement save_stack_trace_tsk get this
- * weak alias and a once-per-bootup warning (whenever this facility
- * is utilized - for example by procfs):
+ * Architectures that do not implement save_stack_trace_tsk or
+ * save_stack_trace_regs get this weak alias and a once-per-bootup warning
+ * (whenever this facility is utilized - for example by procfs):
*/
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}
+
+__weak void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
+}
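
For context, a minimal sketch (not part of this patch; the buffer size and helper name are illustrative) of how a caller uses the new save_stack_trace_regs() hook together with print_stack_trace():

#include <linux/stacktrace.h>
#include <linux/ptrace.h>

#define EX_MAX_ENTRIES	64		/* illustrative trace depth */

static unsigned long ex_entries[EX_MAX_ENTRIES];

static void ex_dump_stack_from_regs(struct pt_regs *regs)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= EX_MAX_ENTRIES,
		.entries	= ex_entries,
		.skip		= 0,
	};

	/*
	 * Walk the stack starting from the saved registers; on an
	 * architecture without an implementation this falls back to the
	 * weak stub above and its once-per-boot warning.
	 */
	save_stack_trace_regs(regs, &trace);
	print_stack_trace(&trace, 0);
}
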
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f175d98bd355..11d65b531e50 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1590,16 +1590,11 @@ void sysctl_head_get(struct ctl_table_header *head)
spin_unlock(&sysctl_lock);
}
-static void free_head(struct rcu_head *rcu)
-{
- kfree(container_of(rcu, struct ctl_table_header, rcu));
-}
-
void sysctl_head_put(struct ctl_table_header *head)
{
spin_lock(&sysctl_lock);
if (!--head->count)
- call_rcu(&head->rcu, free_head);
+ kfree_rcu(head, rcu);
spin_unlock(&sysctl_lock);
}
@@ -1971,10 +1966,10 @@ void unregister_sysctl_table(struct ctl_table_header * header)
start_unregistering(header);
if (!--header->parent->count) {
WARN_ON(1);
- call_rcu(&header->parent->rcu, free_head);
+ kfree_rcu(header->parent, rcu);
}
if (!--header->count)
- call_rcu(&header->rcu, free_head);
+ kfree_rcu(header, rcu);
spin_unlock(&sysctl_lock);
}
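
A minimal sketch (structure and function names are illustrative, not from this patch) of the kfree_rcu() pattern the conversion above relies on: the rcu_head is embedded in the structure being freed, and kfree_rcu() takes the member name, which removes the need for a dedicated call_rcu() callback such as the deleted free_head():

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct ex_node {			/* hypothetical structure */
	int value;
	struct rcu_head rcu;		/* reused as the RCU callback slot */
};

static void ex_node_put(struct ex_node *node)
{
	/*
	 * Equivalent to call_rcu(&node->rcu, <callback that kfree()s the
	 * container>), but without having to write the callback by hand.
	 */
	kfree_rcu(node, rcu);
}
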
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 9ffea360a778..fc0f22005417 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -285,16 +285,18 @@ ret:
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
struct listener_list *listeners;
- struct listener *s, *tmp;
+ struct listener *s, *tmp, *s2;
unsigned int cpu;
if (!cpumask_subset(mask, cpu_possible_mask))
return -EINVAL;
+ s = NULL;
if (isadd == REGISTER) {
for_each_cpu(cpu, mask) {
- s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
- cpu_to_node(cpu));
+ if (!s)
+ s = kmalloc_node(sizeof(struct listener),
+ GFP_KERNEL, cpu_to_node(cpu));
if (!s)
goto cleanup;
s->pid = pid;
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
listeners = &per_cpu(listener_array, cpu);
down_write(&listeners->sem);
+ list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
+ if (s2->pid == pid)
+ goto next_cpu;
+ }
list_add(&s->list, &listeners->list);
+ s = NULL;
+next_cpu:
up_write(&listeners->sem);
}
+ kfree(s);
return 0;
}
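
The registration loop above now allocates at most one listener ahead of time and only consumes it when the pid is not yet registered for that CPU. A generic sketch of that allocate-once, reuse-on-skip idiom (all names are hypothetical, and the lookup/insert helpers are stand-in stubs):

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/types.h>

struct ex_item {
	int pid;
};

/* stubs standing in for the real per-cpu lookup and insertion */
static bool ex_already_registered(unsigned int cpu, int pid) { return false; }
static void ex_insert(unsigned int cpu, struct ex_item *item) { }

static int ex_register(int pid, const struct cpumask *mask)
{
	struct ex_item *item = NULL;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (!item)
			item = kmalloc(sizeof(*item), GFP_KERNEL);
		if (!item)
			return -ENOMEM;
		if (ex_already_registered(cpu, pid))
			continue;		/* keep item for the next cpu */
		item->pid = pid;
		ex_insert(cpu, item);
		item = NULL;			/* consumed, allocate a fresh one */
	}
	kfree(item);		/* kfree(NULL) is a no-op, leftovers are fine */
	return 0;
}
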
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2d966244ea60..59f369f98a04 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -42,15 +42,75 @@ static struct alarm_base {
clockid_t base_clockid;
} alarm_bases[ALARM_NUMTYPE];
+/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
+static ktime_t freezer_delta;
+static DEFINE_SPINLOCK(freezer_delta_lock);
+
#ifdef CONFIG_RTC_CLASS
/* rtc timer and device for setting alarm wakeups at suspend */
static struct rtc_timer rtctimer;
static struct rtc_device *rtcdev;
-#endif
+static DEFINE_SPINLOCK(rtcdev_lock);
-/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
-static ktime_t freezer_delta;
-static DEFINE_SPINLOCK(freezer_delta_lock);
+/**
+ * has_wakealarm - check rtc device has wakealarm ability
+ * @dev: current device
+ * @name_ptr: name to be returned
+ *
+ * This helper function checks to see if the rtc device can wake
+ * from suspend.
+ */
+static int has_wakealarm(struct device *dev, void *name_ptr)
+{
+ struct rtc_device *candidate = to_rtc_device(dev);
+
+ if (!candidate->ops->set_alarm)
+ return 0;
+ if (!device_may_wakeup(candidate->dev.parent))
+ return 0;
+
+ *(const char **)name_ptr = dev_name(dev);
+ return 1;
+}
+
+/**
+ * alarmtimer_get_rtcdev - Return selected rtcdevice
+ *
+ * This function returns the rtc device to use for wakealarms.
+ * If one has not already been chosen, it checks to see if a
+ * functional rtc device is available.
+ */
+static struct rtc_device *alarmtimer_get_rtcdev(void)
+{
+ struct device *dev;
+ char *str;
+ unsigned long flags;
+ struct rtc_device *ret;
+
+ spin_lock_irqsave(&rtcdev_lock, flags);
+ if (!rtcdev) {
+ /* Find an rtc device and init the rtc_timer */
+ dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
+ /* If we have a device then str is valid. See has_wakealarm() */
+ if (dev) {
+ rtcdev = rtc_class_open(str);
+ /*
+ * Drop the reference we got in class_find_device,
+ * rtc_open takes its own.
+ */
+ put_device(dev);
+ rtc_timer_init(&rtctimer, NULL, NULL);
+ }
+ }
+ ret = rtcdev;
+ spin_unlock_irqrestore(&rtcdev_lock, flags);
+
+ return ret;
+}
+#else
+#define alarmtimer_get_rtcdev() (0)
+#define rtcdev (0)
+#endif
/**
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev)
struct rtc_time tm;
ktime_t min, now;
unsigned long flags;
+ struct rtc_device *rtc;
int i;
spin_lock_irqsave(&freezer_delta_lock, flags);
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev)
freezer_delta = ktime_set(0, 0);
spin_unlock_irqrestore(&freezer_delta_lock, flags);
+ rtc = rtcdev;
/* If we have no rtcdev, just return */
- if (!rtcdev)
+ if (!rtc)
return 0;
/* Find the soonest timer to expire*/
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev)
WARN_ON(min.tv64 < NSEC_PER_SEC);
/* Setup an rtc timer to fire that far in the future */
- rtc_timer_cancel(rtcdev, &rtctimer);
- rtc_read_time(rtcdev, &tm);
+ rtc_timer_cancel(rtc, &rtctimer);
+ rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
now = ktime_add(now, min);
- rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0));
+ rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
return 0;
}
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
return hrtimer_get_res(baseid, tp);
}
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
{
struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
*tp = ktime_to_timespec(base->gettime());
return 0;
}
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer)
enum alarmtimer_type type;
struct alarm_base *base;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr,
*/
static int alarm_timer_del(struct k_itimer *timr)
{
+ if (!rtcdev)
+ return -ENOTSUPP;
+
alarm_cancel(&timr->it.alarmtimer);
return 0;
}
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting,
struct itimerspec *old_setting)
{
+ if (!rtcdev)
+ return -ENOTSUPP;
+
/* Save old values */
old_setting->it_interval =
ktime_to_timespec(timr->it.alarmtimer.period);
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
int ret = 0;
struct restart_block *restart;
+ if (!alarmtimer_get_rtcdev())
+ return -ENOTSUPP;
+
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void)
}
device_initcall(alarmtimer_init);
-#ifdef CONFIG_RTC_CLASS
-/**
- * has_wakealarm - check rtc device has wakealarm ability
- * @dev: current device
- * @name_ptr: name to be returned
- *
- * This helper function checks to see if the rtc device can wake
- * from suspend.
- */
-static int __init has_wakealarm(struct device *dev, void *name_ptr)
-{
- struct rtc_device *candidate = to_rtc_device(dev);
-
- if (!candidate->ops->set_alarm)
- return 0;
- if (!device_may_wakeup(candidate->dev.parent))
- return 0;
-
- *(const char **)name_ptr = dev_name(dev);
- return 1;
-}
-
-/**
- * alarmtimer_init_late - Late initializing of alarmtimer code
- *
- * This function locates a rtc device to use for wakealarms.
- * Run as late_initcall to make sure rtc devices have been
- * registered.
- */
-static int __init alarmtimer_init_late(void)
-{
- struct device *dev;
- char *str;
-
- /* Find an rtc device and init the rtc_timer */
- dev = class_find_device(rtc_class, NULL, &str, has_wakealarm);
- /* If we have a device then str is valid. See has_wakealarm() */
- if (dev) {
- rtcdev = rtc_class_open(str);
- /*
- * Drop the reference we got in class_find_device,
- * rtc_open takes its own.
- */
- put_device(dev);
- }
- if (!rtcdev) {
- printk(KERN_WARNING "No RTC device found, ALARM timers will"
- " not wake from suspend");
- }
- rtc_timer_init(&rtctimer, NULL, NULL);
-
- return 0;
-}
-#else
-static int __init alarmtimer_init_late(void)
-{
- printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers"
- " will not wake from suspend");
- return 0;
-}
-#endif
-late_initcall(alarmtimer_init_late);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 908038f57440..c3e4575e7829 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -32,7 +32,6 @@
#include <trace/events/sched.h>
-#include <asm/ftrace.h>
#include <asm/setup.h>
#include "trace_output.h"
@@ -82,14 +81,14 @@ static int ftrace_disabled __read_mostly;
static DEFINE_MUTEX(ftrace_lock);
-static struct ftrace_ops ftrace_list_end __read_mostly =
-{
+static struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
};
static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
@@ -148,9 +147,11 @@ void clear_ftrace_function(void)
{
ftrace_trace_function = ftrace_stub;
__ftrace_trace_function = ftrace_stub;
+ __ftrace_trace_function_delay = ftrace_stub;
ftrace_pid_function = ftrace_stub;
}
+#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
@@ -210,7 +211,12 @@ static void update_ftrace_function(void)
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
ftrace_trace_function = func;
#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+ /* do not update till all functions have been modified */
+ __ftrace_trace_function_delay = func;
+#else
__ftrace_trace_function = func;
+#endif
ftrace_trace_function = ftrace_test_stop_func;
#endif
}
@@ -785,8 +791,7 @@ static void unregister_ftrace_profiler(void)
unregister_ftrace_graph();
}
#else
-static struct ftrace_ops ftrace_profile_ops __read_mostly =
-{
+static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
};
@@ -806,19 +811,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long val;
- char buf[64]; /* big enough to hold a number */
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
val = !!val;
@@ -1182,8 +1178,14 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
return NULL;
}
+static void
+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+static void
+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+
static int
-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+ struct ftrace_hash **dst, struct ftrace_hash *src)
{
struct ftrace_func_entry *entry;
struct hlist_node *tp, *tn;
@@ -1193,9 +1195,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
unsigned long key;
int size = src->count;
int bits = 0;
+ int ret;
int i;
/*
+ * Remove the current set, update the hash and add
+ * them back.
+ */
+ ftrace_hash_rec_disable(ops, enable);
+
+ /*
* If the new source is empty, just free dst and assign it
* the empty_hash.
*/
@@ -1215,9 +1224,10 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS;
+ ret = -ENOMEM;
new_hash = alloc_ftrace_hash(bits);
if (!new_hash)
- return -ENOMEM;
+ goto out;
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
@@ -1236,7 +1246,16 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
rcu_assign_pointer(*dst, new_hash);
free_ftrace_hash_rcu(old_hash);
- return 0;
+ ret = 0;
+ out:
+ /*
+ * Enable regardless of ret:
+ * On success, we enable the new hash.
+ * On failure, we re-enable the original hash.
+ */
+ ftrace_hash_rec_enable(ops, enable);
+
+ return ret;
}
/*
@@ -1596,6 +1615,12 @@ static int __ftrace_modify_code(void *data)
{
int *command = data;
+ /*
+ * Do not call function tracer while we update the code.
+ * We are in stop machine, no worrying about races.
+ */
+ function_trace_stop++;
+
if (*command & FTRACE_ENABLE_CALLS)
ftrace_replace_code(1);
else if (*command & FTRACE_DISABLE_CALLS)
@@ -1609,6 +1634,18 @@ static int __ftrace_modify_code(void *data)
else if (*command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();
+#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ /*
+ * For archs that call ftrace_test_stop_func(), we must
+ * wait till after we update all the function callers
+ * before we update the callback. This keeps different
+ * ops that record different functions from corrupting
+ * each other.
+ */
+ __ftrace_trace_function = __ftrace_trace_function_delay;
+#endif
+ function_trace_stop--;
+
return 0;
}
@@ -1744,10 +1781,36 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
+static int ops_traces_mod(struct ftrace_ops *ops)
+{
+ struct ftrace_hash *hash;
+
+ hash = ops->filter_hash;
+ return !!(!hash || !hash->count);
+}
+
static int ftrace_update_code(struct module *mod)
{
struct dyn_ftrace *p;
cycle_t start, stop;
+ unsigned long ref = 0;
+
+ /*
+ * When adding a module, we need to check if tracers are
+ * currently enabled and if they are set to trace all functions.
+ * If they are, we need to enable the module functions as well
+ * as update the reference counts for those function records.
+ */
+ if (mod) {
+ struct ftrace_ops *ops;
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next) {
+ if (ops->flags & FTRACE_OPS_FL_ENABLED &&
+ ops_traces_mod(ops))
+ ref++;
+ }
+ }
start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
@@ -1760,7 +1823,7 @@ static int ftrace_update_code(struct module *mod)
p = ftrace_new_addrs;
ftrace_new_addrs = p->newlist;
- p->flags = 0L;
+ p->flags = ref;
/*
* Do the initial record conversion from mcount jump
@@ -1783,7 +1846,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up) {
+ if (ftrace_start_up && ref) {
int failed = __ftrace_replace_code(p, 1);
if (failed) {
ftrace_bug(failed, p->ip);
@@ -2407,10 +2470,9 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
*/
static int
-ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
+ftrace_mod_callback(struct ftrace_hash *hash,
+ char *func, char *cmd, char *param, int enable)
{
- struct ftrace_ops *ops = &global_ops;
- struct ftrace_hash *hash;
char *mod;
int ret = -EINVAL;
@@ -2430,11 +2492,6 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
if (!strlen(mod))
return ret;
- if (enable)
- hash = ops->filter_hash;
- else
- hash = ops->notrace_hash;
-
ret = ftrace_match_module_records(hash, func, mod);
if (!ret)
ret = -EINVAL;
@@ -2760,7 +2817,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
mutex_lock(&ftrace_cmd_mutex);
list_for_each_entry(p, &ftrace_commands, list) {
if (strcmp(p->name, command) == 0) {
- ret = p->func(func, command, next, enable);
+ ret = p->func(hash, func, command, next, enable);
goto out_unlock;
}
}
@@ -2857,7 +2914,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
ftrace_match_records(hash, buf, len);
mutex_lock(&ftrace_lock);
- ret = ftrace_hash_move(orig_hash, hash);
+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+ && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
mutex_unlock(&ftrace_lock);
mutex_unlock(&ftrace_regex_lock);
@@ -3040,18 +3101,12 @@ ftrace_regex_release(struct inode *inode, struct file *file)
orig_hash = &iter->ops->notrace_hash;
mutex_lock(&ftrace_lock);
- /*
- * Remove the current set, update the hash and add
- * them back.
- */
- ftrace_hash_rec_disable(iter->ops, filter_hash);
- ret = ftrace_hash_move(orig_hash, iter->hash);
- if (!ret) {
- ftrace_hash_rec_enable(iter->ops, filter_hash);
- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- }
+ ret = ftrace_hash_move(iter->ops, filter_hash,
+ orig_hash, iter->hash);
+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+ && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+
mutex_unlock(&ftrace_lock);
}
free_ftrace_hash(iter->hash);
@@ -3330,7 +3385,7 @@ static int ftrace_process_locs(struct module *mod,
{
unsigned long *p;
unsigned long addr;
- unsigned long flags;
+ unsigned long flags = 0; /* Shut up gcc */
mutex_lock(&ftrace_lock);
p = start;
@@ -3348,12 +3403,18 @@ static int ftrace_process_locs(struct module *mod,
}
/*
- * Disable interrupts to prevent interrupts from executing
- * code that is being modified.
+ * We only need to disable interrupts on start up
+ * because we are modifying code that an interrupt
+ * may execute, and the modification is not atomic.
+ * But for modules, nothing runs the code we modify
+ * until we are finished with it, and there's no
+ * reason to cause large interrupt latencies while we do it.
*/
- local_irq_save(flags);
+ if (!mod)
+ local_irq_save(flags);
ftrace_update_code(mod);
- local_irq_restore(flags);
+ if (!mod)
+ local_irq_restore(flags);
mutex_unlock(&ftrace_lock);
return 0;
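
Several hunks above replace the open-coded copy_from_user() plus strict_strtoul() sequence with kstrtoul_from_user(). A minimal sketch of the resulting debugfs write handler shape (the handler name and what is done with the parsed value are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t ex_debugfs_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	/*
	 * Copies and parses a base-10 number straight from user memory,
	 * returning a negative errno (e.g. -EFAULT or -EINVAL) on failure.
	 */
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;		/* treat any non-zero value as "enable" */
	/* ... act on val ... */

	*ppos += cnt;
	return cnt;
}
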
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b0c7aa407943..731201bf4acc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned nr_pages)
{
struct buffer_page *bpage, *tmp;
- unsigned long addr;
LIST_HEAD(pages);
unsigned i;
WARN_ON(!nr_pages);
for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+ /*
+ * __GFP_NORETRY flag makes sure that the allocation fails
+ * gracefully without invoking oom-killer and the system is
+ * not destabilized.
+ */
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
- GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+ GFP_KERNEL | __GFP_NORETRY,
+ cpu_to_node(cpu_buffer->cpu));
if (!bpage)
goto free_pages;
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
list_add(&bpage->list, &pages);
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
+ page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+ if (!page)
goto free_pages;
- bpage->page = (void *)addr;
+ bpage->page = page_address(page);
rb_init_page(bpage->page);
}
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
- unsigned long addr;
+ struct page *page;
int ret;
cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
rb_check_bpage(cpu_buffer, bpage);
cpu_buffer->reader_page = bpage;
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+ if (!page)
goto fail_free_reader;
- bpage->page = (void *)addr;
+ bpage->page = page_address(page);
rb_init_page(bpage->page);
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
unsigned nr_pages, rm_pages, new_pages;
struct buffer_page *bpage, *tmp;
unsigned long buffer_size;
- unsigned long addr;
LIST_HEAD(pages);
int i, cpu;
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
for_each_buffer_cpu(buffer, cpu) {
for (i = 0; i < new_pages; i++) {
+ struct page *page;
+ /*
+ * __GFP_NORETRY flag makes sure that the allocation
+ * fails gracefully without invoking oom-killer and
+ * the system is not destabilized.
+ */
bpage = kzalloc_node(ALIGN(sizeof(*bpage),
cache_line_size()),
- GFP_KERNEL, cpu_to_node(cpu));
+ GFP_KERNEL | __GFP_NORETRY,
+ cpu_to_node(cpu));
if (!bpage)
goto free_pages;
list_add(&bpage->list, &pages);
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+ if (!page)
goto free_pages;
- bpage->page = (void *)addr;
+ bpage->page = page_address(page);
rb_init_page(bpage->page);
}
}
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
* Returns:
* The page allocated, or NULL on error.
*/
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
struct buffer_data_page *bpage;
- unsigned long addr;
+ struct page *page;
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
+ page = alloc_pages_node(cpu_to_node(cpu),
+ GFP_KERNEL | __GFP_NORETRY, 0);
+ if (!page)
return NULL;
- bpage = (void *)addr;
+ bpage = page_address(page);
rb_init_page(bpage);
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *p = filp->private_data;
- char buf[64];
unsigned long val;
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
if (val)
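
A short sketch (the helper name is illustrative) of the page-allocation pattern the ring buffer switches to above: allocate on the node backing the target CPU, opt out of OOM-killer pressure with __GFP_NORETRY, and convert the struct page to a kernel address with page_address():

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

static void *ex_alloc_buffer_page(int cpu)
{
	struct page *page;

	/*
	 * __GFP_NORETRY lets the allocation fail instead of retrying until
	 * the OOM killer fires; the page comes from the node backing the
	 * target CPU.
	 */
	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;		/* caller must handle the failure */

	return page_address(page);	/* GFP_KERNEL page, direct-mapped */
}
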
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 302f8a614635..a5457d577b98 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu)
int inc;
int i;
- bpage = ring_buffer_alloc_read_page(buffer);
+ bpage = ring_buffer_alloc_read_page(buffer, cpu);
if (!bpage)
return EVENT_DROPPED;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ee9c921d7f21..e5df02c69b1d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -343,26 +343,27 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);
+static void wakeup_work_handler(struct work_struct *work)
+{
+ wake_up(&trace_wait);
+}
+
+static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+
/**
* trace_wake_up - wake up tasks waiting for trace input
*
- * Simply wakes up any task that is blocked on the trace_wait
- * queue. These is used with trace_poll for tasks polling the trace.
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. This is used with trace_poll for tasks polling the
+ * trace.
*/
void trace_wake_up(void)
{
- int cpu;
+ const unsigned long delay = msecs_to_jiffies(2);
if (trace_flags & TRACE_ITER_BLOCK)
return;
- /*
- * The runqueue_is_locked() can fail, but this is the best we
- * have for now:
- */
- cpu = get_cpu();
- if (!runqueue_is_locked(cpu))
- wake_up(&trace_wait);
- put_cpu();
+ schedule_delayed_work(&wakeup_work, delay);
}
static int __init set_buf_size(char *str)
@@ -424,6 +425,7 @@ static const char *trace_options[] = {
"graph-time",
"record-cmd",
"overwrite",
+ "disable_on_free",
NULL
};
@@ -1191,6 +1193,18 @@ void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs)
+{
+ ring_buffer_unlock_commit(buffer, event);
+
+ ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
+ ftrace_trace_userstack(buffer, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
@@ -1234,30 +1248,103 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
}
#ifdef CONFIG_STACKTRACE
+
+#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+struct ftrace_stack {
+ unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+static DEFINE_PER_CPU(int, ftrace_stack_reserve);
+
static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
- int skip, int pc)
+ int skip, int pc, struct pt_regs *regs)
{
struct ftrace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
+ int use_stack;
+ int size = FTRACE_STACK_ENTRIES;
+
+ trace.nr_entries = 0;
+ trace.skip = skip;
+
+ /*
+ * Since events can happen in NMIs there's no safe way to
+ * use the per cpu ftrace_stacks. We reserve it and if an interrupt
+ * or NMI comes in, it will just have to use the default
+ * FTRACE_STACK_SIZE.
+ */
+ preempt_disable_notrace();
+
+ use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+ /*
+ * We don't need any atomic variables, just a barrier.
+ * If an interrupt comes in, we don't care, because it would
+ * have exited and put the counter back to what we want.
+ * We just need a barrier to keep gcc from moving things
+ * around.
+ */
+ barrier();
+ if (use_stack == 1) {
+ trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
+ trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
+
+ if (regs)
+ save_stack_trace_regs(regs, &trace);
+ else
+ save_stack_trace(&trace);
+
+ if (trace.nr_entries > size)
+ size = trace.nr_entries;
+ } else
+ /* From now on, use_stack is a boolean */
+ use_stack = 0;
+
+ size *= sizeof(unsigned long);
event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
- sizeof(*entry), flags, pc);
+ sizeof(*entry) + size, flags, pc);
if (!event)
- return;
- entry = ring_buffer_event_data(event);
- memset(&entry->caller, 0, sizeof(entry->caller));
+ goto out;
+ entry = ring_buffer_event_data(event);
- trace.nr_entries = 0;
- trace.max_entries = FTRACE_STACK_ENTRIES;
- trace.skip = skip;
- trace.entries = entry->caller;
+ memset(&entry->caller, 0, size);
+
+ if (use_stack)
+ memcpy(&entry->caller, trace.entries,
+ trace.nr_entries * sizeof(unsigned long));
+ else {
+ trace.max_entries = FTRACE_STACK_ENTRIES;
+ trace.entries = entry->caller;
+ if (regs)
+ save_stack_trace_regs(regs, &trace);
+ else
+ save_stack_trace(&trace);
+ }
+
+ entry->size = trace.nr_entries;
- save_stack_trace(&trace);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ out:
+ /* Again, don't let gcc optimize things here */
+ barrier();
+ __get_cpu_var(ftrace_stack_reserve)--;
+ preempt_enable_notrace();
+
+}
+
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+ int skip, int pc, struct pt_regs *regs)
+{
+ if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ return;
+
+ __ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
@@ -1266,13 +1353,13 @@ void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
- __ftrace_trace_stack(buffer, flags, skip, pc);
+ __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
- __ftrace_trace_stack(tr->buffer, flags, skip, pc);
+ __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}
/**
@@ -1288,7 +1375,7 @@ void trace_dump_stack(void)
local_save_flags(flags);
/* skipping 3 traces, seems to get us at the caller of this function */
- __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+ __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
@@ -1536,7 +1623,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
ftrace_enable_cpu();
- return event ? ring_buffer_event_data(event) : NULL;
+ if (event) {
+ iter->ent_size = ring_buffer_event_length(event);
+ return ring_buffer_event_data(event);
+ }
+ iter->ent_size = 0;
+ return NULL;
}
static struct trace_entry *
@@ -2051,6 +2143,9 @@ void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
+ if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ return;
+
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
@@ -2701,20 +2796,11 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- char buf[64];
unsigned long val;
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
val = !!val;
@@ -2767,7 +2853,7 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
return t->init(tr);
}
-static int tracing_resize_ring_buffer(unsigned long size)
+static int __tracing_resize_ring_buffer(unsigned long size)
{
int ret;
@@ -2819,6 +2905,41 @@ static int tracing_resize_ring_buffer(unsigned long size)
return ret;
}
+static ssize_t tracing_resize_ring_buffer(unsigned long size)
+{
+ int cpu, ret = size;
+
+ mutex_lock(&trace_types_lock);
+
+ tracing_stop();
+
+ /* disable all cpu buffers */
+ for_each_tracing_cpu(cpu) {
+ if (global_trace.data[cpu])
+ atomic_inc(&global_trace.data[cpu]->disabled);
+ if (max_tr.data[cpu])
+ atomic_inc(&max_tr.data[cpu]->disabled);
+ }
+
+ if (size != global_trace.entries)
+ ret = __tracing_resize_ring_buffer(size);
+
+ if (ret < 0)
+ ret = -ENOMEM;
+
+ for_each_tracing_cpu(cpu) {
+ if (global_trace.data[cpu])
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ if (max_tr.data[cpu])
+ atomic_dec(&max_tr.data[cpu]->disabled);
+ }
+
+ tracing_start();
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+}
+
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
@@ -2836,7 +2957,7 @@ int tracing_update_buffers(void)
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
- ret = tracing_resize_ring_buffer(trace_buf_size);
+ ret = __tracing_resize_ring_buffer(trace_buf_size);
mutex_unlock(&trace_types_lock);
return ret;
@@ -2860,7 +2981,7 @@ static int tracing_set_tracer(const char *buf)
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded) {
- ret = tracing_resize_ring_buffer(trace_buf_size);
+ ret = __tracing_resize_ring_buffer(trace_buf_size);
if (ret < 0)
goto out;
ret = 0;
@@ -2966,20 +3087,11 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
- char buf[64];
unsigned long val;
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
*ptr = val * 1000;
@@ -3434,67 +3546,54 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long val;
- char buf[64];
- int ret, cpu;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
+ int ret;
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
/* must have at least 1 entry */
if (!val)
return -EINVAL;
- mutex_lock(&trace_types_lock);
-
- tracing_stop();
-
- /* disable all cpu buffers */
- for_each_tracing_cpu(cpu) {
- if (global_trace.data[cpu])
- atomic_inc(&global_trace.data[cpu]->disabled);
- if (max_tr.data[cpu])
- atomic_inc(&max_tr.data[cpu]->disabled);
- }
-
/* value is in KB */
val <<= 10;
- if (val != global_trace.entries) {
- ret = tracing_resize_ring_buffer(val);
- if (ret < 0) {
- cnt = ret;
- goto out;
- }
- }
+ ret = tracing_resize_ring_buffer(val);
+ if (ret < 0)
+ return ret;
*ppos += cnt;
- /* If check pages failed, return ENOMEM */
- if (tracing_disabled)
- cnt = -ENOMEM;
- out:
- for_each_tracing_cpu(cpu) {
- if (global_trace.data[cpu])
- atomic_dec(&global_trace.data[cpu]->disabled);
- if (max_tr.data[cpu])
- atomic_dec(&max_tr.data[cpu]->disabled);
- }
+ return cnt;
+}
- tracing_start();
- mutex_unlock(&trace_types_lock);
+static ssize_t
+tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ /*
+	 * There is no need to read what the user has written; this function
+ * is just to make sure that there is no error when "echo" is used
+ */
+
+ *ppos += cnt;
return cnt;
}
+static int
+tracing_free_buffer_release(struct inode *inode, struct file *filp)
+{
+ /* disable tracing ? */
+ if (trace_flags & TRACE_ITER_STOP_ON_FREE)
+ tracing_off();
+ /* resize the ring buffer to 0 */
+ tracing_resize_ring_buffer(0);
+
+ return 0;
+}
+
static int mark_printk(const char *fmt, ...)
{
int ret;
@@ -3640,6 +3739,11 @@ static const struct file_operations tracing_entries_fops = {
.llseek = generic_file_llseek,
};
+static const struct file_operations tracing_free_buffer_fops = {
+ .write = tracing_free_buffer_write,
+ .release = tracing_free_buffer_release,
+};
+
static const struct file_operations tracing_mark_fops = {
.open = tracing_open_generic,
.write = tracing_mark_write,
@@ -3696,7 +3800,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return 0;
if (!info->spare)
- info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
+ info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
if (!info->spare)
return -ENOMEM;
@@ -3853,7 +3957,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
ref->ref = 1;
ref->buffer = info->tr->buffer;
- ref->page = ring_buffer_alloc_read_page(ref->buffer);
+ ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
if (!ref->page) {
kfree(ref);
break;
@@ -3862,8 +3966,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
r = ring_buffer_read_page(ref->buffer, &ref->page,
len, info->cpu, 1);
if (r < 0) {
- ring_buffer_free_read_page(ref->buffer,
- ref->page);
+ ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
break;
}
@@ -4099,19 +4202,10 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
{
struct trace_option_dentry *topt = filp->private_data;
unsigned long val;
- char buf[64];
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
if (val != 0 && val != 1)
@@ -4159,20 +4253,11 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
long index = (long)filp->private_data;
- char buf[64];
unsigned long val;
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
if (val != 0 && val != 1)
@@ -4365,6 +4450,9 @@ static __init int tracer_init_debugfs(void)
trace_create_file("buffer_size_kb", 0644, d_tracer,
&global_trace, &tracing_entries_fops);
+ trace_create_file("free_buffer", 0644, d_tracer,
+ &global_trace, &tracing_free_buffer_fops);
+
trace_create_file("trace_marker", 0220, d_tracer,
NULL, &tracing_mark_fops);
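
trace_wake_up() above now defers the wakeup through a delayed work item instead of peeking at the runqueue lock. A minimal sketch of that DECLARE_DELAYED_WORK pattern, with illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void ex_wakeup_handler(struct work_struct *work)
{
	/* runs later in process context, where waking sleepers is safe */
}

static DECLARE_DELAYED_WORK(ex_wakeup_work, ex_wakeup_handler);

static void ex_poke_readers(void)
{
	/*
	 * If the work is already pending this is a no-op, so hot paths
	 * do not queue a pile of redundant wakeups.
	 */
	schedule_delayed_work(&ex_wakeup_work, msecs_to_jiffies(2));
}
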
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 229f8591f61d..3f381d0b20a8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -278,6 +278,29 @@ struct tracer {
};
+/* Only current can touch trace_recursion */
+#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
+#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
+
+/* Ring buffer has the 10 LSB bits to count */
+#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
+
+/* for function tracing recursion */
+#define TRACE_INTERNAL_BIT (1<<11)
+#define TRACE_GLOBAL_BIT (1<<12)
+/*
+ * Abuse of the trace_recursion.
+ * We need a way to maintain state while tracing the function graph
+ * in irq, because we may want to trace a particular function that
+ * was called in irq context even though irq tracing is off. Since
+ * this can only be modified by current, we can reuse trace_recursion.
+ */
+#define TRACE_IRQ_BIT (1<<13)
+
+#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
+#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
+#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
+
#define TRACE_PIPE_ALL_CPU -1
int tracer_init(struct tracer *t, struct trace_array *tr);
@@ -389,6 +412,9 @@ void update_max_tr_single(struct trace_array *tr,
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc);
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+ int skip, int pc, struct pt_regs *regs);
+
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
int pc);
@@ -400,6 +426,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer,
{
}
+static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
+ unsigned long flags, int skip,
+ int pc, struct pt_regs *regs)
+{
+}
+
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
unsigned long flags, int pc)
{
@@ -507,8 +539,18 @@ static inline int ftrace_graph_addr(unsigned long addr)
return 1;
for (i = 0; i < ftrace_graph_count; i++) {
- if (addr == ftrace_graph_funcs[i])
+ if (addr == ftrace_graph_funcs[i]) {
+ /*
+ * If no irqs are to be traced, but a set_graph_function
+ * is set, and called by an interrupt handler, we still
+ * want to trace it.
+ */
+ if (in_irq())
+ trace_recursion_set(TRACE_IRQ_BIT);
+ else
+ trace_recursion_clear(TRACE_IRQ_BIT);
return 1;
+ }
}
return 0;
@@ -609,6 +651,7 @@ enum trace_iterator_flags {
TRACE_ITER_GRAPH_TIME = 0x80000,
TRACE_ITER_RECORD_CMD = 0x100000,
TRACE_ITER_OVERWRITE = 0x200000,
+ TRACE_ITER_STOP_ON_FREE = 0x400000,
};
/*
@@ -677,6 +720,7 @@ struct event_subsystem {
struct dentry *entry;
struct event_filter *filter;
int nr_events;
+ int ref_count;
};
#define FILTER_PRED_INVALID ((unsigned short)-1)
@@ -784,19 +828,4 @@ extern const char *__stop___trace_bprintk_fmt[];
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#include "trace_entries.h"
-/* Only current can touch trace_recursion */
-#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
-#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
-
-/* Ring buffer has the 10 LSB bits to count */
-#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
-
-/* for function tracing recursion */
-#define TRACE_INTERNAL_BIT (1<<11)
-#define TRACE_GLOBAL_BIT (1<<12)
-
-#define trace_recursion_set(bit) do { (current)->trace_recursion |= (bit); } while (0)
-#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
-#define trace_recursion_test(bit) ((current)->trace_recursion & (bit))
-
#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e32744c84d94..93365907f219 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -161,7 +161,8 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
TRACE_STACK,
F_STRUCT(
- __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ __field( int, size )
+ __dynamic_array(unsigned long, caller )
),
F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 686ec399f2a8..581876f9f387 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -244,6 +244,35 @@ static void ftrace_clear_events(void)
mutex_unlock(&event_mutex);
}
+static void __put_system(struct event_subsystem *system)
+{
+ struct event_filter *filter = system->filter;
+
+ WARN_ON_ONCE(system->ref_count == 0);
+ if (--system->ref_count)
+ return;
+
+ if (filter) {
+ kfree(filter->filter_string);
+ kfree(filter);
+ }
+ kfree(system->name);
+ kfree(system);
+}
+
+static void __get_system(struct event_subsystem *system)
+{
+ WARN_ON_ONCE(system->ref_count == 0);
+ system->ref_count++;
+}
+
+static void put_system(struct event_subsystem *system)
+{
+ mutex_lock(&event_mutex);
+ __put_system(system);
+ mutex_unlock(&event_mutex);
+}
+
/*
* __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
*/
@@ -486,20 +515,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct ftrace_event_call *call = filp->private_data;
- char buf[64];
unsigned long val;
int ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
ret = tracing_update_buffers();
@@ -528,7 +548,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
const char set_to_char[4] = { '?', '0', '1', 'X' };
- const char *system = filp->private_data;
+ struct event_subsystem *system = filp->private_data;
struct ftrace_event_call *call;
char buf[2];
int set = 0;
@@ -539,7 +559,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
if (!call->name || !call->class || !call->class->reg)
continue;
- if (system && strcmp(call->class->system, system) != 0)
+ if (system && strcmp(call->class->system, system->name) != 0)
continue;
/*
@@ -569,21 +589,13 @@ static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- const char *system = filp->private_data;
+ struct event_subsystem *system = filp->private_data;
+ const char *name = NULL;
unsigned long val;
- char buf[64];
ssize_t ret;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
return ret;
ret = tracing_update_buffers();
@@ -593,7 +605,14 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
return -EINVAL;
- ret = __ftrace_set_clr_event(NULL, system, NULL, val);
+ /*
+ * Opening of "enable" adds a ref count to system,
+ * so the name is safe to use.
+ */
+ if (system)
+ name = system->name;
+
+ ret = __ftrace_set_clr_event(NULL, name, NULL, val);
if (ret)
goto out;
@@ -826,6 +845,52 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
return cnt;
}
+static LIST_HEAD(event_subsystems);
+
+static int subsystem_open(struct inode *inode, struct file *filp)
+{
+ struct event_subsystem *system = NULL;
+ int ret;
+
+ if (!inode->i_private)
+ goto skip_search;
+
+ /* Make sure the system still exists */
+ mutex_lock(&event_mutex);
+ list_for_each_entry(system, &event_subsystems, list) {
+ if (system == inode->i_private) {
+ /* Don't open systems with no events */
+ if (!system->nr_events) {
+ system = NULL;
+ break;
+ }
+ __get_system(system);
+ break;
+ }
+ }
+ mutex_unlock(&event_mutex);
+
+ if (system != inode->i_private)
+ return -ENODEV;
+
+ skip_search:
+ ret = tracing_open_generic(inode, filp);
+ if (ret < 0 && system)
+ put_system(system);
+
+ return ret;
+}
+
+static int subsystem_release(struct inode *inode, struct file *file)
+{
+ struct event_subsystem *system = inode->i_private;
+
+ if (system)
+ put_system(system);
+
+ return 0;
+}
+
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -963,17 +1028,19 @@ static const struct file_operations ftrace_event_filter_fops = {
};
static const struct file_operations ftrace_subsystem_filter_fops = {
- .open = tracing_open_generic,
+ .open = subsystem_open,
.read = subsystem_filter_read,
.write = subsystem_filter_write,
.llseek = default_llseek,
+ .release = subsystem_release,
};
static const struct file_operations ftrace_system_enable_fops = {
- .open = tracing_open_generic,
+ .open = subsystem_open,
.read = system_enable_read,
.write = system_enable_write,
.llseek = default_llseek,
+ .release = subsystem_release,
};
static const struct file_operations ftrace_show_header_fops = {
@@ -1002,8 +1069,6 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
-static LIST_HEAD(event_subsystems);
-
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
@@ -1013,6 +1078,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
+ __get_system(system);
system->nr_events++;
return system->entry;
}
@@ -1035,6 +1101,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
}
system->nr_events = 1;
+ system->ref_count = 1;
system->name = kstrdup(name, GFP_KERNEL);
if (!system->name) {
debugfs_remove(system->entry);
@@ -1062,8 +1129,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
"'%s/filter' entry\n", name);
}
- trace_create_file("enable", 0644, system->entry,
- (void *)system->name,
+ trace_create_file("enable", 0644, system->entry, system,
&ftrace_system_enable_fops);
return system->entry;
@@ -1184,16 +1250,9 @@ static void remove_subsystem_dir(const char *name)
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
if (!--system->nr_events) {
- struct event_filter *filter = system->filter;
-
debugfs_remove_recursive(system->entry);
list_del(&system->list);
- if (filter) {
- kfree(filter->filter_string);
- kfree(filter);
- }
- kfree(system->name);
- kfree(system);
+ __put_system(system);
}
break;
}
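
The subsystem lifetime change above pairs every opened "enable"/"filter" file with a reference. A condensed sketch of that open/release discipline as it would sit inside trace_events.c, using the __get_system()/put_system() helpers added by this patch; the function names are illustrative and the re-validation against the global subsystem list plus the error handling of the real subsystem_open() are omitted:

static int ex_subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = inode->i_private;

	mutex_lock(&event_mutex);
	__get_system(system);	/* pin the subsystem while the file is open */
	mutex_unlock(&event_mutex);

	return tracing_open_generic(inode, filp);
}

static int ex_subsystem_release(struct inode *inode, struct file *filp)
{
	/* drop the open() reference; the final put frees the subsystem */
	put_system(inode->i_private);
	return 0;
}
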
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 8008ddcfbf20..256764ecccd6 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1886,6 +1886,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
mutex_lock(&event_mutex);
+ /* Make sure the system still has events */
+ if (!system->nr_events) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+
if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(system);
remove_filter_string(system->filter);
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8d0e1cc4e974..c7b0c6a7db09 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -324,7 +324,8 @@ ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
}
static int
-ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
{
struct ftrace_probe_ops *ops;
void *count = (void *)-1;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 962cdb24ed81..a7d2a4c653d8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -74,6 +74,20 @@ static struct tracer_flags tracer_flags = {
static struct trace_array *graph_array;
+/*
+ * The DURATION column is also used to display IRQ signs; the
+ * following values are used by print_graph_irq and others
+ * to fill in space in the DURATION column.
+ */
+enum {
+ DURATION_FILL_FULL = -1,
+ DURATION_FILL_START = -2,
+ DURATION_FILL_END = -3,
+};
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+ u32 flags);
/* Add a function return address to the trace stack on thread info.*/
int
@@ -213,7 +227,7 @@ int __trace_graph_entry(struct trace_array *tr,
static inline int ftrace_graph_ignore_irqs(void)
{
- if (!ftrace_graph_skip_irqs)
+ if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
return 0;
return in_irq();
@@ -577,32 +591,6 @@ get_return_for_leaf(struct trace_iterator *iter,
return next;
}
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s,
- u32 flags)
-{
- /* If duration disappear, we don't need anything */
- if (!(flags & TRACE_GRAPH_PRINT_DURATION))
- return 1;
-
- /* Non nested entry or return */
- if (duration == -1)
- return trace_seq_printf(s, " ");
-
- if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
- /* Duration exceeded 100 msecs */
- if (duration > 100000ULL)
- return trace_seq_printf(s, "! ");
-
- /* Duration exceeded 10 msecs */
- if (duration > 10000ULL)
- return trace_seq_printf(s, "+ ");
- }
-
- return trace_seq_printf(s, " ");
-}
-
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
unsigned long usecs_rem;
@@ -625,34 +613,36 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
addr >= (unsigned long)__irqentry_text_end)
return TRACE_TYPE_UNHANDLED;
- /* Absolute time */
- if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
- ret = print_graph_abs_time(iter->ts, s);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
+ /* Absolute time */
+ if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
+ ret = print_graph_abs_time(iter->ts, s);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
- /* Cpu */
- if (flags & TRACE_GRAPH_PRINT_CPU) {
- ret = print_graph_cpu(s, cpu);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ /* Cpu */
+ if (flags & TRACE_GRAPH_PRINT_CPU) {
+ ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
- /* Proc */
- if (flags & TRACE_GRAPH_PRINT_PROC) {
- ret = print_graph_proc(s, pid);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ /* Proc */
+ if (flags & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
}
/* No overhead */
- ret = print_graph_overhead(-1, s, flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
+ ret = print_graph_duration(DURATION_FILL_START, s, flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
if (type == TRACE_GRAPH_ENT)
ret = trace_seq_printf(s, "==========>");
@@ -662,9 +652,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- /* Don't close the duration column if haven't one */
- if (flags & TRACE_GRAPH_PRINT_DURATION)
- trace_seq_printf(s, " |");
+ ret = print_graph_duration(DURATION_FILL_END, s, flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
+
ret = trace_seq_printf(s, "\n");
if (!ret)
@@ -716,9 +707,49 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
}
static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+ u32 flags)
{
- int ret;
+ int ret = -1;
+
+ if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
+ !(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ return TRACE_TYPE_HANDLED;
+
+	/* No real data, just filling the column with spaces */
+ switch (duration) {
+ case DURATION_FILL_FULL:
+ ret = trace_seq_printf(s, " | ");
+ return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+ case DURATION_FILL_START:
+ ret = trace_seq_printf(s, " ");
+ return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+ case DURATION_FILL_END:
+ ret = trace_seq_printf(s, " |");
+ return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+ }
+
+	/* Signal an execution-time overhead to the output */
+ if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
+ /* Duration exceeded 100 msecs */
+ if (duration > 100000ULL)
+ ret = trace_seq_printf(s, "! ");
+ /* Duration exceeded 10 msecs */
+ else if (duration > 10000ULL)
+ ret = trace_seq_printf(s, "+ ");
+ }
+
+ /*
+	 * The -1 means we either did not exceed the duration thresholds
+	 * or we don't want to print out the overhead. Either way we need
+ * to fill out the space.
+ */
+ if (ret == -1)
+ ret = trace_seq_printf(s, " ");
+
+	/* Catch any failure that happened above */
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
ret = trace_print_graph_duration(duration, s);
if (ret != TRACE_TYPE_HANDLED)
@@ -767,18 +798,11 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data->enter_funcs[call->depth] = 0;
}
- /* Overhead */
- ret = print_graph_overhead(duration, s, flags);
- if (!ret)
+ /* Overhead and duration */
+ ret = print_graph_duration(duration, s, flags);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- /* Duration */
- if (flags & TRACE_GRAPH_PRINT_DURATION) {
- ret = print_graph_duration(duration, s);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
@@ -815,17 +839,10 @@ print_graph_entry_nested(struct trace_iterator *iter,
cpu_data->enter_funcs[call->depth] = call->func;
}
- /* No overhead */
- ret = print_graph_overhead(-1, s, flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
/* No time */
- if (flags & TRACE_GRAPH_PRINT_DURATION) {
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -865,6 +882,9 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
return TRACE_TYPE_PARTIAL_LINE;
}
+ if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ return 0;
+
/* Absolute time */
if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
ret = print_graph_abs_time(iter->ts, s);
@@ -1078,18 +1098,11 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
- /* Overhead */
- ret = print_graph_overhead(duration, s, flags);
- if (!ret)
+ /* Overhead and duration */
+ ret = print_graph_duration(duration, s, flags);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
return TRACE_TYPE_PARTIAL_LINE;
- /* Duration */
- if (flags & TRACE_GRAPH_PRINT_DURATION) {
- ret = print_graph_duration(duration, s);
- if (ret == TRACE_TYPE_PARTIAL_LINE)
- return TRACE_TYPE_PARTIAL_LINE;
- }
-
/* Closing brace */
for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
ret = trace_seq_printf(s, " ");
@@ -1146,17 +1159,10 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
if (print_graph_prologue(iter, s, 0, 0, flags))
return TRACE_TYPE_PARTIAL_LINE;
- /* No overhead */
- ret = print_graph_overhead(-1, s, flags);
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
-
/* No time */
- if (flags & TRACE_GRAPH_PRINT_DURATION) {
- ret = trace_seq_printf(s, " | ");
- if (!ret)
- return TRACE_TYPE_PARTIAL_LINE;
- }
+ ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+ if (ret != TRACE_TYPE_HANDLED)
+ return ret;
/* Indentation */
if (depth > 0)
@@ -1207,7 +1213,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
enum print_line_t
-__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
struct ftrace_graph_ent_entry *field;
struct fgraph_data *data = iter->private;
@@ -1270,18 +1276,7 @@ __print_graph_function_flags(struct trace_iterator *iter, u32 flags)
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
- return __print_graph_function_flags(iter, tracer_flags.val);
-}
-
-enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
- u32 flags)
-{
- if (trace_flags & TRACE_ITER_LATENCY_FMT)
- flags |= TRACE_GRAPH_PRINT_DURATION;
- else
- flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
- return __print_graph_function_flags(iter, flags);
+ return print_graph_function_flags(iter, tracer_flags.val);
}
static enum print_line_t
@@ -1309,8 +1304,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
- seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces);
- seq_printf(s, "#%.*s|||| / \n", size, spaces);
+ seq_printf(s, "#%.*s||| / \n", size, spaces);
}
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
@@ -1329,7 +1323,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " TASK/PID ");
if (lat)
- seq_printf(s, "|||||");
+ seq_printf(s, "||||");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " DURATION ");
seq_printf(s, " FUNCTION CALLS\n");
@@ -1343,7 +1337,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
if (flags & TRACE_GRAPH_PRINT_PROC)
seq_printf(s, " | | ");
if (lat)
- seq_printf(s, "|||||");
+ seq_printf(s, "||||");
if (flags & TRACE_GRAPH_PRINT_DURATION)
seq_printf(s, " | | ");
seq_printf(s, " | | | |\n");
@@ -1358,15 +1352,16 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
struct trace_iterator *iter = s->private;
+ if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+ return;
+
if (trace_flags & TRACE_ITER_LATENCY_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
print_trace_header(s, iter);
- flags |= TRACE_GRAPH_PRINT_DURATION;
- } else
- flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+ }
__print_graph_headers_flags(s, flags);
}
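The rewritten print_graph_duration() above folds the old overhead markers into the duration column: anything over 10 ms gets a "+ " prefix, anything over 100 ms a "! ", and otherwise the two-character column is padded with spaces. A minimal standalone sketch of that threshold logic (function and variable names are illustrative; durations are in microseconds as in the tracer):

#include <stdio.h>

/* Illustrative only: mirrors the threshold checks from print_graph_duration(). */
static const char *overhead_marker(unsigned long long duration_us)
{
        if (duration_us > 100000ULL)    /* more than 100 ms */
                return "! ";
        if (duration_us > 10000ULL)     /* more than 10 ms */
                return "+ ";
        return "  ";                    /* no marker, keep the column aligned */
}

int main(void)
{
        unsigned long long samples[] = { 1200ULL, 15000ULL, 250000ULL };

        for (int i = 0; i < 3; i++)
                printf("%s%llu us\n", overhead_marker(samples[i]), samples[i]);
        return 0;
}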
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index c77424be284d..667aa8cc0cfc 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -226,7 +226,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
}
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
- TRACE_GRAPH_PRINT_PROC)
+ TRACE_GRAPH_PRINT_PROC | \
+ TRACE_GRAPH_PRINT_ABS_TIME | \
+ TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 27d13b36b8be..5fb3697bf0e5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref)
DEFINE_FETCH_deref(string)
DEFINE_FETCH_deref(string_size)
+static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
+{
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ update_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ update_symbol_cache(data->orig.data);
+}
+
static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
{
if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
@@ -377,6 +385,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield)
#define fetch_bitfield_string_size NULL
static __kprobes void
+update_bitfield_fetch_param(struct bitfield_fetch_param *data)
+{
+ /*
+ * Don't check the bitfield itself, because this must be the
+ * last fetch function.
+ */
+ if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
+ update_deref_fetch_param(data->orig.data);
+ else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
+ update_symbol_cache(data->orig.data);
+}
+
+static __kprobes void
free_bitfield_fetch_param(struct bitfield_fetch_param *data)
{
/*
@@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
free_symbol_cache(data->orig.data);
kfree(data);
}
+
/* Default (unsigned long) fetch type */
#define __DEFAULT_FETCH_TYPE(t) u##t
#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
@@ -536,6 +558,7 @@ struct probe_arg {
/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2
+#define TP_FLAG_REGISTERED 4
struct trace_probe {
struct list_head list;
@@ -555,16 +578,49 @@ struct trace_probe {
(sizeof(struct probe_arg) * (n)))
-static __kprobes int probe_is_return(struct trace_probe *tp)
+static __kprobes int trace_probe_is_return(struct trace_probe *tp)
{
return tp->rp.handler != NULL;
}
-static __kprobes const char *probe_symbol(struct trace_probe *tp)
+static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
return tp->symbol ? tp->symbol : "unknown";
}
+static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
+{
+ return tp->rp.kp.offset;
+}
+
+static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
+{
+ return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
+}
+
+static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
+{
+ return !!(tp->flags & TP_FLAG_REGISTERED);
+}
+
+static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
+{
+ return !!(kprobe_gone(&tp->rp.kp));
+}
+
+static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
+ struct module *mod)
+{
+ int len = strlen(mod->name);
+ const char *name = trace_probe_symbol(tp);
+ return strncmp(mod->name, name, len) == 0 && name[len] == ':';
+}
+
+static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
+{
+ return !!strchr(trace_probe_symbol(tp), ':');
+}
+
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);
@@ -646,6 +702,16 @@ error:
return ERR_PTR(ret);
}
+static void update_probe_arg(struct probe_arg *arg)
+{
+ if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
+ update_bitfield_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
+ update_deref_fetch_param(arg->fetch.data);
+ else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
+ update_symbol_cache(arg->fetch.data);
+}
+
static void free_probe_arg(struct probe_arg *arg)
{
if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
@@ -671,7 +737,7 @@ static void free_trace_probe(struct trace_probe *tp)
kfree(tp);
}
-static struct trace_probe *find_probe_event(const char *event,
+static struct trace_probe *find_trace_probe(const char *event,
const char *group)
{
struct trace_probe *tp;
@@ -683,13 +749,96 @@ static struct trace_probe *find_probe_event(const char *event,
return NULL;
}
+/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static int enable_trace_probe(struct trace_probe *tp, int flag)
+{
+ int ret = 0;
+
+ tp->flags |= flag;
+ if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
+ !trace_probe_has_gone(tp)) {
+ if (trace_probe_is_return(tp))
+ ret = enable_kretprobe(&tp->rp);
+ else
+ ret = enable_kprobe(&tp->rp.kp);
+ }
+
+ return ret;
+}
+
+/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
+static void disable_trace_probe(struct trace_probe *tp, int flag)
+{
+ tp->flags &= ~flag;
+ if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
+ if (trace_probe_is_return(tp))
+ disable_kretprobe(&tp->rp);
+ else
+ disable_kprobe(&tp->rp.kp);
+ }
+}
+
+/* Internal register function - just handle k*probes and flags */
+static int __register_trace_probe(struct trace_probe *tp)
+{
+ int i, ret;
+
+ if (trace_probe_is_registered(tp))
+ return -EINVAL;
+
+ for (i = 0; i < tp->nr_args; i++)
+ update_probe_arg(&tp->args[i]);
+
+ /* Set/clear disabled flag according to tp->flags */
+ if (trace_probe_is_enabled(tp))
+ tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
+ else
+ tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
+
+ if (trace_probe_is_return(tp))
+ ret = register_kretprobe(&tp->rp);
+ else
+ ret = register_kprobe(&tp->rp.kp);
+
+ if (ret == 0)
+ tp->flags |= TP_FLAG_REGISTERED;
+ else {
+ pr_warning("Could not insert probe at %s+%lu: %d\n",
+ trace_probe_symbol(tp), trace_probe_offset(tp), ret);
+ if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
+ pr_warning("This probe might be able to register after"
+ "target module is loaded. Continue.\n");
+ ret = 0;
+ } else if (ret == -EILSEQ) {
+ pr_warning("Probing address(0x%p) is not an "
+ "instruction boundary.\n",
+ tp->rp.kp.addr);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/* Internal unregister function - just handle k*probes and flags */
+static void __unregister_trace_probe(struct trace_probe *tp)
+{
+ if (trace_probe_is_registered(tp)) {
+ if (trace_probe_is_return(tp))
+ unregister_kretprobe(&tp->rp);
+ else
+ unregister_kprobe(&tp->rp.kp);
+ tp->flags &= ~TP_FLAG_REGISTERED;
+ /* Cleanup kprobe for reuse */
+ if (tp->rp.kp.symbol_name)
+ tp->rp.kp.addr = NULL;
+ }
+}
+
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
- if (probe_is_return(tp))
- unregister_kretprobe(&tp->rp);
- else
- unregister_kprobe(&tp->rp.kp);
+ __unregister_trace_probe(tp);
list_del(&tp->list);
unregister_probe_event(tp);
}
@@ -702,41 +851,65 @@ static int register_trace_probe(struct trace_probe *tp)
mutex_lock(&probe_lock);
- /* register as an event */
- old_tp = find_probe_event(tp->call.name, tp->call.class->system);
+ /* Delete old (same name) event if it exists */
+ old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
if (old_tp) {
- /* delete old event */
unregister_trace_probe(old_tp);
free_trace_probe(old_tp);
}
+
+ /* Register new event */
ret = register_probe_event(tp);
if (ret) {
pr_warning("Failed to register probe event(%d)\n", ret);
goto end;
}
- tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
- if (probe_is_return(tp))
- ret = register_kretprobe(&tp->rp);
- else
- ret = register_kprobe(&tp->rp.kp);
-
- if (ret) {
- pr_warning("Could not insert probe(%d)\n", ret);
- if (ret == -EILSEQ) {
- pr_warning("Probing address(0x%p) is not an "
- "instruction boundary.\n",
- tp->rp.kp.addr);
- ret = -EINVAL;
- }
+ /* Register k*probe */
+ ret = __register_trace_probe(tp);
+ if (ret < 0)
unregister_probe_event(tp);
- } else
+ else
list_add_tail(&tp->list, &probe_list);
+
end:
mutex_unlock(&probe_lock);
return ret;
}
+/* Module notifier callback, checking events on the module */
+static int trace_probe_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+ struct trace_probe *tp;
+ int ret;
+
+ if (val != MODULE_STATE_COMING)
+ return NOTIFY_DONE;
+
+ /* Update probes on the coming module */
+ mutex_lock(&probe_lock);
+ list_for_each_entry(tp, &probe_list, list) {
+ if (trace_probe_within_module(tp, mod)) {
+ __unregister_trace_probe(tp);
+ ret = __register_trace_probe(tp);
+ if (ret)
+ pr_warning("Failed to re-register probe %s on"
+ "%s: %d\n",
+ tp->call.name, mod->name, ret);
+ }
+ }
+ mutex_unlock(&probe_lock);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block trace_probe_module_nb = {
+ .notifier_call = trace_probe_module_callback,
+ .priority = 1 /* Invoked after kprobe module callback */
+};
+
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
@@ -962,8 +1135,8 @@ static int create_trace_probe(int argc, char **argv)
{
/*
* Argument syntax:
- * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
- * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
+ * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
+ * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
* Fetch args:
* $retval : fetch return value
* $stack : fetch stack address
@@ -1025,7 +1198,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
mutex_lock(&probe_lock);
- tp = find_probe_event(event, group);
+ tp = find_trace_probe(event, group);
if (!tp) {
mutex_unlock(&probe_lock);
pr_info("Event %s/%s doesn't exist.\n", group, event);
@@ -1144,7 +1317,7 @@ error:
return ret;
}
-static void cleanup_all_probes(void)
+static void release_all_trace_probes(void)
{
struct trace_probe *tp;
@@ -1158,7 +1331,6 @@ static void cleanup_all_probes(void)
mutex_unlock(&probe_lock);
}
-
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
@@ -1181,15 +1353,16 @@ static int probes_seq_show(struct seq_file *m, void *v)
struct trace_probe *tp = v;
int i;
- seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
+ seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
if (!tp->symbol)
seq_printf(m, " 0x%p", tp->rp.kp.addr);
else if (tp->rp.kp.offset)
- seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
+ seq_printf(m, " %s+%u", trace_probe_symbol(tp),
+ tp->rp.kp.offset);
else
- seq_printf(m, " %s", probe_symbol(tp));
+ seq_printf(m, " %s", trace_probe_symbol(tp));
for (i = 0; i < tp->nr_args; i++)
seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
@@ -1209,7 +1382,7 @@ static int probes_open(struct inode *inode, struct file *file)
{
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- cleanup_all_probes();
+ release_all_trace_probes();
return seq_open(file, &probes_seq_op);
}
@@ -1397,7 +1570,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ trace_nowake_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Kretprobe handler */
@@ -1429,7 +1603,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ trace_nowake_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Event entry printers */
@@ -1511,30 +1686,6 @@ partial:
return TRACE_TYPE_PARTIAL_LINE;
}
-static int probe_event_enable(struct ftrace_event_call *call)
-{
- struct trace_probe *tp = (struct trace_probe *)call->data;
-
- tp->flags |= TP_FLAG_TRACE;
- if (probe_is_return(tp))
- return enable_kretprobe(&tp->rp);
- else
- return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_event_disable(struct ftrace_event_call *call)
-{
- struct trace_probe *tp = (struct trace_probe *)call->data;
-
- tp->flags &= ~TP_FLAG_TRACE;
- if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
- if (probe_is_return(tp))
- disable_kretprobe(&tp->rp);
- else
- disable_kprobe(&tp->rp.kp);
- }
-}
-
#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed) \
do { \
@@ -1596,7 +1747,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
const char *fmt, *arg;
- if (!probe_is_return(tp)) {
+ if (!trace_probe_is_return(tp)) {
fmt = "(%lx)";
arg = "REC->" FIELD_STRING_IP;
} else {
@@ -1713,49 +1864,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
head = this_cpu_ptr(call->perf_events);
perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
}
-
-static int probe_perf_enable(struct ftrace_event_call *call)
-{
- struct trace_probe *tp = (struct trace_probe *)call->data;
-
- tp->flags |= TP_FLAG_PROFILE;
-
- if (probe_is_return(tp))
- return enable_kretprobe(&tp->rp);
- else
- return enable_kprobe(&tp->rp.kp);
-}
-
-static void probe_perf_disable(struct ftrace_event_call *call)
-{
- struct trace_probe *tp = (struct trace_probe *)call->data;
-
- tp->flags &= ~TP_FLAG_PROFILE;
-
- if (!(tp->flags & TP_FLAG_TRACE)) {
- if (probe_is_return(tp))
- disable_kretprobe(&tp->rp);
- else
- disable_kprobe(&tp->rp.kp);
- }
-}
#endif /* CONFIG_PERF_EVENTS */
static __kprobes
int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
{
+ struct trace_probe *tp = (struct trace_probe *)event->data;
+
switch (type) {
case TRACE_REG_REGISTER:
- return probe_event_enable(event);
+ return enable_trace_probe(tp, TP_FLAG_TRACE);
case TRACE_REG_UNREGISTER:
- probe_event_disable(event);
+ disable_trace_probe(tp, TP_FLAG_TRACE);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
- return probe_perf_enable(event);
+ return enable_trace_probe(tp, TP_FLAG_PROFILE);
case TRACE_REG_PERF_UNREGISTER:
- probe_perf_disable(event);
+ disable_trace_probe(tp, TP_FLAG_PROFILE);
return 0;
#endif
}
@@ -1805,7 +1932,7 @@ static int register_probe_event(struct trace_probe *tp)
/* Initialize ftrace_event_call */
INIT_LIST_HEAD(&call->class->fields);
- if (probe_is_return(tp)) {
+ if (trace_probe_is_return(tp)) {
call->event.funcs = &kretprobe_funcs;
call->class->define_fields = kretprobe_event_define_fields;
} else {
@@ -1844,6 +1971,9 @@ static __init int init_kprobe_trace(void)
struct dentry *d_tracer;
struct dentry *entry;
+ if (register_module_notifier(&trace_probe_module_nb))
+ return -EINVAL;
+
d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;
@@ -1897,12 +2027,12 @@ static __init int kprobe_trace_self_tests_init(void)
warn++;
} else {
/* Enable trace point */
- tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
+ tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
if (WARN_ON_ONCE(tp == NULL)) {
pr_warning("error on getting new probe.\n");
warn++;
} else
- probe_event_enable(&tp->call);
+ enable_trace_probe(tp, TP_FLAG_TRACE);
}
ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
@@ -1912,12 +2042,12 @@ static __init int kprobe_trace_self_tests_init(void)
warn++;
} else {
/* Enable trace point */
- tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
+ tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
if (WARN_ON_ONCE(tp == NULL)) {
pr_warning("error on getting new probe.\n");
warn++;
} else
- probe_event_enable(&tp->call);
+ enable_trace_probe(tp, TP_FLAG_TRACE);
}
if (warn)
@@ -1938,7 +2068,7 @@ static __init int kprobe_trace_self_tests_init(void)
}
end:
- cleanup_all_probes();
+ release_all_trace_probes();
if (warn)
pr_cont("NG: Some tests are failed. Please check them.\n");
else
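The enable/disable helpers above treat TP_FLAG_TRACE and TP_FLAG_PROFILE as two independent consumers of the same probe: the underlying k*probe is armed only while at least one consumer bit is set and the probe is registered, and disarmed only once both are clear. A small self-contained sketch of that flag pattern (all names here are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag layout, mirroring TP_FLAG_TRACE/PROFILE/REGISTERED. */
#define FL_TRACE      0x1
#define FL_PROFILE    0x2
#define FL_REGISTERED 0x4

struct fake_probe {
        unsigned int flags;
        bool armed;             /* stands in for the kprobe being enabled */
};

static void probe_enable(struct fake_probe *p, unsigned int consumer)
{
        p->flags |= consumer;
        if ((p->flags & (FL_TRACE | FL_PROFILE)) && (p->flags & FL_REGISTERED))
                p->armed = true;
}

static void probe_disable(struct fake_probe *p, unsigned int consumer)
{
        p->flags &= ~consumer;
        /* Only disarm once no consumer (trace or perf) is left. */
        if (!(p->flags & (FL_TRACE | FL_PROFILE)) && (p->flags & FL_REGISTERED))
                p->armed = false;
}

int main(void)
{
        struct fake_probe p = { .flags = FL_REGISTERED, .armed = false };

        probe_enable(&p, FL_TRACE);
        probe_enable(&p, FL_PROFILE);
        probe_disable(&p, FL_TRACE);
        printf("armed after dropping trace only: %d\n", p.armed); /* still 1 */
        probe_disable(&p, FL_PROFILE);
        printf("armed after dropping both: %d\n", p.armed);       /* now 0 */
        return 0;
}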
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index e37de492a9e1..51999309a6cf 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1107,19 +1107,20 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
{
struct stack_entry *field;
struct trace_seq *s = &iter->seq;
- int i;
+ unsigned long *p;
+ unsigned long *end;
trace_assign_type(field, iter->ent);
+ end = (unsigned long *)((long)iter->ent + iter->ent_size);
if (!trace_seq_puts(s, "<stack trace>\n"))
goto partial;
- for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
- break;
+
+ for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
if (!trace_seq_puts(s, " => "))
goto partial;
- if (!seq_print_ip_sym(s, field->caller[i], flags))
+ if (!seq_print_ip_sym(s, *p, flags))
goto partial;
if (!trace_seq_puts(s, "\n"))
goto partial;
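The stack printer above stops relying on a fixed FTRACE_STACK_ENTRIES bound and instead walks the caller array until it hits either a ULONG_MAX sentinel or the end of the record as given by iter->ent_size. A standalone sketch of that bounded walk (names are illustrative):

#include <limits.h>
#include <stdio.h>

/* Illustrative only: walk a variable-length array that ends either at a
 * ULONG_MAX sentinel or at the end of the record, whichever comes first. */
static void print_callers(const unsigned long *callers, size_t record_bytes)
{
        const unsigned long *end =
                (const unsigned long *)((const char *)callers + record_bytes);
        const unsigned long *p;

        for (p = callers; p < end && *p != ULONG_MAX; p++)
                printf(" => %#lx\n", *p);
}

int main(void)
{
        unsigned long stack[] = { 0xc0ffee, 0xbeef, ULONG_MAX, 0xdead };

        print_callers(stack, sizeof(stack));    /* stops at the sentinel */
        return 0;
}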
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index f029dd4fd2ca..e4a70c0c71b6 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -227,7 +227,9 @@ static void wakeup_trace_close(struct trace_iterator *iter)
graph_trace_close(iter);
}
-#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
+ TRACE_GRAPH_PRINT_ABS_TIME | \
+ TRACE_GRAPH_PRINT_DURATION)
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index b0b53b8e4c25..77575b386d97 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -156,20 +156,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
{
long *ptr = filp->private_data;
unsigned long val, flags;
- char buf[64];
int ret;
int cpu;
- if (count >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, count))
- return -EFAULT;
-
- buf[count] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
return ret;
local_irq_save(flags);
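The trace_stack.c hunk replaces the open-coded copy_from_user() plus strict_strtoul() sequence with kstrtoul_from_user(), which copies and parses the user buffer in one call. A hedged sketch of a write handler using it (the handler and variable names are invented; only kstrtoul_from_user() is the real helper):

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Illustrative only: a debugfs-style write handler parsing a decimal value. */
static unsigned long example_value;

static ssize_t example_write(struct file *filp, const char __user *ubuf,
                             size_t count, loff_t *ppos)
{
        unsigned long val;
        int ret;

        /* Copies from userspace and parses in one step (base 10 here). */
        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        example_value = val;
        return count;
}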
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 3d0c56ad4792..36491cd5b7d4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -200,6 +200,7 @@ static int is_softlockup(unsigned long touch_ts)
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
+
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
@@ -209,7 +210,7 @@ static struct perf_event_attr wd_hw_attr = {
};
/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
@@ -368,10 +369,11 @@ static int watchdog_nmi_enable(int cpu)
if (event != NULL)
goto out_enable;
- /* Try to register using hardware perf events */
wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
- event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
+
+ /* Try to register using hardware perf events */
+ event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
if (!IS_ERR(event)) {
printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
goto out_save;
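The watchdog hunk reflects the updated perf API used throughout this series: the overflow callback loses its nmi argument and perf_event_create_kernel_counter() takes an extra context pointer, passed as NULL here. A hedged sketch of the new calling convention (the callback and attribute names are made up for illustration):

#include <linux/perf_event.h>

/* Illustrative only: register a per-CPU cycle counter with the new API. */
static void my_overflow(struct perf_event *event,
                        struct perf_sample_data *data,
                        struct pt_regs *regs)
{
        /* react to the counter overflow, e.g. check for a stuck CPU */
}

static struct perf_event_attr my_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

static struct perf_event *create_counter(int cpu, u64 period)
{
        my_attr.sample_period = period;
        /* last argument is the new context cookie, unused in this sketch */
        return perf_event_create_kernel_counter(&my_attr, cpu, NULL,
                                                my_overflow, NULL);
}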
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..25fb1b0e53fa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
* per-CPU workqueues:
*/
struct workqueue_struct {
- unsigned int flags; /* I: WQ_* flags */
+ unsigned int flags; /* W: WQ_* flags */
union {
struct cpu_workqueue_struct __percpu *pcpu;
struct cpu_workqueue_struct *single;
@@ -240,6 +240,7 @@ struct workqueue_struct {
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* I: rescue worker */
+ int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
const char *name; /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
@@ -990,7 +991,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
/* if dying, only works from the same workqueue are allowed */
- if (unlikely(wq->flags & WQ_DYING) &&
+ if (unlikely(wq->flags & WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -2381,6 +2382,54 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(flush_workqueue);
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty. While draining is in progress,
+ * only chain queueing is allowed. IOW, only currently pending or running
+ * work items on @wq can queue further work items on it. @wq is flushed
+ * repeatedly until it becomes empty. The number of flushes is determined
+ * by the depth of chaining and should be relatively small. Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+ unsigned int flush_cnt = 0;
+ unsigned int cpu;
+
+ /*
+ * __queue_work() needs to test whether there are drainers; it is much
+ * hotter than drain_workqueue() and already looks at @wq->flags.
+ * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+ */
+ spin_lock(&workqueue_lock);
+ if (!wq->nr_drainers++)
+ wq->flags |= WQ_DRAINING;
+ spin_unlock(&workqueue_lock);
+reflush:
+ flush_workqueue(wq);
+
+ for_each_cwq_cpu(cpu, wq) {
+ struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+ if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+ continue;
+
+ if (++flush_cnt == 10 ||
+ (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+ pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+ wq->name, flush_cnt);
+ goto reflush;
+ }
+
+ spin_lock(&workqueue_lock);
+ if (!--wq->nr_drainers)
+ wq->flags &= ~WQ_DRAINING;
+ spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
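With drain_workqueue() exported, the drain-until-empty loop is no longer private to destroy_workqueue(): any caller can wait for a workqueue to go idle while chained requeueing is still permitted. A hedged usage sketch for a driver-style setup and teardown path (all names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Illustrative only: a self-requeueing work item stopped by draining
 * rather than by cancelling each pending instance. */
static struct workqueue_struct *example_wq;
static void example_fn(struct work_struct *work);
static DECLARE_WORK(example_work, example_fn);
static bool example_more_to_do;

static void example_fn(struct work_struct *work)
{
        /* Chained queueing is still allowed while the workqueue drains. */
        if (example_more_to_do)
                queue_work(example_wq, &example_work);
}

static int example_init(void)
{
        example_wq = alloc_workqueue("example_wq", 0, 0);
        if (!example_wq)
                return -ENOMEM;
        example_more_to_do = true;
        queue_work(example_wq, &example_work);
        return 0;
}

static void example_teardown(void)
{
        example_more_to_do = false;
        drain_workqueue(example_wq);    /* returns once nothing is pending */
        destroy_workqueue(example_wq);  /* which now drains internally too */
}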
+
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
bool wait_executing)
{
@@ -3009,34 +3058,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- unsigned int flush_cnt = 0;
unsigned int cpu;
- /*
- * Mark @wq dying and drain all pending works. Once WQ_DYING is
- * set, only chain queueing is allowed. IOW, only currently
- * pending or running work items on @wq can queue further work
- * items on it. @wq is flushed repeatedly until it becomes empty.
- * The number of flushing is detemined by the depth of chaining and
- * should be relatively short. Whine if it takes too long.
- */
- wq->flags |= WQ_DYING;
-reflush:
- flush_workqueue(wq);
-
- for_each_cwq_cpu(cpu, wq) {
- struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
- if (!cwq->nr_active && list_empty(&cwq->delayed_works))
- continue;
-
- if (++flush_cnt == 10 ||
- (flush_cnt % 100 == 0 && flush_cnt <= 1000))
- printk(KERN_WARNING "workqueue %s: flush on "
- "destruction isn't complete after %u tries\n",
- wq->name, flush_cnt);
- goto reflush;
- }
+ /* drain it before proceeding with destruction */
+ drain_workqueue(wq);
/*
* wq list is used to freeze wq, remove from list after