From 16addf954d3954a72fd56abc02ffcba3c18529a1 Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Fri, 18 Mar 2011 09:34:53 -0700
Subject: sched: Fix yield_to kernel-doc

Add missing function parameters for yield_to():

  Warning(kernel/sched.c:5470): No description found for parameter 'p'
  Warning(kernel/sched.c:5470): No description found for parameter 'preempt'

Signed-off-by: Randy Dunlap
Cc: Peter Zijlstra
LKML-Reference: <20110318093453.8f7489a4.randy.dunlap@oracle.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 58d66ea7d200..052120d67706 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5467,6 +5467,8 @@ EXPORT_SYMBOL(yield);
  * yield_to - yield the current processor to another thread in
  * your thread group, or accelerate that thread toward the
  * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
  *
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
--
cgit v1.2.3

From 1106b6997df7d0c0487e21fd9c9dd2ce3d4a52db Mon Sep 17 00:00:00 2001
From: Jiri Olsa
Date: Wed, 16 Feb 2011 17:35:34 +0100
Subject: tracing: Fix set_ftrace_filter probe function display

If one or more function probes (like traceon) are enabled, and there's
no other function filter, the first probe func is skipped (which one
depends on the position in the hash).

  $ echo sys_open:traceon sys_close:traceon > ./set_ftrace_filter
  $ cat set_ftrace_filter
  #### all functions enabled ####
  sys_close:traceon:unlimited
  $

The reason was that, in the case of no other function filter, the
func_pos was not properly updated before calling t_hash_start().

Signed-off-by: Jiri Olsa
LKML-Reference: <1297874134-7008-1-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 888b611897d3..c075f4ea6b94 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1467,7 +1467,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		return t_hash_next(m, pos);

 	(*pos)++;
-	iter->pos = *pos;
+	iter->pos = iter->func_pos = *pos;

 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return t_hash_start(m, pos);
@@ -1502,7 +1502,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	if (!rec)
 		return t_hash_start(m, pos);

-	iter->func_pos = *pos;
 	iter->func = rec;

 	return iter;
--
cgit v1.2.3

From 20dd67407160eac577656cd2f8ee9a1fead960b8 Mon Sep 17 00:00:00 2001
From: Sergey Senozhatsky
Date: Wed, 23 Mar 2011 13:17:23 +0200
Subject: sched: Remove unused 'rq' variable and cpu_rq() call from alloc_fair_sched_group()

Signed-off-by: Sergey Senozhatsky
Cc: Steven Rostedt
Cc: Peter Zijlstra
LKML-Reference: <20110323111722.GA4244@swordfish.minsk.epam.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 052120d67706..a361e20ec2cd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8443,7 +8443,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;

 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8456,8 +8455,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->shares = NICE_0_LOAD;

 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq =
kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i)); if (!cfs_rq) -- cgit v1.2.3 From dec2960827c85253d76938dbfa909df3be34958b Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 23 Mar 2011 14:38:28 +0200 Subject: lockdep: Remove unused 'factor' variable from lockdep_stats_show() Signed-off-by: Sergey Senozhatsky Cc: Peter Zijlstra LKML-Reference: <20110323123828.GB4244@swordfish.minsk.epam.com> Signed-off-by: Ingo Molnar --- kernel/lockdep_proc.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index 1969d2fc4b36..71edd2f60c02 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -225,7 +225,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) nr_irq_read_safe = 0, nr_irq_read_unsafe = 0, nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0, nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0, - sum_forward_deps = 0, factor = 0; + sum_forward_deps = 0; list_for_each_entry(class, &all_lock_classes, lock_entry) { @@ -283,13 +283,6 @@ static int lockdep_stats_show(struct seq_file *m, void *v) nr_hardirq_unsafe * nr_hardirq_safe + nr_list_entries); - /* - * Estimated factor between direct and indirect - * dependencies: - */ - if (nr_list_entries) - factor = sum_forward_deps / nr_list_entries; - #ifdef CONFIG_PROVE_LOCKING seq_printf(m, " dependency chains: %11lu [max: %lu]\n", nr_lock_chains, MAX_LOCKDEP_CHAINS); -- cgit v1.2.3 From 1232d6132a986125f6a687ab9b61a4330e319270 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 22 Mar 2011 18:46:18 +0100 Subject: sched, doc: Update sched-design-CFS.txt Correct ->dequeue_tree() thinko into sched_class->dequeue_task and drop all references to ->task_new() since it is obviously gone. Signed-off-by: Borislav Petkov Cc: Peter Zijlstra Cc: Mike Galbraith LKML-Reference: <1300815978-16618-1-git-send-email-bp@amd64.org> Signed-off-by: Ingo Molnar --- Documentation/scheduler/sched-design-CFS.txt | 7 +------ kernel/sched_idletask.c | 2 -- kernel/sched_stoptask.c | 2 -- 3 files changed, 1 insertion(+), 10 deletions(-) (limited to 'kernel') diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt index 8239ebbcddce..99961993257a 100644 --- a/Documentation/scheduler/sched-design-CFS.txt +++ b/Documentation/scheduler/sched-design-CFS.txt @@ -164,7 +164,7 @@ This is the (partial) list of the hooks: It puts the scheduling entity (task) into the red-black tree and increments the nr_running variable. - - dequeue_tree(...) + - dequeue_task(...) When a task is no longer runnable, this function is called to keep the corresponding scheduling entity out of the red-black tree. It decrements @@ -195,11 +195,6 @@ This is the (partial) list of the hooks: This function is mostly called from time tick functions; it might lead to process switch. This drives the running preemption. - - task_new(...) - - The core scheduler gives the scheduling module an opportunity to manage new - task startup. The CFS scheduling module uses it for group scheduling, while - the scheduling module for a real-time task does not use it. 
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index c82f26c1b7c3..a776a6396427 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c @@ -94,6 +94,4 @@ static const struct sched_class idle_sched_class = { .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, - - /* no .task_new for idle tasks */ }; diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c index 84ec9bcf82d9..1ba2bd40fdac 100644 --- a/kernel/sched_stoptask.c +++ b/kernel/sched_stoptask.c @@ -102,6 +102,4 @@ static const struct sched_class stop_sched_class = { .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, - - /* no .task_new for stop tasks */ }; -- cgit v1.2.3 From 68cacd29167b1926d237bd1b153aa2a990201729 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 23 Mar 2011 16:03:06 +0100 Subject: perf_events: Fix stale ->cgrp pointer in update_cgrp_time_from_cpuctx() This patch solves a stale pointer problem in update_cgrp_time_from_cpuctx(). The cpuctx->cgrp was not cleared on all possible event exit paths, including: close() perf_release() perf_release_kernel() list_del_event() This patch fixes list_del_event() to clear cpuctx->cgrp when there are no cgroup events left in the context. [ This second version makes the code compile when CONFIG_CGROUP_PERF is not enabled. We unconditionally define perf_cpu_context->cgrp. ] Signed-off-by: Stephane Eranian Cc: peterz@infradead.org Cc: perfmon2-devel@lists.sf.net Cc: paulus@samba.org Cc: davem@davemloft.net LKML-Reference: <20110323150306.GA1580@quad> Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 2 -- kernel/perf_event.c | 12 +++++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f495c0147240..311b4dc785a1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -938,9 +938,7 @@ struct perf_cpu_context { struct list_head rotation_list; int jiffies_interval; struct pmu *active_pmu; -#ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; -#endif }; struct perf_output_handle { diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 3472bb1a070c..0c714226ae0c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -941,6 +941,7 @@ static void perf_group_attach(struct perf_event *event) static void list_del_event(struct perf_event *event, struct perf_event_context *ctx) { + struct perf_cpu_context *cpuctx; /* * We can have double detach due to exit/hot-unplug + close. */ @@ -949,8 +950,17 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) event->attach_state &= ~PERF_ATTACH_CONTEXT; - if (is_cgroup_event(event)) + if (is_cgroup_event(event)) { ctx->nr_cgroups--; + cpuctx = __get_cpu_context(ctx); + /* + * if there are no more cgroup events + * then cler cgrp to avoid stale pointer + * in update_cgrp_time_from_cpuctx() + */ + if (!ctx->nr_cgroups) + cpuctx->cgrp = NULL; + } ctx->nr_events--; if (event->attr.inherit_stat) -- cgit v1.2.3 From 3b9038912828384e38d82409c281124631c8533b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 23 Mar 2011 00:24:11 +0100 Subject: genirq; Remove the last leftovers of the old sparse irq code All users converted. Get rid of it. 
Signed-off-by: Thomas Gleixner --- include/linux/irqdesc.h | 7 ------- kernel/irq/irqdesc.c | 14 -------------- 2 files changed, 21 deletions(-) (limited to 'kernel') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 00218371518b..0b30662bc36c 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -100,13 +100,6 @@ struct irq_desc { extern struct irq_desc irq_desc[NR_IRQS]; #endif -/* Will be removed once the last users in power and sh are gone */ -extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); -static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) -{ - return desc; -} - #ifdef CONFIG_GENERIC_HARDIRQS static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index dbccc799407f..6fb014f172f7 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -198,15 +198,6 @@ err: return -ENOMEM; } -struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) -{ - int res = irq_alloc_descs(irq, irq, 1, node); - - if (res == -EEXIST || res == irq) - return irq_to_desc(irq); - return NULL; -} - static int irq_expand_nr_irqs(unsigned int nr) { if (nr > IRQ_BITMAP_BITS) @@ -283,11 +274,6 @@ struct irq_desc *irq_to_desc(unsigned int irq) return (irq < NR_IRQS) ? irq_desc + irq : NULL; } -struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) -{ - return irq_to_desc(irq); -} - static void free_desc(unsigned int irq) { dynamic_irq_cleanup(irq); -- cgit v1.2.3 From 880f57318450dbead6a03f9e31a1468924d6dd88 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 23 Mar 2011 19:29:39 +0100 Subject: perf: Better fit max unprivileged mlock pages for tools needs The maximum kilobytes of locked memory that an unprivileged user can reserve is of 512 kB = 128 pages by default, scaled to the number of onlined CPUs, which fits well with the tools that use 128 data pages by default. However tools actually use 129 pages, because they need one more for the user control page. Thus the default mlock threshold is not sufficient for the default tools needs and we always end up to evaluate the constant mlock rlimit policy, which doesn't have this scaling with the number of online CPUs. Hence, on systems that have more than 16 CPUs, we overlap the rlimit threshold and fail to mmap: $ perf record ls Error: failed to mmap with 1 (Operation not permitted) Just increase the max unprivileged mlock threshold by one page so that it supports well perf tools even after 16 CPUs. 
Reported-by: Han Pingtian Reported-by: Peter Zijlstra Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Frederic Weisbecker Acked-by: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Stable LKML-Reference: <1300904979-5508-1-git-send-email-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 0c714226ae0c..c75925c4d1e2 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -145,7 +145,8 @@ static struct srcu_struct pmus_srcu; */ int sysctl_perf_event_paranoid __read_mostly = 1; -int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ +/* Minimum for 128 pages + 1 for the user control page */ +int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */ /* * max perf event sample rate -- cgit v1.2.3 From e1a85b2c519551d4792180cdab4074d7e99bf2c9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 23 Mar 2011 22:16:04 +0100 Subject: timekeeping: Use syscore_ops instead of sysdev class and sysdev The timekeeping subsystem uses a sysdev class and a sysdev for executing timekeeping_suspend() after interrupts have been turned off on the boot CPU (during system suspend) and for executing timekeeping_resume() before turning on interrupts on the boot CPU (during system resume). However, since both of these functions ignore their arguments, the entire mechanism may be replaced with a struct syscore_ops object which is simpler. Signed-off-by: Rafael J. Wysocki Reviewed-by: Thomas Gleixner --- kernel/time/timekeeping.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3bd7e3d5c632..8ad5d576755e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include @@ -597,13 +597,12 @@ static struct timespec timekeeping_suspend_time; /** * timekeeping_resume - Resumes the generic timekeeping subsystem. - * @dev: unused * * This is for the generic clocksource timekeeping. * xtime/wall_to_monotonic/jiffies/etc are * still managed by arch specific suspend/resume code. 
*/ -static int timekeeping_resume(struct sys_device *dev) +static void timekeeping_resume(void) { unsigned long flags; struct timespec ts; @@ -632,11 +631,9 @@ static int timekeeping_resume(struct sys_device *dev) /* Resume hrtimers */ hres_timers_resume(); - - return 0; } -static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) +static int timekeeping_suspend(void) { unsigned long flags; @@ -654,26 +651,18 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) } /* sysfs resume/suspend bits for timekeeping */ -static struct sysdev_class timekeeping_sysclass = { - .name = "timekeeping", +static struct syscore_ops timekeeping_syscore_ops = { .resume = timekeeping_resume, .suspend = timekeeping_suspend, }; -static struct sys_device device_timer = { - .id = 0, - .cls = &timekeeping_sysclass, -}; - -static int __init timekeeping_init_device(void) +static int __init timekeeping_init_ops(void) { - int error = sysdev_class_register(&timekeeping_sysclass); - if (!error) - error = sysdev_register(&device_timer); - return error; + register_syscore_ops(&timekeeping_syscore_ops); + return 0; } -device_initcall(timekeeping_init_device); +device_initcall(timekeeping_init_ops); /* * If the error is already larger, we look ahead even further -- cgit v1.2.3 From 0f77a8d378254f27df4a114a5da67223af1fe93f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 24 Mar 2011 11:42:29 +0900 Subject: vsprintf: Introduce %pB format specifier The %pB format specifier is for stack backtrace. Its handler sprint_backtrace() does symbol lookup using (address-1) to ensure the address will not point outside of the function. If there is a tail-call to the function marked "noreturn", gcc optimized out the code after the call then causes saved return address points outside of the function (i.e. the start of the next function), so pollutes call trace somewhat. This patch adds the %pB printk mechanism that allows architecture call-trace printout functions to improve backtrace printouts. Signed-off-by: Namhyung Kim Acked-by: Steven Rostedt Acked-by: Frederic Weisbecker Cc: Linus Torvalds Cc: Andrew Morton Cc: linux-arch@vger.kernel.org LKML-Reference: <1300934550-21394-1-git-send-email-namhyung@gmail.com> Signed-off-by: Ingo Molnar --- include/linux/kallsyms.h | 7 +++++++ kernel/kallsyms.c | 44 +++++++++++++++++++++++++++++++++++++++++--- lib/vsprintf.c | 7 ++++++- 3 files changed, 54 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index d8e9b3d1c23c..0df513b7a9f8 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -36,6 +36,7 @@ const char *kallsyms_lookup(unsigned long addr, /* Look up a kernel symbol and return it in a text buffer. */ extern int sprint_symbol(char *buffer, unsigned long address); +extern int sprint_backtrace(char *buffer, unsigned long address); /* Look up a kernel symbol and print it to the kernel messages. 
*/ extern void __print_symbol(const char *fmt, unsigned long address); @@ -79,6 +80,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr) return 0; } +static inline int sprint_backtrace(char *buffer, unsigned long addr) +{ + *buffer = '\0'; + return 0; +} + static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 6f6d091b5757..59e879929b17 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -342,13 +342,15 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size, } /* Look up a kernel symbol and return it in a text buffer. */ -int sprint_symbol(char *buffer, unsigned long address) +static int __sprint_symbol(char *buffer, unsigned long address, + int symbol_offset) { char *modname; const char *name; unsigned long offset, size; int len; + address += symbol_offset; name = kallsyms_lookup(address, &size, &offset, &modname, buffer); if (!name) return sprintf(buffer, "0x%lx", address); @@ -357,17 +359,53 @@ int sprint_symbol(char *buffer, unsigned long address) strcpy(buffer, name); len = strlen(buffer); buffer += len; + offset -= symbol_offset; if (modname) - len += sprintf(buffer, "+%#lx/%#lx [%s]", - offset, size, modname); + len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname); else len += sprintf(buffer, "+%#lx/%#lx", offset, size); return len; } + +/** + * sprint_symbol - Look up a kernel symbol and return it in a text buffer + * @buffer: buffer to be stored + * @address: address to lookup + * + * This function looks up a kernel symbol with @address and stores its name, + * offset, size and module name to @buffer if possible. If no symbol was found, + * just saves its @address as is. + * + * This function returns the number of bytes stored in @buffer. + */ +int sprint_symbol(char *buffer, unsigned long address) +{ + return __sprint_symbol(buffer, address, 0); +} + EXPORT_SYMBOL_GPL(sprint_symbol); +/** + * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer + * @buffer: buffer to be stored + * @address: address to lookup + * + * This function is for stack backtrace and does the same thing as + * sprint_symbol() but with modified/decreased @address. If there is a + * tail-call to the function marked "noreturn", gcc optimized out code after + * the call so that the stack-saved return address could point outside of the + * caller. This function ensures that kallsyms will find the original caller + * by decreasing @address. + * + * This function returns the number of bytes stored in @buffer. + */ +int sprint_backtrace(char *buffer, unsigned long address) +{ + return __sprint_symbol(buffer, address, -1); +} + /* Look up a kernel symbol and print it to the kernel messages. 
*/ void __print_symbol(const char *fmt, unsigned long address) { diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d3023df8477f..d9e01fc3168e 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -574,7 +574,9 @@ char *symbol_string(char *buf, char *end, void *ptr, unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; - if (ext != 'f' && ext != 's') + if (ext == 'B') + sprint_backtrace(sym, value); + else if (ext != 'f' && ext != 's') sprint_symbol(sym, value); else kallsyms_lookup(value, NULL, NULL, NULL, sym); @@ -949,6 +951,7 @@ int kptr_restrict = 1; * - 'f' For simple symbolic function names without offset * - 'S' For symbolic direct pointers with offset * - 's' For symbolic direct pointers without offset + * - 'B' For backtraced symbolic direct pointers with offset * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] * - 'M' For a 6-byte MAC address, it prints the address in the @@ -1008,6 +1011,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, /* Fallthrough */ case 'S': case 's': + case 'B': return symbol_string(buf, end, ptr, spec, *fmt); case 'R': case 'r': @@ -1279,6 +1283,7 @@ qualifier: * %ps output the name of a text symbol without offset * %pF output the name of a function pointer with its offset * %pf output the name of a function pointer without its offset + * %pB output the name of a backtrace symbol with its offset * %pR output the address range in a struct resource with decoded flags * %pr output the address range in a struct resource with raw flags * %pM output a 6-byte MAC address with colons -- cgit v1.2.3 From 29096202176ceaa5016a17ea2dd1aea19a4e90e2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 17 Mar 2011 15:21:07 -0400 Subject: futex: Fix WARN_ON() test for UP An update of the futex code had a WARN_ON(!spin_is_locked(q->lock_ptr)) But on UP, spin_is_locked() is always false, and will trigger this warning, and even worse, it will exit the function without doing the necessary work. Converting this to a WARN_ON_SMP() fixes the problem. Reported-by: Richard Weinberger Tested-by: Richard Weinberger Signed-off-by: Steven Rostedt Acked-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Darren Hart Cc: Lai Jiangshan LKML-Reference: <20110317192208.682654502@goodmis.org> Signed-off-by: Ingo Molnar --- kernel/futex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index bda415715382..823aae3e2a96 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -782,8 +782,8 @@ static void __unqueue_futex(struct futex_q *q) { struct futex_hash_bucket *hb; - if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr) - || plist_node_empty(&q->list))) + if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) + || WARN_ON(plist_node_empty(&q->list))) return; hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); -- cgit v1.2.3 From ab7798ffcf98b11a9525cf65bacdae3fd58d357f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 25 Mar 2011 16:48:50 +0100 Subject: genirq: Expand generic show_interrupts() Some archs want to print extra information for certain irq_chips which is per irq and not per chip. Allow them to provide a chip callback to print the chip name and the extra information. PowerPC wants to print the LEVEL/EDGE type information. Make it configurable. 
Signed-off-by: Thomas Gleixner
---
 include/linux/irq.h | 4 ++++
 kernel/irq/Kconfig  | 4 ++++
 kernel/irq/proc.c   | 15 ++++++++++++++-
 3 files changed, 22 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1d3577f30d45..5d876c9b3a3d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -28,6 +28,7 @@
 #include
 #include

+struct seq_file;
 struct irq_desc;
 struct irq_data;
 typedef void (*irq_flow_handler_t)(unsigned int irq,
@@ -270,6 +271,7 @@ static inline bool irqd_can_move_in_process_context(struct irq_data *d)
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
+ * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @flags:		chip specific flags
 *
 * @release:		release function solely used by UML
@@ -317,6 +319,8 @@ struct irq_chip {
 	void		(*irq_bus_lock)(struct irq_data *data);
 	void		(*irq_bus_sync_unlock)(struct irq_data *data);

+	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
+
 	unsigned long	flags;

 	/* Currently used only by UML, might disappear one day.*/
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 09bef82d74cb..00f2c037267a 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -31,6 +31,10 @@ config GENERIC_IRQ_PROBE
 config GENERIC_IRQ_SHOW
 	bool

+# Print level/edge extra information
+config GENERIC_IRQ_SHOW_LEVEL
+	bool
+
 # Support for delayed migration from interrupt context
 config GENERIC_PENDING_IRQ
 	bool
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 760248de109d..626d092eed9a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -404,7 +404,20 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %8s", desc->irq_data.chip->name);
+
+	if (desc->irq_data.chip) {
+		if (desc->irq_data.chip->irq_print_chip)
+			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
+		else if (desc->irq_data.chip->name)
+			seq_printf(p, " %8s", desc->irq_data.chip->name);
+		else
+			seq_printf(p, " %8s", "-");
+	} else {
+		seq_printf(p, " %8s", "None");
+	}
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
+	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
+#endif
 	if (desc->name)
 		seq_printf(p, "-%-8s", desc->name);

--
cgit v1.2.3

From 27029c339b1beebe79bb4e64422ad1bb8d0b6440 Mon Sep 17 00:00:00 2001
From: Jovi Zhang
Date: Mon, 15 Mar 2010 07:28:00 -0500
Subject: kdb: code cleanup to use macro instead of value

It's better to use the macro KDB_BASE_CMD_MAX instead of the value 50.

Signed-off-by: Jovi Zhang
Signed-off-by: Jason Wessel
---
 kernel/debug/kdb/kdb_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index bd3e8e29caa3..38a85428c70f 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -78,7 +78,7 @@ static unsigned int kdb_continue_catastrophic;
 static kdbtab_t *kdb_commands;
 #define KDB_BASE_CMD_MAX 50
 static int kdb_max_commands = KDB_BASE_CMD_MAX;
-static kdbtab_t kdb_base_commands[50];
+static kdbtab_t kdb_base_commands[KDB_BASE_CMD_MAX];
 #define for_each_kdbcmd(cmd, num) \
 	for ((cmd) = kdb_base_commands, (num) = 0;	\
 	     num < kdb_max_commands;			\
--
cgit v1.2.3

From 0d3db28daed2529ab90933a3aaaaf46446fdfda8 Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Mon, 15 Mar 2010 07:28:00 -0500
Subject: kdb: add usage string of 'per_cpu' command

Signed-off-by: Namhyung Kim
Signed-off-by: Jason Wessel
---
 kernel/debug/kdb/kdb_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 38a85428c70f..6bc6e3bc4f9c 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2892,7 +2892,7 @@ static void __init kdb_inittab(void)
 	  "Send a signal to a process", 0, KDB_REPEAT_NONE);
 	kdb_register_repeat("summary", kdb_summary, "",
 	  "Summarize the system", 4, KDB_REPEAT_NONE);
-	kdb_register_repeat("per_cpu", kdb_per_cpu, "",
+	kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
 	  "Display per_cpu variables", 3, KDB_REPEAT_NONE);
 	kdb_register_repeat("grephelp", kdb_grep_help, "",
 	  "Display help on | grep", 0, KDB_REPEAT_NONE);
--
cgit v1.2.3