author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 18:10:49 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 18:10:49 -0800
commit | 37ea95a959d4a49846ecbf2dd45326b6b34bf049 (patch) |
tree | 43791e1244ce06d8ca18ecbfd0b0f6dcb86ebb8b /include |
parent | de0c276b31538fcd56611132f20b63eae2891876 (diff) |
parent | 630e1e0bcddfda9566462d4f9a0d58b31c29d467 (diff) |
download | linux-37ea95a959d4a49846ecbf2dd45326b6b34bf049.tar.bz2 |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU update from Ingo Molnar:
"The major features of this tree are:
1. A first version of no-callbacks CPUs. This version prohibits
offlining CPU 0, but only when enabled via CONFIG_RCU_NOCB_CPU=y.
Relaxing this constraint is in progress, but not yet ready
for prime time. These commits were posted to LKML at
https://lkml.org/lkml/2012/10/30/724. (A boot-parameter sketch
covering this and the other new knobs follows this quoted list.)
2. Changes to SRCU that allow statically initialized srcu_struct
structures. These commits were posted to LKML at
https://lkml.org/lkml/2012/10/30/296. (A usage sketch follows
the full diff at the bottom of this page.)
3. Restructuring of RCU's debugfs output. These commits were posted
to LKML at https://lkml.org/lkml/2012/10/30/341.
4. Additional CPU-hotplug/RCU improvements, posted to LKML at
https://lkml.org/lkml/2012/10/30/327.
Note that the commit eliminating __stop_machine() was judged to
carry too high a risk, so it is deferred to 3.9.
5. Changes to RCU's idle interface, most notably a new module
parameter that redirects normal grace-period operations to
their expedited equivalents. These were posted to LKML at
https://lkml.org/lkml/2012/10/30/739.
6. Additional diagnostics for RCU's CPU stall warning facility,
posted to LKML at https://lkml.org/lkml/2012/10/30/315.
The most notable change reduces the default RCU CPU stall-warning
timeout from 60 seconds to 21 seconds, so that it once again fires
sooner than the softlockup timeout.
7. Documentation updates, which were posted to LKML at
https://lkml.org/lkml/2012/10/30/280.
A couple of late-breaking changes were posted at
https://lkml.org/lkml/2012/11/16/634 and
https://lkml.org/lkml/2012/11/16/547.
8. Miscellaneous fixes, which were posted to LKML at
https://lkml.org/lkml/2012/10/30/309.
9. Finally, a fix for a lockdep-RCU splat, which was posted to LKML
at https://lkml.org/lkml/2012/11/7/486."
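[Editor's note] Items 1, 5, and 6 above all surface as kernel boot parameters. As a rough sketch only -- the module prefixes (rcupdate./rcutree.) are assumptions about which files define the parameters, and rcu_nocbs= takes effect only under CONFIG_RCU_NOCB_CPU=y -- a command line exercising all three might look like:

    rcu_nocbs=1-3 rcupdate.rcu_expedited=1 rcutree.rcu_cpu_stall_timeout=21

This offloads RCU callback invocation for CPUs 1-3 to kthreads (recall that this first version requires CPU 0 to remain online), redirects normal grace-period operations to their expedited equivalents, and spells out the new 21-second stall-warning default explicitly.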
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
context_tracking: New context tracking susbsystem
sched: Mark RCU reader in sched_show_task()
rcu: Separate accounting of callbacks from callback-free CPUs
rcu: Add callback-free CPUs
rcu: Add documentation for the new rcuexp debugfs trace file
rcu: Update documentation for TREE_RCU debugfs tracing
rcu: Reduce default RCU CPU stall warning timeout
rcu: Fix TINY_RCU rcu_is_cpu_rrupt_from_idle check
rcu: Clarify memory-ordering properties of grace-period primitives
rcu: Add new rcutorture module parameters to start/end test messages
rcu: Remove list_for_each_continue_rcu()
rcu: Fix batch-limit size problem
rcu: Add tracing for synchronize_sched_expedited()
rcu: Remove old debugfs interfaces and also RCU flavor name
rcu: split 'rcuhier' to each flavor
rcu: split 'rcugp' to each flavor
rcu: split 'rcuboost' to each flavor
rcu: split 'rcubarrier' to each flavor
rcu: Fix tracing formatting
rcu: Remove the interface "rcudata.csv"
...
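[Editor's note] The wording added to include/linux/rcupdate.h in the diff at the bottom of this page documents the ordering guarantees around the canonical call_rcu() deferred-free pattern. A minimal sketch of that pattern for reference -- struct foo, foo_reclaim(), and foo_del() are illustrative names, not identifiers from this series:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
    	struct list_head list;
    	int data;
    	struct rcu_head rcu;
    };

    /* Runs after a grace period: every CPU has finished any read-side
     * critical section that began before the call_rcu() below, with the
     * full-memory-barrier guarantees the new comment spells out. */
    static void foo_reclaim(struct rcu_head *head)
    {
    	kfree(container_of(head, struct foo, rcu));
    }

    /* Updater: unlink the element now, defer the free past a grace period. */
    static void foo_del(struct foo *fp)
    {
    	list_del_rcu(&fp->list);
    	call_rcu(&fp->rcu, foo_reclaim);
    }

The new comment's point is that by the time foo_reclaim() runs, each CPU is guaranteed to have executed a full memory barrier since the end of its last read-side critical section that began before the call_rcu().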
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/context_tracking.h | 18
-rw-r--r-- | include/linux/rculist.h | 17
-rw-r--r-- | include/linux/rcupdate.h | 29
-rw-r--r-- | include/linux/sched.h | 10
-rw-r--r-- | include/linux/srcu.h | 34
-rw-r--r-- | include/trace/events/rcu.h | 1

6 files changed, 82 insertions, 27 deletions
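[Editor's note] One removal worth calling out before the diff: list_for_each_continue_rcu() is deleted from include/linux/rculist.h below. A hedged migration sketch -- existing callers can usually switch to the typed list_for_each_entry_continue_rcu(), which this series keeps; struct item and handle() are illustrative names:

    #include <linux/rculist.h>

    struct item {
    	struct list_head list;
    	int val;
    };

    extern void handle(struct item *it);	/* hypothetical per-item handler */

    /* Resume an RCU-protected traversal after 'pos'; the caller must
     * hold rcu_read_lock() across the whole walk. */
    static void scan_rest(struct item *pos, struct list_head *head)
    {
    	list_for_each_entry_continue_rcu(pos, head, list)
    		handle(pos);
    }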
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
new file mode 100644
index 000000000000..e24339ccb7f0
--- /dev/null
+++ b/include/linux/context_tracking.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_CONTEXT_TRACKING_H
+#define _LINUX_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#include <linux/sched.h>
+
+extern void user_enter(void);
+extern void user_exit(void);
+extern void context_tracking_task_switch(struct task_struct *prev,
+					 struct task_struct *next);
+#else
+static inline void user_enter(void) { }
+static inline void user_exit(void) { }
+static inline void context_tracking_task_switch(struct task_struct *prev,
+						struct task_struct *next) { }
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+#endif
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e0f0fab20415..c92dd28eaa6c 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -286,23 +286,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	     &pos->member != (head); \
 	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
-
-/**
- * list_for_each_continue_rcu
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * Iterate over an rcu-protected list, continuing after current point.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_continue_rcu(pos, head) \
-	for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
-		(pos) != (head); \
-		(pos) = rcu_dereference_raw(list_next_rcu(pos)))
-
 /**
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos:	the type * to use as a loop cursor.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 7c968e4f929e..275aa3f1062d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -90,6 +90,25 @@ extern void do_trace_rcu_torture_read(char *rcutorturename,
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical section.  On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu().  It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section.  Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
 */
 extern void call_rcu(struct rcu_head *head,
 		     void (*func)(struct rcu_head *head));
@@ -118,6 +137,9 @@ extern void call_rcu(struct rcu_head *head,
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
 */
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
@@ -137,6 +159,9 @@ extern void call_rcu_bh(struct rcu_head *head,
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
 */
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
@@ -197,13 +222,13 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
-extern void rcu_user_hooks_switch(struct task_struct *prev,
-				  struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_enter_after_irq(void) { }
 static inline void rcu_user_exit_after_irq(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+					 struct task_struct *next) { }
 #endif /* CONFIG_RCU_USER_QS */
 
 extern void exit_rcu(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3e387df065fc..29116b853ece 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -109,6 +109,8 @@ extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
+extern void dump_cpu_task(int cpu);
+
 struct seq_file;
 struct cfs_rq;
 struct task_group;
@@ -1845,14 +1847,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
-static inline void rcu_switch(struct task_struct *prev,
-			      struct task_struct *next)
-{
-#ifdef CONFIG_RCU_USER_QS
-	rcu_user_hooks_switch(prev, next);
-#endif
-}
-
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 55a5c52cbb25..6eb691b08358 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -16,8 +16,10 @@
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
+ *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
@@ -40,6 +42,8 @@ struct rcu_batch {
 	struct rcu_head *head, **tail;
 };
 
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
 struct srcu_struct {
 	unsigned completed;
 	struct srcu_struct_array __percpu *per_cpu_ref;
@@ -70,12 +74,42 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
 	__init_srcu_struct((sp), #sp, &__srcu_key); \
 })
 
+#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 int init_srcu_struct(struct srcu_struct *sp);
 
+#define __SRCU_DEP_MAP_INIT(srcu_name)
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name)					\
+	{								\
+		.completed = -300,					\
+		.per_cpu_ref = &name##_srcu_array,			\
+		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
+		.running = false,					\
+		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
+		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
+		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
+		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
+		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+		__SRCU_DEP_MAP_INIT(name)				\
+	}
+
+/*
+ * define and init a srcu struct at build time.
+ * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ */
+#define DEFINE_SRCU(name)						\
+	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+	struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+
+#define DEFINE_STATIC_SRCU(name)					\
+	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+	static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+
 /**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in queue the callback
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5bde94d8585b..d4f559b1ec34 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -549,6 +549,7 @@ TRACE_EVENT(rcu_torture_read,
 *	"EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
 *	"Inc1": rcu_barrier_callback() piggyback check counter incremented.
 *	"Offline": rcu_barrier_callback() found offline CPU
+ *	"OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU.
 *	"OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
 *	"OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
 *	"IRQ": An rcu_barrier_callback() callback posted on remote CPU.
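[Editor's note] For reference, a minimal usage sketch of the build-time SRCU initialization added in the srcu.h hunk above. my_srcu, reader(), and updater() are illustrative names, but the macro and the srcu_read_lock()/srcu_read_unlock()/synchronize_srcu() calls are the existing SRCU API:

    #include <linux/srcu.h>

    /* Build-time init: no init_srcu_struct()/cleanup_srcu_struct() calls,
     * so the domain is usable without a runtime setup step. */
    DEFINE_STATIC_SRCU(my_srcu);

    static int reader(void)
    {
    	int idx = srcu_read_lock(&my_srcu);
    	/* ... dereference SRCU-protected data here ... */
    	srcu_read_unlock(&my_srcu, idx);
    	return 0;
    }

    static void updater(void)
    {
    	/* ... publish replacement / unlink the old data ... */
    	synchronize_srcu(&my_srcu);	/* wait for pre-existing readers */
    	/* ... now safe to free the old data ... */
    }

DEFINE_STATIC_SRCU() gives the srcu_struct static linkage, the usual choice for a single-file user; DEFINE_SRCU() is the externally visible variant.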