author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-10-03 16:51:47 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-11-27 08:42:03 -0800
commit		844ccdd7dce2c1a6ea9b437fcf8c3265b136e4a5 (patch)
tree		849c27c829544f9cde57bd2fd311060d5206c1d6
parent		51a1fd30f13090be7750fed86cf3728afaf4e394 (diff)
rcu: Eliminate rcu_irq_enter_disabled()
Now that the irq path uses the rcu_nmi_{enter,exit}() algorithm, rcu_irq_enter() and rcu_irq_exit() may be used from any context. There is thus no need for rcu_irq_enter_disabled() and for the checks using it. This commit therefore eliminates rcu_irq_enter_disabled().

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
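For context only, a minimal sketch of the calling pattern this change permits (not part of the patch): code that must have RCU watching, such as the tracing paths touched below, can now enter an RCU-watching section unconditionally instead of first testing rcu_irq_enter_disabled(). The names example_trace_hook() and do_trace_work() are hypothetical; rcu_irq_enter_irqson(), rcu_irq_exit_irqson(), and in_nmi() are the existing kernel interfaces.

/* Illustrative sketch only, not code from this patch. */
static void example_trace_hook(void)
{
	if (in_nmi())			/* NMI entry/exit handles RCU itself. */
		return;
	rcu_irq_enter_irqson();		/* Now safe from any non-NMI context. */
	do_trace_work();		/* Hypothetical RCU-protected work. */
	rcu_irq_exit_irqson();
}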
-rw-r--r--	include/linux/rcutiny.h    |  1
-rw-r--r--	include/linux/rcutree.h    |  1
-rw-r--r--	include/linux/tracepoint.h |  5
-rw-r--r--	kernel/rcu/tree.c          | 22
-rw-r--r--	kernel/trace/trace.c       | 11
5 files changed, 3 insertions(+), 37 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
void exit_rcu(void);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- if (rcucheck) { \
- if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
- return; \
+ if (rcucheck) \
rcu_irq_enter_irqson(); \
- } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d123474fe829..444aa2b3f24d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -271,20 +271,6 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
};
/*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-
-bool rcu_irq_enter_disabled(void)
-{
- return this_cpu_read(disable_rcu_irq_enter);
-}
-
-/*
* Record entry into an extended quiescent state. This is only to be
* called when not already in an extended quiescent state.
*/
@@ -792,10 +778,8 @@ static void rcu_eqs_enter_common(bool user)
do_nocb_deferred_wakeup(rdp);
}
rcu_prepare_for_idle();
- __this_cpu_inc(disable_rcu_irq_enter);
- rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
- rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
- __this_cpu_dec(disable_rcu_irq_enter);
+ rdtp->dynticks_nesting = 0;
+ rcu_dynticks_eqs_enter();
rcu_dynticks_task_enter();
/*
@@ -1001,10 +985,8 @@ static void rcu_eqs_exit(bool user)
if (oldval) {
rdtp->dynticks_nesting++;
} else {
- __this_cpu_inc(disable_rcu_irq_enter);
rcu_eqs_exit_common(1, user);
rdtp->dynticks_nesting = 1;
- __this_cpu_dec(disable_rcu_irq_enter);
WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 73e67b68c53b..dbce1be3bab8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2682,17 +2682,6 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
if (unlikely(in_nmi()))
return;
- /*
- * It is possible that a function is being traced in a
- * location that RCU is not watching. A call to
- * rcu_irq_enter() will make sure that it is, but there's
- * a few internal rcu functions that could be traced
- * where that wont work either. In those cases, we just
- * do nothing.
- */
- if (unlikely(rcu_irq_enter_disabled()))
- return;
-
rcu_irq_enter_irqson();
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
rcu_irq_exit_irqson();