author    Thomas Gleixner <tglx@linutronix.de>  2017-05-30 23:15:46 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2017-06-04 15:40:25 +0200
commit    96fe3b072f134e4993f829d599eaa1e0eb5a10e5 (patch)
tree      63bd0034d7a797130dba236f1e6922372d4d6b86 /kernel/time
parent    30802945893bc944b5971b408b37511a03b54e5c (diff)
posix-timers: Rename do_schedule_next_timer
That function is a misnomer. Rename it with a proper prefix to
posixtimer_rearm().

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20170530211656.811362578@linutronix.de
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/posix-cpu-timers.c   2
-rw-r--r--  kernel/time/posix-timers.c      10
2 files changed, 6 insertions, 6 deletions
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 0123ece6851b..1ba576d3151a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -981,7 +981,7 @@ static void check_process_timers(struct task_struct *tsk,
}
/*
- * This is called from the signal code (via do_schedule_next_timer)
+ * This is called from the signal code (via posixtimer_rearm)
* when the last timer signal was delivered and we have to reload the timer.
*/
void posix_cpu_timer_schedule(struct k_itimer *timer)
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index eb007e19811d..036b7e70c65c 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -305,7 +305,7 @@ static void schedule_next_timer(struct k_itimer *timr)
* To protect against the timer going away while the interrupt is queued,
* we require that the it_requeue_pending flag be set.
*/
-void do_schedule_next_timer(struct siginfo *info)
+void posixtimer_rearm(struct siginfo *info)
{
struct k_itimer *timr;
unsigned long flags;
@@ -336,12 +336,12 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
int shared, ret = -1;
/*
* FIXME: if ->sigq is queued we can race with
- * dequeue_signal()->do_schedule_next_timer().
+ * dequeue_signal()->posixtimer_rearm().
*
* If dequeue_signal() sees the "right" value of
- * si_sys_private it calls do_schedule_next_timer().
+ * si_sys_private it calls posixtimer_rearm().
* We re-queue ->sigq and drop ->it_lock().
- * do_schedule_next_timer() locks the timer
+ * posixtimer_rearm() locks the timer
* and re-schedules it while ->sigq is pending.
* Not really bad, but not that we want.
*/
@@ -701,7 +701,7 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
* accumulating overruns on the next timer. The overrun is frozen when
* the signal is delivered, either at the notify time (if the info block
* is not queued) or at the actual delivery time (as we are informed by
- * the call back to do_schedule_next_timer(). So all we need to do is
+ * the call back to posixtimer_rearm(). So all we need to do is
* to pick up the frozen overrun.
*/
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
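
The comment above describes when the overrun count is frozen: at signal
delivery, either at notify time or, via the callback to posixtimer_rearm(),
at actual delivery time. Below is a minimal userspace sketch (not part of
this patch; the setup values are illustrative) of where that frozen count
becomes visible through timer_getoverrun(2):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static timer_t timerid;

static void handler(int sig)
{
	/*
	 * The overrun count was frozen when this signal was delivered;
	 * timer_getoverrun() simply reports that frozen value.
	 * (printf() is not async-signal-safe; used only for illustration.)
	 */
	printf("timer expired, %d overrun(s)\n", timer_getoverrun(timerid));
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};
	/*
	 * 1 ms period, so several expirations can pile up behind one
	 * queued signal and produce a non-zero overrun count.
	 */
	struct itimerspec its = {
		.it_value    = { .tv_nsec = 1000000 },
		.it_interval = { .tv_nsec = 1000000 },
	};

	sigaction(SIGRTMIN, &sa, NULL);			/* error handling elided */
	timer_create(CLOCK_MONOTONIC, &sev, &timerid);
	timer_settime(timerid, 0, &its, NULL);

	pause();	/* wait for one delivery, then exit */
	return 0;
}

(Link with -lrt on older glibc versions.)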