author	Linus Torvalds <torvalds@linux-foundation.org>	2020-03-30 18:51:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-03-30 18:51:47 -0700
commit	dbb381b619aa5242c9cb1a8fd54d71c4d79c91eb (patch)
tree	e2bcd337c4eb7f4f3a44e0f0f05a19e775f18f90 /kernel/time/posix-cpu-timers.c
parent	336622e9fce7a0b8243e1cd9f6e1c24d96f13cd8 (diff)
parent	4479730e9263befbb9ce9563a09563db2acb8f7c (diff)
download	linux-dbb381b619aa5242c9cb1a8fd54d71c4d79c91eb.tar.bz2
Merge tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timekeeping and timer updates from Thomas Gleixner:
 "Core:

   - Consolidation of the vDSO build infrastructure to address the
     difficulties of cross-builds for ARM64 compat vDSO libraries by
     restricting the exposure of header content to the vDSO build.

     This is achieved by splitting out header content into separate
     headers which contain only the minimally required information
     necessary to build the vDSO. These new headers are included from
     the kernel headers and the vDSO specific files.

   - Enhancements to the generic vDSO library allowing more fine
     grained control over the compiled in code, further reducing
     architecture specific storage and preparing for adopting the
     generic library by PPC.

   - Cleanup and consolidation of the exit related code in posix CPU
     timers.

   - Small cleanups and enhancements here and there

  Drivers:

   - The obligatory new drivers: Ingenic JZ47xx and X1000 TCU support

   - Correct the clock rate of PIT64b global clock

   - setup_irq() cleanup

   - Preparation for PWM and suspend support for the TI DM timer

   - Expand the fttmr010 driver to support ast2600 systems

   - The usual small fixes, enhancements and cleanups all over the
     place"

* tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits)
  Revert "clocksource/drivers/timer-probe: Avoid creating dead devices"
  vdso: Fix clocksource.h macro detection
  um: Fix header inclusion
  arm64: vdso32: Enable Clang Compilation
  lib/vdso: Enable common headers
  arm: vdso: Enable arm to use common headers
  x86/vdso: Enable x86 to use common headers
  mips: vdso: Enable mips to use common headers
  arm64: vdso32: Include common headers in the vdso library
  arm64: vdso: Include common headers in the vdso library
  arm64: Introduce asm/vdso/processor.h
  arm64: vdso32: Code clean up
  linux/elfnote.h: Replace elf.h with UAPI equivalent
  scripts: Fix the inclusion order in modpost
  common: Introduce processor.h
  linux/ktime.h: Extract common header for vDSO
  linux/jiffies.h: Extract common header for vDSO
  linux/time64.h: Extract common header for vDSO
  linux/time32.h: Extract common header for vDSO
  linux/time.h: Extract common header for vDSO
  ...
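The posix CPU timers consolidation noted above is the part of the series shown in the diff below: a timer stops holding a task_struct reference and instead keeps a reference on the target's struct pid, which is resolved to a task under RCU each time the timer is touched. A minimal sketch of that lifetime pattern follows; the example_timer_*() helpers and their arguments are illustrative names only, while get_task_pid(), pid_task(), put_pid() and the RCU read-side lock are the actual kernel primitives the diff relies on.

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Creation: take a reference on the pid rather than on the task itself. */
static struct pid *example_timer_bind(struct task_struct *p, bool per_thread)
{
	return get_task_pid(p, per_thread ? PIDTYPE_PID : PIDTYPE_TGID);
}

/*
 * Use: resolve the pid back to a task. Callers must hold rcu_read_lock()
 * across this call and any dereference of the result; a NULL return means
 * the target has been reaped.
 */
static struct task_struct *example_timer_target(struct pid *pid, bool per_thread)
{
	return pid_task(pid, per_thread ? PIDTYPE_PID : PIDTYPE_TGID);
}

/* Deletion: drop the pid reference taken at creation time. */
static void example_timer_unbind(struct pid *pid)
{
	put_pid(pid);
}

The payoff, visible in posix_cpu_timer_del(), posix_cpu_timer_set(), posix_cpu_timer_get() and posix_cpu_timer_rearm() in the diff, is that an exited or reaped target simply makes the RCU lookup return NULL, instead of the timer having to pin the task_struct for its whole lifetime.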
Diffstat (limited to 'kernel/time/posix-cpu-timers.c')
-rw-r--r--	kernel/time/posix-cpu-timers.c	148
1 file changed, 70 insertions, 78 deletions
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2c48a7233b19..2fd3b3fa68bf 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -118,6 +118,16 @@ static inline int validate_clock_permissions(const clockid_t clock)
return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
}
+static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
+{
+ return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
+}
+
+static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
+{
+ return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
+}
+
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
@@ -336,9 +346,7 @@ static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
/*
* Sample a process (thread group) clock for the given task clkid. If the
* group's cputime accounting is already enabled, read the atomic
- * store. Otherwise a full update is required. Task's sighand lock must be
- * held to protect the task traversal on a full update. clkid is already
- * validated.
+ * store. Otherwise a full update is required. clkid is already validated.
*/
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
bool start)
@@ -393,7 +401,12 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
new_timer->kclock = &clock_posix_cpu;
timerqueue_init(&new_timer->it.cpu.node);
- new_timer->it.cpu.task = p;
+ new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
+ /*
+ * get_task_for_clock() took a reference on @p. Drop it as the timer
+ * holds a reference on the pid of @p.
+ */
+ put_task_struct(p);
return 0;
}
@@ -406,13 +419,15 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
static int posix_cpu_timer_del(struct k_itimer *timer)
{
struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
struct sighand_struct *sighand;
+ struct task_struct *p;
unsigned long flags;
int ret = 0;
- if (WARN_ON_ONCE(!p))
- return -EINVAL;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Protect against sighand release/switch in exit/exec and process/
@@ -434,8 +449,10 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
unlock_task_sighand(p, &flags);
}
+out:
+ rcu_read_unlock();
if (!ret)
- put_task_struct(p);
+ put_pid(ctmr->pid);
return ret;
}
@@ -484,12 +501,11 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the sighand lock held.
*/
-static void arm_timer(struct k_itimer *timer)
+static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
int clkidx = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 newexp = cpu_timer_getexpires(ctmr);
- struct task_struct *p = ctmr->task;
struct posix_cputimer_base *base;
if (CPUCLOCK_PERTHREAD(timer->it_clock))
@@ -564,13 +580,21 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
u64 old_expires, new_expires, old_incr, val;
struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
struct sighand_struct *sighand;
+ struct task_struct *p;
unsigned long flags;
int ret = 0;
- if (WARN_ON_ONCE(!p))
- return -EINVAL;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p) {
+ /*
+ * If p has just been reaped, we can no
+ * longer get any information about it at all.
+ */
+ rcu_read_unlock();
+ return -ESRCH;
+ }
/*
* Use the to_ktime conversion because that clamps the maximum
@@ -587,8 +611,10 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
* If p has just been reaped, we can no
* longer get any information about it at all.
*/
- if (unlikely(sighand == NULL))
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
return -ESRCH;
+ }
/*
* Disarm any old timer after extracting its expiry time.
@@ -662,7 +688,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
*/
cpu_timer_setexpires(ctmr, new_expires);
if (new_expires != 0 && val < new_expires) {
- arm_timer(timer);
+ arm_timer(timer, p);
}
unlock_task_sighand(p, &flags);
@@ -693,6 +719,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
ret = 0;
out:
+ rcu_read_unlock();
if (old)
old->it_interval = ns_to_timespec64(old_incr);
@@ -704,10 +731,12 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
struct cpu_timer *ctmr = &timer->it.cpu;
u64 now, expires = cpu_timer_getexpires(ctmr);
- struct task_struct *p = ctmr->task;
+ struct task_struct *p;
- if (WARN_ON_ONCE(!p))
- return;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Easy part: convert the reload time.
@@ -715,36 +744,15 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
itp->it_interval = ktime_to_timespec64(timer->it_interval);
if (!expires)
- return;
+ goto out;
/*
* Sample the clock to take the difference with the expiry time.
*/
- if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
- } else {
- struct sighand_struct *sighand;
- unsigned long flags;
-
- /*
- * Protect against sighand release/switch in exit/exec and
- * also make timer sampling safe if it ends up calling
- * thread_group_cputime().
- */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL)) {
- /*
- * The process has been reaped.
- * We can't even collect a sample any more.
- * Disarm the timer, nothing else to do.
- */
- cpu_timer_setexpires(ctmr, 0);
- return;
- } else {
- now = cpu_clock_sample_group(clkid, p, false);
- unlock_task_sighand(p, &flags);
- }
- }
+ else
+ now = cpu_clock_sample_group(clkid, p, false);
if (now < expires) {
itp->it_value = ns_to_timespec64(expires - now);
@@ -756,6 +764,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
+out:
+ rcu_read_unlock();
}
#define MAX_COLLECTED 20
@@ -976,56 +986,38 @@ static void check_process_timers(struct task_struct *tsk,
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
- struct cpu_timer *ctmr = &timer->it.cpu;
- struct task_struct *p = ctmr->task;
+ struct task_struct *p;
struct sighand_struct *sighand;
unsigned long flags;
u64 now;
- if (WARN_ON_ONCE(!p))
- return;
+ rcu_read_lock();
+ p = cpu_timer_task_rcu(timer);
+ if (!p)
+ goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
- if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ if (CPUCLOCK_PERTHREAD(timer->it_clock))
now = cpu_clock_sample(clkid, p);
- bump_cpu_timer(timer, now);
- if (unlikely(p->exit_state))
- return;
-
- /* Protect timer list r/w in arm_timer() */
- sighand = lock_task_sighand(p, &flags);
- if (!sighand)
- return;
- } else {
- /*
- * Protect arm_timer() and timer sampling in case of call to
- * thread_group_cputime().
- */
- sighand = lock_task_sighand(p, &flags);
- if (unlikely(sighand == NULL)) {
- /*
- * The process has been reaped.
- * We can't even collect a sample any more.
- */
- cpu_timer_setexpires(ctmr, 0);
- return;
- } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
- /* If the process is dying, no need to rearm */
- goto unlock;
- }
+ else
now = cpu_clock_sample_group(clkid, p, true);
- bump_cpu_timer(timer, now);
- /* Leave the sighand locked for the call below. */
- }
+
+ bump_cpu_timer(timer, now);
+
+ /* Protect timer list r/w in arm_timer() */
+ sighand = lock_task_sighand(p, &flags);
+ if (unlikely(sighand == NULL))
+ goto out;
/*
* Now re-arm for the new expiry time.
*/
- arm_timer(timer);
-unlock:
+ arm_timer(timer, p);
unlock_task_sighand(p, &flags);
+out:
+ rcu_read_unlock();
}
/**