 arch/arm64/kernel/psci.c        | 4 ++--
 include/linux/percpu-refcount.h | 4 +---
 kernel/irq_work.c               | 4 ++--
 kernel/time/tick-sched.c        | 2 +-
 mm/percpu.c                     | 2 +-
 5 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 663da771580a..3425f311c49e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 
 static int psci_suspend_finisher(unsigned long index)
 {
-	struct psci_power_state *state = __get_cpu_var(psci_power_state);
+	struct psci_power_state *state = __this_cpu_read(psci_power_state);
 
 	return psci_ops.cpu_suspend(state[index - 1],
 				    virt_to_phys(cpu_resume));
@@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index)
 static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
 {
 	int ret;
-	struct psci_power_state *state = __get_cpu_var(psci_power_state);
+	struct psci_power_state *state = __this_cpu_read(psci_power_state);
 	/*
 	 * idle state index 0 corresponds to wfi, should never be called
 	 * from the cpu_suspend operations
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 530b249f7ea4..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
 				   unsigned long __percpu **percpu_countp)
 {
-	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
 	/* paired with smp_store_release() in percpu_ref_reinit() */
-	smp_read_barrier_depends();
+	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 3ab9048483fa..cbf9fb899d92 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
 
 void irq_work_tick(void)
 {
-	struct llist_head *raised = &__get_cpu_var(raised_list);
+	struct llist_head *raised = this_cpu_ptr(&raised_list);
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-	irq_work_run_list(&__get_cpu_var(lazy_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1f4356037a7d..4d54b7540585 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -235,7 +235,7 @@ void tick_nohz_full_kick(void)
 	if (!tick_nohz_full_cpu(smp_processor_id()))
 		return;
 
-	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 }
 
 /*
diff --git a/mm/percpu.c b/mm/percpu.c
index 014bab65e0ff..d39e2f4e335c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1591,7 +1591,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		if (cpu == NR_CPUS)
 			continue;
 
-		PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
+		PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
 		PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
 		PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
 
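Most of the hunks above are the same mechanical conversion: the old __get_cpu_var() accessor is replaced by __this_cpu_read() where only the value of the per-cpu variable is needed, and by this_cpu_ptr() where a pointer to the current CPU's copy is needed. A minimal sketch of that pattern follows; it is not taken from the kernel sources, and the per-cpu variables and the demo_tick() helper are hypothetical, defined only for illustration.

#include <linux/percpu.h>
#include <linux/llist.h>
#include <linux/printk.h>

/* Hypothetical per-cpu variables, for illustration only. */
static DEFINE_PER_CPU(int, demo_counter);
static DEFINE_PER_CPU(struct llist_head, demo_list);

/* Assumed to run with preemption disabled, as the __this_cpu_* forms expect. */
static void demo_tick(void)
{
	/* was: int v = __get_cpu_var(demo_counter);  -- read the value */
	int v = __this_cpu_read(demo_counter);

	/* was: head = &__get_cpu_var(demo_list);     -- take a pointer */
	struct llist_head *head = this_cpu_ptr(&demo_list);

	if (!llist_empty(head))
		pr_info("demo: counter=%d\n", v);
}

__get_cpu_var() yielded an lvalue for the current CPU's instance, so both uses looked identical; the replacements make the value/pointer distinction explicit, which is the split visible in the psci.c, irq_work.c and tick-sched.c hunks above.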