From cb38f82043d1641a27f96b58b402ca4b7a88f52d Mon Sep 17 00:00:00 2001
From: Qian Cai
Date: Mon, 10 Feb 2020 09:10:16 -0500
Subject: x86/mm/pat: Mark an intentional data race

cpa_4k_install could be accessed concurrently as noticed by KCSAN,

read to 0xffffffffaa59a000 of 8 bytes by interrupt on cpu 7:
 cpa_inc_4k_install arch/x86/mm/pat/set_memory.c:131 [inline]
 __change_page_attr+0x10cf/0x1840 arch/x86/mm/pat/set_memory.c:1514
 __change_page_attr_set_clr+0xce/0x490 arch/x86/mm/pat/set_memory.c:1636
 __set_pages_np+0xc4/0xf0 arch/x86/mm/pat/set_memory.c:2148
 __kernel_map_pages+0xb0/0xc8 arch/x86/mm/pat/set_memory.c:2178
 kernel_map_pages include/linux/mm.h:2719 [inline]

write to 0xffffffffaa59a000 of 8 bytes by task 1 on cpu 6:
 cpa_inc_4k_install arch/x86/mm/pat/set_memory.c:131 [inline]
 __change_page_attr+0x10ea/0x1840 arch/x86/mm/pat/set_memory.c:1514
 __change_page_attr_set_clr+0xce/0x490 arch/x86/mm/pat/set_memory.c:1636
 __set_pages_p+0xc4/0xf0 arch/x86/mm/pat/set_memory.c:2129
 __kernel_map_pages+0x2e/0xc8 arch/x86/mm/pat/set_memory.c:2176
 kernel_map_pages include/linux/mm.h:2719 [inline]

Both accesses are due to the same "cpa_4k_install++" in
cpa_inc_4k_install. A data race here is potentially undesirable:
depending on compiler optimizations or how x86 executes a non-LOCK'd
increment, it may lose increments, corrupt the counter, etc. That said,
since this counter only seems to be used for printing some stats, the
data race itself is unlikely to cause harm to the system. Thus, mark
this intentional data race using the data_race() macro.

Suggested-by: Marco Elver
Signed-off-by: Qian Cai
Acked-by: Borislav Petkov
Signed-off-by: Paul E. McKenney
---
 arch/x86/mm/pat/set_memory.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 77e04304a2a7..d1b2a889f035 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -135,7 +135,7 @@ static inline void cpa_inc_2m_checked(void)

 static inline void cpa_inc_4k_install(void)
 {
-	cpa_4k_install++;
+	data_race(cpa_4k_install++);
 }

 static inline void cpa_inc_lp_sameprot(int level)
-- cgit v1.2.3

From b037b09b9058d84882fa2c4db3806433e2b0f912 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Fri, 3 Jul 2020 10:02:58 -0700
Subject: x86/entry: Rename idtentry_enter/exit_cond_rcu() to idtentry_enter/exit()

They were originally called _cond_rcu because they were special versions
with conditional RCU handling. Now they're the standard entry and exit
path, so the _cond_rcu part is just confusing. Drop it.

Also change the signature to make them more extensible and more foolproof.

No functional change -- it's pure refactoring.
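
For illustration only (not part of this patch, and the names are made up): a raw entry point written against the new API threads the opaque state from enter to exit instead of a bare bool. Real handlers are generated by the DEFINE_IDTENTRY* macros updated below.

  /* Sketch of the new calling convention; exc_hypothetical() and
   * do_hypothetical() are placeholder names. */
  __visible noinstr void exc_hypothetical(struct pt_regs *regs)
  {
  	/* Opaque state replaces the old "bool rcu_exit" return value. */
  	idtentry_state_t state = idtentry_enter(regs);

  	instrumentation_begin();
  	do_hypothetical(regs);		/* instrumentable handler body */
  	instrumentation_end();

  	/* Must receive exactly what idtentry_enter() returned. */
  	idtentry_exit(regs, state);
  }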
Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/247fc67685263e0b673e1d7f808182d28ff80359.1593795633.git.luto@kernel.org --- arch/x86/entry/common.c | 50 +++++++++++++++++++++++------------------ arch/x86/include/asm/idtentry.h | 28 +++++++++++++---------- arch/x86/kernel/kvm.c | 6 ++--- arch/x86/kernel/traps.c | 6 ++--- arch/x86/mm/fault.c | 6 ++--- 5 files changed, 53 insertions(+), 43 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index e83b3f14897c..0521546022cb 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -559,8 +559,7 @@ SYSCALL_DEFINE0(ni_syscall) } /** - * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional - * RCU handling + * idtentry_enter - Handle state tracking on ordinary idtentries * @regs: Pointer to pt_regs of interrupted context * * Invokes: @@ -572,6 +571,9 @@ SYSCALL_DEFINE0(ni_syscall) * - The hardirq tracer to keep the state consistent as low level ASM * entry disabled interrupts. * + * As a precondition, this requires that the entry came from user mode, + * idle, or a kernel context in which RCU is watching. + * * For kernel mode entries RCU handling is done conditional. If RCU is * watching then the only RCU requirement is to check whether the tick has * to be restarted. If RCU is not watching then rcu_irq_enter() has to be @@ -585,18 +587,21 @@ SYSCALL_DEFINE0(ni_syscall) * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit * would not be possible. * - * Returns: True if RCU has been adjusted on a kernel entry - * False otherwise + * Returns: An opaque object that must be passed to idtentry_exit() * - * The return value must be fed into the rcu_exit argument of - * idtentry_exit_cond_rcu(). + * The return value must be fed into the state argument of + * idtentry_exit(). */ -bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs) +idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs) { + idtentry_state_t ret = { + .exit_rcu = false, + }; + if (user_mode(regs)) { check_user_regs(regs); enter_from_user_mode(); - return false; + return ret; } /* @@ -634,7 +639,8 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs) trace_hardirqs_off_finish(); instrumentation_end(); - return true; + ret.exit_rcu = true; + return ret; } /* @@ -649,7 +655,7 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs) trace_hardirqs_off(); instrumentation_end(); - return false; + return ret; } static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched) @@ -667,10 +673,9 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched) } /** - * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU - * handling + * idtentry_exit - Handle return from exception that used idtentry_enter() * @regs: Pointer to pt_regs (exception entry regs) - * @rcu_exit: Invoke rcu_irq_exit() if true + * @state: Return value from matching call to idtentry_enter() * * Depending on the return target (kernel/user) this runs the necessary * preemption and work checks if possible and reguired and returns to @@ -679,10 +684,10 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched) * This is the last action before returning to the low level ASM code which * just needs to return to the appropriate context. * - * Counterpart to idtentry_enter_cond_rcu(). 
The return value of the entry - * function must be fed into the @rcu_exit argument. + * Counterpart to idtentry_enter(). The return value of the entry + * function must be fed into the @state argument. */ -void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit) +void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state) { lockdep_assert_irqs_disabled(); @@ -695,7 +700,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit) * carefully and needs the same ordering of lockdep/tracing * and RCU as the return to user mode path. */ - if (rcu_exit) { + if (state.exit_rcu) { instrumentation_begin(); /* Tell the tracer that IRET will enable interrupts */ trace_hardirqs_on_prepare(); @@ -714,7 +719,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit) * IRQ flags state is correct already. Just tell RCU if it * was not watching on entry. */ - if (rcu_exit) + if (state.exit_rcu) rcu_irq_exit(); } } @@ -800,9 +805,10 @@ static void __xen_pv_evtchn_do_upcall(void) __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs; - bool inhcall, rcu_exit; + bool inhcall; + idtentry_state_t state; - rcu_exit = idtentry_enter_cond_rcu(regs); + state = idtentry_enter(regs); old_regs = set_irq_regs(regs); instrumentation_begin(); @@ -812,13 +818,13 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) set_irq_regs(old_regs); inhcall = get_and_clear_inhcall(); - if (inhcall && !WARN_ON_ONCE(rcu_exit)) { + if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) { instrumentation_begin(); idtentry_exit_cond_resched(regs, true); instrumentation_end(); restore_inhcall(inhcall); } else { - idtentry_exit_cond_rcu(regs, rcu_exit); + idtentry_exit(regs, state); } } #endif /* CONFIG_XEN_PV */ diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index eeac6dc2adaa..7227225cf45d 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -13,8 +13,12 @@ void idtentry_enter_user(struct pt_regs *regs); void idtentry_exit_user(struct pt_regs *regs); -bool idtentry_enter_cond_rcu(struct pt_regs *regs); -void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit); +typedef struct idtentry_state { + bool exit_rcu; +} idtentry_state_t; + +idtentry_state_t idtentry_enter(struct pt_regs *regs); +void idtentry_exit(struct pt_regs *regs, idtentry_state_t state); /** * DECLARE_IDTENTRY - Declare functions for simple IDT entry points @@ -54,12 +58,12 @@ static __always_inline void __##func(struct pt_regs *regs); \ \ __visible noinstr void func(struct pt_regs *regs) \ { \ - bool rcu_exit = idtentry_enter_cond_rcu(regs); \ + idtentry_state_t state = idtentry_enter(regs); \ \ instrumentation_begin(); \ __##func (regs); \ instrumentation_end(); \ - idtentry_exit_cond_rcu(regs, rcu_exit); \ + idtentry_exit(regs, state); \ } \ \ static __always_inline void __##func(struct pt_regs *regs) @@ -101,12 +105,12 @@ static __always_inline void __##func(struct pt_regs *regs, \ __visible noinstr void func(struct pt_regs *regs, \ unsigned long error_code) \ { \ - bool rcu_exit = idtentry_enter_cond_rcu(regs); \ + idtentry_state_t state = idtentry_enter(regs); \ \ instrumentation_begin(); \ __##func (regs, error_code); \ instrumentation_end(); \ - idtentry_exit_cond_rcu(regs, rcu_exit); \ + idtentry_exit(regs, state); \ } \ \ static __always_inline void __##func(struct pt_regs *regs, \ @@ -199,7 +203,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector); \ 
__visible noinstr void func(struct pt_regs *regs, \ unsigned long error_code) \ { \ - bool rcu_exit = idtentry_enter_cond_rcu(regs); \ + idtentry_state_t state = idtentry_enter(regs); \ \ instrumentation_begin(); \ irq_enter_rcu(); \ @@ -207,7 +211,7 @@ __visible noinstr void func(struct pt_regs *regs, \ __##func (regs, (u8)error_code); \ irq_exit_rcu(); \ instrumentation_end(); \ - idtentry_exit_cond_rcu(regs, rcu_exit); \ + idtentry_exit(regs, state); \ } \ \ static __always_inline void __##func(struct pt_regs *regs, u8 vector) @@ -241,7 +245,7 @@ static void __##func(struct pt_regs *regs); \ \ __visible noinstr void func(struct pt_regs *regs) \ { \ - bool rcu_exit = idtentry_enter_cond_rcu(regs); \ + idtentry_state_t state = idtentry_enter(regs); \ \ instrumentation_begin(); \ irq_enter_rcu(); \ @@ -249,7 +253,7 @@ __visible noinstr void func(struct pt_regs *regs) \ run_on_irqstack_cond(__##func, regs, regs); \ irq_exit_rcu(); \ instrumentation_end(); \ - idtentry_exit_cond_rcu(regs, rcu_exit); \ + idtentry_exit(regs, state); \ } \ \ static noinline void __##func(struct pt_regs *regs) @@ -270,7 +274,7 @@ static __always_inline void __##func(struct pt_regs *regs); \ \ __visible noinstr void func(struct pt_regs *regs) \ { \ - bool rcu_exit = idtentry_enter_cond_rcu(regs); \ + idtentry_state_t state = idtentry_enter(regs); \ \ instrumentation_begin(); \ __irq_enter_raw(); \ @@ -278,7 +282,7 @@ __visible noinstr void func(struct pt_regs *regs) \ __##func (regs); \ __irq_exit_raw(); \ instrumentation_end(); \ - idtentry_exit_cond_rcu(regs, rcu_exit); \ + idtentry_exit(regs, state); \ } \ \ static __always_inline void __##func(struct pt_regs *regs) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index df63786e7bfa..3f78482d9496 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags); noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) { u32 reason = kvm_read_and_reset_apf_flags(); - bool rcu_exit; + idtentry_state_t state; switch (reason) { case KVM_PV_REASON_PAGE_NOT_PRESENT: @@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) return false; } - rcu_exit = idtentry_enter_cond_rcu(regs); + state = idtentry_enter(regs); instrumentation_begin(); /* @@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) } instrumentation_end(); - idtentry_exit_cond_rcu(regs, rcu_exit); + idtentry_exit(regs, state); return true; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b038695f36c5..4627f826fb57 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs) DEFINE_IDTENTRY_RAW(exc_invalid_op) { - bool rcu_exit; + idtentry_state_t state; /* * We use UD2 as a short encoding for 'CALL __WARN', as such @@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op) if (!user_mode(regs) && handle_bug(regs)) return; - rcu_exit = idtentry_enter_cond_rcu(regs); + state = idtentry_enter(regs); instrumentation_begin(); handle_invalid_op(regs); instrumentation_end(); - idtentry_exit_cond_rcu(regs, rcu_exit); + idtentry_exit(regs, state); } DEFINE_IDTENTRY(exc_coproc_segment_overrun) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 1ead568c0101..5e41949453cc 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code, DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault) 
{
	unsigned long address = read_cr2();
-	bool rcu_exit;
+	idtentry_state_t state;

	prefetchw(&current->mm->mmap_lock);

@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
	 * code reenabled RCU to avoid subsequent wreckage which helps
	 * debugability.
	 */
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);

	instrumentation_begin();
	handle_page_fault(regs, error_code, address);
	instrumentation_end();

-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 }
-- cgit v1.2.3

From ba1f2b2eaa2a529dba722507c55ff3d761d325dd Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 27 May 2020 15:50:29 +0200
Subject: x86/entry: Fix NMI vs IRQ state tracking

While the nmi_enter() users did trace_hardirqs_{off_prepare,on_finish}(),
there were no matching lockdep_hardirqs_*() calls to complete the picture.

Introduce idtentry_{enter,exit}_nmi() to enable proper IRQ state tracking
across NMIs.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Ingo Molnar
Link: https://lkml.kernel.org/r/20200623083721.216740948@infradead.org
---
 arch/x86/entry/common.c         | 42 +++++++++++++++++++++++++++++++++++++----
 arch/x86/include/asm/idtentry.h |  3 +++
 arch/x86/kernel/nmi.c           |  9 ++++-----
 arch/x86/kernel/traps.c         | 17 ++++++-----------
 include/linux/hardirq.h         | 28 ++++++++++++++++++---------
 5 files changed, 70 insertions(+), 29 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 0521546022cb..63c607dd6c52 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -592,7 +592,7 @@ SYSCALL_DEFINE0(ni_syscall)
 * The return value must be fed into the state argument of
 * idtentry_exit().
 */
-idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs)
+noinstr idtentry_state_t idtentry_enter(struct pt_regs *regs)
 {
	idtentry_state_t ret = {
		.exit_rcu = false,
@@ -687,7 +687,7 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
 * Counterpart to idtentry_enter(). The return value of the entry
 * function must be fed into the @state argument.
 */
-void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
+noinstr void idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
 {
	lockdep_assert_irqs_disabled();
@@ -731,7 +731,7 @@ void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
 * Invokes enter_from_user_mode() to establish the proper context for
 * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
 */
-void noinstr idtentry_enter_user(struct pt_regs *regs)
+noinstr void idtentry_enter_user(struct pt_regs *regs)
 {
	check_user_regs(regs);
	enter_from_user_mode();
@@ -749,13 +749,47 @@ void noinstr idtentry_enter_user(struct pt_regs *regs)
 *
 * Counterpart to idtentry_enter_user().
*/ -void noinstr idtentry_exit_user(struct pt_regs *regs) +noinstr void idtentry_exit_user(struct pt_regs *regs) { lockdep_assert_irqs_disabled(); prepare_exit_to_usermode(regs); } +noinstr bool idtentry_enter_nmi(struct pt_regs *regs) +{ + bool irq_state = lockdep_hardirqs_enabled(current); + + __nmi_enter(); + lockdep_hardirqs_off(CALLER_ADDR0); + lockdep_hardirq_enter(); + rcu_nmi_enter(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + ftrace_nmi_enter(); + instrumentation_end(); + + return irq_state; +} + +noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore) +{ + instrumentation_begin(); + ftrace_nmi_exit(); + if (restore) { + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + } + instrumentation_end(); + + rcu_nmi_exit(); + lockdep_hardirq_exit(); + if (restore) + lockdep_hardirqs_on(CALLER_ADDR0); + __nmi_exit(); +} + #ifdef CONFIG_XEN_PV #ifndef CONFIG_PREEMPTION /* diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index 7227225cf45d..2b0497486525 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -20,6 +20,9 @@ typedef struct idtentry_state { idtentry_state_t idtentry_enter(struct pt_regs *regs); void idtentry_exit(struct pt_regs *regs, idtentry_state_t state); +bool idtentry_enter_nmi(struct pt_regs *regs); +void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state); + /** * DECLARE_IDTENTRY - Declare functions for simple IDT entry points * No error code pushed by hardware diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index d7c5e44b26f7..4fc9954a9560 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -330,7 +330,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs) __this_cpu_write(last_nmi_rip, regs->ip); instrumentation_begin(); - trace_hardirqs_off_finish(); handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); @@ -417,8 +416,6 @@ static noinstr void default_do_nmi(struct pt_regs *regs) unknown_nmi_error(reason, regs); out: - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); } @@ -478,6 +475,8 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7); DEFINE_IDTENTRY_RAW(exc_nmi) { + bool irq_state; + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; @@ -491,14 +490,14 @@ nmi_restart: this_cpu_write(nmi_dr7, local_db_save()); - nmi_enter(); + irq_state = idtentry_enter_nmi(regs); inc_irq_stat(__nmi_count); if (!ignore_nmis) default_do_nmi(regs); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); local_db_restore(this_cpu_read(nmi_dr7)); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 4627f826fb57..cdd73829e637 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -403,7 +403,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault) } #endif - nmi_enter(); + idtentry_enter_nmi(regs); instrumentation_begin(); notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -649,15 +649,12 @@ DEFINE_IDTENTRY_RAW(exc_int3) instrumentation_end(); idtentry_exit_user(regs); } else { - nmi_enter(); + bool irq_state = idtentry_enter_nmi(regs); instrumentation_begin(); - trace_hardirqs_off_finish(); if (!do_int3(regs)) die("int3", regs, 0); - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); } } @@ -865,9 +862,8 @@ out: static __always_inline void exc_debug_kernel(struct pt_regs *regs, unsigned long dr6) { - nmi_enter(); + bool irq_state 
= idtentry_enter_nmi(regs); instrumentation_begin(); - trace_hardirqs_off_finish(); /* * If something gets miswired and we end up here for a user mode @@ -884,10 +880,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs, handle_debug(regs, dr6, false); - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); instrumentation_end(); - nmi_exit(); + idtentry_exit_nmi(regs, irq_state); } static __always_inline void exc_debug_user(struct pt_regs *regs, @@ -903,6 +897,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs, instrumentation_begin(); handle_debug(regs, dr6, true); + instrumentation_end(); idtentry_exit_user(regs); } diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 03c9fece7d43..754f67ac4326 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -111,32 +111,42 @@ extern void rcu_nmi_exit(void); /* * nmi_enter() can nest up to 15 times; see NMI_BITS. */ -#define nmi_enter() \ +#define __nmi_enter() \ do { \ + lockdep_off(); \ arch_nmi_enter(); \ printk_nmi_enter(); \ - lockdep_off(); \ BUG_ON(in_nmi() == NMI_MASK); \ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ - rcu_nmi_enter(); \ + } while (0) + +#define nmi_enter() \ + do { \ + __nmi_enter(); \ lockdep_hardirq_enter(); \ + rcu_nmi_enter(); \ instrumentation_begin(); \ ftrace_nmi_enter(); \ instrumentation_end(); \ } while (0) +#define __nmi_exit() \ + do { \ + BUG_ON(!in_nmi()); \ + __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ + printk_nmi_exit(); \ + arch_nmi_exit(); \ + lockdep_on(); \ + } while (0) + #define nmi_exit() \ do { \ instrumentation_begin(); \ ftrace_nmi_exit(); \ instrumentation_end(); \ - lockdep_hardirq_exit(); \ rcu_nmi_exit(); \ - BUG_ON(!in_nmi()); \ - __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - lockdep_on(); \ - printk_nmi_exit(); \ - arch_nmi_exit(); \ + lockdep_hardirq_exit(); \ + __nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ -- cgit v1.2.3 From f9ad4a5f3f20bee022b1bdde94e5ece6dc0b0edc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 May 2020 13:03:26 +0200 Subject: lockdep: Remove lockdep_hardirq{s_enabled,_context}() argument Now that the macros use per-cpu data, we no longer need the argument. 
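
For illustration (a sketch, not part of the patch): the accessors now read per-CPU state themselves, so call sites simply drop the task argument. The helper function below is hypothetical; the real conversions are in the hunks that follow.

  /* Accessors after the change, backed by per-CPU variables: */
  #define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
  #define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))

  /* Hypothetical call site, before and after this patch: */
  static void example_check_irq_annotations(void)
  {
  	/* old: if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) */
  	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled()))
  		printk("possible reason: unannotated irqs-off.\n");
  }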
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ingo Molnar Link: https://lkml.kernel.org/r/20200623083721.571835311@infradead.org --- arch/x86/entry/common.c | 2 +- include/linux/irqflags.h | 8 ++++---- include/linux/lockdep.h | 2 +- kernel/locking/lockdep.c | 30 +++++++++++++++--------------- kernel/softirq.c | 2 +- tools/include/linux/irqflags.h | 4 ++-- 6 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 63c607dd6c52..4ea640363f5d 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -758,7 +758,7 @@ noinstr void idtentry_exit_user(struct pt_regs *regs) noinstr bool idtentry_enter_nmi(struct pt_regs *regs) { - bool irq_state = lockdep_hardirqs_enabled(current); + bool irq_state = lockdep_hardirqs_enabled(); __nmi_enter(); lockdep_hardirqs_off(CALLER_ADDR0); diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 255444fe4609..5811ee8a5cd8 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -40,9 +40,9 @@ DECLARE_PER_CPU(int, hardirq_context); extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context)) +# define lockdep_hardirq_context() (this_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) -# define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled)) +# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) # define lockdep_hardirq_enter() \ do { \ @@ -109,9 +109,9 @@ do { \ # define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context(p) 0 +# define lockdep_hardirq_context() 0 # define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled(p) 0 +# define lockdep_hardirqs_enabled() 0 # define lockdep_softirqs_enabled(p) 0 # define lockdep_hardirq_enter() do { } while (0) # define lockdep_hardirq_threaded() do { } while (0) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index be6cb17a8879..fd04b9e96091 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -562,7 +562,7 @@ do { \ # define lockdep_assert_RT_in_threaded_ctx() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - lockdep_hardirq_context(current) && \ + lockdep_hardirq_context() && \ !(current->hardirq_threaded || current->irq_config), \ "Not in threaded context on PREEMPT_RT as expected\n"); \ } while (0) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ab4ffbe0e9e9..c9ea05edce25 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr, pr_warn("-----------------------------------------------------\n"); pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, task_pid_nr(curr), - lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, - lockdep_hardirqs_enabled(curr), + lockdep_hardirqs_enabled(), curr->softirqs_enabled); print_lock(next); @@ -3331,9 +3331,9 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", curr->comm, 
task_pid_nr(curr), - lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, + lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT, lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, - lockdep_hardirqs_enabled(curr), + lockdep_hardirqs_enabled(), lockdep_softirqs_enabled(curr)); print_lock(this); @@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (unlikely(lockdep_hardirqs_enabled(current))) { + if (unlikely(lockdep_hardirqs_enabled())) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip) * Can't allow enabling interrupts while in an interrupt handler, * that's general bad form and such. Recursion, limited stack etc.. */ - if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current))) + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context())) return; current->hardirq_chain_key = current->curr_chain_key; @@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK)) return; - if (lockdep_hardirqs_enabled(curr)) { + if (lockdep_hardirqs_enabled()) { /* * Neither irq nor preemption are disabled here * so this is racy by nature but losing one hit @@ -3783,7 +3783,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return; - if (lockdep_hardirqs_enabled(curr)) { + if (lockdep_hardirqs_enabled()) { /* * We have done an ON -> OFF transition: */ @@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip) * usage bit for all held locks, if hardirqs are * enabled too: */ - if (lockdep_hardirqs_enabled(curr)) + if (lockdep_hardirqs_enabled()) mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); lockdep_recursion_finish(); } @@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) */ if (!hlock->trylock) { if (hlock->read) { - if (lockdep_hardirq_context(curr)) + if (lockdep_hardirq_context()) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ_READ)) return 0; @@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) LOCK_USED_IN_SOFTIRQ_READ)) return 0; } else { - if (lockdep_hardirq_context(curr)) + if (lockdep_hardirq_context()) if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) return 0; if (curr->softirq_context) @@ -3928,7 +3928,7 @@ lock_used: static inline unsigned int task_irq_context(struct task_struct *task) { - return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) + + return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() + LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context; } @@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr) * Set appropriate wait type for the context; for IRQs we have to take * into account force_irqthread as that is implied by PREEMPT_RT. */ - if (lockdep_hardirq_context(curr)) { + if (lockdep_hardirq_context()) { /* * Check if force_irqthreads will run us threaded. 
 */
@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags)
		return;

	if (irqs_disabled_flags(flags)) {
-		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
-		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) {
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 342c53feaa7a..5e9aaa648a74 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -230,7 +230,7 @@ static inline bool lockdep_softirq_start(void)
 {
	bool in_hardirq = false;

-	if (lockdep_hardirq_context(current)) {
+	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
index 67e01bbadbfe..501262aee8ff 100644
--- a/tools/include/linux/irqflags.h
+++ b/tools/include/linux/irqflags.h
@@ -2,9 +2,9 @@
 #ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
 #define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_

-# define lockdep_hardirq_context(p) 0
+# define lockdep_hardirq_context() 0
 # define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled(p) 0
+# define lockdep_hardirqs_enabled() 0
 # define lockdep_softirqs_enabled(p) 0
 # define lockdep_hardirq_enter() do { } while (0)
 # define lockdep_hardirq_exit() do { } while (0)
-- cgit v1.2.3

From ed00495333ccc80fc8fb86fb43773c3c2a499466 Mon Sep 17 00:00:00 2001
From: "peterz@infradead.org"
Date: Mon, 27 Jul 2020 14:48:52 +0200
Subject: locking/lockdep: Fix TRACE_IRQFLAGS vs. NMIs

Prior to commit:

  859d069ee1dd ("lockdep: Prepare for NMI IRQ state tracking")

IRQ state tracking was disabled in NMIs due to nmi_enter() doing
lockdep_off() -- with the obvious requirement that NMI entry call
nmi_enter() before trace_hardirqs_off().

[ AFAICT, PowerPC and SH violate this order on their NMI entry ]

However, that commit explicitly changed lockdep_hardirqs_*() to ignore
lockdep_off() and breaks every architecture that has irq-tracing in its
NMI entry that hasn't been fixed up (x86 being the only fixed one at
this point).

The reason for this change is that by ignoring lockdep_off() we can:

  - get rid of 'current->lockdep_recursion' in lockdep_assert_irqs*(),
    which was going to give header-recursion issues with the seqlock
    rework.

  - allow these lockdep_assert_*() macros to function in NMI context.

Restore the previous state of things and allow an architecture to
opt-in to the NMI IRQ tracking support; however, instead of relying on
lockdep_off(), rely on in_nmi(). Both are part of nmi_enter(), so the
overall entry ordering doesn't need to change.
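
For illustration, a condensed sketch of the gate this patch adds (it paraphrases the lockdep_hardirqs_on()/_off() hunks below rather than introducing new code): inside an NMI, IRQ state tracking is honoured only when the architecture has opted in via CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT, which turns on CONFIG_TRACE_IRQFLAGS_NMI.

  void noinstr lockdep_hardirqs_on(unsigned long ip)
  {
  	if (unlikely(in_nmi())) {
  		/* Unconverted architectures keep the old "no tracking in NMI" behaviour. */
  		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
  			return;
  		/* Opted-in architectures (currently only x86) fall through and track. */
  	}
  	/* ... the usual IRQ-state tracking continues unchanged ... */
  }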
Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200727124852.GK119549@hirez.programming.kicks-ass.net --- arch/x86/Kconfig.debug | 3 +++ kernel/locking/lockdep.c | 8 +++++++- lib/Kconfig.debug | 6 ++++++ 3 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0dd319e6e5b4..ee1d3c5834c6 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -3,6 +3,9 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y +config TRACE_IRQFLAGS_NMI_SUPPORT + def_bool y + config EARLY_PRINTK_USB bool diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index d595623c4b34..8b0b28b4546b 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3712,6 +3712,9 @@ void noinstr lockdep_hardirqs_on(unsigned long ip) * and not rely on hardware state like normal interrupts. */ if (unlikely(in_nmi())) { + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) + return; + /* * Skip: * - recursion check, because NMI can hit lockdep; @@ -3773,7 +3776,10 @@ void noinstr lockdep_hardirqs_off(unsigned long ip) * they will restore the software state. This ensures the software * state is consistent inside NMIs as well. */ - if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK))) + if (in_nmi()) { + if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI)) + return; + } else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK) return; /* diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 9ad9210d70a1..fa964b51f066 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1325,11 +1325,17 @@ config WW_MUTEX_SELFTEST endmenu # lock debugging config TRACE_IRQFLAGS + depends on TRACE_IRQFLAGS_SUPPORT bool help Enables hooks to interrupt enabling and disabling for either tracing or lock debugging. +config TRACE_IRQFLAGS_NMI + def_bool y + depends on TRACE_IRQFLAGS + depends on TRACE_IRQFLAGS_NMI_SUPPORT + config STACKTRACE bool "Stack backtrace support" depends on STACKTRACE_SUPPORT -- cgit v1.2.3 From 7ca8cf5347f720b07a0b32a924b768f5710547e7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 29 Jul 2020 22:31:05 +1000 Subject: locking/atomic: Move ATOMIC_INIT into linux/types.h This patch moves ATOMIC_INIT from asm/atomic.h into linux/types.h. This allows users of atomic_t to use ATOMIC_INIT without having to include atomic.h as that way may lead to header loops. 
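
As a purely hypothetical illustration of the kind of header loop this avoids (the header, struct, and macro below are made up): a low-level header that only wants a statically initialized atomic_t no longer has to include asm/atomic.h, which may itself (indirectly) depend on that low-level header; linux/types.h is now enough.

  /* hypothetical_lowlevel.h -- sketch only, not a real kernel header */
  #ifndef _HYPOTHETICAL_LOWLEVEL_H
  #define _HYPOTHETICAL_LOWLEVEL_H

  #include <linux/types.h>	/* atomic_t and, after this patch, ATOMIC_INIT */

  struct lowlevel_thing {
  	atomic_t refs;
  };

  #define LOWLEVEL_THING_INIT	{ .refs = ATOMIC_INIT(1) }

  #endif /* _HYPOTHETICAL_LOWLEVEL_H */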
Signed-off-by: Herbert Xu Signed-off-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Link: https://lkml.kernel.org/r/20200729123105.GB7047@gondor.apana.org.au --- arch/alpha/include/asm/atomic.h | 1 - arch/arc/include/asm/atomic.h | 2 -- arch/arm/include/asm/atomic.h | 2 -- arch/arm64/include/asm/atomic.h | 2 -- arch/h8300/include/asm/atomic.h | 2 -- arch/hexagon/include/asm/atomic.h | 2 -- arch/ia64/include/asm/atomic.h | 1 - arch/m68k/include/asm/atomic.h | 2 -- arch/mips/include/asm/atomic.h | 1 - arch/parisc/include/asm/atomic.h | 2 -- arch/powerpc/include/asm/atomic.h | 2 -- arch/riscv/include/asm/atomic.h | 2 -- arch/s390/include/asm/atomic.h | 2 -- arch/sh/include/asm/atomic.h | 2 -- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 1 - arch/x86/include/asm/atomic.h | 2 -- arch/xtensa/include/asm/atomic.h | 2 -- include/asm-generic/atomic.h | 2 -- include/linux/types.h | 2 ++ 20 files changed, 2 insertions(+), 34 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 2144530d1428..e2093994fd0d 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -24,7 +24,6 @@ #define __atomic_acquire_fence() #define __atomic_post_full_fence() -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 7298ce84762e..c614857eb209 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -14,8 +14,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #ifndef CONFIG_ARC_PLAT_EZNPS #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 75bb2c543e59..455eb19a5ac1 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -15,8 +15,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #ifdef __KERNEL__ /* diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index a08890da696c..015ddffaf6ca 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -99,8 +99,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v) return __lse_ll_sc_body(atomic64_dec_if_positive, v); } -#define ATOMIC_INIT(i) { (i) } - #define arch_atomic_read(v) __READ_ONCE((v)->counter) #define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index c6b6a06231b2..a990d151f163 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -12,8 +12,6 @@ * resource counting etc.. 
*/ -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 0231d69c8bf2..4ab895d7111f 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h @@ -12,8 +12,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* Normal writes in our arch don't clear lock reservations */ static inline void atomic_set(atomic_t *v, int new) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 50440f3ddc43..f267d956458f 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -19,7 +19,6 @@ #include -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 47228b0d4163..756c5cc58f94 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -16,8 +16,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index e5ac88392d1f..f904084fcb1f 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -45,7 +45,6 @@ static __always_inline type pfx##_xchg(pfx##_t *v, type n) \ return xchg(&v->counter, n); \ } -#define ATOMIC_INIT(i) { (i) } ATOMIC_OPS(atomic, int) #ifdef CONFIG_64BIT diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d41763..f960e2f32b1b 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -136,8 +136,6 @@ ATOMIC_OPS(xor, ^=) #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC_INIT(i) { (i) } - #ifdef CONFIG_64BIT #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 498785ffc25f..0311c3c42960 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -11,8 +11,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with * a "bne-" instruction at the end, so an isync is enough as a acquire barrier diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index 96f95c9ebd97..400a8c8b6de7 100644 --- a/arch/riscv/include/asm/atomic.h +++ b/arch/riscv/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #define __atomic_acquire_fence() \ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 491ad53a0d4e..cae473a7b6f7 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -15,8 +15,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - static inline int atomic_read(const atomic_t *v) { int c; diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index f37b95a80232..7c2a8a703b9a 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - #define atomic_read(v) READ_ONCE((v)->counter) #define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) diff --git a/arch/sparc/include/asm/atomic_32.h 
b/arch/sparc/include/asm/atomic_32.h index 94c930f0bc62..efad5532f169 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -18,8 +18,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - int atomic_add_return(int, atomic_t *); int atomic_fetch_add(int, atomic_t *); int atomic_fetch_and(int, atomic_t *); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index b60448397d4f..6b235d3d1d9d 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -12,7 +12,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index bf35e476a776..b6cac6e9bb70 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -14,8 +14,6 @@ * resource counting etc.. */ -#define ATOMIC_INIT(i) { (i) } - /** * arch_atomic_read - read atomic variable * @v: pointer of type atomic_t diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 3e7c6134ed32..744c2f463845 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -19,8 +19,6 @@ #include #include -#define ATOMIC_INIT(i) { (i) } - /* * This Xtensa implementation assumes that the right mechanism * for exclusion is for locking interrupts to level EXCM_LEVEL. diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 286867f593d2..11f96f40f4a7 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h @@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^) * resource counting etc.. */ -#define ATOMIC_INIT(i) { (i) } - /** * atomic_read - read atomic variable * @v: pointer of type atomic_t diff --git a/include/linux/types.h b/include/linux/types.h index d3021c879179..a147977602b5 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -167,6 +167,8 @@ typedef struct { int counter; } atomic_t; +#define ATOMIC_INIT(i) { (i) } + #ifdef CONFIG_64BIT typedef struct { s64 counter; -- cgit v1.2.3
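
A minimal usage sketch of the end state (illustrative only; the variable and function are made up): static initialization now needs nothing beyond linux/types.h, while the atomic operations themselves are still declared in linux/atomic.h.

  #include <linux/types.h>	/* atomic_t, ATOMIC_INIT */
  #include <linux/atomic.h>	/* atomic_inc() and friends still live here */

  static atomic_t example_count = ATOMIC_INIT(0);

  static void example_event(void)
  {
  	atomic_inc(&example_count);
  }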