From bd922477d9350a3006d73dabb241400e6c4181b0 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Thu, 28 Jan 2016 19:02:29 +0200
Subject: locking/x86: Add cc clobber for ADDL

ADDL clobbers flags (such as CF) but barrier.h didn't tell this to GCC.

Historically, GCC doesn't need one on x86, and always considers flags
clobbered. We are probably missing the cc clobber in a *lot* of places
for this reason.

But even if not necessary, it's probably a good thing to add for
documentation, and in case GCC semantics ever change.

Reported-by: Borislav Petkov
Signed-off-by: Michael S. Tsirkin
Acked-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Andrey Konovalov
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Davidlohr Bueso
Cc: Davidlohr Bueso
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: virtualization
Link: http://lkml.kernel.org/r/1453921746-16178-2-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/barrier.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index a584e1c50918..a65bdb10246a 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -15,9 +15,12 @@
  * Some non-Intel clones support out of order store. wmb() ceases to be a
  * nop for these.
  */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+				       X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb() asm volatile("mfence":::"memory")
 #define rmb() asm volatile("lfence":::"memory")
-- 
cgit v1.2.3
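As an aside, not part of the series itself: the construct the patch switches
to is ordinary GNU extended asm with both a "memory" and a "cc" clobber. A
minimal stand-alone sketch, assuming GCC or Clang targeting 32-bit x86 (the
demo_mb name is made up for illustration):

	/*
	 * A full barrier built like the lock-addl fallback above.  The
	 * "memory" clobber keeps the compiler from caching values in
	 * registers or reordering memory accesses across the asm; the
	 * "cc" clobber additionally declares that the instruction
	 * modifies EFLAGS (lock addl updates CF, ZF, SF, ...).  On x86,
	 * GCC currently treats the flags as clobbered by any asm anyway,
	 * so "cc" is documentation rather than a behavioural change,
	 * which is exactly the point of the patch above.
	 */
	static inline void demo_mb(void)
	{
		asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc");
	}
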
From e37cee133c72c9529f74a20d9b7eb3b6dfb928b5 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Thu, 28 Jan 2016 19:02:37 +0200
Subject: locking/x86: Drop a comment left over from X86_OOSTORE

The comment about wmb() being a non-NOP to deal with non-Intel CPUs is a
leftover from before the following commit:

  09df7c4c8097 ("x86: Remove CONFIG_X86_OOSTORE")

It makes no sense now: in particular, wmb() is not a NOP even for
regular Intel CPUs because of weird use-cases, e.g. dealing with WC
memory.

Drop this comment.

Signed-off-by: Michael S. Tsirkin
Acked-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Andrey Konovalov
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Davidlohr Bueso
Cc: Davidlohr Bueso
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: virtualization
Link: http://lkml.kernel.org/r/1453921746-16178-3-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/barrier.h | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index a65bdb10246a..a29174599a98 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -11,10 +11,6 @@
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
 #define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
 				      X86_FEATURE_XMM2) ::: "memory", "cc")
 #define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
-- 
cgit v1.2.3

From 57d9b1b43433a6ba7267c80b87d8e8f6e86edceb Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Thu, 28 Jan 2016 19:02:44 +0200
Subject: locking/x86: Tweak the comment about use of wmb() for IO

On x86, we *do* still use the non-NOP rmb()/wmb() for IO barriers, but
even that is generally questionable.

Leave them around as historical leftovers unless somebody can point to a
case where the performance matters, but tweak the comment so people don't
think they are strictly required in all cases.

Signed-off-by: Michael S. Tsirkin
Acked-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Andrey Konovalov
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Davidlohr Bueso
Cc: Davidlohr Bueso
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: virtualization
Link: http://lkml.kernel.org/r/1453921746-16178-4-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/barrier.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index a29174599a98..bfb28caf97b1 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -6,7 +6,7 @@
 
 /*
  * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
+ * And yes, this might be required on UP too when we're talking
  * to devices.
  */
-- 
cgit v1.2.3
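A closing aside, again not part of the series: the "talking to devices" case
the tweaked comment refers to is the usual driver pattern of publishing data
in memory before poking a device register. A hypothetical sketch: the demo_*
names, structure and register layout are invented for illustration; only
wmb(), writel() and the cpu_to_le*() helpers are real kernel APIs.

	#include <linux/io.h>
	#include <linux/types.h>
	#include <asm/barrier.h>
	#include <asm/byteorder.h>

	#define DEMO_RING_SIZE 256

	struct demo_desc {
		__le64 addr;
		__le32 len;
		__le32 flags;
	};

	struct demo_ring {
		struct demo_desc *desc;   /* descriptors in coherent DMA memory */
		void __iomem *doorbell;   /* device doorbell register (MMIO) */
		unsigned int head;
	};

	static void demo_post_descriptor(struct demo_ring *ring,
					 dma_addr_t buf, u32 len)
	{
		struct demo_desc *d = &ring->desc[ring->head];

		d->addr = cpu_to_le64(buf);
		d->len  = cpu_to_le32(len);

		/*
		 * Publish the descriptor before ringing the doorbell.  On
		 * x86, a UC MMIO write via writel() is already ordered
		 * after earlier WB stores, so this wmb() is arguably
		 * redundant here (that is the "generally questionable"
		 * point in the changelog), but it is not a NOP either,
		 * and it does matter when the target is WC-mapped memory.
		 */
		wmb();

		writel(ring->head, ring->doorbell);
		ring->head = (ring->head + 1) % DEMO_RING_SIZE;
	}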