Diffstat (limited to 'include/linux/spinlock_up.h')

-rw-r--r--	include/linux/spinlock_up.h	29

1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
+	barrier();
 }
 
 static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
+	barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	char oldval = lock->slock;
 
 	lock->slock = 0;
+	barrier();
 
 	return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	barrier();
 	lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock)		do { (void)(lock); } while (0)
-#define arch_write_lock(lock)		do { (void)(lock); } while (0)
-#define arch_read_trylock(lock)	({ (void)(lock); 1; })
-#define arch_write_trylock(lock)	({ (void)(lock); 1; })
-#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
-#define arch_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
 
 #endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
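For context on the change above: barrier() is the kernel's compiler barrier, defined in include/linux/compiler-gcc.h as an empty asm statement with a "memory" clobber. The sketch below is a hypothetical, freestanding illustration of the reordering hazard the new comment describes; it is not kernel code, and the names demo_spinlock_t, up_lock(), up_unlock(), shared_counter, and critical_section() are invented for the example.

/* The kernel's actual definition (include/linux/compiler-gcc.h): */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* Hypothetical stand-in for the UP-debug arch_spinlock_t. */
typedef struct {
	volatile char slock;	/* 1 == unlocked, 0 == locked */
} demo_spinlock_t;

static inline void up_lock(demo_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();	/* later accesses may not be hoisted above this */
}

static inline void up_unlock(demo_spinlock_t *lock)
{
	barrier();	/* earlier accesses may not sink below this */
	lock->slock = 1;
}

int shared_counter;

void critical_section(demo_spinlock_t *lock)
{
	up_lock(lock);
	/*
	 * Without the barriers, this store has no visible dependency
	 * on lock->slock (volatile only orders against other volatile
	 * accesses), so the compiler could legally move it before
	 * up_lock() or after up_unlock(). The "memory" clobber forbids
	 * moving any memory access across barrier(), keeping the whole
	 * sequence inside the lock/unlock pair.
	 */
	shared_counter++;
	up_unlock(lock);
}

Note that the asm body is empty, so barrier() emits no instruction at all; the "memory" clobber alone is what stops the compiler from caching memory values in registers across it or reordering loads and stores past it, which is all a UP spinlock needs.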