diff options
author | Ingo Molnar <mingo@elte.hu> | 2006-01-10 22:07:44 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-10 13:20:47 -0800 |
commit | 73165b88ffd29813bf73b331eaf90d3521443236 (patch) | |
tree | 224b510df182c5cba7b64fea6202ed9dd414835e /kernel/mutex.c | |
parent | 042c904c3e35e95ac911e8a2bf4097099b059e1a (diff) | |
download | linux-73165b88ffd29813bf73b331eaf90d3521443236.tar.bz2 |
[PATCH] fix i386 mutex fastpath on FRAME_POINTER && !DEBUG_MUTEXES
Call the mutex slowpath more conservatively - e.g. FRAME_POINTERS can
change the calling convention, in which case a direct branch to the
slowpath becomes illegal. Bug found by Hugh Dickins.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r-- | kernel/mutex.c | 9 |
1 file changed, 0 insertions, 9 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c index 7eb960661441..d3dcb8b44bac 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -84,12 +84,6 @@ void fastcall __sched mutex_lock(struct mutex *lock) /* * The locking fastpath is the 1->0 transition from * 'unlocked' into 'locked' state. - * - * NOTE: if asm/mutex.h is included, then some architectures - * rely on mutex_lock() having _no other code_ here but this - * fastpath. That allows the assembly fastpath to do - * tail-merging optimizations. (If you want to put testcode - * here, do it under #ifndef CONFIG_MUTEX_DEBUG.) */ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); } @@ -115,8 +109,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock) /* * The unlocking fastpath is the 0->1 transition from 'locked' * into 'unlocked' state: - * - * NOTE: no other code must be here - see mutex_lock() . */ __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); } @@ -261,7 +253,6 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__); */ int fastcall __sched mutex_lock_interruptible(struct mutex *lock) { - /* NOTE: no other code must be here - see mutex_lock() */ return __mutex_fastpath_lock_retval (&lock->count, __mutex_lock_interruptible_slowpath); } |