author		Liam R. Howlett <howlett@gmail.com>	2007-12-06 17:37:59 -0500
committer	Matthew Wilcox <willy@linux.intel.com>	2007-12-06 17:37:59 -0500
commit		ad776537cc6b4b936cfd11893e7b698dfa072666 (patch)
tree		b553f03f34ce8750150765755abaaec27943fa0d /kernel/mutex.c
parent		0b94e97a25d9b06ef17fca8da23169200bead1e2 (diff)
download	linux-ad776537cc6b4b936cfd11893e7b698dfa072666.tar.bz2
Add mutex_lock_killable
Similar to mutex_lock_interruptible(), but the wait can be interrupted
only by a fatal signal.
Signed-off-by: Liam R. Howlett <howlett@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556f..d9ec9b666250 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-			     signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+			      signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+			      fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -211,6 +214,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
 int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
+int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
 static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
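
As a usage sketch (not part of this commit): a caller takes the mutex with
mutex_lock_killable() when the wait may be long and the task should not
become unkillable; on a fatal signal the lock attempt fails and the error
is propagated. The names my_dev and my_dev_write below are hypothetical,
chosen only to illustrate the calling convention.

/*
 * Hypothetical caller sketch; my_dev/my_dev_write are illustrative
 * names, not from this commit.
 */
#include <linux/mutex.h>
#include <linux/errno.h>

struct my_dev {
	struct mutex lock;
	/* ... device state ... */
};

static int my_dev_write(struct my_dev *dev)
{
	int ret;

	/*
	 * Sleep until the mutex is acquired, but back out if a fatal
	 * signal (e.g. SIGKILL) arrives, so a task blocked on a wedged
	 * lock holder can still be killed.
	 */
	ret = mutex_lock_killable(&dev->lock);
	if (ret)
		return ret;	/* -EINTR: killed while waiting */

	/* ... do the locked work ... */

	mutex_unlock(&dev->lock);
	return 0;
}

As with mutex_lock_interruptible(), the return contract is 0 on success
and -EINTR when the wait is broken; the difference is that non-fatal
signals are ignored, so the primitive suits paths that cannot restart
an arbitrary syscall but must not hang forever.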