X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fmutex.c;h=d046a345d365793e84076e2517ec02cdd11bc40d;hb=4a96db3c780f30432653f99cbff193ba3e474e0b;hp=d7fe50cc556fd79624196a175b74996d9b23cf41;hpb=e457f790d8b05977853aa238bbc667b3bb375671;p=linux-2.6-omap-h63xx.git

diff --git a/kernel/mutex.c b/kernel/mutex.c
index d7fe50cc556..d046a345d36 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void fastcall noinline __sched
+static void noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) up().
  */
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
 {
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -166,9 +165,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-					signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -210,6 +212,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
+int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
@@ -223,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 /*
  * Release the lock, slowpath:
  */
-static fastcall inline void
+static inline void
 __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -260,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
+static noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -271,7 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
-static int fastcall noinline __sched
+static noinline int __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -285,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down_interruptible().
  */
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -294,7 +307,15 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
-static void fastcall noinline __sched
+int __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+		(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
+static noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -302,7 +323,15 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
 }
 
-static int fastcall noinline __sched
+static noinline int __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -351,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 			__mutex_trylock_slowpath);
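
Usage note (not part of the patch): mutex_lock_killable() sits between
mutex_lock(), which can never be aborted, and mutex_lock_interruptible(),
which any signal can abort. A TASK_KILLABLE sleep is woken only by fatal
signals such as SIGKILL, so a task stuck on a wedged mutex can still be
killed without letting ordinary signals disturb the wait. Below is a
minimal caller sketch; struct my_dev and my_dev_set_state() are
hypothetical names used only for illustration, and the mutex is assumed
to have been set up with mutex_init() beforehand.

	#include <linux/mutex.h>
	#include <linux/errno.h>

	struct my_dev {
		struct mutex lock;	/* protects state; mutex_init() at probe time */
		int state;
	};

	static int my_dev_set_state(struct my_dev *dev, int new_state)
	{
		/*
		 * Sleep until the mutex is acquired, unless a fatal
		 * signal arrives first. On failure the mutex is NOT
		 * held, so bail out without unlocking.
		 */
		if (mutex_lock_killable(&dev->lock))
			return -EINTR;

		dev->state = new_state;
		mutex_unlock(&dev->lock);
		return 0;
	}

As with mutex_lock_interruptible(), a nonzero return means the lock was
not taken, so the error path must not call mutex_unlock().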