*/
void fastcall __sched mutex_lock(struct mutex *lock)
{
+	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
-	 *
-	 * NOTE: if asm/mutex.h is included, then some architectures
-	 * rely on mutex_lock() having _no other code_ here but this
-	 * fastpath. That allows the assembly fastpath to do
-	 * tail-merging optimizations. (If you want to put testcode
-	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
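
/*
 * Illustration (not part of this patch): on architectures without a
 * hand-written assembly fastpath, __mutex_fastpath_lock() can be a
 * generic atomic-decrement helper roughly like the sketch below - the
 * 1->0 transition takes the lock, and a negative result means the
 * mutex was contended, so fail_fn (the slowpath) is called:
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}
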
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
-	 *
-	 * NOTE: no other code must be here - see mutex_lock() .
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
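
/*
 * Illustration (not part of this patch): the matching generic unlock
 * fastpath could look roughly like this - the 0->1 transition releases
 * the lock, and a non-positive result means there may be waiters, so
 * fail_fn (the slowpath) is called to wake one of them up:
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}
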
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	DEBUG_WARN_ON(lock->owner != current_thread_info());
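
/*
 * Illustrative sketch (not from this patch) of what the remainder of
 * the unlock slowpath does, under lock->wait_lock: reset the count to
 * 1 on architectures whose failed fastpath leaves the mutex locked,
 * then wake the first waiter queued on lock->wait_list, roughly:
 *
 *	spin_lock_mutex(&lock->wait_lock);
 *	if (__mutex_slowpath_needs_to_unlock())
 *		atomic_set(&lock->count, 1);
 *	if (!list_empty(&lock->wait_list)) {
 *		struct mutex_waiter *waiter =
 *			list_entry(lock->wait_list.next,
 *				   struct mutex_waiter, list);
 *		wake_up_process(waiter->task);
 *	}
 *	spin_unlock_mutex(&lock->wait_lock);
 */
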
*/
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
-	/* NOTE: no other code must be here - see mutex_lock() */
+	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}
}
EXPORT_SYMBOL(mutex_trylock);
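
/*
 * Usage sketch (illustrative, not part of this patch - my_lock,
 * my_data and the functions below are made-up names): with
 * might_sleep() now in the fastpath, taking a mutex from atomic
 * context warns even when the lock is uncontended. Typical callers
 * in process context look like this:
 */
static DEFINE_MUTEX(my_lock);
static int my_data;

static int my_update(int value)
{
	/* sleeps until the mutex is ours; -EINTR if a signal arrived */
	if (mutex_lock_interruptible(&my_lock))
		return -EINTR;
	my_data = value;
	mutex_unlock(&my_lock);
	return 0;
}

static int my_try_update(int value)
{
	/* mutex_trylock() returns 1 on success, 0 if the lock is held */
	if (!mutex_trylock(&my_lock))
		return -EBUSY;
	my_data = value;
	mutex_unlock(&my_lock);
	return 0;
}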