/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner		bit1	bit0
 * NULL			0	0	lock is free (fast acquire possible)
 * NULL			0	1	invalid state
 * NULL			1	0	Transitional state*
 * NULL			1	1	invalid state
 * taskpointer		0	0	lock is held (fast release possible)
 * taskpointer		0	1	task is pending owner
 * taskpointer		1	0	lock is held and has waiters
 * taskpointer		1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There's a small time where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
 * bit before looking at the lock, hence the reason this is a transitional
 * state.
 */
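/*
 * Note: RT_MUTEX_OWNER_PENDING (bit 0) and RT_MUTEX_HAS_WAITERS (bit 1)
 * used below, as well as the rt_mutex_owner()/rt_mutex_owner_pending()
 * accessors that mask them off again, are assumed to come from
 * rtmutex_common.h.
 */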
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}
static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
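/*
 * try_to_take_rt_mutex() sets RT_MUTEX_HAS_WAITERS unconditionally
 * (see below). This helper clears the bit again when the wait list
 * turned out to be empty.
 */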
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}
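/*
 * (In the kernel a lower numerical prio value means a higher priority,
 * which is why rt_mutex_getprio() picks the min() of the top pi
 * waiter's priority and the task's own normal priority.)
 */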
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}
/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(task_t *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, top_task->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending owner's
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case where
	 * current is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}
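/*
 * try_to_take_rt_mutex() returns 1 when it got the lock (either because
 * it was free or because it could be stolen from a lower priority
 * pending owner) and 0 when the lock cannot be taken right now, in
 * which case the slowpath callers block via task_blocks_on_rt_mutex()
 * below or bail out.
 */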
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock)
{
	struct rt_mutex_waiter *top_waiter = waiter;
	task_t *owner = rt_mutex_owner(lock);
	int boost = 0, res;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	if (!boost)
		return 0;

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current);

	spin_lock(&lock->wait_lock);

	return res;
}
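/*
 * Note that task_blocks_on_rt_mutex() drops and reacquires
 * lock->wait_lock around the priority chain walk, so the lock and
 * waiter state may have changed when it returns. The return value is
 * 0 or the -EDEADLK result propagated from rt_mutex_adjust_prio_chain().
 */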
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that in case the pending owner gets unboosted a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	int boost = 0;
	task_t *owner = rt_mutex_owner(lock);
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {
		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!boost)
		return;

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}
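/*
 * The chain walk above runs with deadlock detection disabled (second
 * argument 0): removing a waiter can only deboost the owner, it never
 * adds a new lock dependency.
 */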
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
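/*
 * rt_mutex_slowlock() returns 0 on success, -EINTR when interrupted by
 * a signal, -ETIMEDOUT when the timeout expired and -EDEADLK when
 * deadlock detection triggered (see the kerneldoc of the public entry
 * points below).
 */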
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {
		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);
	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, the timeout
 *			 structure is provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
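/*
 * Typical usage of the public API above (illustrative sketch only, not
 * part of this file; "example_lock" is a made-up name, DEFINE_RT_MUTEX()
 * comes from linux/rtmutex.h):
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section, runs with priority inheritance ...
 *	rt_mutex_unlock(&example_lock);
 */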
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:		the rt_mutex to be locked
 * @proxy_owner:	the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:		the rt_mutex to be locked
 * @proxy_owner:	the task that was set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * within the wait_lock.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}