Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block
[linux-2.6-omap-h63xx.git] / include/asm-x86/mutex_32.h
index bbeefb96ddfd6062657334b15915598ed4b26595..25c16d8ba3c753995394e204258583a27eebbdf4 100644
@@ -6,10 +6,10 @@
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
+#ifndef ASM_X86__MUTEX_32_H
+#define ASM_X86__MUTEX_32_H
 
-#include "asm/alternative.h"
+#include <asm/alternative.h>
 
 /**
  *  __mutex_fastpath_lock - try to take the lock by moving the count
@@ -21,22 +21,20 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)                          \
-do {                                                                   \
-       unsigned int dummy;                                             \
-                                                                       \
-       typecheck(atomic_t *, count);                                   \
+#define __mutex_fastpath_lock(count, fail_fn)                  \
+do {                                                           \
+       unsigned int dummy;                                     \
+                                                               \
+       typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
-                                                                       \
-       __asm__ __volatile__(                                           \
-               LOCK_PREFIX "   decl (%%eax)    \n"                     \
-                       "   jns 1f              \n"                     \
-                       "   call "#fail_fn"     \n"                     \
-                       "1:                     \n"                     \
-                                                                       \
-               :"=a" (dummy)                                           \
-               : "a" (count)                                           \
-               : "memory", "ecx", "edx");                              \
+                                                               \
+       asm volatile(LOCK_PREFIX "   decl (%%eax)\n"            \
+                    "   jns 1f \n"                             \
+                    "   call " #fail_fn "\n"                   \
+                    "1:\n"                                     \
+                    : "=a" (dummy)                             \
+                    : "a" (count)                              \
+                    : "memory", "ecx", "edx");                 \
 } while (0)
 
 
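For reference, the decrement-based fastpath above has a plain-C counterpart in include/asm-generic/mutex-dec.h, and the generic mutex layer invokes it by passing the slowpath as fail_fn. The sketch below restates both sides; the _c suffix is illustrative, and the mutex_lock() body is an approximation of kernel/mutex.c from this era, not a quote of this tree.

/* C equivalent of the "LOCK decl; jns 1f; call fail_fn" sequence,
 * after include/asm-generic/mutex-dec.h (illustrative name): */
static inline void
__mutex_fastpath_lock_c(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* atomically move the count from 1 down to 0 */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);	/* went negative: contended, take the slowpath */
}

/* Typical caller (approximate): */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
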
@@ -50,8 +48,8 @@ do {                                                                  \
  * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  * or anything the slow path function returns
  */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count,
+                                              int (*fail_fn)(atomic_t *))
 {
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
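The retval variant can stay in plain C even on x86, since atomic_dec_return() already compiles to a locked decrement and the caller needs the return value anyway. Roughly how kernel/mutex.c of this period uses it (a sketch; the slowpath name is the conventional one, not quoted from this tree):

/* 0 on fastpath success, otherwise whatever the slowpath returns,
 * e.g. -EINTR if the sleeping task was interrupted: */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval(&lock->count,
					    __mutex_lock_interruptible_slowpath);
}
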
@@ -72,22 +70,20 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)                                \
-do {                                                                   \
-       unsigned int dummy;                                             \
-                                                                       \
-       typecheck(atomic_t *, count);                                   \
+#define __mutex_fastpath_unlock(count, fail_fn)                        \
+do {                                                           \
+       unsigned int dummy;                                     \
+                                                               \
+       typecheck(atomic_t *, count);                           \
        typecheck_fn(void (*)(atomic_t *), fail_fn);            \
-                                                                       \
-       __asm__ __volatile__(                                           \
-               LOCK_PREFIX "   incl (%%eax)    \n"                     \
-                       "   jg  1f              \n"                     \
-                       "   call "#fail_fn"     \n"                     \
-                       "1:                     \n"                     \
-                                                                       \
-               :"=a" (dummy)                                           \
-               : "a" (count)                                           \
-               : "memory", "ecx", "edx");                              \
+                                                               \
+       asm volatile(LOCK_PREFIX "   incl (%%eax)\n"            \
+                    "   jg     1f\n"                           \
+                    "   call " #fail_fn "\n"                   \
+                    "1:\n"                                     \
+                    : "=a" (dummy)                             \
+                    : "a" (count)                              \
+                    : "memory", "ecx", "edx");                 \
 } while (0)
 
 #define __mutex_slowpath_needs_to_unlock()     1
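The unlock fastpath mirrors the lock side: a locked increment, jumping over the slowpath call when the result shows no waiters. Its C counterpart, again after include/asm-generic/mutex-dec.h (the _c name is illustrative):

/* C equivalent of the "LOCK incl; jg 1f; call fail_fn" sequence: */
static inline void
__mutex_fastpath_unlock_c(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* a result > 0 means the count was 0: locked with no waiters */
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);	/* waiters are queued: wake one up */
}

__mutex_slowpath_needs_to_unlock() is 1 here because in this decrement-based scheme a contended unlock still leaves the count at or below 0, so the generic slowpath must set it back to 1 itself; an xchg-based implementation already stores 1 in its fastpath and would return 0 instead.
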
@@ -104,8 +100,8 @@ do {                                                                        \
  * Additionally, if the value was < 0 originally, this function must not leave
  * it to 0 on failure.
  */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+                                          int (*fail_fn)(atomic_t *))
 {
        /*
         * We have two variants here. The cmpxchg based one is the best one
@@ -126,4 +122,4 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 #endif
 }
 
-#endif
+#endif /* ASM_X86__MUTEX_32_H */
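
The diff's context window hides the body of __mutex_fastpath_trylock between the last two hunks. Based on the surviving comment ("the cmpxchg based one is the best one") and the asm-generic counterpart, the elided branch most likely resembles the sketch below; treat the __HAVE_ARCH_CMPXCHG gate and the exact shape as assumptions, not a quote of the blob.

/* Sketch of the elided trylock fastpath: only swap 1 -> 0 when the
 * mutex is currently unlocked, so a failed attempt never disturbs
 * the count and cannot create a false contention state. */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;	/* acquired */
	return 0;		/* held by someone else */
#else
	return fail_fn(count);	/* no native cmpxchg: defer to the helper */
#endif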