pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - include/asm-i386/uaccess.h
sky2: ifdown kills irq mask
[linux-2.6-omap-h63xx.git] / include / asm-i386 / uaccess.h
index 89ab7e2bc5aaac8a1e230fbd7772ff5340e7b3a1..371457b1ceb6de2b47e8fcf7d125f8e57488cfb5 100644 (file)
@@ -197,13 +197,15 @@ extern void __put_user_8(void);
 
 #define put_user(x,ptr)                                                \
 ({     int __ret_pu;                                           \
+       __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
+       __pu_val = x;                                           \
        switch(sizeof(*(ptr))) {                                \
-       case 1: __put_user_1(x, ptr); break;                    \
-       case 2: __put_user_2(x, ptr); break;                    \
-       case 4: __put_user_4(x, ptr); break;                    \
-       case 8: __put_user_8(x, ptr); break;                    \
-       default:__put_user_X(x, ptr); break;                    \
+       case 1: __put_user_1(__pu_val, ptr); break;             \
+       case 2: __put_user_2(__pu_val, ptr); break;             \
+       case 4: __put_user_4(__pu_val, ptr); break;             \
+       case 8: __put_user_8(__pu_val, ptr); break;             \
+       default:__put_user_X(__pu_val, ptr); break;             \
        }                                                       \
        __ret_pu;                                               \
 })
@@ -411,7 +413,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
  */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
        if (__builtin_constant_p(n)) {
@@ -432,7 +434,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
        return __copy_to_user_ll(to, from, n);
 }
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        might_sleep();
@@ -456,7 +458,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
  */
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
        if (__builtin_constant_p(n)) {
@@ -477,7 +479,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
        return __copy_from_user_ll(to, from, n);
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_sleep();