[SCSI] mptfusion: Increase scsi-timeouts, similarly to the LSI 4.x driver.
[linux-2.6-omap-h63xx.git] / include / asm-parisc / uaccess.h
index 8a08423b757062b273077365196bf7c5b6be0370..4878b9501f245ec77bb826a800d2878e34c7cc0e 100644 (file)
@@ -4,7 +4,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <asm/page.h>
 #include <asm/system.h>
 #include <asm/cache.h>
@@ -24,7 +23,7 @@
 
 /*
  * Note that since kernel addresses are in a separate address space on
- * parisc, we don't need to do anything for access_ok() or verify_area().
+ * parisc, we don't need to do anything for access_ok().
  * We just let the page fault handler do the right thing. This also means
  * that put_user is the same as __put_user, etc.
  */
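
The practical upshot: on parisc access_ok() unconditionally returns 1 (see the body in the next hunk), and a bad user pointer is instead caught by the page fault handler at access time. A minimal caller sketch, illustrative only (read_user_word() is a hypothetical function, not part of this header):

    /* Hypothetical caller: the access_ok() check compiles away on
     * parisc; get_user() (== __get_user() here) still returns
     * -EFAULT via the exception-table fixup if uptr is bad. */
    long read_user_word(const int __user *uptr, int *out)
    {
            if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
                    return -EFAULT;         /* never taken on parisc */
            return get_user(*out, uptr);    /* same as __get_user */
    }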
@@ -40,23 +39,21 @@ static inline long access_ok(int type, const void __user * addr,
        return 1;
 }
 
-#define verify_area(type,addr,size) (0)        /* FIXME: all users should go away soon,
-                                         * and use access_ok instead, then this
-                                         * should be removed. */
-
 #define put_user __put_user
 #define get_user __get_user
 
-#if BITS_PER_LONG == 32
+#if !defined(CONFIG_64BIT)
 #define LDD_KERNEL(ptr)                __get_kernel_bad();
 #define LDD_USER(ptr)          __get_user_bad();
 #define STD_KERNEL(x, ptr)     __put_kernel_asm64(x,ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x,ptr)
+#define ASM_WORD_INSN          ".word\t"
 #else
-#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
-#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
-#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
-#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
+#define LDD_KERNEL(ptr)                __get_kernel_asm("ldd",ptr)
+#define LDD_USER(ptr)          __get_user_asm("ldd",ptr)
+#define STD_KERNEL(x, ptr)     __put_kernel_asm("std",x,ptr)
+#define STD_USER(x, ptr)       __put_user_asm("std",x,ptr)
+#define ASM_WORD_INSN          ".dword\t"
 #endif
 
 /*
@@ -70,6 +67,11 @@ struct exception_table_entry {
        long fixup;          /* fixup routine */
 };
 
+#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
+       ".section __ex_table,\"aw\"\n"                     \
+       ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
+       ".previous\n"
+
 /*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
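
Each exception_table_entry pairs a faulting instruction address with its fixup routine, and the new ASM_EXCEPTIONTABLE_ENTRY() emits that pair into __ex_table using the pointer-width directive selected above (.word on 32-bit, .dword on 64-bit). A hedged sketch of how a fault handler consumes such a table -- a plain linear scan, not parisc's actual lookup, and assuming the entry's address field is named insn as in other arch ports:

    /* Minimal sketch, not the kernel's search_exception_tables():
     * find the entry for the faulting PC so the trap handler can
     * resume at the recorded fixup routine. */
    extern const struct exception_table_entry __start___ex_table[];
    extern const struct exception_table_entry __stop___ex_table[];

    static const struct exception_table_entry *
    find_fixup(unsigned long fault_pc)
    {
            const struct exception_table_entry *e;

            for (e = __start___ex_table; e < __stop___ex_table; e++)
                    if ((unsigned long)e->insn == fault_pc)
                            return e;       /* jump to e->fixup next */
            return NULL;
    }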
@@ -108,44 +110,20 @@ struct exception_data {
        __gu_err;                                       \
 })
 
-#ifdef __LP64__
 #define __get_kernel_asm(ldx,ptr)                       \
-       __asm__("\n1:\t" ldx "\t0(%2),%0\n"             \
-               "\t.section __ex_table,\"aw\"\n"        \
-               "\t.dword\t1b,fixup_get_user_skip_1\n"  \
-               "\t.previous"                           \
+       __asm__("\n1:\t" ldx "\t0(%2),%0\n\t"           \
+               ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");
 
 #define __get_user_asm(ldx,ptr)                         \
-       __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n"       \
-               "\t.section __ex_table,\"aw\"\n"        \
-               "\t.dword\t1b,fixup_get_user_skip_1\n"  \
-               "\t.previous"                           \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err)               \
-               : "r1");
-#else
-#define __get_kernel_asm(ldx,ptr)                       \
-       __asm__("\n1:\t" ldx "\t0(%2),%0\n"             \
-               "\t.section __ex_table,\"aw\"\n"        \
-               "\t.word\t1b,fixup_get_user_skip_1\n"   \
-               "\t.previous"                           \
+       __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t"     \
+               ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "r"(ptr), "1"(__gu_err)               \
                : "r1");
 
-#define __get_user_asm(ldx,ptr)                         \
-       __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n"       \
-               "\t.section __ex_table,\"aw\"\n"        \
-                "\t.word\t1b,fixup_get_user_skip_1\n"  \
-                "\t.previous"                          \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err)               \
-               : "r1");
-#endif /* !__LP64__ */
-
 #define __put_user(x,ptr)                                       \
 ({                                                             \
        register long __pu_err __asm__ ("r8") = 0;              \
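
With the table entry factored into ASM_EXCEPTIONTABLE_ENTRY(), the kernel and user load macros above now differ only in addressing: user accesses go through space register %sr3, kernel accesses use the current space. For reference, __get_user_asm("ldw", ptr) expands to roughly the following (illustrative expansion, whitespace aside; .word becomes .dword on a 64-bit kernel):

    __asm__("\n1:\tldw\t0(%%sr3,%2),%0\n\t"
            ".section __ex_table,\"aw\"\n"
            ".word\t1b, fixup_get_user_skip_1\n\t"
            ".previous\n"
            : "=r"(__gu_val), "=r"(__gu_err)
            : "r"(ptr), "1"(__gu_err)
            : "r1");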
@@ -176,82 +154,61 @@ struct exception_data {
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
- * gcc knows about, so there are no aliasing issues.
+ * gcc knows about, so there are no aliasing issues. These macros must
+ * also be aware that "fixup_put_user_skip_[12]" are executed in the
+ * context of the fault, and any registers used there must be listed
+ * as clobbers. In this case only "r1" is used by the current routines.
+ * r8/r9 are already listed as err/val.
  */
 
-#ifdef __LP64__
 #define __put_kernel_asm(stx,x,ptr)                         \
        __asm__ __volatile__ (                              \
-               "\n1:\t" stx "\t%2,0(%1)\n"                 \
-               "\t.section __ex_table,\"aw\"\n"            \
-               "\t.dword\t1b,fixup_put_user_skip_1\n"      \
-               "\t.previous"                               \
-               : "=r"(__pu_err)                            \
-               : "r"(ptr), "r"(x), "0"(__pu_err))
-
-#define __put_user_asm(stx,x,ptr)                           \
-       __asm__ __volatile__ (                              \
-               "\n1:\t" stx "\t%2,0(%%sr3,%1)\n"           \
-               "\t.section __ex_table,\"aw\"\n"            \
-                "\t.dword\t1b,fixup_put_user_skip_1\n"     \
-                "\t.previous"                              \
-               : "=r"(__pu_err)                            \
-               : "r"(ptr), "r"(x), "0"(__pu_err)           \
-               : "r1")
-#else
-#define __put_kernel_asm(stx,x,ptr)                         \
-       __asm__ __volatile__ (                              \
-               "\n1:\t" stx "\t%2,0(%1)\n"                 \
-               "\t.section __ex_table,\"aw\"\n"            \
-                "\t.word\t1b,fixup_put_user_skip_1\n"      \
-                "\t.previous"                              \
+               "\n1:\t" stx "\t%2,0(%1)\n\t"               \
+               ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(x), "0"(__pu_err)           \
-               : "r1")
+               : "r1")
 
 #define __put_user_asm(stx,x,ptr)                           \
        __asm__ __volatile__ (                              \
-               "\n1:\t" stx "\t%2,0(%%sr3,%1)\n"           \
-               "\t.section __ex_table,\"aw\"\n"            \
-                "\t.word\t1b,fixup_put_user_skip_1\n"      \
-                "\t.previous"                              \
+               "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t"         \
+               ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(x), "0"(__pu_err)           \
                : "r1")
 
-#define __put_kernel_asm64(__val,ptr) do {                         \
-       u64 __val64 = (u64)(__val);                                 \
-       u32 hi = (__val64) >> 32;                                           \
-       u32 lo = (__val64) & 0xffffffff;                                    \
+
+#if !defined(CONFIG_64BIT)
+
+#define __put_kernel_asm64(__val,ptr) do {                 \
+       u64 __val64 = (u64)(__val);                         \
+       u32 hi = (__val64) >> 32;                           \
+       u32 lo = (__val64) & 0xffffffff;                    \
        __asm__ __volatile__ (                              \
-               "\n1:\tstw %2,0(%1)\n"                      \
-               "\n2:\tstw %3,4(%1)\n"                      \
-               "\t.section __ex_table,\"aw\"\n"            \
-                "\t.word\t1b,fixup_put_user_skip_2\n"      \
-                "\t.word\t2b,fixup_put_user_skip_1\n"      \
-                "\t.previous"                              \
+               "\n1:\tstw %2,0(%1)"                        \
+               "\n2:\tstw %3,4(%1)\n\t"                    \
+               ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+               ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
                : "r1");                                    \
 } while (0)
 
-#define __put_user_asm64(__val,ptr) do {                           \
-       u64 __val64 = (u64)__val;                                   \
-       u32 hi = (__val64) >> 32;                                           \
-       u32 lo = (__val64) & 0xffffffff;                                    \
+#define __put_user_asm64(__val,ptr) do {                   \
+       u64 __val64 = (u64)(__val);                         \
+       u32 hi = (__val64) >> 32;                           \
+       u32 lo = (__val64) & 0xffffffff;                    \
        __asm__ __volatile__ (                              \
-               "\n1:\tstw %2,0(%%sr3,%1)\n"                \
-               "\n2:\tstw %3,4(%%sr3,%1)\n"                \
-               "\t.section __ex_table,\"aw\"\n"            \
-                "\t.word\t1b,fixup_get_user_skip_2\n"      \
-                "\t.word\t2b,fixup_get_user_skip_1\n"      \
-                "\t.previous"                              \
+               "\n1:\tstw %2,0(%%sr3,%1)"                  \
+               "\n2:\tstw %3,4(%%sr3,%1)\n\t"              \
+               ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
+               ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
                : "=r"(__pu_err)                            \
                : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
                : "r1");                                    \
 } while (0)
 
-#endif /* !__LP64__ */
+#endif /* !defined(CONFIG_64BIT) */
 
 
 /*