#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define __addr_ok(addr) (!((unsigned long)(addr) &                      \
                           (current_thread_info()->addr_limit.seg)))

#define ARCH_HAS_SEARCH_EXTABLE

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

/*
 * Call the out-of-line __put_user_N helper: pointer in %rcx, value in
 * %rax, error code returned back in %rax.
 */
#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     :"=a" (ret)                                        \
                     :"c" (ptr),"a" (x)                                 \
                     :"ebx")

#define put_user(x, ptr)                                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

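/*
 * Illustrative usage sketch, not part of this header (function and
 * variable names are hypothetical).  put_user() evaluates to 0 on
 * success or -EFAULT on a fault; __get_user()/__put_user() skip the
 * pointer validation, so the caller must do it, e.g. via access_ok():
 *
 *	static int example_bump(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(val, uptr))
 *			return -EFAULT;
 *		return __put_user(val + 1, uptr);
 *	}
 */
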
#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

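/*
 * Schematic of what __put_user_asm() generates for a 4-byte store
 * (register choices are up to the compiler; this is only a sketch):
 *
 *	1:	movl %eax,(%rcx)	# store that may fault
 *	2:				# normal path continues here
 *	.section .fixup,"ax"
 *	3:	mov $-14,%eax		# -EFAULT into the error variable
 *		jmp 2b
 *	.previous
 *
 * plus an __ex_table entry (via _ASM_EXTABLE) mapping a fault at 1b to
 * the fixup code at 3b.
 */
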
#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

/*
 * Like __put_user_asm(), but the fixup path also zeroes the
 * destination register so a faulting read never hands back stale data.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

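/*
 * Illustrative usage sketch (hypothetical names): the checked copies
 * return the number of bytes that could NOT be copied, so zero means
 * success.
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 */
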
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                return ret;
        case 2:
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                return ret;
        case 4:
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                return ret;
        case 8:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                return ret;
        case 10:
                /* 8-byte chunk first; a fault leaves all 10 bytes uncopied */
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

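/*
 * Because __copy_from_user() is __always_inline and switches on a
 * compile-time constant, a fixed-size copy such as this hypothetical
 * one reduces to a single inline 8-byte mov rather than a call to
 * copy_user_generic():
 *
 *	u64 cookie;
 *
 *	if (__copy_from_user(&cookie, user_src, sizeof(cookie)))
 *		return -EFAULT;
 */
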
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                return ret;
        case 2:
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 4:
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                return ret;
        case 8:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");     /* compiler barrier between stores */
                __put_user_asm(((u16 *)src)[4], (u16 __user *)dst + 4,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");     /* compiler barrier between stores */
                __put_user_asm(((u64 *)src)[1], (u64 __user *)dst + 1,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

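/*
 * Illustrative sketch (hypothetical pointers): __copy_in_user() bounces
 * data between two user buffers through a kernel register temporary for
 * the fixed sizes handled above, and falls back to copy_user_generic()
 * otherwise.
 *
 *	if (__copy_in_user(dst_uptr, src_uptr, sizeof(u64)))
 *		return -EFAULT;
 */
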
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

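/*
 * Illustrative usage sketch (hypothetical buffers): strncpy_from_user()
 * returns the string length on success or -EFAULT on a bad pointer;
 * clear_user() returns the number of bytes that could NOT be cleared.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, user_str, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (clear_user(user_buf, PAGE_SIZE))
 *		return -EFAULT;
 */
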
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
/* zerorest: nonzero means zero the rest of the buffer after a fault */
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

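/*
 * Illustrative sketch (hypothetical names): the nocache variants use
 * non-temporal stores so that a large one-shot copy, e.g. filling a
 * capture buffer, does not evict hot data from the CPU caches.
 *
 *	if (__copy_from_user_nocache(ring_buf, user_data, len))
 *		return -EFAULT;
 */
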
#endif /* __X86_64_UACCESS_H */