#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
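
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the address limit so code that expects __user
 * pointers can operate on a kernel buffer.  Always restore the old limit;
 * kernel_read_helper() is a hypothetical caller, not a real API.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = kernel_read_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */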

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &		\
			   (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
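
/*
 * Sketch of the intended calling pattern: validate a user range once with
 * access_ok(), then use the unchecked helpers declared below.  Note that
 * access_ok() only checks the range against addr_limit; the access itself
 * can still fault and is recovered via the exception table that follows.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	... unchecked accesses to ubuf[0..len-1] ...
 */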

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE
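
/*
 * Simplified, assumed expansion of the pattern the __get_user_asm() /
 * __put_user_asm() macros below emit.  Instruction 1: may fault, label
 * 3: in .fixup supplies the recovery path, and _ASM_EXTABLE(1b, 3b)
 * records the (insn, fixup) pair so fixup_exception() can restart the
 * trapping instruction at the fixup:
 *
 *	1:	movq (%rsi),%rax	# may fault on a bad user pointer
 *	2:				# normal continuation
 *	.section .fixup,"ax"
 *	3:	mov $-14,%eax		# -EFAULT
 *		jmp 2b
 *	.previous
 *	_ASM_EXTABLE(1b, 3b)
 */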

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "c" (ptr)			\
		     : "r8")

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)						\
({									\
	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
		break;							\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	(x) = (__force typeof(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
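
/*
 * Sketch: reading a single value with full checking.  get_user() returns
 * 0 on success and -EFAULT on a bad address; the fetched value is
 * converted through the pointer type.  example_read_u32() is illustrative
 * only, not part of this header.
 *
 *	static int example_read_u32(u32 __user *uptr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */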

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "d" (x)		\
		     : "r8")

#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
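
/*
 * Sketch of the "check once, access many" pattern the __xxx variants
 * exist for (illustrative; uarr and n stand in for a caller's own data):
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(i, uarr + i))
 *			return -EFAULT;
 */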

#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)				\
({								\
	int __pu_err;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1:							\
		__put_user_x(1, __pu_err, x, __pu_addr);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_err, x, __pu_addr);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_err, x, __pu_addr);	\
		break;						\
	case 8:							\
		__put_user_x(8, __pu_err, x, __pu_addr);	\
		break;						\
	default:						\
		__put_user_bad();				\
	}							\
	__pu_err;						\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
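
/*
 * Sketch: bulk copy with checking.  copy_from_user() returns the number
 * of bytes that could NOT be copied, so 0 means success.  struct foo and
 * ubuf are placeholders for a caller's own types.
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */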

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
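
/*
 * Sketch of strncpy_from_user() semantics: on success it returns the
 * string length excluding the trailing NUL, -EFAULT on a bad address,
 * and exactly `count` if the source was longer than the buffer (in
 * which case dst may not be NUL terminated).
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(one common truncation policy)
 */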

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
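
/*
 * Sketch: the nocache variants use non-temporal stores so a large
 * one-shot copy (e.g. filling a page that will next be consumed by a
 * device) does not displace useful cache lines; zerorest selects whether
 * the remainder of dst is zeroed when the copy faults partway.  kpage
 * and ubuf are placeholders, and as with all __ prefixed helpers the
 * caller must have validated ubuf with access_ok() first.
 *
 *	if (__copy_from_user_nocache(kpage, ubuf, PAGE_SIZE))
 *		return -EFAULT;
 */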

#endif /* __X86_64_UACCESS_H */