static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
-#define movsl_is_ok(a1,a2,n) \
- __movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n))
+#define movsl_is_ok(a1, a2, n) \
+ __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
-#define __do_strncpy_from_user(dst,src,count,res) \
+#define __do_strncpy_from_user(dst, src, count, res) \
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
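/*
 * Hedged illustration (not part of the patch): how a caller might pair
 * access_ok() with the unchecked variant the kernel-doc above describes
 * (assumed to be __strncpy_from_user()).  Buffer name and size are
 * hypothetical.
 */
	char kbuf[128];
	long len = -EFAULT;

	if (access_ok(VERIFY_READ, src, sizeof(kbuf)))
		len = __strncpy_from_user(kbuf, src, sizeof(kbuf));
	if (len < 0)
		return len;	/* fault while reading user memory */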
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
* Copies a NUL-terminated string from userspace to kernel space.
*
* On success, returns the length of the string (not including the trailing
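/*
 * Hedged illustration (not part of the patch): typical use of the checking
 * variant (assumed to be strncpy_from_user()).  Names are hypothetical;
 * the truncation check assumes the function returns @count when no NUL is
 * found within @count bytes.
 */
	char name[64];
	long len = strncpy_from_user(name, user_ptr, sizeof(name));

	if (len < 0)
		return len;		/* fault on the user pointer */
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* no NUL within @count: possibly truncated */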
- " .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
- " movl %%eax, 0(%3)\n"
- " movl %%edx, 4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
- " movl %%eax, 8(%3)\n"
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+ " movl %%eax, 0(%3)\n"
+ " movl %%edx, 4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+ " movl %%eax, 8(%3)\n"
" movl %%edx, 12(%3)\n"
"4: movl 16(%4), %%eax\n"
"41: movl 20(%4), %%edx\n"
" movl %%edx, 12(%3)\n"
"4: movl 16(%4), %%eax\n"
"41: movl 20(%4), %%edx\n"
- "9: lea 0(%%eax,%0,4),%0\n"
- "16: pushl %0\n"
- " pushl %%eax\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+ " pushl %%eax\n"
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
static unsigned long __copy_user_zeroing_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
static unsigned long __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
/*
* When we are in an atomic section (see
* mm/filemap.c:file_read_actor), return the full
* CPU does not honor the WP bit when writing
* from supervisory mode, and due to preemption or SMP,
* the page tables can change at any time.
* Do it manually. Manfred <manfred@colorfullife.com>
*/
while (n) {
if (len > n)
len = n;
survive:
down_read(&current->mm->mmap_sem);
retval = get_user_pages(current, current->mm,
- (unsigned long )to, 1, 1, 0, &pg, NULL);
+ (unsigned long)to, 1, 1, 0, &pg, NULL);
maddr = kmap_atomic(pg, KM_USER0);
memcpy(maddr + offset, from, len);
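/*
 * Minimal sketch (not part of the patch) of how the fragments above fit
 * together: the manual, page-pinning write path taken when the CPU does
 * not honor the WP bit in supervisor mode.  It assumes the era-appropriate
 * APIs shown in the excerpt (get_user_pages() taking task/mm arguments,
 * kmap_atomic() with a KM slot); the -ENOMEM retry and full error handling
 * are omitted.
 */
	while (n) {
		unsigned long offset = (unsigned long)to % PAGE_SIZE;
		unsigned long len = PAGE_SIZE - offset;
		struct page *pg;
		void *maddr;

		if (len > n)
			len = n;

		/* Pin the destination page so it cannot change under us. */
		down_read(&current->mm->mmap_sem);
		if (get_user_pages(current, current->mm, (unsigned long)to,
				   1, 1, 0, &pg, NULL) != 1) {
			up_read(&current->mm->mmap_sem);
			break;
		}

		/* Write through a kernel mapping rather than the user address. */
		maddr = kmap_atomic(pg, KM_USER0);
		memcpy(maddr + offset, from, len);
		kunmap_atomic(maddr, KM_USER0);
		set_page_dirty_lock(pg);
		put_page(pg);
		up_read(&current->mm->mmap_sem);

		from += len;
		to += len;
		n -= len;
	}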
- if ( n > 64 && cpu_has_xmm2)
- n = __copy_user_zeroing_intel_nocache(to, from, n);
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_zeroing_intel_nocache(to, from, n);
- if ( n > 64 && cpu_has_xmm2)
- n = __copy_user_intel_nocache(to, from, n);
+ if (n > 64 && cpu_has_xmm2)
+ n = __copy_user_intel_nocache(to, from, n);