X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-sparc64%2Fuaccess.h;h=203e8eee63515db1fa9a7dd5ed998aacfe99b460;hb=4ddccb8eb9d1c57c7dd3838d8328690bf1869145;hp=80a65d7e3dbff92308d2b4e1bf5e394e45c97551;hpb=d344c5e0856ad03278d8700b503762dbc8b86e12;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 80a65d7e3db..203e8eee635 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
  * with the main instruction path. This means when everything is well,
  * we don't even have to jump over them. Further, they do not intrude
  * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
  */
 
-struct exception_table_entry
-{
-	unsigned insn, fixup;
+struct exception_table_entry {
+	unsigned int insn, fixup;
 };
 
-/* Special exable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 extern void __ret_efault(void);
+extern void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
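
Side note, not part of the patch: the only functional change in the three copy_*_user() hunks is the unlikely() annotation, which in the kernel boils down to GCC's __builtin_expect() so the compiler keeps the rarely-taken fixup call off the hot path. Below is a minimal stand-alone sketch of that "fast path plus unlikely fixup fallback" shape; the helper names (my_unlikely, fast_copy, copy_fixup, do_copy) are illustrative only and do not come from uaccess.h.

/* Requires GCC or Clang for __builtin_expect(). */
#include <stdio.h>
#include <string.h>

/* Same idea as the kernel's unlikely(): hint that the branch is rarely taken. */
#define my_unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative stand-in for the fast-path copy; by convention it returns the
 * number of bytes that could NOT be copied (0 means full success). */
static unsigned long fast_copy(void *to, const void *from, unsigned long size)
{
	memcpy(to, from, size);
	return 0;	/* pretend the fast path never faults in this sketch */
}

/* Illustrative stand-in for the slow fixup routine invoked after a fault. */
static unsigned long copy_fixup(void *to, const void *from, unsigned long size)
{
	(void)to;
	(void)from;
	return size;	/* give up: report everything as uncopied */
}

static unsigned long do_copy(void *to, const void *from, unsigned long size)
{
	unsigned long ret = fast_copy(to, from, size);

	/* Mirrors the patched copy_from_user(): fall back to the fixup only
	 * when the fast path reported a fault, and tell the compiler that
	 * this branch is the rare case. */
	if (my_unlikely(ret))
		ret = copy_fixup(to, from, size);
	return ret;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];

	printf("uncopied bytes: %lu\n", do_copy(dst, src, sizeof(src)));
	return 0;
}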