1 --- linux-2.6.orig/arch/arm/kernel/entry-armv.S
2 +++ linux-2.6/arch/arm/kernel/entry-armv.S
5 ldmia r7, {r2 - r4} @ Get USR pc, cpsr
7 +#if __LINUX_ARM_ARCH__ < 6
8 + @ make sure our user space atomic helper is aborted
10 + bichs r3, r3, #PSR_Z_BIT
14 @ We are now ready to fill in the remaining blanks on the stack:
20 +#ifdef CONFIG_HAS_TLS_REG
21 + mcr p15, 0, r3, c13, c0, 3 @ set TLS register
24 - str r3, [r4, #-3] @ Set TLS ptr
25 + str r3, [r4, #-15] @ TLS val at 0xffff0ff0
27 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
29 @ Always disable VFP so we can lazily save/restore the old
31 ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
38 + * These are segments of kernel-provided user code reachable from user space
39 + * at a fixed address in kernel memory. This is used to provide user space
40 + * with some operations which require kernel help because of unimplemented
41 + * native feature and/or instructions in many ARM CPUs. The idea is for
42 + * this code to be executed directly in user mode for best efficiency but
43 + * which is too intimate with the kernel counter part to be left to user
44 + * libraries. In fact this code might even differ from one CPU to another
45 + * depending on the available instruction set and restrictions like on
46 + * SMP systems. In other words, the kernel reserves the right to change
47 + * this code as needed without warning. Only the entry points and their
48 + * results are guaranteed to be stable.
50 + * Each segment is 32-byte aligned and will be moved to the top of the high
51 + * vector page. New segments (if ever needed) must be added in front of
52 + * existing ones. This mechanism should be used only for things that are
53 + * really small and justified, and not be abused freely.
55 + * User space is expected to implement those things inline when optimizing
56 + * for a processor that has the necessary native support, but only if such
57 + * resulting binaries are already going to be incompatible with earlier ARM
58 + * processors due to the use of unsupported instructions other than what
59 + * is provided here. In other words don't make binaries unable to run on
60 + * earlier processors just for the sake of not using these kernel helpers
61 + * if your compiled code is not going to use the new instructions for other
66 +__kuser_helper_start:
69 + * Reference prototype:
71 + * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
78 + * lr = return address
82 + * r0 = returned value (zero or non-zero)
83 + * C flag = set if r0 == 0, clear if r0 != 0
89 + * Definition and user space usage example:
91 + * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
92 + * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
94 + * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
95 + * Return zero if *ptr was changed or non-zero if no exchange happened.
96 + * The C flag is also set if *ptr was changed to allow for assembly
97 + * optimization in the calling code.
99 + * For example, a user space atomic_add implementation could look like this:
101 + * #define atomic_add(ptr, val) \
102 + * ({ register unsigned int *__ptr asm("r2") = (ptr); \
103 + * register unsigned int __result asm("r1"); \
105 + * "1: @ atomic_add\n\t" \
106 + * "ldr r0, [r2]\n\t" \
107 + * "mov r3, #0xffff0fff\n\t" \
108 + * "add lr, pc, #4\n\t" \
109 + * "add r1, r0, %2\n\t" \
110 + * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
112 + * : "=&r" (__result) \
113 + * : "r" (__ptr), "rIL" (val) \
114 + * : "r0","r3","ip","lr","cc","memory" ); \
118 +__kuser_cmpxchg: @ 0xffff0fc0
120 +#if __LINUX_ARM_ARCH__ < 6
123 + * Theory of operation:
125 + * We set the Z flag before loading oldval. If ever an exception
126 + * occurs we can not be sure the loaded value will still be the same
127 + * when the exception returns, therefore the user exception handler
128 + * will clear the Z flag whenever the interrupted user code was
129 + * actually from the kernel address space (see the usr_entry macro).
131 + * The post-increment on the str is used to prevent a race with an
132 + * exception happening just after the str instruction which would
133 + * clear the Z flag although the exchange was done.
135 + teq ip, ip @ set Z flag
136 + ldr ip, [r2] @ load current val
137 + add r3, r2, #1 @ prepare store ptr
138 + teqeq ip, r0 @ compare with oldval if still allowed
139 + streq r1, [r3, #-1]! @ store newval if still allowed
139 + subs r0, r2, r3 @ if r2 == r3 the str occurred
147 + strexeq r3, r1, [r2]
156 + * Reference prototype:
158 + * int __kernel_get_tls(void)
162 + * lr = return address
170 + * the Z flag might be lost
172 + * Definition and user space usage example:
174 + * typedef int (__kernel_get_tls_t)(void);
175 + * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
177 + * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
179 + * This could be used as follows:
181 + * #define __kernel_get_tls() \
182 + * ({ register unsigned int __val asm("r0"); \
183 + * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
184 + * : "=r" (__val) : : "lr","cc" ); \
188 +__kuser_get_tls: @ 0xffff0fe0
190 +#ifndef CONFIG_HAS_TLS_REG
192 + ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
197 + mrc p15, 0, r0, c13, c0, 3 @ read TLS register
203 + .word 0 @ pad up to __kuser_helper_version
207 + * Reference declaration:
209 + * extern unsigned int __kernel_helper_version;
211 + * Definition and user space usage example:
213 + * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
215 + * User space may read this to determine the current number of helpers
219 +__kuser_helper_version: @ 0xffff0ffc
220 + .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
227 @@ -710,12 +910,21 @@
228 stmia r0, {r1, r2, r3, r4, r5, r6, ip, lr}
231 - adr r0, __stubs_start @ copy stubs to 0x200
232 - adr r1, __stubs_end
234 + adr r1, __stubs_start @ copy stubs to 0x200
235 + adr r4, __stubs_end
243 + add r2, r0, #0x1000 @ top of high vector page
244 + adr r4, __kuser_helper_end @ user helpers to top of page
245 + adr r1, __kuser_helper_start @ going downwards.
246 +1: ldr r3, [r4, #-4]!
251 LOADREGS(fd, sp!, {r4 - r6, pc})
254 Index: linux-2.6/arch/arm/kernel/traps.c
255 ===================================================================
256 --- linux-2.6.orig/arch/arm/kernel/traps.c
257 +++ linux-2.6/arch/arm/kernel/traps.c
258 @@ -454,13 +454,17 @@
261 thread->tp_value = regs->ARM_r0;
262 +#ifdef CONFIG_HAS_TLS_REG
263 + asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
266 - * Our user accessible TLS ptr is located at 0xffff0ffc.
267 - * On SMP read access to this address must raise a fault
268 - * and be emulated from the data abort handler.
270 + * User space must never try to access this directly.
271 + * Expect your app to break eventually if you do so.
272 + * The user helper at 0xffff0fe0 must be used instead.
273 + * (see entry-armv.S for details)
275 - *((unsigned long *)0xffff0ffc) = thread->tp_value;
276 + *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
281 --- kernel26/include/asm-arm/unistd.h.old 2005-04-16 05:17:08.344899152 +0100
282 +++ kernel26/include/asm-arm/unistd.h 2005-04-16 05:17:54.027954272 +0100
284 #define __ARM_NR_cacheflush (__ARM_NR_BASE+2)
285 #define __ARM_NR_usr26 (__ARM_NR_BASE+3)
286 #define __ARM_NR_usr32 (__ARM_NR_BASE+4)
287 +#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
288 #define __ARM_NR_lbl (__ARM_NR_BASE+9)
290 -#define __ARM_NR_set_tls (__ARM_NR_BASE+0x800)
293 #define __sys1(x) __sys2(x)