#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

/*
 * switch_to() should switch tasks from prev to next, with last set to
 * the task we actually switched away from.
 */

#define switch_to(prev, next, last) do {				\
	task_t *__last;							\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign 4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)
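
/*
 * Usage sketch (illustrative only, not part of this header): the
 * scheduler core invokes the macro from its context-switch path
 * roughly as
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);
 *
 * where "last" receives the task that was running immediately before
 * control returned to "next".
 */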

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
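
/*
 * Example (a hedged sketch, not kernel code): xchg() atomically
 * stores a new value and hands back the old one, e.g. to claim a
 * one-shot flag:
 *
 *	static volatile int claimed;
 *	...
 *	if (xchg(&claimed, 1) == 0) {
 *		... we were first, do the one-time work ...
 *	}
 */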

static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
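
/*
 * Sketch of how tas() can back a trivial busy-wait lock (illustrative
 * only; real code should use the spinlock API).  tas.b sets the T bit
 * when the byte was zero and marks the byte non-zero, so a non-zero
 * return means the lock was taken successfully:
 *
 *	static volatile int simple_lock;
 *	...
 *	while (!tas(&simple_lock))
 *		;			(spin until the byte reads free)
 *	... critical section ...
 *	simple_lock = 0;		(release: clear the byte again)
 */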

extern void __xchg_called_with_bad_pointer(void);

#define mb()	__asm__ __volatile__ ("": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
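
/*
 * Sketch of the classic pairing these barriers exist for
 * (illustrative only): the producer publishes data before setting a
 * flag, and the consumer orders its reads the other way around:
 *
 *	producer			consumer
 *	--------			--------
 *	data = value;			while (!flag)
 *	smp_wmb();				;
 *	flag = 1;			smp_rmb();
 *					use(data);
 */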

/* Interrupt Control */
static __inline__ void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}

static __inline__ void local_irq_disable(void)
{
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}

#define local_save_flags(x) \
	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})

static __inline__ unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
	if ((x & 0x000000f0) != 0x000000f0)
		local_irq_enable();
	else {
		unsigned long flags;
		local_save_flags(flags);

		if (flags == 0) {
			extern void dump_stack(void);
			printk(KERN_ERR "BUG!\n");
			dump_stack();
			local_irq_disable();
		}
	}
}
#else
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
#endif

#define really_restore_flags(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
	else						\
		local_irq_disable();			\
} while (0)

/*
 * Jump to the P2 area.
 * When handling the TLB or caches, we need to do it from the P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to the P1 area.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	__asm__ __volatile__(				\
		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)
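
/*
 * Typical pattern (a sketch, not a definitive recipe): cache and TLB
 * maintenance routines bracket their register accesses with these two
 * macros so the flushing code itself runs from the uncached P2 mirror
 * rather than through the cache it is manipulating:
 *
 *	jump_to_P2();
 *	... poke cache/TLB control registers ...
 *	back_to_P1();
 */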

/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()
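
/*
 * Sketch of the usual critical-section idiom built on the primitives
 * above (illustrative only):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(disable, remembering prior state)
 *	... code that must not race with local interrupts ...
 *	local_irq_restore(flags);	(re-enable only if it was enabled)
 */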

static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	case 1:
		return xchg_u8(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/* XXX
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
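
/*
 * Sketch (illustrative only): a driver brackets timing-critical I/O
 * so the idle loop cannot halt the CPU while the device needs prompt
 * attention:
 *
 *	disable_hlt();
 *	... perform the critical I/O ...
 *	enable_hlt();
 */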

#define arch_align_stack(x) (x)

#endif