uml: rename pt_regs general-purpose register file
arch/um/kernel/process.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/err.h"
#include "linux/hardirq.h"
#include "linux/mm.h"
#include "linux/personality.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "linux/sched.h"
#include "linux/threads.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies and reads its own
 * entry, so it does not matter if another processor's entry is being
 * modified concurrently.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

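/*
 * Return the pid of the host process running userspace for this task.
 * Until the FIXME below is resolved, this is always the pid for CPU 0.
 */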
static inline int external_pid(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

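/*
 * Allocate a kernel stack of 2^order pages.  Callers that cannot
 * sleep pass atomic != 0 to get a GFP_ATOMIC allocation; returns 0
 * on failure.
 */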
unsigned long alloc_stack(int order, int atomic)
{
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	return __get_free_pages(flags, order);
}

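/*
 * Start a kernel thread: stash fn and arg in the current thread's
 * request structure, where new_thread_handler() will find them in the
 * child, and fork with the current registers.
 */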
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}

extern void arch_switch_to(struct task_struct *from, struct task_struct *to);

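/*
 * When switch_threads() returns, this stack is running as "current"
 * again.  If something set current->thread.saved_task while this task
 * was switched out, dump the registers and loop to switch to that task
 * instead; otherwise hand the previously scheduled task back to the
 * scheduler.
 */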
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		/* XXX need to check runqueues[cpu].idle */
		if (current->pid == 0)
			switch_timers(0);

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current->thread.prev_sched, current);

		if (current->pid == 0)
			switch_timers(1);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		next = current->thread.saved_task;
		prev = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

extern void schedule_tail(struct task_struct *prev);

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	} else
		do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if (current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed.  We may want to exploit this
	 * to improve performance. -bb
	 */
	arch_switch_to(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

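/*
 * Set up the thread state of a newly copied task.  A forked process
 * gets a copy of the parent's registers, with the syscall return value
 * forced to 0 in the child; a kernel thread instead inherits the
 * fn/arg request stashed by kernel_thread() above.
 */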
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/* Set a new TLS for the child thread? */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

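/*
 * Run a callback on the stack of the initial (boot) thread, with
 * kmalloc disabled for the duration of the switch.
 */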
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		idle_sleep(10);
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

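/*
 * Walk the task's page tables to translate a virtual address into a
 * physical one.  Any level of the walk that is not present yields
 * ERR_PTR(-EINVAL); on success the pte is optionally returned through
 * pte_out.
 */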
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	if (task->mm == NULL)
		return ERR_PTR(-EINVAL);
	pgd = pgd_offset(task->mm, addr);
	if (!pgd_present(*pgd))
		return ERR_PTR(-EINVAL);

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return ERR_PTR(-EINVAL);

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return ERR_PTR(-EINVAL);

	pte = pte_offset_kernel(pmd, addr);
	ptent = *pte;
	if (!pte_present(ptent))
		return ERR_PTR(-EINVAL);

	if (pte_out != NULL)
		*pte_out = ptent;
	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return "(Unknown)";
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

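/*
 * Return nonzero if sp does not lie on the current kernel stack, i.e.
 * the address belongs to a userspace context.  The mask rounds sp down
 * to the base of a stack-order-aligned stack.
 */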
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread;
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

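/*
 * Run the UML-specific exitcalls, most recently registered first.
 */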
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

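/*
 * Trivial wrappers around the uaccess helpers, so that code which
 * cannot pull in the kernel headers (the userspace side of UML) can
 * still operate on user memory.
 */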
int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread->cpu;
}

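/*
 * Host PTRACE_SYSEMU support level: 0 = none, 1 = PTRACE_SYSEMU,
 * 2 = PTRACE_SYSEMU_SINGLESTEP.  set_using_sysemu() refuses to raise
 * the level beyond what the host supports.
 */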
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset,
			    int size, int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
		/* No overflow */
		*eof = 1;

	return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	ent->read_proc  = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

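/*
 * Report whether a task is being single-stepped: 0 if it is not, 1 if
 * the step is over a syscall (thread.singlestep_syscall is set), and 2
 * otherwise.
 */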
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.  Since UML includes that header via
 * asm-um/system-generic.h, we can test the macro here to behave
 * as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif