/*
 * arch/um/sys-i386/ldt.c
 * From tree linux-2.6-omap-h63xx.git (commit: "uml: throw out CONFIG_MODE_TT")
 */
1 /*
2  * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3  * Licensed under the GPL
4  */
5
6 #include "linux/sched.h"
7 #include "linux/slab.h"
8 #include "linux/types.h"
9 #include "linux/errno.h"
10 #include "linux/spinlock.h"
11 #include "asm/uaccess.h"
12 #include "asm/smp.h"
13 #include "asm/ldt.h"
14 #include "asm/unistd.h"
15 #include "choose-mode.h"
16 #include "kern.h"
17 #include "mode_kern.h"
18 #include "os.h"
19
20 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
21
22 #include "skas.h"
23 #include "skas_ptrace.h"
24 #include "asm/mmu_context.h"
25 #include "proc_mm.h"
26
/*
 * Install or clear one LDT entry in the host address space described by
 * mm_idp.
 *
 * @mm_idp: identifies the target mm (host pid in non-proc_mm mode,
 *          /proc/mm descriptor otherwise)
 * @func:   modify_ldt function code to run on the host
 * @desc:   the descriptor to write
 * @addr:   in/out scratch address used by the syscall-stub data channel
 * @done:   non-zero on the last entry of a batch (lets the stub flush)
 *
 * Returns 0 on success or a negative error from the host operation.
 */
long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
                     void **addr, int done)
{
        long res;

        if(proc_mm){
                /* This is a special handling for the case, that the mm to
                 * modify isn't current->active_mm.
                 * If this is called directly by modify_ldt,
                 *     (current->active_mm->context.skas.u == mm_idp)
                 * will be true. So no call to switch_mm_skas(mm_idp) is done.
                 * If this is called in case of init_new_ldt or PTRACE_LDT,
                 * mm_idp won't belong to current->active_mm, but child->mm.
                 * So we need to switch child's mm into our userspace, then
                 * later switch back.
                 *
                 * Note: I'm unsure: should interrupts be disabled here?
                 */
                if(!current->active_mm || current->active_mm == &init_mm ||
                   mm_idp != &current->active_mm->context.skas.id)
                        switch_mm_skas(mm_idp);
        }

        if(ptrace_ldt) {
                /* Host supports PTRACE_LDT: hand the request directly to the
                 * host process backing this address space. */
                struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
                        .func = func,
                        .ptr = desc,
                        .bytecount = sizeof(*desc)};
                u32 cpu;
                int pid;

                if(!proc_mm)
                        pid = mm_idp->u.pid;
                else {
                        /* proc_mm: the target mm was switched in above, so
                         * address this CPU's userspace thread. */
                        cpu = get_cpu();
                        pid = userspace_pid[cpu];
                }

                res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);

                if(proc_mm)
                        put_cpu();
        }
        else {
                /* No PTRACE_LDT: copy the descriptor into the child through
                 * the syscall stub (size rounded up to whole longs), then run
                 * modify_ldt there. */
                void *stub_addr;
                res = syscall_stub_data(mm_idp, (unsigned long *)desc,
                                        (sizeof(*desc) + sizeof(long) - 1) &
                                            ~(sizeof(long) - 1),
                                        addr, &stub_addr);
                if(!res){
                        unsigned long args[] = { func,
                                                 (unsigned long)stub_addr,
                                                 sizeof(*desc),
                                                 0, 0, 0 };
                        res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
                                               0, addr, done);
                }
        }

        if(proc_mm){
                /* This is the second part of special handling, that makes
                 * PTRACE_LDT possible to implement: if we switched away from
                 * current's mm above, switch back to it now.
                 */
                if(current->active_mm && current->active_mm != &init_mm &&
                   mm_idp != &current->active_mm->context.skas.id)
                        switch_mm_skas(&current->active_mm->context.skas.id);
        }

        return res;
}
97
98 static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
99 {
100         int res, n;
101         struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
102                         .func = 0,
103                         .bytecount = bytecount,
104                         .ptr = kmalloc(bytecount, GFP_KERNEL)};
105         u32 cpu;
106
107         if(ptrace_ldt.ptr == NULL)
108                 return -ENOMEM;
109
110         /* This is called from sys_modify_ldt only, so userspace_pid gives
111          * us the right number
112          */
113
114         cpu = get_cpu();
115         res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
116         put_cpu();
117         if(res < 0)
118                 goto out;
119
120         n = copy_to_user(ptr, ptrace_ldt.ptr, res);
121         if(n != 0)
122                 res = -EFAULT;
123
124   out:
125         kfree(ptrace_ldt.ptr);
126
127         return res;
128 }
129
130 /*
131  * In skas mode, we hold our own ldt data in UML.
132  * Thus, the code implementing sys_modify_ldt_skas
133  * is very similar to (and mostly stolen from) sys_modify_ldt
134  * for arch/i386/kernel/ldt.c
135  * The routines copied and modified in part are:
136  * - read_ldt
137  * - read_default_ldt
138  * - write_ldt
139  * - sys_modify_ldt_skas
140  */
141
/*
 * Implement modify_ldt func 0: copy the current task's LDT to userspace.
 *
 * Returns the number of bytes placed at ptr (any requested bytes beyond
 * the stored entries are zero-filled), or -EFAULT on a failed user copy.
 */
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
        int i, err = 0;
        unsigned long size;
        uml_ldt_t * ldt = &current->mm->context.skas.ldt;

        if(!ldt->entry_count)
                goto out;
        /* Cap the request at the architectural maximum LDT size. */
        if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
                bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
        err = bytecount;

        if(ptrace_ldt){
                /* PTRACE_LDT mode keeps no local shadow copy; ask the host. */
                return read_ldt_from_host(ptr, bytecount);
        }

        down(&ldt->semaphore);
        if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
                /* Small LDTs are stored inline in ldt->u.entries. */
                size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
                if(size > bytecount)
                        size = bytecount;
                if(copy_to_user(ptr, ldt->u.entries, size))
                        err = -EFAULT;
                bytecount -= size;
                ptr += size;
        }
        else {
                /* Page mode: write_ldt() keeps entry_count a multiple of
                 * LDT_ENTRIES_PER_PAGE here, so whole pages cover every
                 * stored entry. */
                for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
                         i++){
                        size = PAGE_SIZE;
                        if(size > bytecount)
                                size = bytecount;
                        if(copy_to_user(ptr, ldt->u.pages[i], size)){
                                err = -EFAULT;
                                break;
                        }
                        bytecount -= size;
                        ptr += size;
                }
        }
        up(&ldt->semaphore);

        if(bytecount == 0 || err == -EFAULT)
                goto out;

        /* Zero-fill whatever the caller asked for beyond stored entries. */
        if(clear_user(ptr, bytecount))
                err = -EFAULT;

out:
        return err;
}
193
194 static int read_default_ldt(void __user * ptr, unsigned long bytecount)
195 {
196         int err;
197
198         if(bytecount > 5*LDT_ENTRY_SIZE)
199                 bytecount = 5*LDT_ENTRY_SIZE;
200
201         err = bytecount;
202         /* UML doesn't support lcall7 and lcall27.
203          * So, we don't really have a default ldt, but emulate
204          * an empty ldt of common host default ldt size.
205          */
206         if(clear_user(ptr, bytecount))
207                 err = -EFAULT;
208
209         return err;
210 }
211
212 static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
213 {
214         uml_ldt_t * ldt = &current->mm->context.skas.ldt;
215         struct mm_id * mm_idp = &current->mm->context.skas.id;
216         int i, err;
217         struct user_desc ldt_info;
218         struct ldt_entry entry0, *ldt_p;
219         void *addr = NULL;
220
221         err = -EINVAL;
222         if(bytecount != sizeof(ldt_info))
223                 goto out;
224         err = -EFAULT;
225         if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
226                 goto out;
227
228         err = -EINVAL;
229         if(ldt_info.entry_number >= LDT_ENTRIES)
230                 goto out;
231         if(ldt_info.contents == 3){
232                 if (func == 1)
233                         goto out;
234                 if (ldt_info.seg_not_present == 0)
235                         goto out;
236         }
237
238         if(!ptrace_ldt)
239                 down(&ldt->semaphore);
240
241         err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
242         if(err)
243                 goto out_unlock;
244         else if(ptrace_ldt) {
245         /* With PTRACE_LDT available, this is used as a flag only */
246                 ldt->entry_count = 1;
247                 goto out;
248         }
249
250         if(ldt_info.entry_number >= ldt->entry_count &&
251            ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
252                 for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
253                     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
254                     i++){
255                         if(i == 0)
256                                 memcpy(&entry0, ldt->u.entries,
257                                        sizeof(entry0));
258                         ldt->u.pages[i] = (struct ldt_entry *)
259                                 __get_free_page(GFP_KERNEL|__GFP_ZERO);
260                         if(!ldt->u.pages[i]){
261                                 err = -ENOMEM;
262                                 /* Undo the change in host */
263                                 memset(&ldt_info, 0, sizeof(ldt_info));
264                                 write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
265                                 goto out_unlock;
266                         }
267                         if(i == 0) {
268                                 memcpy(ldt->u.pages[0], &entry0,
269                                        sizeof(entry0));
270                                 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
271                                        sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
272                         }
273                         ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
274                 }
275         }
276         if(ldt->entry_count <= ldt_info.entry_number)
277                 ldt->entry_count = ldt_info.entry_number + 1;
278
279         if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
280                 ldt_p = ldt->u.entries + ldt_info.entry_number;
281         else
282                 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
283                         ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
284
285         if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
286            (func == 1 || LDT_empty(&ldt_info))){
287                 ldt_p->a = 0;
288                 ldt_p->b = 0;
289         }
290         else{
291                 if (func == 1)
292                         ldt_info.useable = 0;
293                 ldt_p->a = LDT_entry_a(&ldt_info);
294                 ldt_p->b = LDT_entry_b(&ldt_info);
295         }
296         err = 0;
297
298 out_unlock:
299         up(&ldt->semaphore);
300 out:
301         return err;
302 }
303
304 static long do_modify_ldt_skas(int func, void __user *ptr,
305                                unsigned long bytecount)
306 {
307         int ret = -ENOSYS;
308
309         switch (func) {
310                 case 0:
311                         ret = read_ldt(ptr, bytecount);
312                         break;
313                 case 1:
314                 case 0x11:
315                         ret = write_ldt(ptr, bytecount, func);
316                         break;
317                 case 2:
318                         ret = read_default_ldt(ptr, bytecount);
319                         break;
320         }
321         return ret;
322 }
323
/* Serializes the one-time host LDT discovery in ldt_get_host_info(). */
static DEFINE_SPINLOCK(host_ldt_lock);
/* Fallback slot list: slot 0 then the -1 terminator; dummy_list+1 is an
 * empty list (terminator only). */
static short dummy_list[9] = {0, -1};
/* -1 terminated list of host LDT slots in use; NULL until discovered. */
static short * host_ldt_entries = NULL;
327
/*
 * Read the host's own LDT once and record which slots are occupied, so
 * that init_new_ldt() can clear exactly those inherited entries in new
 * address spaces.  The result is published through host_ldt_entries as a
 * -1 terminated list of slot numbers.
 */
static void ldt_get_host_info(void)
{
        long ret;
        struct ldt_entry * ldt;
        short *tmp;
        int i, size, k, order;

        spin_lock(&host_ldt_lock);

        if(host_ldt_entries != NULL){
                /* Another caller already did (or is doing) the discovery. */
                spin_unlock(&host_ldt_lock);
                return;
        }
        /* Publish an empty list (dummy_list+1 points at the -1 terminator)
         * as a claim marker before dropping the lock. */
        host_ldt_entries = dummy_list+1;

        spin_unlock(&host_ldt_lock);

        /* Smallest page order covering LDT_PAGES_MAX pages. */
        for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);

        ldt = (struct ldt_entry *)
              __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if(ldt == NULL) {
                printk("ldt_get_host_info: couldn't allocate buffer for host "
                       "ldt\n");
                return;
        }

        /* func 0 = read; ret is the number of bytes the host returned. */
        ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
        if(ret < 0) {
                printk("ldt_get_host_info: couldn't read host ldt\n");
                goto out_free;
        }
        if(ret == 0) {
                /* default_ldt is active, simply write an empty entry 0 */
                host_ldt_entries = dummy_list;
                goto out_free;
        }

        /* Count the occupied slots. */
        for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
                if(ldt[i].a != 0 || ldt[i].b != 0)
                        size++;
        }

        if(size < ARRAY_SIZE(dummy_list))
                host_ldt_entries = dummy_list;
        else {
                /* One extra element for the -1 terminator. */
                size = (size + 1) * sizeof(dummy_list[0]);
                tmp = kmalloc(size, GFP_KERNEL);
                if(tmp == NULL) {
                        printk("ldt_get_host_info: couldn't allocate host ldt "
                               "list\n");
                        goto out_free;
                }
                host_ldt_entries = tmp;
        }

        /* Record the occupied slot numbers and terminate with -1.
         * NOTE(review): the list is filled in after being published above;
         * a concurrent reader could observe a partially written list -
         * confirm callers are serialized. */
        for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
                if(ldt[i].a != 0 || ldt[i].b != 0) {
                        host_ldt_entries[k++] = i;
                }
        }
        host_ldt_entries[k] = -1;

out_free:
        free_pages((unsigned long)ldt, order);
}
394
/*
 * Set up the LDT state of a newly created address space.
 *
 * @new_mm:  the context being initialized
 * @from_mm: the parent context on fork/clone, or NULL for a fresh mm
 *
 * With no parent, entries inherited from the host (or the host's default
 * LDT under proc_mm) are cleared.  With a parent, its LDT is duplicated:
 * on the host side via MM_COPY_SEGMENTS when proc_mm is available, and in
 * UML's shadow copy unless PTRACE_LDT is in use.
 * Returns 0 on success or a negative error.
 */
long init_new_ldt(struct mmu_context_skas * new_mm,
                  struct mmu_context_skas * from_mm)
{
        struct user_desc desc;
        short * num_p;
        int i;
        long page, err=0;
        void *addr = NULL;
        struct proc_mm_op copy;


        if(!ptrace_ldt)
                init_MUTEX(&new_mm->ldt.semaphore);

        if(!from_mm){
                memset(&desc, 0, sizeof(desc));
                /*
                 * We have to initialize a clean ldt.
                 */
                if(proc_mm) {
                        /*
                         * If the new mm was created using proc_mm, host's
                         * default-ldt currently is assigned, which normally
                         * contains the call-gates for lcall7 and lcall27.
                         * To remove these gates, we simply write an empty
                         * entry as number 0 to the host.
                         */
                        err = write_ldt_entry(&new_mm->id, 1, &desc,
                                              &addr, 1);
                }
                else{
                        /*
                         * Now we try to retrieve info about the ldt, we
                         * inherited from the host. All ldt-entries found
                         * will be reset in the following loop
                         */
                        ldt_get_host_info();
                        for(num_p=host_ldt_entries; *num_p != -1; num_p++){
                                desc.entry_number = *num_p;
                                /* done=1 only on the last entry of the
                                 * batch, to flush the syscall stub. */
                                err = write_ldt_entry(&new_mm->id, 1, &desc,
                                                      &addr, *(num_p + 1) == -1);
                                if(err)
                                        break;
                        }
                }
                new_mm->ldt.entry_count = 0;

                goto out;
        }

        if(proc_mm){
                /* We have a valid from_mm, so we now have to copy the LDT of
                 * from_mm to new_mm, because using proc_mm an new mm with
                 * an empty/default LDT was created in new_mm()
                 */
                copy = ((struct proc_mm_op) { .op       = MM_COPY_SEGMENTS,
                                              .u        =
                                              { .copy_segments =
                                                        from_mm->id.u.mm_fd } } );
                i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
                if(i != sizeof(copy))
                        printk("new_mm : /proc/mm copy_segments failed, "
                               "err = %d\n", -i);
        }

        if(!ptrace_ldt) {
                /* Our local LDT is used to supply the data for
                 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
                 * i.e., we have to use the stub for modify_ldt, which
                 * can't handle the big read buffer of up to 64kB.
                 */
                down(&from_mm->ldt.semaphore);
                if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
                        /* Direct mode: the entries live inline in the union. */
                        memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
                               sizeof(new_mm->ldt.u.entries));
                }
                else{
                        /* Page mode: duplicate each backing page. */
                        i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                        while(i-->0){
                                page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
                                if (!page){
                                        err = -ENOMEM;
                                        break;
                                }
                                new_mm->ldt.u.pages[i] =
                                        (struct ldt_entry *) page;
                                memcpy(new_mm->ldt.u.pages[i],
                                       from_mm->ldt.u.pages[i], PAGE_SIZE);
                        }
                }
                new_mm->ldt.entry_count = from_mm->ldt.entry_count;
                up(&from_mm->ldt.semaphore);
        }

    out:
        return err;
}
492
493
494 void free_ldt(struct mmu_context_skas * mm)
495 {
496         int i;
497
498         if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
499                 i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
500                 while(i-- > 0){
501                         free_page((long )mm->ldt.u.pages[i]);
502                 }
503         }
504         mm->ldt.entry_count = 0;
505 }
506
/*
 * Entry point for the modify_ldt system call.  CHOOSE_MODE_PROC selects
 * the tt- or skas-mode implementation; only the skas variant
 * (do_modify_ldt_skas) is defined in this file — do_modify_ldt_tt comes
 * from elsewhere (NOTE(review): the tree is dropping CONFIG_MODE_TT;
 * verify the tt symbol still exists at link time).
 */
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
        return CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
                                ptr, bytecount);
}