[PATCH] hrtimer: make clockid_t arguments const
[linux-2.6-omap-h63xx.git] / kernel / fork.c
index 8a069612eac39e285afc9bc9314311e5980ef536..b18d64554feb7e48f1cbde8e2d58bafd52af1f04 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
+#include <linux/cn_proc.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -170,10 +171,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                return NULL;
        }
 
-       *ti = *orig->thread_info;
        *tsk = *orig;
        tsk->thread_info = ti;
-       ti->task = tsk;
+       setup_thread_stack(tsk, orig);
 
        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage,2);
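The open-coded thread_info copy in dup_task_struct() is replaced by the new setup_thread_stack() helper. On a tree of this vintage the helper in <linux/sched.h> is roughly the following (a sketch, not part of this diff):

        static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
        {
                *task_thread_info(p) = *task_thread_info(org);
                task_thread_info(p)->task = p;
        }

Behaviour is unchanged; the point is to hide the thread_info layout behind the task_thread_info() accessor that the rest of this patch switches to.
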
@@ -263,7 +263,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                rb_parent = &tmp->vm_rb;
 
                mm->map_count++;
-               retval = copy_page_range(mm, oldmm, tmp);
+               retval = copy_page_range(mm, oldmm, mpnt);
 
                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);
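copy_page_range() is now passed mpnt, the parent's original VMA, rather than tmp, the child's adjusted copy, presumably so the page-table walk consults the source mapping (whose flags and fields describe the pages actually being copied). Assuming the prototype of that era in <linux/mm.h>:

        int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                            struct vm_area_struct *vma);

Both VMAs cover the same address range at this point; the difference is only which one the callee inspects.
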
@@ -323,7 +323,6 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
-       mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
 
@@ -469,13 +468,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
-               /*
-                * There are cases where the PTL is held to ensure no
-                * new threads start up in user mode using an mm, which
-                * allows optimizing out ipis; the tlb_gather_mmu code
-                * is an example.
-                */
-               spin_unlock_wait(&oldmm->page_table_lock);
                goto good_mm;
        }
 
@@ -751,6 +743,14 @@ int unshare_files(void)
 
 EXPORT_SYMBOL(unshare_files);
 
+void sighand_free_cb(struct rcu_head *rhp)
+{
+       struct sighand_struct *sp;
+
+       sp = container_of(rhp, struct sighand_struct, rcu);
+       kmem_cache_free(sighand_cachep, sp);
+}
+
 static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
 {
        struct sighand_struct *sig;
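sighand_free_cb() is the deferred-free half of switching ->sighand over to RCU: the structure may still be read under rcu_read_lock() after the last user drops it, so the kmem_cache_free() has to wait for a grace period. The freeing side (in the signal/exit code, outside this file) would then queue the callback instead of freeing directly, e.g. via an assumed helper along these lines:

        /* assumed helper: free the sighand only after an RCU grace period */
        #define sighand_free(sp)        call_rcu(&(sp)->rcu, sighand_free_cb)
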
@@ -760,7 +760,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
-       tsk->sighand = sig;
+       rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        spin_lock_init(&sig->siglock);
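Publishing the freshly allocated sighand with rcu_assign_pointer() orders the initialization against concurrent readers. A minimal reader sketch, assuming a caller that only needs the siglock of some already-visible task tsk:

        struct sighand_struct *sig;

        rcu_read_lock();
        sig = rcu_dereference(tsk->sighand);
        if (sig) {
                spin_lock(&sig->siglock);
                /* ... inspect or update signal state ... */
                spin_unlock(&sig->siglock);
        }
        rcu_read_unlock();
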
@@ -811,9 +811,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;
 
-       sig->tty = current->signal->tty;
-       sig->pgrp = process_group(current);
-       sig->session = current->signal->session;
        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = 0;
 
@@ -925,7 +922,7 @@ static task_t *copy_process(unsigned long clone_flags,
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;
 
-       if (!try_module_get(p->thread_info->exec_domain->module))
+       if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;
 
        if (p->binfmt && !try_module_get(p->binfmt->module))
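Direct p->thread_info dereferences are replaced by task_thread_info(), matching the dup_task_struct() change above. On a tree where the pointer still lives in task_struct, the accessor is presumably no more than:

        #define task_thread_info(task)  ((task)->thread_info)
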
@@ -972,15 +969,20 @@ static task_t *copy_process(unsigned long clone_flags,
        p->io_context = NULL;
        p->io_wait = NULL;
        p->audit_context = NULL;
+       cpuset_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
-               goto bad_fork_cleanup;
+               goto bad_fork_cleanup_cpuset;
        }
 #endif
 
+#ifdef CONFIG_DEBUG_MUTEXES
+       p->blocked_on = NULL; /* not blocked yet */
+#endif
+
        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;
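cpuset_fork() now runs early, next to the rest of the per-task initialization, and the NUMA failure path jumps to a new bad_fork_cleanup_cpuset label (added in the last hunk) so the cpuset attachment is undone again. The CONFIG_DEBUG_MUTEXES block just clears p->blocked_on for the new mutex-debugging code. The cpuset declarations assumed from <linux/cpuset.h>, stubbed out when cpusets are not configured:

        #ifdef CONFIG_CPUSETS
        extern void cpuset_fork(struct task_struct *p);
        extern void cpuset_exit(struct task_struct *p);
        #else
        static inline void cpuset_fork(struct task_struct *p) {}
        static inline void cpuset_exit(struct task_struct *p) {}
        #endif
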
@@ -1132,28 +1134,22 @@ static task_t *copy_process(unsigned long clone_flags,
        if (unlikely(p->ptrace & PT_PTRACED))
                __ptrace_link(p, current->parent);
 
-       cpuset_fork(p);
-
        attach_pid(p, PIDTYPE_PID, p->pid);
        attach_pid(p, PIDTYPE_TGID, p->tgid);
        if (thread_group_leader(p)) {
+               p->signal->tty = current->signal->tty;
+               p->signal->pgrp = process_group(current);
+               p->signal->session = current->signal->session;
                attach_pid(p, PIDTYPE_PGID, process_group(p));
                attach_pid(p, PIDTYPE_SID, p->signal->session);
                if (p->pid)
                        __get_cpu_var(process_counts)++;
        }
 
-       if (!current->signal->tty && p->signal->tty)
-               p->signal->tty = NULL;
-
        nr_threads++;
        total_forks++;
        write_unlock_irq(&tasklist_lock);
-       retval = 0;
-
-fork_out:
-       if (retval)
-               return ERR_PTR(retval);
+       proc_fork_connector(p);
        return p;
 
 bad_fork_cleanup_namespace:
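proc_fork_connector(), the reason for the new <linux/cn_proc.h> include at the top, announces the fork over the process-events connector once the child is fully wired into the pid hashes and the tasklist_lock has been dropped. The interface is assumed to be a plain declaration with a no-op stub when the connector is not built in:

        #ifdef CONFIG_PROC_EVENTS
        void proc_fork_connector(struct task_struct *task);
        #else
        static inline void proc_fork_connector(struct task_struct *task) {}
        #endif
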
@@ -1180,19 +1176,22 @@ bad_fork_cleanup_security:
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
+bad_fork_cleanup_cpuset:
 #endif
+       cpuset_exit(p);
 bad_fork_cleanup:
        if (p->binfmt)
                module_put(p->binfmt->module);
 bad_fork_cleanup_put_domain:
-       module_put(p->thread_info->exec_domain->module);
+       module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
 bad_fork_free:
        free_task(p);
-       goto fork_out;
+fork_out:
+       return ERR_PTR(retval);
 }
 
 struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
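
With fork_out moved to the very end, every error path now falls through to return ERR_PTR(retval), and the success path returns the new task directly instead of going through the old "retval = 0 ... if (retval)" dance. A caller is then expected to use the usual IS_ERR pattern; a sketch, with the argument list mirroring the copy_process() prototype assumed for this tree:

        struct task_struct *p;

        p = copy_process(clone_flags, stack_start, regs, stack_size,
                         parent_tidptr, child_tidptr, pid);
        if (IS_ERR(p))
                return PTR_ERR(p);
        /* ... wake the child, handle CLONE_VFORK completion, ... */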