X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fexit.c;h=3b893e78ce61a09c486f1351249f3d94701e89f5;hb=7824bc6b474caca6d74489498d9c2c2dfcc86d10;hp=bfb1c0e940e814f7e64a3c707c65a6f5608c0388;hpb=6208e77e7fa9e69f399fddc55b1cf9527fbde599;p=linux-2.6-omap-h63xx.git

diff --git a/kernel/exit.c b/kernel/exit.c
index bfb1c0e940e..3b893e78ce6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -50,8 +50,6 @@
 #include 
 #include 
 
-extern void sem_exit (void);
-
 static void exit_mm(struct task_struct * tsk);
 
 static void __unhash_process(struct task_struct *p)
@@ -295,26 +293,27 @@ static void reparent_to_kthreadd(void)
 	switch_uid(INIT_USER);
 }
 
-void __set_special_pids(pid_t session, pid_t pgrp)
+void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
+	pid_t nr = pid_nr(pid);
 
-	if (task_session_nr(curr) != session) {
+	if (task_session(curr) != pid) {
 		detach_pid(curr, PIDTYPE_SID);
-		set_task_session(curr, session);
-		attach_pid(curr, PIDTYPE_SID, find_pid(session));
+		attach_pid(curr, PIDTYPE_SID, pid);
+		set_task_session(curr, nr);
 	}
-	if (task_pgrp_nr(curr) != pgrp) {
+	if (task_pgrp(curr) != pid) {
 		detach_pid(curr, PIDTYPE_PGID);
-		set_task_pgrp(curr, pgrp);
-		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
+		attach_pid(curr, PIDTYPE_PGID, pid);
+		set_task_pgrp(curr, nr);
 	}
 }
 
-static void set_special_pids(pid_t session, pid_t pgrp)
+static void set_special_pids(struct pid *pid)
 {
 	write_lock_irq(&tasklist_lock);
-	__set_special_pids(session, pgrp);
+	__set_special_pids(pid);
 	write_unlock_irq(&tasklist_lock);
 }
 
@@ -385,7 +384,11 @@ void daemonize(const char *name, ...)
 	 */
 	current->flags |= PF_NOFREEZE;
 
-	set_special_pids(1, 1);
+	if (current->nsproxy != &init_nsproxy) {
+		get_nsproxy(&init_nsproxy);
+		switch_task_namespaces(current, &init_nsproxy);
+	}
+	set_special_pids(&init_struct_pid);
 	proc_clear_tty(current);
 
 	/* Block and flush all signals */
@@ -400,11 +403,6 @@ void daemonize(const char *name, ...)
 	current->fs = fs;
 	atomic_inc(&fs->count);
 
-	if (current->nsproxy != init_task.nsproxy) {
-		get_nsproxy(init_task.nsproxy);
-		switch_task_namespaces(current, init_task.nsproxy);
-	}
-
 	exit_files(current);
 	current->files = init_task.files;
 	atomic_inc(&current->files->count);
@@ -460,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
 	return files;
 }
 
-void fastcall put_files_struct(struct files_struct *files)
+void put_files_struct(struct files_struct *files)
 {
 	struct fdtable *fdt;
 
@@ -747,24 +745,6 @@ static void exit_notify(struct task_struct *tsk)
 	struct task_struct *t;
 	struct pid *pgrp;
 
-	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
-	    && !thread_group_empty(tsk)) {
-		/*
-		 * This occurs when there was a race between our exit
-		 * syscall and a group signal choosing us as the one to
-		 * wake up. It could be that we are the only thread
-		 * alerted to check for pending signals, but another thread
-		 * should be woken now to take the signal since we will not.
-		 * Now we'll wake all the threads in the group just to make
-		 * sure someone gets all the pending signals.
-		 */
-		spin_lock_irq(&tsk->sighand->siglock);
-		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
-			if (!signal_pending(t) && !(t->flags & PF_EXITING))
-				recalc_sigpending_and_wake(t);
-		spin_unlock_irq(&tsk->sighand->siglock);
-	}
-
 	/*
 	 * This does two things:
 	 *
@@ -907,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
 	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
 }
 
-fastcall NORET_TYPE void do_exit(long code)
+NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
 	int group_dead;
@@ -949,7 +929,7 @@ fastcall NORET_TYPE void do_exit(long code)
 		schedule();
 	}
 
-	tsk->flags |= PF_EXITING;
+	exit_signals(tsk); /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
 	 * an exiting task cleaning up the robust pi futexes.
@@ -1085,11 +1065,12 @@ do_group_exit(int exit_code)
 		struct signal_struct *const sig = current->signal;
 		struct sighand_struct *const sighand = current->sighand;
 		spin_lock_irq(&sighand->siglock);
-		if (sig->flags & SIGNAL_GROUP_EXIT)
+		if (signal_group_exit(sig))
 			/* Another thread got here before we took the lock. */
 			exit_code = sig->group_exit_code;
 		else {
 			sig->group_exit_code = exit_code;
+			sig->flags = SIGNAL_GROUP_EXIT;
 			zap_other_threads(current);
 		}
 		spin_unlock_irq(&sighand->siglock);
@@ -1109,20 +1090,23 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, struct task_struct *p)
+static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+{
+	struct pid *pid = NULL;
+	if (type == PIDTYPE_PID)
+		pid = task->pids[type].pid;
+	else if (type < PIDTYPE_MAX)
+		pid = task->group_leader->pids[type].pid;
+	return pid;
+}
+
+static int eligible_child(enum pid_type type, struct pid *pid, int options,
+			  struct task_struct *p)
 {
 	int err;
-	struct pid_namespace *ns;
 
-	ns = current->nsproxy->pid_ns;
-	if (pid > 0) {
-		if (task_pid_nr_ns(p, ns) != pid)
-			return 0;
-	} else if (!pid) {
-		if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
-			return 0;
-	} else if (pid != -1) {
-		if (task_pgrp_nr_ns(p, ns) != -pid)
+	if (type < PIDTYPE_MAX) {
+		if (task_pid_type(p, type) != pid)
 			return 0;
 	}
 
@@ -1141,18 +1125,16 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
 	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
 	    && !(options & __WALL))
 		return 0;
-	/*
-	 * Do not consider thread group leaders that are
-	 * in a non-empty thread group:
-	 */
-	if (delay_group_leader(p))
-		return 2;
 
 	err = security_task_wait(p);
-	if (err)
-		return err;
+	if (likely(!err))
+		return 1;
 
-	return 1;
+	if (type != PIDTYPE_PID)
+		return 0;
+	/* This child was explicitly requested, abort */
+	read_unlock(&tasklist_lock);
+	return err;
 }
 
 static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
@@ -1192,20 +1174,13 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 {
 	unsigned long state;
 	int retval, status, traced;
-	struct pid_namespace *ns;
-
-	ns = current->nsproxy->pid_ns;
+	pid_t pid = task_pid_vnr(p);
 
 	if (unlikely(noreap)) {
-		pid_t pid = task_pid_nr_ns(p, ns);
 		uid_t uid = p->uid;
 		int exit_code = p->exit_code;
 		int why, status;
 
-		if (unlikely(p->exit_state != EXIT_ZOMBIE))
-			return 0;
-		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
-			return 0;
 		get_task_struct(p);
 		read_unlock(&tasklist_lock);
 		if ((exit_code & 0x7f) == 0) {
@@ -1316,11 +1291,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
 			retval = put_user(status, &infop->si_status);
 	}
 	if (!retval && infop)
-		retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
+		retval = put_user(pid, &infop->si_pid);
 	if (!retval && infop)
 		retval = put_user(p->uid, &infop->si_uid);
 	if (!retval)
-		retval = task_pid_nr_ns(p, ns);
+		retval = pid;
 
 	if (traced) {
 		write_lock_irq(&tasklist_lock);
@@ -1352,21 +1327,38 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
  * the lock and this task is uninteresting. If we return nonzero, we have
  * released the lock and the system call should return.
 */
-static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+static int wait_task_stopped(struct task_struct *p, int noreap,
 			     struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
-	int retval, exit_code;
+	int retval, exit_code, why;
+	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
-	if (!p->exit_code)
-		return 0;
-	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
-	    p->signal->group_stop_count > 0)
+	exit_code = 0;
+	spin_lock_irq(&p->sighand->siglock);
+
+	if (unlikely(!task_is_stopped_or_traced(p)))
+		goto unlock_sig;
+
+	if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
 		/*
 		 * A group stop is in progress and this is the group leader.
 		 * We won't report until all threads have stopped.
 		 */
+		goto unlock_sig;
+
+	exit_code = p->exit_code;
+	if (!exit_code)
+		goto unlock_sig;
+
+	if (!noreap)
+		p->exit_code = 0;
+
+	uid = p->uid;
+unlock_sig:
+	spin_unlock_irq(&p->sighand->siglock);
+	if (!exit_code)
 		return 0;
 
 	/*
@@ -1376,65 +1368,15 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 	 * keep holding onto the tasklist_lock while we call getrusage and
 	 * possibly take page faults for user memory.
 	 */
-	pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
 	get_task_struct(p);
+	pid = task_pid_vnr(p);
+	why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
 	read_unlock(&tasklist_lock);
 
-	if (unlikely(noreap)) {
-		uid_t uid = p->uid;
-		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
-
-		exit_code = p->exit_code;
-		if (unlikely(!exit_code) || unlikely(p->exit_state))
-			goto bail_ref;
+	if (unlikely(noreap))
 		return wait_noreap_copyout(p, pid, uid, why,
 					   exit_code, infop, ru);
-	}
-
-	write_lock_irq(&tasklist_lock);
-
-	/*
-	 * This uses xchg to be atomic with the thread resuming and setting
-	 * it. It must also be done with the write lock held to prevent a
-	 * race with the EXIT_ZOMBIE case.
-	 */
-	exit_code = xchg(&p->exit_code, 0);
-	if (unlikely(p->exit_state)) {
-		/*
-		 * The task resumed and then died. Let the next iteration
-		 * catch it in EXIT_ZOMBIE. Note that exit_code might
-		 * already be zero here if it resumed and did _exit(0).
-		 * The task itself is dead and won't touch exit_code again;
-		 * other processors in this function are locked out.
-		 */
-		p->exit_code = exit_code;
-		exit_code = 0;
-	}
-	if (unlikely(exit_code == 0)) {
-		/*
-		 * Another thread in this function got to it first, or it
-		 * resumed, or it resumed and then died.
-		 */
-		write_unlock_irq(&tasklist_lock);
-bail_ref:
-		put_task_struct(p);
-		/*
-		 * We are returning to the wait loop without having successfully
-		 * removed the process and having released the lock. We cannot
-		 * continue, since the "p" task pointer is potentially stale.
-		 *
-		 * Return -EAGAIN, and do_wait() will restart the loop from the
-		 * beginning. Do _not_ re-acquire the lock.
-		 */
-		return -EAGAIN;
-	}
-
-	/* move to end of parent's list to avoid starvation */
-	remove_parent(p);
-	add_parent(p);
-
-	write_unlock_irq(&tasklist_lock);
 
 	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
 	if (!retval && stat_addr)
@@ -1444,15 +1386,13 @@ bail_ref:
 	if (!retval && infop)
 		retval = put_user(0, &infop->si_errno);
 	if (!retval && infop)
-		retval = put_user((short)((p->ptrace & PT_PTRACED)
-					  ? CLD_TRAPPED : CLD_STOPPED),
-				  &infop->si_code);
+		retval = put_user(why, &infop->si_code);
 	if (!retval && infop)
 		retval = put_user(exit_code, &infop->si_status);
 	if (!retval && infop)
 		retval = put_user(pid, &infop->si_pid);
 	if (!retval && infop)
-		retval = put_user(p->uid, &infop->si_uid);
+		retval = put_user(uid, &infop->si_uid);
 	if (!retval)
 		retval = pid;
 	put_task_struct(p);
@@ -1474,7 +1414,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	int retval;
 	pid_t pid;
 	uid_t uid;
-	struct pid_namespace *ns;
 
 	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
 		return 0;
@@ -1489,8 +1428,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
 	spin_unlock_irq(&p->sighand->siglock);
 
-	ns = current->nsproxy->pid_ns;
-	pid = task_pid_nr_ns(p, ns);
+	pid = task_pid_vnr(p);
 	uid = p->uid;
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
@@ -1501,7 +1439,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 		if (!retval && stat_addr)
 			retval = put_user(0xffff, stat_addr);
 		if (!retval)
-			retval = task_pid_nr_ns(p, ns);
+			retval = pid;
 	} else {
 		retval = wait_noreap_copyout(p, pid, uid,
 					     CLD_CONTINUED, SIGCONT,
@@ -1512,103 +1450,63 @@ static int wait_task_continued(struct task_struct *p, int noreap,
 	return retval;
 }
 
-
-static inline int my_ptrace_child(struct task_struct *p)
-{
-	if (!(p->ptrace & PT_PTRACED))
-		return 0;
-	if (!(p->ptrace & PT_ATTACHED))
-		return 1;
-	/*
-	 * This child was PTRACE_ATTACH'd. We should be seeing it only if
-	 * we are the attacher. If we are the real parent, this is a race
-	 * inside ptrace_attach. It is waiting for the tasklist_lock,
-	 * which we have to switch the parent links, but has already set
-	 * the flags in p->ptrace.
-	 */
-	return (p->parent != p->real_parent);
-}
-
-static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
-		    int __user *stat_addr, struct rusage __user *ru)
+static long do_wait(enum pid_type type, struct pid *pid, int options,
+		    struct siginfo __user *infop, int __user *stat_addr,
+		    struct rusage __user *ru)
 {
 	DECLARE_WAITQUEUE(wait, current);
 	struct task_struct *tsk;
 	int flag, retval;
-	int allowed, denied;
 
 	add_wait_queue(&current->signal->wait_chldexit,&wait);
 repeat:
+	/* If there is nothing that can match our criteria just get out */
+	retval = -ECHILD;
+	if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
+		goto end;
+
 	/*
 	 * We will set this flag if we see any child that might later
 	 * match our criteria, even if we are not able to reap it yet.
 	 */
-	flag = 0;
-	allowed = denied = 0;
+	flag = retval = 0;
 	current->state = TASK_INTERRUPTIBLE;
 	read_lock(&tasklist_lock);
 	tsk = current;
 	do {
 		struct task_struct *p;
-		int ret;
 
 		list_for_each_entry(p, &tsk->children, sibling) {
-			ret = eligible_child(pid, options, p);
+			int ret = eligible_child(type, pid, options, p);
 			if (!ret)
 				continue;
 
 			if (unlikely(ret < 0)) {
-				denied = ret;
-				continue;
-			}
-			allowed = 1;
-
-			if (task_is_stopped_or_traced(p)) {
+				retval = ret;
+			} else if (task_is_stopped_or_traced(p)) {
 				/*
 				 * It's stopped now, so it might later
 				 * continue, exit, or stop again.
-				 *
-				 * When we hit the race with PTRACE_ATTACH, we
-				 * will not report this child. But the race
-				 * means it has not yet been moved to our
-				 * ptrace_children list, so we need to set the
-				 * flag here to avoid a spurious ECHILD when
-				 * the race happens with the only child.
				 */
 				flag = 1;
+				if (!(p->ptrace & PT_PTRACED) &&
+				    !(options & WUNTRACED))
+					continue;
 
-				if (!my_ptrace_child(p)) {
-					if (task_is_traced(p))
-						continue;
-					if (!(options & WUNTRACED))
-						continue;
-				}
-
-				retval = wait_task_stopped(p, ret == 2,
+				retval = wait_task_stopped(p,
 						(options & WNOWAIT), infop,
 						stat_addr, ru);
-				if (retval == -EAGAIN)
-					goto repeat;
-				if (retval != 0) /* He released the lock. */
-					goto end;
-			} else if (p->exit_state == EXIT_DEAD) {
-				continue;
-			} else if (p->exit_state == EXIT_ZOMBIE) {
+			} else if (p->exit_state == EXIT_ZOMBIE &&
+					!delay_group_leader(p)) {
 				/*
-				 * Eligible but we cannot release it yet:
+				 * We don't reap group leaders with subthreads.
 				 */
-				if (ret == 2)
-					goto check_continued;
 				if (!likely(options & WEXITED))
 					continue;
 				retval = wait_task_zombie(p,
 						(options & WNOWAIT), infop,
 						stat_addr, ru);
-				/* He released the lock. */
-				if (retval != 0)
-					goto end;
-			} else {
-check_continued:
+			} else if (p->exit_state != EXIT_DEAD) {
 				/*
 				 * It's running now, so it might later
 				 * exit, stop, or stop and then continue.
@@ -1619,17 +1517,20 @@ check_continued:
 				retval = wait_task_continued(p,
 						(options & WNOWAIT), infop,
 						stat_addr, ru);
-				if (retval != 0) /* He released the lock. */
-					goto end;
 			}
+			if (retval != 0) /* tasklist_lock released */
+				goto end;
 		}
 		if (!flag) {
 			list_for_each_entry(p, &tsk->ptrace_children,
-					    ptrace_list) {
-				if (!eligible_child(pid, options, p))
+						ptrace_list) {
+				flag = eligible_child(type, pid, options, p);
+				if (!flag)
 					continue;
-				flag = 1;
-				break;
+				if (likely(flag > 0))
+					break;
+				retval = flag;
+				goto end;
 			}
 		}
 		if (options & __WNOTHREAD)
@@ -1637,10 +1538,9 @@ check_continued:
 		tsk = next_thread(tsk);
 		BUG_ON(tsk->signal != current->signal);
 	} while (tsk != current);
-
 	read_unlock(&tasklist_lock);
+
 	if (flag) {
-		retval = 0;
 		if (options & WNOHANG)
 			goto end;
 		retval = -ERESTARTSYS;
@@ -1650,14 +1550,12 @@ check_continued:
 		goto repeat;
 	}
 	retval = -ECHILD;
-	if (unlikely(denied) && !allowed)
-		retval = denied;
 end:
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&current->signal->wait_chldexit,&wait);
 	if (infop) {
 		if (retval > 0)
-		retval = 0;
+			retval = 0;
 		else {
 			/*
 			 * For a WNOHANG return, clear out all the fields
@@ -1681,10 +1579,12 @@ end:
 	return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t pid,
+asmlinkage long sys_waitid(int which, pid_t upid,
 			   struct siginfo __user *infop, int options,
 			   struct rusage __user *ru)
 {
+	struct pid *pid = NULL;
+	enum pid_type type;
 	long ret;
 
 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
@@ -1694,37 +1594,58 @@ asmlinkage long sys_waitid(int which, pid_t pid,
 
 	switch (which) {
 	case P_ALL:
-		pid = -1;
+		type = PIDTYPE_MAX;
 		break;
 	case P_PID:
-		if (pid <= 0)
+		type = PIDTYPE_PID;
+		if (upid <= 0)
 			return -EINVAL;
 		break;
 	case P_PGID:
-		if (pid <= 0)
+		type = PIDTYPE_PGID;
+		if (upid <= 0)
 			return -EINVAL;
-		pid = -pid;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	ret = do_wait(pid, options, infop, NULL, ru);
+	if (type < PIDTYPE_MAX)
+		pid = find_get_pid(upid);
+	ret = do_wait(type, pid, options, infop, NULL, ru);
+	put_pid(pid);
 
 	/* avoid REGPARM breakage on x86: */
 	prevent_tail_call(ret);
 	return ret;
 }
 
-asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
+asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
 			  int options, struct rusage __user *ru)
 {
+	struct pid *pid = NULL;
+	enum pid_type type;
 	long ret;
 
 	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
 			__WNOTHREAD|__WCLONE|__WALL))
 		return -EINVAL;
-	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);
+
+	if (upid == -1)
+		type = PIDTYPE_MAX;
+	else if (upid < 0) {
+		type = PIDTYPE_PGID;
+		pid = find_get_pid(-upid);
+	} else if (upid == 0) {
+		type = PIDTYPE_PGID;
+		pid = get_pid(task_pgrp(current));
+	} else /* upid > 0 */ {
+		type = PIDTYPE_PID;
+		pid = find_get_pid(upid);
+	}
+
+	ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
+	put_pid(pid);
 
 	/* avoid REGPARM breakage on x86: */
 	prevent_tail_call(ret);
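
For readers following the interface change in the sys_wait4()/sys_waitid() hunks above: the old signed-pid convention is replaced by an explicit (enum pid_type, struct pid *) pair. The following is a rough user-space sketch only, not kernel code; classify_waitpid_arg() and the printed "key" are invented for illustration, and the enum values simply mirror the kernel names used in the diff.

#include <stdio.h>

enum pid_type { PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, PIDTYPE_MAX };

/*
 * Hypothetical helper mirroring the mapping the patched sys_wait4()
 * applies to its pid_t argument before calling do_wait():
 *   upid == -1  -> wait for any child              (PIDTYPE_MAX, no lookup)
 *   upid  < -1  -> wait for process group -upid    (PIDTYPE_PGID)
 *   upid ==  0  -> wait for the caller's own group (PIDTYPE_PGID)
 *   upid  >  0  -> wait for the single process upid (PIDTYPE_PID)
 */
static enum pid_type classify_waitpid_arg(int upid, int *key)
{
	if (upid == -1) {
		*key = 0;		/* no struct pid: matches any child */
		return PIDTYPE_MAX;
	}
	if (upid < 0) {
		*key = -upid;		/* kernel does find_get_pid(-upid) */
		return PIDTYPE_PGID;
	}
	if (upid == 0) {
		*key = 0;		/* kernel does get_pid(task_pgrp(current)) */
		return PIDTYPE_PGID;
	}
	*key = upid;			/* kernel does find_get_pid(upid) */
	return PIDTYPE_PID;
}

int main(void)
{
	int samples[] = { -1, -42, 0, 1234 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int key;
		enum pid_type type = classify_waitpid_arg(samples[i], &key);
		printf("upid=%5d -> type=%d key=%d\n", samples[i], (int)type, key);
	}
	return 0;
}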
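The reworked eligible_child() also drops the old 0/1/2 convention: it now returns a positive value for a reapable candidate, zero to skip the child, and a negative security_task_wait() error that do_wait() only propagates when the child was named explicitly (PIDTYPE_PID), in which case tasklist_lock is dropped and the scan aborts. A minimal user-space model of that three-way decision (check_child() and its boolean inputs are hypothetical, for illustration only):

#include <stdbool.h>
#include <stdio.h>

enum { CHILD_SKIP = 0, CHILD_ELIGIBLE = 1 };

/* Returns >0 when the child can be reaped, 0 to skip it silently, and a
 * negative error only when the child was requested by explicit pid. */
static int check_child(bool pid_matches, bool clone_flag_ok, int security_err,
		       bool explicitly_requested)
{
	if (!pid_matches || !clone_flag_ok)
		return CHILD_SKIP;
	if (!security_err)
		return CHILD_ELIGIBLE;
	/* Security refused: an explicitly requested child propagates the
	 * error; any other child is just skipped, as in the hunk above. */
	return explicitly_requested ? security_err : CHILD_SKIP;
}

int main(void)
{
	printf("%d\n", check_child(true, true, 0, false));	/* 1: eligible */
	printf("%d\n", check_child(true, true, -13, false));	/* 0: skipped */
	printf("%d\n", check_child(true, true, -13, true));	/* -13: reported */
	return 0;
}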
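Finally, the rewritten wait_task_stopped() no longer juggles xchg() and tasklist_lock: it samples p->exit_code under ->siglock and clears it unless the caller passed WNOWAIT, so each stop is reported at most once. A sketch of that sample-and-clear pattern with a pthread mutex standing in for the siglock (names and the 0x137f status value are illustrative, assuming SIGSTOP's (signr << 8) | 0x7f encoding):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int stop_code;	/* stands in for p->exit_code while stopped */

/* Read the stop code under the lock; unless "noreap" (WNOWAIT) is set,
 * clear it so no other waiter can report the same stop event. */
static int consume_stop_code(int noreap)
{
	int code;

	pthread_mutex_lock(&siglock);
	code = stop_code;
	if (code && !noreap)
		stop_code = 0;
	pthread_mutex_unlock(&siglock);
	return code;		/* 0 means nothing to report */
}

int main(void)
{
	stop_code = 0x137f;				/* SIGSTOP-style status */
	printf("peek:  %#x\n", consume_stop_code(1));	/* WNOWAIT: left in place */
	printf("take:  %#x\n", consume_stop_code(0));	/* reported and cleared */
	printf("again: %#x\n", consume_stop_code(0));	/* nothing left */
	return 0;
}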