char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;
-EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */
static LIST_HEAD(formats);
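/*
* Annotation: de_thread() runs on the exec path. It kills every other
* thread in the group and, when the caller is not the group leader,
* takes over the leader's PID so the exec'ing thread ends up as the
* single remaining thread of the group.
*/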
static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
- struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
+ struct sighand_struct *oldsighand = tsk->sighand;
spinlock_t *lock = &oldsighand->siglock;
struct task_struct *leader = NULL;
int count;
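/*
* Annotation: "count" is how many users of sig->count may legitimately
* remain once the others have exited: just us, plus the zombie group
* leader when we are not the leader ourselves.
*/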
- /*
- * If we don't share sighandlers, then we aren't sharing anything
- * and we can just re-use it all.
- */
- if (atomic_read(&oldsighand->count) <= 1) {
- exit_itimers(sig);
- return 0;
- }
-
- newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- if (!newsighand)
- return -ENOMEM;
-
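/*
* Annotation: the up-front pre-allocation of newsighand is dropped here;
* the allocation reappears further down, inside the only branch that
* still needs a private sighand_struct, so the common case no longer
* allocates at all.
*/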
if (thread_group_empty(tsk))
goto no_thread_group;
/*
* Kill all other threads in the thread group.
* We must hold tasklist_lock to call zap_other_threads.
*/
read_lock(&tasklist_lock);
spin_lock_irq(lock);
if (sig->flags & SIGNAL_GROUP_EXIT) {
/*
* Another group action in progress, just
* return so that the signal is processed.
*/
spin_unlock_irq(lock);
read_unlock(&tasklist_lock);
- kmem_cache_free(sighand_cachep, newsighand);
return -EAGAIN;
}
zap_other_threads(tsk);
read_unlock(&tasklist_lock);

/*
* Account for the thread group leader hanging around:
*/
count = 1;
if (!thread_group_leader(tsk)) {
count = 2;
/* the SIGALRM timer survives the exec; repoint it at the new leader */
sig->tsk = tsk;
spin_unlock_irq(lock);
if (hrtimer_cancel(&sig->real_timer))
hrtimer_restart(&sig->real_timer);
spin_lock_irq(lock);
}
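/*
* Annotation: ->group_exit_task/->notify_count are now set once, before
* the wait loop, instead of being rewritten on every pass; the wakeup
* side lives in exit_notify(), where exiting sub-threads compare
* sig->count against notify_count and wake us.  The matching clears
* move below, after the old leader has been dealt with.
*/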
+
+ sig->notify_count = count;
+ sig->group_exit_task = tsk;
while (atomic_read(&sig->count) > count) {
- sig->group_exit_task = tsk;
- sig->notify_count = count;
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock);
schedule();
spin_lock_irq(lock);
}
- sig->group_exit_task = NULL;
- sig->notify_count = 0;
spin_unlock_irq(lock);
/*
* At this point all other threads have exited, all we have
* to do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
if (!thread_group_leader(tsk)) {
- /*
- * Wait for the thread group leader to be a zombie.
- * It should already be zombie at this point, most
- * of the time.
- */
leader = tsk->group_leader;
- while (leader->exit_state != EXIT_ZOMBIE)
- yield();
+
+ sig->notify_count = -1;
+ for (;;) {
+ write_lock_irq(&tasklist_lock);
+ if (likely(leader->exit_state))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ write_unlock_irq(&tasklist_lock);
+ schedule();
+ }
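/*
* Annotation: the old yield() poll could livelock when the exec'ing
* thread runs at realtime priority and the dying leader needs the CPU
* to finish exiting.  The new loop sleeps in TASK_UNINTERRUPTIBLE, and
* notify_count == -1 tells the leader's exit_notify() to wake us.  Note
* that the loop breaks out with tasklist_lock still write-held, which
* is why the explicit write_lock_irq() below goes away.
*/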
/*
* The only record we have of the real-time age of a
* process, regardless of execs it's done, is start_time.
* All the past CPU time is accumulated in signal_struct
* from sister threads now dead.  But in this non-leader
* exec, nothing survives from the original leader thread,
* whose birth marks the true age of this process now.
* When we take on its identity by switching to its PID, we
* also take its birthdate (always earlier than our own).
*/
tsk->start_time = leader->start_time;
- write_lock_irq(&tasklist_lock);
-
BUG_ON(leader->tgid != tsk->tgid);
BUG_ON(tsk->pid == tsk->tgid);
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
* two threads with a switched PID, and release
* the former thread group leader:
*/
write_unlock_irq(&tasklist_lock);
}
+ sig->group_exit_task = NULL;
+ sig->notify_count = 0;
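/*
* Annotation: the clears are deferred to this point so the state stays
* visible while the old leader is dying; only now can no further exit
* go through the notification machinery and expect to find it.
*/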
/*
* There may be one thread left which is just exiting,
* but it's safe to stop telling the group to kill themselves.
*/
sig->flags = 0;

no_thread_group:
exit_itimers(sig);
if (leader)
release_task(leader);
- if (atomic_read(&oldsighand->count) == 1) {
- /*
- * Now that we nuked the rest of the thread group,
- * it turns out we are not sharing sighand any more either.
- * So we can just keep it.
- */
- kmem_cache_free(sighand_cachep, newsighand);
- } else {
+ if (atomic_read(&oldsighand->count) != 1) {
+ struct sighand_struct *newsighand;
/*
- * Move our state over to newsighand and switch it in.
+ * This ->sighand is shared with the CLONE_SIGHAND
+ * but not CLONE_THREAD task, switch to the new one.
*/
+ newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+ if (!newsighand)
+ return -ENOMEM;
+
atomic_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
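/*
* Annotation: only the handler table needs copying; pending signals
* live in the per-task and shared signal queues, not in sighand_struct,
* so the switch is just this memcpy plus the RCU pointer swap below.
*/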

write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
rcu_assign_pointer(tsk->sighand, newsighand);
spin_unlock(&oldsighand->siglock);
write_unlock_irq(&tasklist_lock);

__cleanup_sighand(oldsighand);
}

BUG_ON(!thread_group_leader(tsk));
return 0;
}
-EXPORT_SYMBOL_GPL(set_dumpable);
int get_dumpable(struct mm_struct *mm)
{