/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int __sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig - 1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
static int sig_ignored(struct task_struct *t, int sig)
{
	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	return __sig_ignored(t, sig);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_global_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  See the
 * usage sketch after unblock_all_signals() below. */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
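
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->deliver_anyway;	// nonzero: act on the signal after all
 *	}
 *
 *	block_all_signals(my_notifier, dev, &dev->watched_mask);
 *	... critical window ...
 *	unblock_all_signals();
 *
 * my_notifier, my_dev and its fields are made-up names for illustration.
 */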
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;
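
	/*
	 * Permission summary for what follows: the sender may signal the
	 * target if one of its uid/euid matches the target's uid/suid
	 * (each "a ^ b" below is an equality test, zero iff a == b), if it
	 * has CAP_KILL, or if the signal is SIGCONT and both tasks belong
	 * to the same session.
	 */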
	if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return -EPERM;

	return security_task_kill(t, info, sig, 0);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (p->signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			p->signal->flags = why | SIGNAL_STOP_CONTINUED;
			p->signal->group_stop_count = 0;
			p->signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags &= ~SIGNAL_STOP_DEQUEUED;
	}
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;

	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
		return 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or kill().
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return 1;
}
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
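
/*
 * The above can be enabled at boot with "print-fatal-signals=1" on the
 * kernel command line (and, where the sysctl is wired up, toggled at run
 * time via /proc/sys/kernel/print-fatal-signals).
 */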
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	ret = send_signal(sig, info, t, &t->pending);
	if (ret <= 0)
		return ret;

	if (!sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
	return 0;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (ret <= 0)
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
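
/*
 * For reference, the kill(2) cases handled below: pid > 0 signals that
 * single process; pid == 0 signals every process in the caller's process
 * group; pid == -1 signals every process the caller may signal except
 * init and itself; pid < -1 signals the process group -pid.
 */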
static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (p->pid > 1 && !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
	rcu_read_unlock();
	return ret;
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
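
/*
 * Usage sketch (simplified and hypothetical; loosely how the POSIX timer
 * code is expected to use this API, error handling omitted):
 *
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;			// reportable at timer_create() time
 *	q->info.si_signo = sig;
 *	q->info.si_code = SI_TIMER;
 *	...
 *	send_sigqueue(sig, q, task);		// firing cannot fail for lack of memory
 *	...
 *	sigqueue_free(q);			// on timer deletion
 */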
static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
		struct sigpending *pending)
{
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */

		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		return 0;
	}

	if (sig_ignored(t, sig))
		return 1;

	signalfd_notify(t, sig);
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);

	return 0;
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = -1;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags)))
		goto out_err;

	ret = do_send_sigqueue(sig, q, p, &p->pending);

	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);

	__group_complete_signal(sig, p);

	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is to switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	int killed = 0;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		killed = sigkill_pending(current);
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (!unlikely(killed) && may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&current->sighand->siglock);

	if (unlikely(current->signal->flags & SIGNAL_CLD_MASK)) {
		int why = (current->signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		current->signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&current->sighand->siglock);

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if (signr != SIGKILL) {
			signr = ptrace_signal(signr, info, regs, cookie);
			if (!signr)
				continue;
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (is_global_init(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/* It could be that __group_complete_signal() chose us to
	 * notify about a group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = 1;
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
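
/*
 * Usage sketch (hypothetical kernel-thread code, not part of this file):
 * block everything except SIGKILL for the current thread:
 *
 *	sigset_t all;
 *
 *	siginitsetinv(&all, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &all, NULL);
 */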
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
	} else {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (oset) {
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
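		/*
		 * The "+ (ts.tv_sec || ts.tv_nsec)" below rounds any
		 * nonzero timeout up by one jiffy, so a caller asking
		 * for any wait at all sleeps for at least one tick.
		 */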
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (__sig_ignored(t, sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}