1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
32
33 /*
34  * SLAB caches for signal bits.
35  */
36
37 static kmem_cache_t *sigqueue_cachep;
38
39 /*
40  * In POSIX a signal is sent either to a specific thread (Linux task)
41  * or to the process as a whole (Linux thread group).  How the signal
42  * is sent determines whether it's to one thread or the whole group,
43  * which determines which signal mask(s) are involved in blocking it
44  * from being delivered until later.  When the signal is delivered,
45  * either it's caught or ignored by a user handler or it has a default
46  * effect that applies to the whole thread group (POSIX process).
47  *
48  * The possible effects an unblocked signal set to SIG_DFL can have are:
49  *   ignore     - Nothing Happens
50  *   terminate  - kill the process, i.e. all threads in the group,
51  *                similar to exit_group.  The group leader (only) reports
52  *                WIFSIGNALED status to its parent.
53  *   coredump   - write a core dump file describing all threads using
54  *                the same mm and then kill all those threads
55  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
56  *
57  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58  * Other signals when not blocked and set to SIG_DFL behave as follows.
59  * The job control signals also have other special effects.
60  *
61  *      +--------------------+------------------+
62  *      |  POSIX signal      |  default action  |
63  *      +--------------------+------------------+
64  *      |  SIGHUP            |  terminate       |
65  *      |  SIGINT            |  terminate       |
66  *      |  SIGQUIT           |  coredump        |
67  *      |  SIGILL            |  coredump        |
68  *      |  SIGTRAP           |  coredump        |
69  *      |  SIGABRT/SIGIOT    |  coredump        |
70  *      |  SIGBUS            |  coredump        |
71  *      |  SIGFPE            |  coredump        |
72  *      |  SIGKILL           |  terminate(+)    |
73  *      |  SIGUSR1           |  terminate       |
74  *      |  SIGSEGV           |  coredump        |
75  *      |  SIGUSR2           |  terminate       |
76  *      |  SIGPIPE           |  terminate       |
77  *      |  SIGALRM           |  terminate       |
78  *      |  SIGTERM           |  terminate       |
79  *      |  SIGCHLD           |  ignore          |
80  *      |  SIGCONT           |  ignore(*)       |
81  *      |  SIGSTOP           |  stop(*)(+)      |
82  *      |  SIGTSTP           |  stop(*)         |
83  *      |  SIGTTIN           |  stop(*)         |
84  *      |  SIGTTOU           |  stop(*)         |
85  *      |  SIGURG            |  ignore          |
86  *      |  SIGXCPU           |  coredump        |
87  *      |  SIGXFSZ           |  coredump        |
88  *      |  SIGVTALRM         |  terminate       |
89  *      |  SIGPROF           |  terminate       |
90  *      |  SIGPOLL/SIGIO     |  terminate       |
91  *      |  SIGSYS/SIGUNUSED  |  coredump        |
92  *      |  SIGSTKFLT         |  terminate       |
93  *      |  SIGWINCH          |  ignore          |
94  *      |  SIGPWR            |  terminate       |
95  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
96  *      +--------------------+------------------+
97  *      |  non-POSIX signal  |  default action  |
98  *      +--------------------+------------------+
99  *      |  SIGEMT            |  coredump        |
100  *      +--------------------+------------------+
101  *
102  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103  * (*) Special job control effects:
104  * When SIGCONT is sent, it resumes the process (all threads in the group)
105  * from TASK_STOPPED state and also clears any pending/queued stop signals
106  * (any of those marked with "stop(*)").  This happens regardless of blocking,
107  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
108  * any pending/queued SIGCONT signals; this happens regardless of blocking,
109  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110  * default action of stopping the process may happen later or never.
111  */
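/*
 * Illustrative only: a minimal user-space sketch (not part of this file,
 * wrapped in #if 0 so it is never built) of the job-control behaviour
 * described above.  SIGSTOP stops the whole thread group and cannot be
 * caught or ignored; SIGCONT resumes it; the parent observes both
 * transitions with waitpid().  Assumes only the standard POSIX wait/signal API.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status;
        pid_t child = fork();

        if (child == 0)                 /* child: just sleep until signalled */
                for (;;)
                        pause();

        kill(child, SIGSTOP);           /* stop(*)(+): always stops the group */
        waitpid(child, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("stopped by signal %d\n", WSTOPSIG(status));

        kill(child, SIGCONT);           /* clears pending stop signals, resumes */
        waitpid(child, &status, WCONTINUED);
        if (WIFCONTINUED(status))
                printf("continued\n");

        kill(child, SIGKILL);           /* terminate(+): always fatal */
        waitpid(child, &status, 0);
        return 0;
}
#endif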
112
113 #ifdef SIGEMT
114 #define M_SIGEMT        M(SIGEMT)
115 #else
116 #define M_SIGEMT        0
117 #endif
118
119 #if SIGRTMIN > BITS_PER_LONG
120 #define M(sig) (1ULL << ((sig)-1))
121 #else
122 #define M(sig) (1UL << ((sig)-1))
123 #endif
124 #define T(sig, mask) (M(sig) & (mask))
125
126 #define SIG_KERNEL_ONLY_MASK (\
127         M(SIGKILL)   |  M(SIGSTOP)                                   )
128
129 #define SIG_KERNEL_STOP_MASK (\
130         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
131
132 #define SIG_KERNEL_COREDUMP_MASK (\
133         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
134         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
135         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
136
137 #define SIG_KERNEL_IGNORE_MASK (\
138         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
139
140 #define sig_kernel_only(sig) \
141                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
142 #define sig_kernel_coredump(sig) \
143                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
144 #define sig_kernel_ignore(sig) \
145                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
146 #define sig_kernel_stop(sig) \
147                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
148
149 #define sig_user_defined(t, signr) \
150         (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
151          ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
152
153 #define sig_fatal(t, signr) \
154         (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
155          (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
156
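/*
 * Illustrative only (user-space sketch, never built): the EX_* names below
 * are made up and simply re-create the M()/T() idea from the macros above,
 * so the classification can be tried outside the kernel.  Signal N lives in
 * bit N-1 of the mask.
 */
#if 0
#include <signal.h>
#include <stdio.h>

#define EX_M(sig)        (1UL << ((sig) - 1))
#define EX_T(sig, mask)  (EX_M(sig) & (mask))

#define EX_STOP_MASK (EX_M(SIGSTOP) | EX_M(SIGTSTP) | EX_M(SIGTTIN) | EX_M(SIGTTOU))

int main(void)
{
        /* SIGTSTP is classified as a stop signal, SIGTERM is not. */
        printf("SIGTSTP: %d\n", EX_T(SIGTSTP, EX_STOP_MASK) != 0);
        printf("SIGTERM: %d\n", EX_T(SIGTERM, EX_STOP_MASK) != 0);
        return 0;
}
#endif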
157 static int sig_ignored(struct task_struct *t, int sig)
158 {
159         void __user * handler;
160
161         /*
162          * Tracers always want to know about signals..
163          */
164         if (t->ptrace & PT_PTRACED)
165                 return 0;
166
167         /*
168          * Blocked signals are never ignored, since the
169          * signal handler may change by the time it is
170          * unblocked.
171          */
172         if (sigismember(&t->blocked, sig))
173                 return 0;
174
175         /* Is it explicitly or implicitly ignored? */
176         handler = t->sighand->action[sig-1].sa.sa_handler;
177         return   handler == SIG_IGN ||
178                 (handler == SIG_DFL && sig_kernel_ignore(sig));
179 }
180
181 /*
182  * Re-calculate pending state from the set of locally pending
183  * signals, globally pending signals, and blocked signals.
184  */
185 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
186 {
187         unsigned long ready;
188         long i;
189
190         switch (_NSIG_WORDS) {
191         default:
192                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193                         ready |= signal->sig[i] &~ blocked->sig[i];
194                 break;
195
196         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
197                 ready |= signal->sig[2] &~ blocked->sig[2];
198                 ready |= signal->sig[1] &~ blocked->sig[1];
199                 ready |= signal->sig[0] &~ blocked->sig[0];
200                 break;
201
202         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
203                 ready |= signal->sig[0] &~ blocked->sig[0];
204                 break;
205
206         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
207         }
208         return ready != 0;
209 }
210
211 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
212
213 fastcall void recalc_sigpending_tsk(struct task_struct *t)
214 {
215         if (t->signal->group_stop_count > 0 ||
216             (freezing(t)) ||
217             PENDING(&t->pending, &t->blocked) ||
218             PENDING(&t->signal->shared_pending, &t->blocked))
219                 set_tsk_thread_flag(t, TIF_SIGPENDING);
220         else
221                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
222 }
223
224 void recalc_sigpending(void)
225 {
226         recalc_sigpending_tsk(current);
227 }
228
229 /* Given the mask, find the first available signal that should be serviced. */
230
231 static int
232 next_signal(struct sigpending *pending, sigset_t *mask)
233 {
234         unsigned long i, *s, *m, x;
235         int sig = 0;
236         
237         s = pending->signal.sig;
238         m = mask->sig;
239         switch (_NSIG_WORDS) {
240         default:
241                 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
242                         if ((x = *s &~ *m) != 0) {
243                                 sig = ffz(~x) + i*_NSIG_BPW + 1;
244                                 break;
245                         }
246                 break;
247
248         case 2: if ((x = s[0] &~ m[0]) != 0)
249                         sig = 1;
250                 else if ((x = s[1] &~ m[1]) != 0)
251                         sig = _NSIG_BPW + 1;
252                 else
253                         break;
254                 sig += ffz(~x);
255                 break;
256
257         case 1: if ((x = *s &~ *m) != 0)
258                         sig = ffz(~x) + 1;
259                 break;
260         }
261         
262         return sig;
263 }
264
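/*
 * Illustrative only (user-space sketch, never built): for a single word,
 * next_signal() above computes "lowest set bit of pending & ~blocked,
 * plus one".  __builtin_ctzl() (a GCC/Clang builtin) stands in for
 * ffz(~x) here; example_next_signal is a made-up name.
 */
#if 0
#include <stdio.h>

static int example_next_signal(unsigned long pending, unsigned long blocked)
{
        unsigned long x = pending & ~blocked;

        if (x == 0)
                return 0;                       /* nothing deliverable */
        return __builtin_ctzl(x) + 1;           /* bit i <=> signal i + 1 */
}

int main(void)
{
        /* SIGINT (2) and SIGTERM (15) pending, SIGINT blocked -> prints 15. */
        printf("%d\n", example_next_signal((1UL << 1) | (1UL << 14), 1UL << 1));
        return 0;
}
#endif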
265 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
266                                          int override_rlimit)
267 {
268         struct sigqueue *q = NULL;
269
270         atomic_inc(&t->user->sigpending);
271         if (override_rlimit ||
272             atomic_read(&t->user->sigpending) <=
273                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
274                 q = kmem_cache_alloc(sigqueue_cachep, flags);
275         if (unlikely(q == NULL)) {
276                 atomic_dec(&t->user->sigpending);
277         } else {
278                 INIT_LIST_HEAD(&q->list);
279                 q->flags = 0;
280                 q->user = get_uid(t->user);
281         }
282         return(q);
283 }
284
285 static inline void __sigqueue_free(struct sigqueue *q)
286 {
287         if (q->flags & SIGQUEUE_PREALLOC)
288                 return;
289         atomic_dec(&q->user->sigpending);
290         free_uid(q->user);
291         kmem_cache_free(sigqueue_cachep, q);
292 }
293
294 static void flush_sigqueue(struct sigpending *queue)
295 {
296         struct sigqueue *q;
297
298         sigemptyset(&queue->signal);
299         while (!list_empty(&queue->list)) {
300                 q = list_entry(queue->list.next, struct sigqueue , list);
301                 list_del_init(&q->list);
302                 __sigqueue_free(q);
303         }
304 }
305
306 /*
307  * Flush all pending signals for a task.
308  */
309
310 void
311 flush_signals(struct task_struct *t)
312 {
313         unsigned long flags;
314
315         spin_lock_irqsave(&t->sighand->siglock, flags);
316         clear_tsk_thread_flag(t,TIF_SIGPENDING);
317         flush_sigqueue(&t->pending);
318         flush_sigqueue(&t->signal->shared_pending);
319         spin_unlock_irqrestore(&t->sighand->siglock, flags);
320 }
321
322 /*
323  * This function expects the tasklist_lock write-locked.
324  */
325 void __exit_sighand(struct task_struct *tsk)
326 {
327         struct sighand_struct * sighand = tsk->sighand;
328
329         /* Ok, we're done with the signal handlers */
330         tsk->sighand = NULL;
331         if (atomic_dec_and_test(&sighand->count))
332                 kmem_cache_free(sighand_cachep, sighand);
333 }
334
335 void exit_sighand(struct task_struct *tsk)
336 {
337         write_lock_irq(&tasklist_lock);
338         __exit_sighand(tsk);
339         write_unlock_irq(&tasklist_lock);
340 }
341
342 /*
343  * This function expects the tasklist_lock write-locked.
344  */
345 void __exit_signal(struct task_struct *tsk)
346 {
347         struct signal_struct * sig = tsk->signal;
348         struct sighand_struct * sighand = tsk->sighand;
349
350         if (!sig)
351                 BUG();
352         if (!atomic_read(&sig->count))
353                 BUG();
354         spin_lock(&sighand->siglock);
355         posix_cpu_timers_exit(tsk);
356         if (atomic_dec_and_test(&sig->count)) {
357                 posix_cpu_timers_exit_group(tsk);
358                 if (tsk == sig->curr_target)
359                         sig->curr_target = next_thread(tsk);
360                 tsk->signal = NULL;
361                 spin_unlock(&sighand->siglock);
362                 flush_sigqueue(&sig->shared_pending);
363         } else {
364                 /*
365                  * If there is any task waiting for the group exit
366                  * then notify it:
367                  */
368                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
369                         wake_up_process(sig->group_exit_task);
370                         sig->group_exit_task = NULL;
371                 }
372                 if (tsk == sig->curr_target)
373                         sig->curr_target = next_thread(tsk);
374                 tsk->signal = NULL;
375                 /*
376                  * Accumulate here the counters for all threads but the
377                  * group leader as they die, so they can be added into
378                  * the process-wide totals when those are taken.
379                  * The group leader stays around as a zombie as long
380                  * as there are other threads.  When it gets reaped,
381                  * the exit.c code will add its counts into these totals.
382                  * We won't ever get here for the group leader, since it
383                  * will have been the last reference on the signal_struct.
384                  */
385                 sig->utime = cputime_add(sig->utime, tsk->utime);
386                 sig->stime = cputime_add(sig->stime, tsk->stime);
387                 sig->min_flt += tsk->min_flt;
388                 sig->maj_flt += tsk->maj_flt;
389                 sig->nvcsw += tsk->nvcsw;
390                 sig->nivcsw += tsk->nivcsw;
391                 sig->sched_time += tsk->sched_time;
392                 spin_unlock(&sighand->siglock);
393                 sig = NULL;     /* Marker for below.  */
394         }
395         clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
396         flush_sigqueue(&tsk->pending);
397         if (sig) {
398                 /*
399                  * We are cleaning up the signal_struct here.
400                  */
401                 exit_thread_group_keys(sig);
402                 kmem_cache_free(signal_cachep, sig);
403         }
404 }
405
406 void exit_signal(struct task_struct *tsk)
407 {
408         atomic_dec(&tsk->signal->live);
409
410         write_lock_irq(&tasklist_lock);
411         __exit_signal(tsk);
412         write_unlock_irq(&tasklist_lock);
413 }
414
415 /*
416  * Flush all handlers for a task.
417  */
418
419 void
420 flush_signal_handlers(struct task_struct *t, int force_default)
421 {
422         int i;
423         struct k_sigaction *ka = &t->sighand->action[0];
424         for (i = _NSIG ; i != 0 ; i--) {
425                 if (force_default || ka->sa.sa_handler != SIG_IGN)
426                         ka->sa.sa_handler = SIG_DFL;
427                 ka->sa.sa_flags = 0;
428                 sigemptyset(&ka->sa.sa_mask);
429                 ka++;
430         }
431 }
432
433
434 /* Notify the system that a driver wants to block all signals for this
435  * process, and wants to be notified if any signals at all were to be
436  * sent/acted upon.  If the notifier routine returns non-zero, then the
437  * signal will be acted upon after all.  If the notifier routine returns 0,
438  * then the signal will be blocked.  Only one block per process is
439  * allowed.  priv is a pointer to private data that the notifier routine
440  * can use to determine if the signal should be blocked or not.  */
441
442 void
443 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
444 {
445         unsigned long flags;
446
447         spin_lock_irqsave(&current->sighand->siglock, flags);
448         current->notifier_mask = mask;
449         current->notifier_data = priv;
450         current->notifier = notifier;
451         spin_unlock_irqrestore(&current->sighand->siglock, flags);
452 }
453
454 /* Notify the system that blocking has ended. */
455
456 void
457 unblock_all_signals(void)
458 {
459         unsigned long flags;
460
461         spin_lock_irqsave(&current->sighand->siglock, flags);
462         current->notifier = NULL;
463         current->notifier_data = NULL;
464         recalc_sigpending();
465         spin_unlock_irqrestore(&current->sighand->siglock, flags);
466 }
467
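/*
 * Hypothetical caller sketch (never built) for the two functions above,
 * based only on the comment and prototypes in this file.  A notifier that
 * returns 0 keeps the signal blocked; returning non-zero lets it be acted
 * upon.  struct my_dev, my_notifier and the my_*_critical helpers are
 * invented names used purely for illustration.
 */
#if 0
struct my_dev {
        int in_critical_section;
};

static int my_notifier(void *priv)
{
        struct my_dev *dev = priv;

        return !dev->in_critical_section;       /* deliver only when safe */
}

static void my_begin_critical(struct my_dev *dev)
{
        sigset_t all;

        sigfillset(&all);
        dev->in_critical_section = 1;
        block_all_signals(my_notifier, dev, &all);
}

static void my_end_critical(struct my_dev *dev)
{
        dev->in_critical_section = 0;
        unblock_all_signals();
}
#endif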
468 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
469 {
470         struct sigqueue *q, *first = NULL;
471         int still_pending = 0;
472
473         if (unlikely(!sigismember(&list->signal, sig)))
474                 return 0;
475
476         /*
477          * Collect the siginfo appropriate to this signal.  Check if
478          * there is another siginfo for the same signal.
479         */
480         list_for_each_entry(q, &list->list, list) {
481                 if (q->info.si_signo == sig) {
482                         if (first) {
483                                 still_pending = 1;
484                                 break;
485                         }
486                         first = q;
487                 }
488         }
489         if (first) {
490                 list_del_init(&first->list);
491                 copy_siginfo(info, &first->info);
492                 __sigqueue_free(first);
493                 if (!still_pending)
494                         sigdelset(&list->signal, sig);
495         } else {
496
497                 /* Ok, it wasn't in the queue.  This must be
498                    a fast-pathed signal or we must have been
499                    out of queue space.  So zero out the info.
500                  */
501                 sigdelset(&list->signal, sig);
502                 info->si_signo = sig;
503                 info->si_errno = 0;
504                 info->si_code = 0;
505                 info->si_pid = 0;
506                 info->si_uid = 0;
507         }
508         return 1;
509 }
510
511 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
512                         siginfo_t *info)
513 {
514         int sig = 0;
515
516         /* SIGKILL must have priority, otherwise it is quite easy
517          * to create an unkillable process, sending sig < SIGKILL
518          * to self */
519         if (unlikely(sigismember(&pending->signal, SIGKILL))) {
520                 if (!sigismember(mask, SIGKILL))
521                         sig = SIGKILL;
522         }
523
524         if (likely(!sig))
525                 sig = next_signal(pending, mask);
526         if (sig) {
527                 if (current->notifier) {
528                         if (sigismember(current->notifier_mask, sig)) {
529                                 if (!(current->notifier)(current->notifier_data)) {
530                                         clear_thread_flag(TIF_SIGPENDING);
531                                         return 0;
532                                 }
533                         }
534                 }
535
536                 if (!collect_signal(sig, pending, info))
537                         sig = 0;
538                                 
539         }
540         recalc_sigpending();
541
542         return sig;
543 }
544
545 /*
546  * Dequeue a signal and return the element to the caller, which is 
547  * expected to free it.
548  *
549  * All callers have to hold the siglock.
550  */
551 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
552 {
553         int signr = __dequeue_signal(&tsk->pending, mask, info);
554         if (!signr)
555                 signr = __dequeue_signal(&tsk->signal->shared_pending,
556                                          mask, info);
557         if (signr && unlikely(sig_kernel_stop(signr))) {
558                 /*
559                  * Set a marker that we have dequeued a stop signal.  Our
560                  * caller might release the siglock and then the pending
561                  * stop signal it is about to process is no longer in the
562                  * pending bitmasks, but must still be cleared by a SIGCONT
563                  * (and overruled by a SIGKILL).  So those cases clear this
564                  * shared flag after we've set it.  Note that this flag may
565                  * remain set after the signal we return is ignored or
566                  * handled.  That doesn't matter because its only purpose
567                  * is to alert stop-signal processing code when another
568                  * processor has come along and cleared the flag.
569                  */
570                 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
571                         tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
572         }
573         if (signr &&
574             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
575             info->si_sys_private) {
576                 /*
577                  * Release the siglock to ensure proper locking order
578                  * of timer locks outside of siglocks.  Note, we leave
579                  * irqs disabled here, since the posix-timers code is
580                  * about to disable them again anyway.
581                  */
582                 spin_unlock(&tsk->sighand->siglock);
583                 do_schedule_next_timer(info);
584                 spin_lock(&tsk->sighand->siglock);
585         }
586         return signr;
587 }
588
589 /*
590  * Tell a process that it has a new active signal..
591  *
592  * NOTE! we rely on the previous spin_lock to
593  * lock interrupts for us! We can only be called with
594  * "siglock" held, and the local interrupt must
595  * have been disabled when that got acquired!
596  *
597  * No need to set need_resched since signal event passing
598  * goes through ->blocked
599  */
600 void signal_wake_up(struct task_struct *t, int resume)
601 {
602         unsigned int mask;
603
604         set_tsk_thread_flag(t, TIF_SIGPENDING);
605
606         /*
607          * For SIGKILL, we want to wake it up in the stopped/traced case.
608          * We don't check t->state here because there is a race with it
609  * executing on another processor and just now entering stopped state.
610          * By using wake_up_state, we ensure the process will wake up and
611          * handle its death signal.
612          */
613         mask = TASK_INTERRUPTIBLE;
614         if (resume)
615                 mask |= TASK_STOPPED | TASK_TRACED;
616         if (!wake_up_state(t, mask))
617                 kick_process(t);
618 }
619
620 /*
621  * Remove signals in mask from the pending set and queue.
622  * Returns 1 if any signals were found.
623  *
624  * All callers must be holding the siglock.
625  */
626 static int rm_from_queue(unsigned long mask, struct sigpending *s)
627 {
628         struct sigqueue *q, *n;
629
630         if (!sigtestsetmask(&s->signal, mask))
631                 return 0;
632
633         sigdelsetmask(&s->signal, mask);
634         list_for_each_entry_safe(q, n, &s->list, list) {
635                 if (q->info.si_signo < SIGRTMIN &&
636                     (mask & sigmask(q->info.si_signo))) {
637                         list_del_init(&q->list);
638                         __sigqueue_free(q);
639                 }
640         }
641         return 1;
642 }
643
644 /*
645  * Bad permissions for sending the signal
646  */
647 static int check_kill_permission(int sig, struct siginfo *info,
648                                  struct task_struct *t)
649 {
650         int error = -EINVAL;
651         if (!valid_signal(sig))
652                 return error;
653         error = -EPERM;
654         if ((!info || ((unsigned long)info != 1 &&
655                         (unsigned long)info != 2 && SI_FROMUSER(info)))
656             && ((sig != SIGCONT) ||
657                 (current->signal->session != t->signal->session))
658             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
659             && (current->uid ^ t->suid) && (current->uid ^ t->uid)
660             && !capable(CAP_KILL))
661                 return error;
662
663         error = security_task_kill(t, info, sig);
664         if (!error)
665                 audit_signal_info(sig, t); /* Let audit system see the signal */
666         return error;
667 }
668
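/*
 * Illustrative only (user-space sketch, never built): in the uid check
 * above, (a ^ b) is non-zero exactly when a != b, so the chain of XORs
 * means "none of the sender's uid/euid matches the target's uid/suid".
 * uid_allows_kill is a made-up name expressing the same rule with ==.
 */
#if 0
#include <stdio.h>

static int uid_allows_kill(unsigned int sender_uid, unsigned int sender_euid,
                           unsigned int target_uid, unsigned int target_suid)
{
        return sender_euid == target_suid || sender_euid == target_uid ||
               sender_uid  == target_suid || sender_uid  == target_uid;
}

int main(void)
{
        printf("%d\n", uid_allows_kill(1000, 1000, 1000, 1000)); /* same user: 1 */
        printf("%d\n", uid_allows_kill(1000, 1000, 0, 0));       /* root target: 0 */
        return 0;
}
#endif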
669 /* forward decl */
670 static void do_notify_parent_cldstop(struct task_struct *tsk,
671                                      int to_self,
672                                      int why);
673
674 /*
675  * Handle magic process-wide effects of stop/continue signals.
676  * Unlike the signal actions, these happen immediately at signal-generation
677  * time regardless of blocking, ignoring, or handling.  This does the
678  * actual continuing for SIGCONT, but not the actual stopping for stop
679  * signals.  The process stop is done as a signal action for SIG_DFL.
680  */
681 static void handle_stop_signal(int sig, struct task_struct *p)
682 {
683         struct task_struct *t;
684
685         if (p->signal->flags & SIGNAL_GROUP_EXIT)
686                 /*
687                  * The process is in the middle of dying already.
688                  */
689                 return;
690
691         if (sig_kernel_stop(sig)) {
692                 /*
693                  * This is a stop signal.  Remove SIGCONT from all queues.
694                  */
695                 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
696                 t = p;
697                 do {
698                         rm_from_queue(sigmask(SIGCONT), &t->pending);
699                         t = next_thread(t);
700                 } while (t != p);
701         } else if (sig == SIGCONT) {
702                 /*
703                  * Remove all stop signals from all queues,
704                  * and wake all threads.
705                  */
706                 if (unlikely(p->signal->group_stop_count > 0)) {
707                         /*
708                          * There was a group stop in progress.  We'll
709                          * pretend it finished before we got here.  We are
710                          * obliged to report it to the parent: if the
711                          * SIGSTOP happened "after" this SIGCONT, then it
712                          * would have cleared this pending SIGCONT.  If it
713                          * happened "before" this SIGCONT, then the parent
714                          * got the SIGCHLD about the stop finishing before
715                          * the continue happened.  We do the notification
716                          * now, and it's as if the stop had finished and
717                          * the SIGCHLD was pending on entry to this kill.
718                          */
719                         p->signal->group_stop_count = 0;
720                         p->signal->flags = SIGNAL_STOP_CONTINUED;
721                         spin_unlock(&p->sighand->siglock);
722                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
723                         spin_lock(&p->sighand->siglock);
724                 }
725                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
726                 t = p;
727                 do {
728                         unsigned int state;
729                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
730                         
731                         /*
732                          * If there is a handler for SIGCONT, we must make
733                          * sure that no thread returns to user mode before
734                          * we post the signal, in case it was the only
735                          * thread eligible to run the signal handler--then
736                          * it must not do anything between resuming and
737                          * running the handler.  With the TIF_SIGPENDING
738                          * flag set, the thread will pause and acquire the
739                          * siglock that we hold now and until we've queued
740                          * the pending signal. 
741                          *
742                          * Wake up the stopped thread _after_ setting
743                          * TIF_SIGPENDING
744                          */
745                         state = TASK_STOPPED;
746                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
747                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
748                                 state |= TASK_INTERRUPTIBLE;
749                         }
750                         wake_up_state(t, state);
751
752                         t = next_thread(t);
753                 } while (t != p);
754
755                 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
756                         /*
757                          * We were in fact stopped, and are now continued.
758                          * Notify the parent with CLD_CONTINUED.
759                          */
760                         p->signal->flags = SIGNAL_STOP_CONTINUED;
761                         p->signal->group_exit_code = 0;
762                         spin_unlock(&p->sighand->siglock);
763                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
764                         spin_lock(&p->sighand->siglock);
765                 } else {
766                         /*
767                          * We are not stopped, but there could be a stop
768                          * signal in the middle of being processed after
769                          * being removed from the queue.  Clear that too.
770                          */
771                         p->signal->flags = 0;
772                 }
773         } else if (sig == SIGKILL) {
774                 /*
775                  * Make sure that any pending stop signal already dequeued
776                  * is undone by the wakeup for SIGKILL.
777                  */
778                 p->signal->flags = 0;
779         }
780 }
781
782 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
783                         struct sigpending *signals)
784 {
785         struct sigqueue * q = NULL;
786         int ret = 0;
787
788         /*
789          * fast-pathed signals for kernel-internal things like SIGSTOP
790          * or SIGKILL.
791          */
792         if ((unsigned long)info == 2)
793                 goto out_set;
794
795         /* Real-time signals must be queued if sent by sigqueue, or
796            some other real-time mechanism.  It is implementation
797            defined whether kill() does so.  We attempt to do so, on
798            the principle of least surprise, but since kill is not
799            allowed to fail with EAGAIN when low on memory we just
800            make sure at least one signal gets delivered and don't
801            pass on the info struct.  */
802
803         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
804                                              ((unsigned long) info < 2 ||
805                                               info->si_code >= 0)));
806         if (q) {
807                 list_add_tail(&q->list, &signals->list);
808                 switch ((unsigned long) info) {
809                 case 0:
810                         q->info.si_signo = sig;
811                         q->info.si_errno = 0;
812                         q->info.si_code = SI_USER;
813                         q->info.si_pid = current->pid;
814                         q->info.si_uid = current->uid;
815                         break;
816                 case 1:
817                         q->info.si_signo = sig;
818                         q->info.si_errno = 0;
819                         q->info.si_code = SI_KERNEL;
820                         q->info.si_pid = 0;
821                         q->info.si_uid = 0;
822                         break;
823                 default:
824                         copy_siginfo(&q->info, info);
825                         break;
826                 }
827         } else {
828                 if (sig >= SIGRTMIN && info && (unsigned long)info != 1
829                    && info->si_code != SI_USER)
830                 /*
831                  * Queue overflow, abort.  We may abort if the signal was rt
832                  * and sent by user using something other than kill().
833                  */
834                         return -EAGAIN;
835                 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
836                         /*
837                          * Set up a return to indicate that we dropped 
838                          * the signal.
839                          */
840                         ret = info->si_sys_private;
841         }
842
843 out_set:
844         sigaddset(&signals->signal, sig);
845         return ret;
846 }
847
848 #define LEGACY_QUEUE(sigptr, sig) \
849         (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
850
851
852 static int
853 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
854 {
855         int ret = 0;
856
857         if (!irqs_disabled())
858                 BUG();
859         assert_spin_locked(&t->sighand->siglock);
860
861         if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
862                 /*
863                  * Set up a return to indicate that we dropped the signal.
864                  */
865                 ret = info->si_sys_private;
866
867         /* Short-circuit ignored signals.  */
868         if (sig_ignored(t, sig))
869                 goto out;
870
871         /* Support queueing exactly one non-rt signal, so that we
872            can get more detailed information about the cause of
873            the signal. */
874         if (LEGACY_QUEUE(&t->pending, sig))
875                 goto out;
876
877         ret = send_signal(sig, info, t, &t->pending);
878         if (!ret && !sigismember(&t->blocked, sig))
879                 signal_wake_up(t, sig == SIGKILL);
880 out:
881         return ret;
882 }
883
884 /*
885  * Force a signal that the process can't ignore: if necessary
886  * we unblock the signal and change any SIG_IGN to SIG_DFL.
887  */
888
889 int
890 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
891 {
892         unsigned long int flags;
893         int ret;
894
895         spin_lock_irqsave(&t->sighand->siglock, flags);
896         if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
897                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
898                 sigdelset(&t->blocked, sig);
899                 recalc_sigpending_tsk(t);
900         }
901         ret = specific_send_sig_info(sig, info, t);
902         spin_unlock_irqrestore(&t->sighand->siglock, flags);
903
904         return ret;
905 }
906
907 void
908 force_sig_specific(int sig, struct task_struct *t)
909 {
910         unsigned long int flags;
911
912         spin_lock_irqsave(&t->sighand->siglock, flags);
913         if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
914                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
915         sigdelset(&t->blocked, sig);
916         recalc_sigpending_tsk(t);
917         specific_send_sig_info(sig, (void *)2, t);
918         spin_unlock_irqrestore(&t->sighand->siglock, flags);
919 }
920
921 /*
922  * Test if P wants to take SIG.  After we've checked all threads with this,
923  * it's equivalent to finding no threads not blocking SIG.  Any threads not
924  * blocking SIG were ruled out because they are not running and already
925  * have pending signals.  Such threads will dequeue from the shared queue
926  * as soon as they're available, so putting the signal on the shared queue
927  * will be equivalent to sending it to one such thread.
928  */
929 static inline int wants_signal(int sig, struct task_struct *p)
930 {
931         if (sigismember(&p->blocked, sig))
932                 return 0;
933         if (p->flags & PF_EXITING)
934                 return 0;
935         if (sig == SIGKILL)
936                 return 1;
937         if (p->state & (TASK_STOPPED | TASK_TRACED))
938                 return 0;
939         return task_curr(p) || !signal_pending(p);
940 }
941
942 static void
943 __group_complete_signal(int sig, struct task_struct *p)
944 {
945         struct task_struct *t;
946
947         /*
948          * Now find a thread we can wake up to take the signal off the queue.
949          *
950          * If the main thread wants the signal, it gets first crack.
951          * Probably the least surprising to the average bear.
952          */
953         if (wants_signal(sig, p))
954                 t = p;
955         else if (thread_group_empty(p))
956                 /*
957                  * There is just one thread and it does not need to be woken.
958                  * It will dequeue unblocked signals before it runs again.
959                  */
960                 return;
961         else {
962                 /*
963                  * Otherwise try to find a suitable thread.
964                  */
965                 t = p->signal->curr_target;
966                 if (t == NULL)
967                         /* restart balancing at this thread */
968                         t = p->signal->curr_target = p;
969                 BUG_ON(t->tgid != p->tgid);
970
971                 while (!wants_signal(sig, t)) {
972                         t = next_thread(t);
973                         if (t == p->signal->curr_target)
974                                 /*
975                                  * No thread needs to be woken.
976                                  * Any eligible threads will see
977                                  * the signal in the queue soon.
978                                  */
979                                 return;
980                 }
981                 p->signal->curr_target = t;
982         }
983
984         /*
985          * Found a killable thread.  If the signal will be fatal,
986          * then start taking the whole group down immediately.
987          */
988         if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
989             !sigismember(&t->real_blocked, sig) &&
990             (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
991                 /*
992                  * This signal will be fatal to the whole group.
993                  */
994                 if (!sig_kernel_coredump(sig)) {
995                         /*
996                          * Start a group exit and wake everybody up.
997                          * This way we don't have other threads
998                          * running and doing things after a slower
999                          * thread has the fatal signal pending.
1000                          */
1001                         p->signal->flags = SIGNAL_GROUP_EXIT;
1002                         p->signal->group_exit_code = sig;
1003                         p->signal->group_stop_count = 0;
1004                         t = p;
1005                         do {
1006                                 sigaddset(&t->pending.signal, SIGKILL);
1007                                 signal_wake_up(t, 1);
1008                                 t = next_thread(t);
1009                         } while (t != p);
1010                         return;
1011                 }
1012
1013                 /*
1014                  * There will be a core dump.  We make all threads other
1015                  * than the chosen one go into a group stop so that nothing
1016                  * happens until it gets scheduled, takes the signal off
1017                  * the shared queue, and does the core dump.  This is a
1018                  * little more complicated than strictly necessary, but it
1019                  * keeps the signal state that winds up in the core dump
1020                  * unchanged from the death state, e.g. which thread had
1021                  * the core-dump signal unblocked.
1022                  */
1023                 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1024                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1025                 p->signal->group_stop_count = 0;
1026                 p->signal->group_exit_task = t;
1027                 t = p;
1028                 do {
1029                         p->signal->group_stop_count++;
1030                         signal_wake_up(t, 0);
1031                         t = next_thread(t);
1032                 } while (t != p);
1033                 wake_up_process(p->signal->group_exit_task);
1034                 return;
1035         }
1036
1037         /*
1038          * The signal is already in the shared-pending queue.
1039          * Tell the chosen thread to wake up and dequeue it.
1040          */
1041         signal_wake_up(t, sig == SIGKILL);
1042         return;
1043 }
1044
1045 int
1046 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1047 {
1048         int ret = 0;
1049
1050         assert_spin_locked(&p->sighand->siglock);
1051         handle_stop_signal(sig, p);
1052
1053         if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
1054                 /*
1055                  * Set up a return to indicate that we dropped the signal.
1056                  */
1057                 ret = info->si_sys_private;
1058
1059         /* Short-circuit ignored signals.  */
1060         if (sig_ignored(p, sig))
1061                 return ret;
1062
1063         if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1064                 /* This is a non-RT signal and we already have one queued.  */
1065                 return ret;
1066
1067         /*
1068          * Put this signal on the shared-pending queue, or fail with EAGAIN.
1069          * We always use the shared queue for process-wide signals,
1070          * to avoid several races.
1071          */
1072         ret = send_signal(sig, info, p, &p->signal->shared_pending);
1073         if (unlikely(ret))
1074                 return ret;
1075
1076         __group_complete_signal(sig, p);
1077         return 0;
1078 }
1079
1080 /*
1081  * Nuke all other threads in the group.
1082  */
1083 void zap_other_threads(struct task_struct *p)
1084 {
1085         struct task_struct *t;
1086
1087         p->signal->flags = SIGNAL_GROUP_EXIT;
1088         p->signal->group_stop_count = 0;
1089
1090         if (thread_group_empty(p))
1091                 return;
1092
1093         for (t = next_thread(p); t != p; t = next_thread(t)) {
1094                 /*
1095                  * Don't bother with already dead threads
1096                  */
1097                 if (t->exit_state)
1098                         continue;
1099
1100                 /*
1101                  * We don't want to notify the parent, since we are
1102                  * killed as part of a thread group due to another
1103                  * thread doing an execve() or similar. So set the
1104                  * exit signal to -1 to allow immediate reaping of
1105                  * the process.  But don't detach the thread group
1106                  * leader.
1107                  */
1108                 if (t != p->group_leader)
1109                         t->exit_signal = -1;
1110
1111                 sigaddset(&t->pending.signal, SIGKILL);
1112                 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1113                 signal_wake_up(t, 1);
1114         }
1115 }
1116
1117 /*
1118  * Must be called with the tasklist_lock held for reading!
1119  */
1120 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1121 {
1122         unsigned long flags;
1123         int ret;
1124
1125         ret = check_kill_permission(sig, info, p);
1126         if (!ret && sig && p->sighand) {
1127                 spin_lock_irqsave(&p->sighand->siglock, flags);
1128                 ret = __group_send_sig_info(sig, info, p);
1129                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1130         }
1131
1132         return ret;
1133 }
1134
1135 /*
1136  * kill_pg_info() sends a signal to a process group: this is what the tty
1137  * control characters do (^C, ^Z etc)
1138  */
1139
1140 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1141 {
1142         struct task_struct *p = NULL;
1143         int retval, success;
1144
1145         if (pgrp <= 0)
1146                 return -EINVAL;
1147
1148         success = 0;
1149         retval = -ESRCH;
1150         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1151                 int err = group_send_sig_info(sig, info, p);
1152                 success |= !err;
1153                 retval = err;
1154         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1155         return success ? 0 : retval;
1156 }
1157
1158 int
1159 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1160 {
1161         int retval;
1162
1163         read_lock(&tasklist_lock);
1164         retval = __kill_pg_info(sig, info, pgrp);
1165         read_unlock(&tasklist_lock);
1166
1167         return retval;
1168 }
1169
1170 int
1171 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1172 {
1173         int error;
1174         struct task_struct *p;
1175
1176         read_lock(&tasklist_lock);
1177         p = find_task_by_pid(pid);
1178         error = -ESRCH;
1179         if (p)
1180                 error = group_send_sig_info(sig, info, p);
1181         read_unlock(&tasklist_lock);
1182         return error;
1183 }
1184
1185 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1186 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1187                       uid_t uid, uid_t euid)
1188 {
1189         int ret = -EINVAL;
1190         struct task_struct *p;
1191
1192         if (!valid_signal(sig))
1193                 return ret;
1194
1195         read_lock(&tasklist_lock);
1196         p = find_task_by_pid(pid);
1197         if (!p) {
1198                 ret = -ESRCH;
1199                 goto out_unlock;
1200         }
1201         if ((!info || ((unsigned long)info != 1 &&
1202                         (unsigned long)info != 2 && SI_FROMUSER(info)))
1203             && (euid != p->suid) && (euid != p->uid)
1204             && (uid != p->suid) && (uid != p->uid)) {
1205                 ret = -EPERM;
1206                 goto out_unlock;
1207         }
1208         if (sig && p->sighand) {
1209                 unsigned long flags;
1210                 spin_lock_irqsave(&p->sighand->siglock, flags);
1211                 ret = __group_send_sig_info(sig, info, p);
1212                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1213         }
1214 out_unlock:
1215         read_unlock(&tasklist_lock);
1216         return ret;
1217 }
1218 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1219
1220 /*
1221  * kill_something_info() interprets pid in interesting ways just like kill(2).
1222  *
1223  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1224  * is probably wrong.  Should make it like BSD or SYSV.
1225  */
1226
1227 static int kill_something_info(int sig, struct siginfo *info, int pid)
1228 {
1229         if (!pid) {
1230                 return kill_pg_info(sig, info, process_group(current));
1231         } else if (pid == -1) {
1232                 int retval = 0, count = 0;
1233                 struct task_struct * p;
1234
1235                 read_lock(&tasklist_lock);
1236                 for_each_process(p) {
1237                         if (p->pid > 1 && p->tgid != current->tgid) {
1238                                 int err = group_send_sig_info(sig, info, p);
1239                                 ++count;
1240                                 if (err != -EPERM)
1241                                         retval = err;
1242                         }
1243                 }
1244                 read_unlock(&tasklist_lock);
1245                 return count ? retval : -ESRCH;
1246         } else if (pid < 0) {
1247                 return kill_pg_info(sig, info, -pid);
1248         } else {
1249                 return kill_proc_info(sig, info, pid);
1250         }
1251 }
1252
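/*
 * Illustrative only (user-space sketch, never built): the pid conventions
 * handled by kill_something_info() above, as seen through kill(2).
 * Signal 0 is used so nothing is actually delivered.
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
        pid_t pg = getpgrp();

        kill(getpid(), 0);      /* pid > 0: exactly one process */
        kill(0, 0);             /* pid == 0: the caller's process group */
        kill(-pg, 0);           /* pid < -1: process group |pid| */
        /* kill(-1, 0) would mean "everything we may signal"; as the
         * comment above notes, the exact semantics are debatable. */
        return 0;
}
#endif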
1253 /*
1254  * These are for backward compatibility with the rest of the kernel source.
1255  */
1256
1257 /*
1258  * These two are the most common entry points.  They send a signal
1259  * just to the specific thread.
1260  */
1261 int
1262 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1263 {
1264         int ret;
1265         unsigned long flags;
1266
1267         /*
1268          * Make sure legacy kernel users don't send in bad values
1269          * (normal paths check this in check_kill_permission).
1270          */
1271         if (!valid_signal(sig))
1272                 return -EINVAL;
1273
1274         /*
1275          * We need the tasklist lock even for the specific
1276          * thread case (when we don't need to follow the group
1277          * lists) in order to avoid races with "p->sighand"
1278          * going away or changing from under us.
1279          */
1280         read_lock(&tasklist_lock);  
1281         spin_lock_irqsave(&p->sighand->siglock, flags);
1282         ret = specific_send_sig_info(sig, info, p);
1283         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1284         read_unlock(&tasklist_lock);
1285         return ret;
1286 }
1287
1288 int
1289 send_sig(int sig, struct task_struct *p, int priv)
1290 {
1291         return send_sig_info(sig, (void*)(long)(priv != 0), p);
1292 }
1293
1294 /*
1295  * This is the entry point for "process-wide" signals.
1296  * They will go to an appropriate thread in the thread group.
1297  */
1298 int
1299 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1300 {
1301         int ret;
1302         read_lock(&tasklist_lock);
1303         ret = group_send_sig_info(sig, info, p);
1304         read_unlock(&tasklist_lock);
1305         return ret;
1306 }
1307
1308 void
1309 force_sig(int sig, struct task_struct *p)
1310 {
1311         force_sig_info(sig, (void*)1L, p);
1312 }
1313
1314 /*
1315  * When things go south during signal handling, we
1316  * will force a SIGSEGV. And if the signal that caused
1317  * the problem was already a SIGSEGV, we'll want to
1318  * make sure we don't even try to deliver the signal..
1319  */
1320 int
1321 force_sigsegv(int sig, struct task_struct *p)
1322 {
1323         if (sig == SIGSEGV) {
1324                 unsigned long flags;
1325                 spin_lock_irqsave(&p->sighand->siglock, flags);
1326                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1327                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1328         }
1329         force_sig(SIGSEGV, p);
1330         return 0;
1331 }
1332
1333 int
1334 kill_pg(pid_t pgrp, int sig, int priv)
1335 {
1336         return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1337 }
1338
1339 int
1340 kill_proc(pid_t pid, int sig, int priv)
1341 {
1342         return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1343 }
1344
1345 /*
1346  * These functions support sending signals using preallocated sigqueue
1347  * structures.  This is needed "because realtime applications cannot
1348  * afford to lose notifications of asynchronous events, like timer
1349  * expirations or I/O completions".  In the case of Posix Timers 
1350  * we allocate the sigqueue structure from the timer_create.  If this
1351  * allocation fails we are able to report the failure to the application
1352  * with an EAGAIN error.
1353  */
1354  
1355 struct sigqueue *sigqueue_alloc(void)
1356 {
1357         struct sigqueue *q;
1358
1359         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1360                 q->flags |= SIGQUEUE_PREALLOC;
1361         return(q);
1362 }
1363
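/*
 * Illustrative only (user-space sketch, never built): the POSIX timer path
 * that relies on this preallocation.  As the comment above says, the
 * sigqueue is allocated at timer_create() time, so resource exhaustion is
 * reported as EAGAIN up front rather than the expiry notification being
 * silently lost later.  Assumes the standard librt timer API.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sigevent sev = { 0 };
        timer_t timerid;

        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGRTMIN;     /* real-time signal, queued reliably */

        if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
                perror("timer_create"); /* EAGAIN if resources are short */
                return 1;
        }
        timer_delete(timerid);
        return 0;
}
#endif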
1364 void sigqueue_free(struct sigqueue *q)
1365 {
1366         unsigned long flags;
1367         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1368         /*
1369          * If the signal is still pending remove it from the
1370          * pending queue.
1371          */
1372         if (unlikely(!list_empty(&q->list))) {
1373                 spinlock_t *lock = &current->sighand->siglock;
1374                 read_lock(&tasklist_lock);
1375                 spin_lock_irqsave(lock, flags);
1376                 if (!list_empty(&q->list))
1377                         list_del_init(&q->list);
1378                 spin_unlock_irqrestore(lock, flags);
1379                 read_unlock(&tasklist_lock);
1380         }
1381         q->flags &= ~SIGQUEUE_PREALLOC;
1382         __sigqueue_free(q);
1383 }
1384
1385 int
1386 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1387 {
1388         unsigned long flags;
1389         int ret = 0;
1390
1391         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1392         read_lock(&tasklist_lock);
1393
1394         if (unlikely(p->flags & PF_EXITING)) {
1395                 ret = -1;
1396                 goto out_err;
1397         }
1398
1399         spin_lock_irqsave(&p->sighand->siglock, flags);
1400
1401         if (unlikely(!list_empty(&q->list))) {
1402                 /*
1403                  * If an SI_TIMER entry is already queued, just increment
1404                  * the overrun count.
1405                  */
1406                 if (q->info.si_code != SI_TIMER)
1407                         BUG();
1408                 q->info.si_overrun++;
1409                 goto out;
1410         }
1411         /* Short-circuit ignored signals.  */
1412         if (sig_ignored(p, sig)) {
1413                 ret = 1;
1414                 goto out;
1415         }
1416
1417         list_add_tail(&q->list, &p->pending.list);
1418         sigaddset(&p->pending.signal, sig);
1419         if (!sigismember(&p->blocked, sig))
1420                 signal_wake_up(p, sig == SIGKILL);
1421
1422 out:
1423         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1424 out_err:
1425         read_unlock(&tasklist_lock);
1426
1427         return ret;
1428 }
1429
1430 int
1431 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1432 {
1433         unsigned long flags;
1434         int ret = 0;
1435
1436         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1437         read_lock(&tasklist_lock);
1438         spin_lock_irqsave(&p->sighand->siglock, flags);
1439         handle_stop_signal(sig, p);
1440
1441         /* Short-circuit ignored signals.  */
1442         if (sig_ignored(p, sig)) {
1443                 ret = 1;
1444                 goto out;
1445         }
1446
1447         if (unlikely(!list_empty(&q->list))) {
1448                 /*
1449                  * If an SI_TIMER entry is already queued, just increment
1450                  * the overrun count.  Other uses should not try to
1451                  * send the signal multiple times.
1452                  */
1453                 if (q->info.si_code != SI_TIMER)
1454                         BUG();
1455                 q->info.si_overrun++;
1456                 goto out;
1457         } 
1458
1459         /*
1460          * Put this signal on the shared-pending queue.
1461          * We always use the shared queue for process-wide signals,
1462          * to avoid several races.
1463          */
1464         list_add_tail(&q->list, &p->signal->shared_pending.list);
1465         sigaddset(&p->signal->shared_pending.signal, sig);
1466
1467         __group_complete_signal(sig, p);
1468 out:
1469         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1470         read_unlock(&tasklist_lock);
1471         return(ret);
1472 }
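
/*
 * Editorial illustration (not part of the original file): a minimal
 * user-space sketch of what the preallocated sigqueue and the
 * si_overrun accounting above look like from the caller's side.  A
 * POSIX timer created with SIGEV_SIGNAL owns exactly one queued
 * siginfo; expirations that arrive while it is still queued only bump
 * the overrun count.  Error checking omitted; link with -lrt.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev;
	struct itimerspec its;
	sigset_t set;
	siginfo_t info;
	timer_t timerid;

	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep it queued, not delivered */

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;	/* sigqueue preallocated at timer_create() */
	sev.sigev_signo = SIGRTMIN;
	timer_create(CLOCK_REALTIME, &sev, &timerid);

	memset(&its, 0, sizeof(its));
	its.it_value.tv_nsec = 1000000;		/* fire after 1 ms, then every 1 ms */
	its.it_interval.tv_nsec = 1000000;
	timer_settime(timerid, 0, &its, NULL);

	sleep(1);				/* let expirations pile up on the single entry */
	sigwaitinfo(&set, &info);
	printf("signo=%d overruns=%d\n", info.si_signo, timer_getoverrun(timerid));
	return 0;
}
#endif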
1473
1474 /*
1475  * Wake up any threads in the parent blocked in wait* syscalls.
1476  */
1477 static inline void __wake_up_parent(struct task_struct *p,
1478                                     struct task_struct *parent)
1479 {
1480         wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1481 }
1482
1483 /*
1484  * Let a parent know about the death of a child.
1485  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1486  */
1487
1488 void do_notify_parent(struct task_struct *tsk, int sig)
1489 {
1490         struct siginfo info;
1491         unsigned long flags;
1492         struct sighand_struct *psig;
1493
1494         BUG_ON(sig == -1);
1495
1496         /* do_notify_parent_cldstop should have been called instead.  */
1497         BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1498
1499         BUG_ON(!tsk->ptrace &&
1500                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1501
1502         info.si_signo = sig;
1503         info.si_errno = 0;
1504         info.si_pid = tsk->pid;
1505         info.si_uid = tsk->uid;
1506
1507         /* FIXME: find out whether or not this is supposed to be c*time. */
1508         info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1509                                                        tsk->signal->utime));
1510         info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1511                                                        tsk->signal->stime));
1512
1513         info.si_status = tsk->exit_code & 0x7f;
1514         if (tsk->exit_code & 0x80)
1515                 info.si_code = CLD_DUMPED;
1516         else if (tsk->exit_code & 0x7f)
1517                 info.si_code = CLD_KILLED;
1518         else {
1519                 info.si_code = CLD_EXITED;
1520                 info.si_status = tsk->exit_code >> 8;
1521         }
1522
1523         psig = tsk->parent->sighand;
1524         spin_lock_irqsave(&psig->siglock, flags);
1525         if (sig == SIGCHLD &&
1526             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1527              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1528                 /*
1529                  * We are exiting and our parent doesn't care.  POSIX.1
1530                  * defines special semantics for setting SIGCHLD to SIG_IGN
1531                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1532                  * automatically and not left for our parent's wait4 call.
1533                  * Rather than having the parent do it as a magic kind of
1534                  * signal handler, we just set this to tell do_exit that we
1535                  * can be cleaned up without becoming a zombie.  Note that
1536                  * we still call __wake_up_parent in this case, because a
1537                  * blocked sys_wait4 might now return -ECHILD.
1538                  *
1539                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1540                  * is implementation-defined: we do (if you don't want
1541                  * it, just use SIG_IGN instead).
1542                  */
1543                 tsk->exit_signal = -1;
1544                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1545                         sig = 0;
1546         }
1547         if (valid_signal(sig) && sig > 0)
1548                 __group_send_sig_info(sig, &info, tsk->parent);
1549         __wake_up_parent(tsk, tsk->parent);
1550         spin_unlock_irqrestore(&psig->siglock, flags);
1551 }
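
/*
 * Editorial illustration (not part of the original file): the SIG_IGN /
 * SA_NOCLDWAIT auto-reap behaviour described above, seen from user
 * space.  With SIGCHLD ignored the child never becomes a zombie, and a
 * blocked wait() fails with ECHILD once all children are gone.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children are reaped automatically */
	if (fork() == 0)
		_exit(0);		/* child: exit immediately */
	if (wait(NULL) == -1 && errno == ECHILD)
		printf("no zombie left to wait for\n");
	return 0;
}
#endif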
1552
1553 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1554 {
1555         struct siginfo info;
1556         unsigned long flags;
1557         struct task_struct *parent;
1558         struct sighand_struct *sighand;
1559
1560         if (to_self)
1561                 parent = tsk->parent;
1562         else {
1563                 tsk = tsk->group_leader;
1564                 parent = tsk->real_parent;
1565         }
1566
1567         info.si_signo = SIGCHLD;
1568         info.si_errno = 0;
1569         info.si_pid = tsk->pid;
1570         info.si_uid = tsk->uid;
1571
1572         /* FIXME: find out whether or not this is supposed to be c*time. */
1573         info.si_utime = cputime_to_jiffies(tsk->utime);
1574         info.si_stime = cputime_to_jiffies(tsk->stime);
1575
1576         info.si_code = why;
1577         switch (why) {
1578         case CLD_CONTINUED:
1579                 info.si_status = SIGCONT;
1580                 break;
1581         case CLD_STOPPED:
1582                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1583                 break;
1584         case CLD_TRAPPED:
1585                 info.si_status = tsk->exit_code & 0x7f;
1586                 break;
1587         default:
1588                 BUG();
1589         }
1590
1591         sighand = parent->sighand;
1592         spin_lock_irqsave(&sighand->siglock, flags);
1593         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1594             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1595                 __group_send_sig_info(SIGCHLD, &info, parent);
1596         /*
1597          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1598          */
1599         __wake_up_parent(tsk, parent);
1600         spin_unlock_irqrestore(&sighand->siglock, flags);
1601 }
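
/*
 * Editorial illustration (not part of the original file): how the
 * CLD_STOPPED / CLD_CONTINUED notifications generated above appear to a
 * parent that installed a SIGCHLD handler with SA_SIGINFO and without
 * SA_NOCLDSTOP.  Synchronisation via sleep() is deliberately crude.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t last_code;

static void on_chld(int sig, siginfo_t *si, void *ctx)
{
	last_code = si->si_code;	/* CLD_STOPPED, CLD_CONTINUED, CLD_EXITED, ... */
}

int main(void)
{
	struct sigaction sa;
	pid_t pid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_chld;
	sa.sa_flags = SA_SIGINFO;	/* no SA_NOCLDSTOP: stop/continue are reported */
	sigaction(SIGCHLD, &sa, NULL);

	if ((pid = fork()) == 0)
		for (;;)
			pause();

	kill(pid, SIGSTOP);
	sleep(1);
	printf("stop reported:     %d\n", last_code == CLD_STOPPED);
	kill(pid, SIGCONT);
	sleep(1);
	printf("continue reported: %d\n", last_code == CLD_CONTINUED);
	kill(pid, SIGKILL);
	return 0;
}
#endif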
1602
1603 /*
1604  * This must be called with current->sighand->siglock held.
1605  *
1606  * This should be the path for all ptrace stops.
1607  * We always set current->last_siginfo while stopped here.
1608  * That makes it a way to test a stopped process for
1609  * being ptrace-stopped vs being job-control-stopped.
1610  *
1611  * If we actually decide not to stop at all because the tracer is gone,
1612  * we leave nostop_code in current->exit_code.
1613  */
1614 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1615 {
1616         /*
1617          * If there is a group stop in progress,
1618          * we must participate in the bookkeeping.
1619          */
1620         if (current->signal->group_stop_count > 0)
1621                 --current->signal->group_stop_count;
1622
1623         current->last_siginfo = info;
1624         current->exit_code = exit_code;
1625
1626         /* Let the debugger run.  */
1627         set_current_state(TASK_TRACED);
1628         spin_unlock_irq(&current->sighand->siglock);
1629         read_lock(&tasklist_lock);
1630         if (likely(current->ptrace & PT_PTRACED) &&
1631             likely(current->parent != current->real_parent ||
1632                    !(current->ptrace & PT_ATTACHED)) &&
1633             (likely(current->parent->signal != current->signal) ||
1634              !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1635                 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1636                 read_unlock(&tasklist_lock);
1637                 schedule();
1638         } else {
1639                 /*
1640                  * By the time we got the lock, our tracer went away.
1641                  * Don't stop here.
1642                  */
1643                 read_unlock(&tasklist_lock);
1644                 set_current_state(TASK_RUNNING);
1645                 current->exit_code = nostop_code;
1646         }
1647
1648         /*
1649          * We are back.  Now reacquire the siglock before touching
1650          * last_siginfo, so that we are sure to have synchronized with
1651          * any signal-sending on another CPU that wants to examine it.
1652          */
1653         spin_lock_irq(&current->sighand->siglock);
1654         current->last_siginfo = NULL;
1655
1656         /*
1657          * Queued signals ignored us while we were stopped for tracing.
1658          * So check for any that we should take before resuming user mode.
1659          */
1660         recalc_sigpending();
1661 }
1662
1663 void ptrace_notify(int exit_code)
1664 {
1665         siginfo_t info;
1666
1667         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1668
1669         memset(&info, 0, sizeof info);
1670         info.si_signo = SIGTRAP;
1671         info.si_code = exit_code;
1672         info.si_pid = current->pid;
1673         info.si_uid = current->uid;
1674
1675         /* Let the debugger run.  */
1676         spin_lock_irq(&current->sighand->siglock);
1677         ptrace_stop(exit_code, 0, &info);
1678         spin_unlock_irq(&current->sighand->siglock);
1679 }
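
/*
 * Editorial illustration (not part of the original file): the tracer's
 * view of a ptrace_notify() stop.  On most architectures of this era
 * the syscall-tracing hook reports the stop through ptrace_notify(),
 * which the tracer observes as WIFSTOPPED with WSTOPSIG == SIGTRAP.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* hand control to the tracer */
		write(1, "hi\n", 3);		/* the next system call will be traced */
		_exit(0);
	}
	waitpid(pid, &status, 0);		/* initial SIGSTOP stop */
	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
	waitpid(pid, &status, 0);		/* syscall-entry stop */
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
		printf("syscall stop reported as SIGTRAP\n");
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, &status, 0);		/* child exits */
	return 0;
}
#endif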
1680
1681 static void
1682 finish_stop(int stop_count)
1683 {
1684         int to_self;
1685
1686         /*
1687          * If there are no other threads in the group, or if there is
1688          * a group stop in progress and we are the last to stop,
1689          * report to the parent.  When ptraced, every thread reports itself.
1690          */
1691         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1692                 to_self = 1;
1693         else if (stop_count == 0)
1694                 to_self = 0;
1695         else
1696                 goto out;
1697
1698         read_lock(&tasklist_lock);
1699         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1700         read_unlock(&tasklist_lock);
1701
1702 out:
1703         schedule();
1704         /*
1705          * Now we don't run again until continued.
1706          */
1707         current->exit_code = 0;
1708 }
1709
1710 /*
1711  * This performs the stopping for SIGSTOP and other stop signals.
1712  * We have to stop all threads in the thread group.
1713  * Returns nonzero if we've actually stopped and released the siglock.
1714  * Returns zero if we didn't stop and still hold the siglock.
1715  */
1716 static int
1717 do_signal_stop(int signr)
1718 {
1719         struct signal_struct *sig = current->signal;
1720         struct sighand_struct *sighand = current->sighand;
1721         int stop_count = -1;
1722
1723         if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1724                 return 0;
1725
1726         if (sig->group_stop_count > 0) {
1727                 /*
1728                  * There is a group stop in progress.  We don't need to
1729                  * start another one.
1730                  */
1731                 signr = sig->group_exit_code;
1732                 stop_count = --sig->group_stop_count;
1733                 current->exit_code = signr;
1734                 set_current_state(TASK_STOPPED);
1735                 if (stop_count == 0)
1736                         sig->flags = SIGNAL_STOP_STOPPED;
1737                 spin_unlock_irq(&sighand->siglock);
1738         }
1739         else if (thread_group_empty(current)) {
1740                 /*
1741                  * Lock must be held through transition to stopped state.
1742                  */
1743                 current->exit_code = current->signal->group_exit_code = signr;
1744                 set_current_state(TASK_STOPPED);
1745                 sig->flags = SIGNAL_STOP_STOPPED;
1746                 spin_unlock_irq(&sighand->siglock);
1747         }
1748         else {
1749                 /*
1750                  * There is no group stop already in progress.
1751                  * We must initiate one now, but that requires
1752                  * dropping siglock to get both the tasklist lock
1753                  * and siglock again in the proper order.  Note that
1754                  * this allows an intervening SIGCONT to be posted.
1755                  * We need to check for that and bail out if necessary.
1756                  */
1757                 struct task_struct *t;
1758
1759                 spin_unlock_irq(&sighand->siglock);
1760
1761                 /* signals can be posted during this window */
1762
1763                 read_lock(&tasklist_lock);
1764                 spin_lock_irq(&sighand->siglock);
1765
1766                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1767                         /*
1768                          * Another stop or continue happened while we
1769                          * didn't have the lock.  We can just swallow this
1770                          * signal now.  If we raced with a SIGCONT, that
1771                          * should have just cleared it now.  If we raced
1772                          * with another processor delivering a stop signal,
1773                          * then the SIGCONT that wakes us up should clear it.
1774                          */
1775                         read_unlock(&tasklist_lock);
1776                         return 0;
1777                 }
1778
1779                 if (sig->group_stop_count == 0) {
1780                         sig->group_exit_code = signr;
1781                         stop_count = 0;
1782                         for (t = next_thread(current); t != current;
1783                              t = next_thread(t))
1784                                 /*
1785                                  * Setting state to TASK_STOPPED for a group
1786                                  * stop is always done with the siglock held,
1787                                  * so this check has no races.
1788                                  */
1789                                 if (!t->exit_state &&
1790                                     !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1791                                         stop_count++;
1792                                         signal_wake_up(t, 0);
1793                                 }
1794                         sig->group_stop_count = stop_count;
1795                 }
1796                 else {
1797                         /* A race with another thread while unlocked.  */
1798                         signr = sig->group_exit_code;
1799                         stop_count = --sig->group_stop_count;
1800                 }
1801
1802                 current->exit_code = signr;
1803                 set_current_state(TASK_STOPPED);
1804                 if (stop_count == 0)
1805                         sig->flags = SIGNAL_STOP_STOPPED;
1806
1807                 spin_unlock_irq(&sighand->siglock);
1808                 read_unlock(&tasklist_lock);
1809         }
1810
1811         finish_stop(stop_count);
1812         return 1;
1813 }
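
/*
 * Editorial illustration (not part of the original file): the effect of
 * the group stop above as the parent sees it through the wait status.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		for (;;)
			pause();

	kill(pid, SIGTSTP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
#endif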
1814
1815 /*
1816  * Do appropriate magic when group_stop_count > 0.
1817  * We return nonzero if we stopped, after releasing the siglock.
1818  * We return zero if we still hold the siglock and should look
1819  * for another signal without checking group_stop_count again.
1820  */
1821 static inline int handle_group_stop(void)
1822 {
1823         int stop_count;
1824
1825         if (current->signal->group_exit_task == current) {
1826                 /*
1827                  * Group stop is so we can do a core dump.
1828                  * We are the initiating thread, so get on with it.
1829                  */
1830                 current->signal->group_exit_task = NULL;
1831                 return 0;
1832         }
1833
1834         if (current->signal->flags & SIGNAL_GROUP_EXIT)
1835                 /*
1836                  * Group stop is so another thread can do a core dump,
1837                  * or else we are racing against a death signal.
1838                  * Just punt the stop so we can get the next signal.
1839                  */
1840                 return 0;
1841
1842         /*
1843          * There is a group stop in progress.  We stop
1844          * without any associated signal being in our queue.
1845          */
1846         stop_count = --current->signal->group_stop_count;
1847         if (stop_count == 0)
1848                 current->signal->flags = SIGNAL_STOP_STOPPED;
1849         current->exit_code = current->signal->group_exit_code;
1850         set_current_state(TASK_STOPPED);
1851         spin_unlock_irq(&current->sighand->siglock);
1852         finish_stop(stop_count);
1853         return 1;
1854 }
1855
1856 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1857                           struct pt_regs *regs, void *cookie)
1858 {
1859         sigset_t *mask = &current->blocked;
1860         int signr = 0;
1861
1862 relock:
1863         spin_lock_irq(&current->sighand->siglock);
1864         for (;;) {
1865                 struct k_sigaction *ka;
1866
1867                 if (unlikely(current->signal->group_stop_count > 0) &&
1868                     handle_group_stop())
1869                         goto relock;
1870
1871                 signr = dequeue_signal(current, mask, info);
1872
1873                 if (!signr)
1874                         break; /* will return 0 */
1875
1876                 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1877                         ptrace_signal_deliver(regs, cookie);
1878
1879                         /* Let the debugger run.  */
1880                         ptrace_stop(signr, signr, info);
1881
1882                         /* We're back.  Did the debugger cancel the sig?  */
1883                         signr = current->exit_code;
1884                         if (signr == 0)
1885                                 continue;
1886
1887                         current->exit_code = 0;
1888
1889                         /* Update the siginfo structure if the signal has
1890                            changed.  If the debugger wanted something
1891                            specific in the siginfo structure then it should
1892                            have updated *info via PTRACE_SETSIGINFO.  */
1893                         if (signr != info->si_signo) {
1894                                 info->si_signo = signr;
1895                                 info->si_errno = 0;
1896                                 info->si_code = SI_USER;
1897                                 info->si_pid = current->parent->pid;
1898                                 info->si_uid = current->parent->uid;
1899                         }
1900
1901                         /* If the (new) signal is now blocked, requeue it.  */
1902                         if (sigismember(&current->blocked, signr)) {
1903                                 specific_send_sig_info(signr, info, current);
1904                                 continue;
1905                         }
1906                 }
1907
1908                 ka = &current->sighand->action[signr-1];
1909                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1910                         continue;
1911                 if (ka->sa.sa_handler != SIG_DFL) {
1912                         /* Run the handler.  */
1913                         *return_ka = *ka;
1914
1915                         if (ka->sa.sa_flags & SA_ONESHOT)
1916                                 ka->sa.sa_handler = SIG_DFL;
1917
1918                         break; /* will return non-zero "signr" value */
1919                 }
1920
1921                 /*
1922                  * Now we are doing the default action for this signal.
1923                  */
1924                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1925                         continue;
1926
1927                 /* Init gets no signals it doesn't want.  */
1928                 if (current->pid == 1)
1929                         continue;
1930
1931                 if (sig_kernel_stop(signr)) {
1932                         /*
1933                          * The default action is to stop all threads in
1934                          * the thread group.  The job control signals
1935                          * do nothing in an orphaned pgrp, but SIGSTOP
1936                          * always works.  Note that siglock needs to be
1937                          * dropped during the call to is_orphaned_pgrp()
1938                          * because of lock ordering with tasklist_lock.
1939                          * This allows an intervening SIGCONT to be posted.
1940                          * We need to check for that and bail out if necessary.
1941                          */
1942                         if (signr != SIGSTOP) {
1943                                 spin_unlock_irq(&current->sighand->siglock);
1944
1945                                 /* signals can be posted during this window */
1946
1947                                 if (is_orphaned_pgrp(process_group(current)))
1948                                         goto relock;
1949
1950                                 spin_lock_irq(&current->sighand->siglock);
1951                         }
1952
1953                         if (likely(do_signal_stop(signr))) {
1954                                 /* It released the siglock.  */
1955                                 goto relock;
1956                         }
1957
1958                         /*
1959                          * We didn't actually stop, due to a race
1960                          * with SIGCONT or something like that.
1961                          */
1962                         continue;
1963                 }
1964
1965                 spin_unlock_irq(&current->sighand->siglock);
1966
1967                 /*
1968                  * Anything else is fatal, maybe with a core dump.
1969                  */
1970                 current->flags |= PF_SIGNALED;
1971                 if (sig_kernel_coredump(signr)) {
1972                         /*
1973                          * If it was able to dump core, this kills all
1974                          * other threads in the group and synchronizes with
1975                          * their demise.  If we lost the race with another
1976                          * thread getting here, it set group_exit_code
1977                          * first and our do_group_exit call below will use
1978                          * that value and ignore the one we pass it.
1979                          */
1980                         do_coredump((long)signr, signr, regs);
1981                 }
1982
1983                 /*
1984                  * Death signals, no core dump.
1985                  */
1986                 do_group_exit(signr);
1987                 /* NOTREACHED */
1988         }
1989         spin_unlock_irq(&current->sighand->siglock);
1990         return signr;
1991 }
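
/*
 * Editorial illustration (not part of the original file): the "did the
 * debugger cancel the sig?" branch above, driven from a tracer.
 * Resuming with sig 0 discards the signal; resuming with a signal
 * number forwards (or replaces) it.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		for (;;)
			pause();
	}
	waitpid(pid, &status, 0);			/* initial SIGSTOP stop */
	ptrace(PTRACE_CONT, pid, NULL, NULL);

	kill(pid, SIGUSR1);				/* tracee stops; tracer is told first */
	waitpid(pid, &status, 0);
	printf("tracee stopped with signal %d\n", WSTOPSIG(status));
	ptrace(PTRACE_CONT, pid, NULL, (void *)0);	/* sig 0: the SIGUSR1 is cancelled */

	kill(pid, SIGTERM);
	waitpid(pid, &status, 0);
	ptrace(PTRACE_CONT, pid, NULL, (void *)SIGTERM);	/* forward it unchanged */
	waitpid(pid, &status, 0);
	printf("tracee killed by signal %d\n", WTERMSIG(status));
	return 0;
}
#endif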
1992
1993 EXPORT_SYMBOL(recalc_sigpending);
1994 EXPORT_SYMBOL_GPL(dequeue_signal);
1995 EXPORT_SYMBOL(flush_signals);
1996 EXPORT_SYMBOL(force_sig);
1997 EXPORT_SYMBOL(kill_pg);
1998 EXPORT_SYMBOL(kill_proc);
1999 EXPORT_SYMBOL(ptrace_notify);
2000 EXPORT_SYMBOL(send_sig);
2001 EXPORT_SYMBOL(send_sig_info);
2002 EXPORT_SYMBOL(sigprocmask);
2003 EXPORT_SYMBOL(block_all_signals);
2004 EXPORT_SYMBOL(unblock_all_signals);
2005
2006
2007 /*
2008  * System call entry points.
2009  */
2010
2011 asmlinkage long sys_restart_syscall(void)
2012 {
2013         struct restart_block *restart = &current_thread_info()->restart_block;
2014         return restart->fn(restart);
2015 }
2016
2017 long do_no_restart_syscall(struct restart_block *param)
2018 {
2019         return -EINTR;
2020 }
2021
2022 /*
2023  * We don't need to get the kernel lock - this is all local to this
2024  * particular thread.  (And that's good, because this is _heavily_
2025  * used by various programs.)
2026  */
2027
2028 /*
2029  * This is also useful for kernel threads that want to temporarily
2030  * (or permanently) block certain signals.
2031  *
2032  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2033  * interface happily blocks "unblockable" signals like SIGKILL
2034  * and friends.
2035  */
2036 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2037 {
2038         int error;
2039         sigset_t old_block;
2040
2041         spin_lock_irq(&current->sighand->siglock);
2042         old_block = current->blocked;
2043         error = 0;
2044         switch (how) {
2045         case SIG_BLOCK:
2046                 sigorsets(&current->blocked, &current->blocked, set);
2047                 break;
2048         case SIG_UNBLOCK:
2049                 signandsets(&current->blocked, &current->blocked, set);
2050                 break;
2051         case SIG_SETMASK:
2052                 current->blocked = *set;
2053                 break;
2054         default:
2055                 error = -EINVAL;
2056         }
2057         recalc_sigpending();
2058         spin_unlock_irq(&current->sighand->siglock);
2059         if (oldset)
2060                 *oldset = old_block;
2061         return error;
2062 }
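
/*
 * Editorial sketch (assumption, not from the original file): because the
 * in-kernel sigprocmask() above does not filter the mask, a kernel
 * thread that wants to ignore everything can do, roughly:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *	flush_signals(current);
 *
 * The user-visible syscall paths below always strip SIGKILL and SIGSTOP
 * from the requested set before calling it.
 */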
2063
2064 asmlinkage long
2065 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2066 {
2067         int error = -EINVAL;
2068         sigset_t old_set, new_set;
2069
2070         /* XXX: Don't preclude handling different sized sigset_t's.  */
2071         if (sigsetsize != sizeof(sigset_t))
2072                 goto out;
2073
2074         if (set) {
2075                 error = -EFAULT;
2076                 if (copy_from_user(&new_set, set, sizeof(*set)))
2077                         goto out;
2078                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2079
2080                 error = sigprocmask(how, &new_set, &old_set);
2081                 if (error)
2082                         goto out;
2083                 if (oset)
2084                         goto set_old;
2085         } else if (oset) {
2086                 spin_lock_irq(&current->sighand->siglock);
2087                 old_set = current->blocked;
2088                 spin_unlock_irq(&current->sighand->siglock);
2089
2090         set_old:
2091                 error = -EFAULT;
2092                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2093                         goto out;
2094         }
2095         error = 0;
2096 out:
2097         return error;
2098 }
2099
2100 long do_sigpending(void __user *set, unsigned long sigsetsize)
2101 {
2102         long error = -EINVAL;
2103         sigset_t pending;
2104
2105         if (sigsetsize > sizeof(sigset_t))
2106                 goto out;
2107
2108         spin_lock_irq(&current->sighand->siglock);
2109         sigorsets(&pending, &current->pending.signal,
2110                   &current->signal->shared_pending.signal);
2111         spin_unlock_irq(&current->sighand->siglock);
2112
2113         /* Outside the lock because only this thread touches it.  */
2114         sigandsets(&pending, &current->blocked, &pending);
2115
2116         error = -EFAULT;
2117         if (!copy_to_user(set, &pending, sigsetsize))
2118                 error = 0;
2119
2120 out:
2121         return error;
2122 }       
2123
2124 asmlinkage long
2125 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2126 {
2127         return do_sigpending(set, sigsetsize);
2128 }
2129
2130 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2131
2132 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2133 {
2134         int err;
2135
2136         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2137                 return -EFAULT;
2138         if (from->si_code < 0)
2139                 return __copy_to_user(to, from, sizeof(siginfo_t))
2140                         ? -EFAULT : 0;
2141         /*
2142          * If you change the siginfo_t structure, please be sure
2143          * this code is fixed accordingly.
2144          * It should never copy any pad contained in the structure
2145          * to avoid security leaks, but must copy the generic
2146          * 3 ints plus the relevant union member.
2147          */
2148         err = __put_user(from->si_signo, &to->si_signo);
2149         err |= __put_user(from->si_errno, &to->si_errno);
2150         err |= __put_user((short)from->si_code, &to->si_code);
2151         switch (from->si_code & __SI_MASK) {
2152         case __SI_KILL:
2153                 err |= __put_user(from->si_pid, &to->si_pid);
2154                 err |= __put_user(from->si_uid, &to->si_uid);
2155                 break;
2156         case __SI_TIMER:
2157                 err |= __put_user(from->si_tid, &to->si_tid);
2158                 err |= __put_user(from->si_overrun, &to->si_overrun);
2159                 err |= __put_user(from->si_ptr, &to->si_ptr);
2160                 break;
2161         case __SI_POLL:
2162                 err |= __put_user(from->si_band, &to->si_band);
2163                 err |= __put_user(from->si_fd, &to->si_fd);
2164                 break;
2165         case __SI_FAULT:
2166                 err |= __put_user(from->si_addr, &to->si_addr);
2167 #ifdef __ARCH_SI_TRAPNO
2168                 err |= __put_user(from->si_trapno, &to->si_trapno);
2169 #endif
2170                 break;
2171         case __SI_CHLD:
2172                 err |= __put_user(from->si_pid, &to->si_pid);
2173                 err |= __put_user(from->si_uid, &to->si_uid);
2174                 err |= __put_user(from->si_status, &to->si_status);
2175                 err |= __put_user(from->si_utime, &to->si_utime);
2176                 err |= __put_user(from->si_stime, &to->si_stime);
2177                 break;
2178         case __SI_RT: /* This is not generated by the kernel as of now. */
2179         case __SI_MESGQ: /* But this is */
2180                 err |= __put_user(from->si_pid, &to->si_pid);
2181                 err |= __put_user(from->si_uid, &to->si_uid);
2182                 err |= __put_user(from->si_ptr, &to->si_ptr);
2183                 break;
2184         default: /* this is just in case for now ... */
2185                 err |= __put_user(from->si_pid, &to->si_pid);
2186                 err |= __put_user(from->si_uid, &to->si_uid);
2187                 break;
2188         }
2189         return err;
2190 }
2191
2192 #endif
2193
2194 asmlinkage long
2195 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2196                     siginfo_t __user *uinfo,
2197                     const struct timespec __user *uts,
2198                     size_t sigsetsize)
2199 {
2200         int ret, sig;
2201         sigset_t these;
2202         struct timespec ts;
2203         siginfo_t info;
2204         long timeout = 0;
2205
2206         /* XXX: Don't preclude handling different sized sigset_t's.  */
2207         if (sigsetsize != sizeof(sigset_t))
2208                 return -EINVAL;
2209
2210         if (copy_from_user(&these, uthese, sizeof(these)))
2211                 return -EFAULT;
2212                 
2213         /*
2214          * Invert the set of allowed signals to get those we
2215          * want to block.
2216          */
2217         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2218         signotset(&these);
2219
2220         if (uts) {
2221                 if (copy_from_user(&ts, uts, sizeof(ts)))
2222                         return -EFAULT;
2223                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2224                     || ts.tv_sec < 0)
2225                         return -EINVAL;
2226         }
2227
2228         spin_lock_irq(&current->sighand->siglock);
2229         sig = dequeue_signal(current, &these, &info);
2230         if (!sig) {
2231                 timeout = MAX_SCHEDULE_TIMEOUT;
2232                 if (uts)
2233                         timeout = (timespec_to_jiffies(&ts)
2234                                    + (ts.tv_sec || ts.tv_nsec));
2235
2236                 if (timeout) {
2237                         /* None ready -- temporarily unblock those we're
2238                          * interested in while we are sleeping so that we'll
2239                          * be awakened when they arrive.  */
2240                         current->real_blocked = current->blocked;
2241                         sigandsets(&current->blocked, &current->blocked, &these);
2242                         recalc_sigpending();
2243                         spin_unlock_irq(&current->sighand->siglock);
2244
2245                         timeout = schedule_timeout_interruptible(timeout);
2246
2247                         try_to_freeze();
2248                         spin_lock_irq(&current->sighand->siglock);
2249                         sig = dequeue_signal(current, &these, &info);
2250                         current->blocked = current->real_blocked;
2251                         siginitset(&current->real_blocked, 0);
2252                         recalc_sigpending();
2253                 }
2254         }
2255         spin_unlock_irq(&current->sighand->siglock);
2256
2257         if (sig) {
2258                 ret = sig;
2259                 if (uinfo) {
2260                         if (copy_siginfo_to_user(uinfo, &info))
2261                                 ret = -EFAULT;
2262                 }
2263         } else {
2264                 ret = -EAGAIN;
2265                 if (timeout)
2266                         ret = -EINTR;
2267         }
2268
2269         return ret;
2270 }
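
/*
 * Editorial illustration (not part of the original file): typical
 * user-space use of the syscall above, via the sigtimedwait() wrapper.
 * The signal must be blocked so it stays queued instead of being
 * delivered to a handler.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	ts.tv_sec = 2;
	ts.tv_nsec = 0;
	if (sigtimedwait(&set, &info, &ts) < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR if interrupted */
	else
		printf("got signal %d from pid %d\n", info.si_signo, (int)info.si_pid);
	return 0;
}
#endif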
2271
2272 asmlinkage long
2273 sys_kill(int pid, int sig)
2274 {
2275         struct siginfo info;
2276
2277         info.si_signo = sig;
2278         info.si_errno = 0;
2279         info.si_code = SI_USER;
2280         info.si_pid = current->tgid;
2281         info.si_uid = current->uid;
2282
2283         return kill_something_info(sig, &info, pid);
2284 }
2285
2286 static int do_tkill(int tgid, int pid, int sig)
2287 {
2288         int error;
2289         struct siginfo info;
2290         struct task_struct *p;
2291
2292         error = -ESRCH;
2293         info.si_signo = sig;
2294         info.si_errno = 0;
2295         info.si_code = SI_TKILL;
2296         info.si_pid = current->tgid;
2297         info.si_uid = current->uid;
2298
2299         read_lock(&tasklist_lock);
2300         p = find_task_by_pid(pid);
2301         if (p && (tgid <= 0 || p->tgid == tgid)) {
2302                 error = check_kill_permission(sig, &info, p);
2303                 /*
2304                  * The null signal is a permissions and process existence
2305                  * probe.  No signal is actually delivered.
2306                  */
2307                 if (!error && sig && p->sighand) {
2308                         spin_lock_irq(&p->sighand->siglock);
2309                         handle_stop_signal(sig, p);
2310                         error = specific_send_sig_info(sig, &info, p);
2311                         spin_unlock_irq(&p->sighand->siglock);
2312                 }
2313         }
2314         read_unlock(&tasklist_lock);
2315
2316         return error;
2317 }
2318
2319 /**
2320  *  sys_tgkill - send signal to one specific thread
2321  *  @tgid: the thread group ID of the thread
2322  *  @pid: the PID of the thread
2323  *  @sig: signal to be sent
2324  *
2325  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2326  *  exists but no longer belongs to the target process. This
2327  *  method solves the problem of threads exiting and PIDs getting reused.
2328  */
2329 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2330 {
2331         /* This is only valid for single tasks */
2332         if (pid <= 0 || tgid <= 0)
2333                 return -EINVAL;
2334
2335         return do_tkill(tgid, pid, sig);
2336 }
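
/*
 * Editorial illustration (not part of the original file): sending a
 * signal to one specific thread with tgkill, using raw syscall numbers
 * since glibc of this era has no wrapper.  Synchronisation through
 * sleep() is deliberately crude; build with -pthread.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t worker_tid;

static void on_usr1(int sig)
{
}

static void *worker(void *arg)
{
	worker_tid = syscall(SYS_gettid);	/* kernel thread id, not pthread_t */
	pause();				/* returns once the handler has run */
	return NULL;
}

int main(void)
{
	pthread_t t;

	signal(SIGUSR1, on_usr1);
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);				/* let the worker publish its tid */

	/* delivered to that one thread only; fails with ESRCH if the tid was reused */
	syscall(SYS_tgkill, getpid(), worker_tid, SIGUSR1);

	pthread_join(t, NULL);
	printf("worker %d got SIGUSR1\n", (int)worker_tid);
	return 0;
}
#endif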
2337
2338 /*
2339  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2340  */
2341 asmlinkage long
2342 sys_tkill(int pid, int sig)
2343 {
2344         /* This is only valid for single tasks */
2345         if (pid <= 0)
2346                 return -EINVAL;
2347
2348         return do_tkill(0, pid, sig);
2349 }
2350
2351 asmlinkage long
2352 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2353 {
2354         siginfo_t info;
2355
2356         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2357                 return -EFAULT;
2358
2359         /* Not even root can pretend to send signals from the kernel.
2360            Nor can they impersonate a kill(), which adds source info.  */
2361         if (info.si_code >= 0)
2362                 return -EPERM;
2363         info.si_signo = sig;
2364
2365         /* POSIX.1b doesn't mention process groups.  */
2366         return kill_proc_info(sig, &info, pid);
2367 }
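
/*
 * Editorial illustration (not part of the original file): the usual path
 * into the syscall above is glibc's sigqueue(), which fills in a
 * negative si_code (SI_QUEUE) and so passes the permission check.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_usr1(int sig, siginfo_t *si, void *ctx)
{
	/* si_code is SI_QUEUE (< 0); si_value carries the payload.
	   printf is not async-signal-safe; demonstration only. */
	printf("sig=%d value=%d code=%d\n",
	       sig, si->si_value.sival_int, si->si_code);
}

int main(void)
{
	struct sigaction sa;
	union sigval v;

	sa.sa_sigaction = on_usr1;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	v.sival_int = 42;
	sigqueue(getpid(), SIGUSR1, v);		/* wraps rt_sigqueueinfo */
	return 0;
}
#endif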
2368
2369 int
2370 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2371 {
2372         struct k_sigaction *k;
2373
2374         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2375                 return -EINVAL;
2376
2377         k = &current->sighand->action[sig-1];
2378
2379         spin_lock_irq(&current->sighand->siglock);
2380         if (signal_pending(current)) {
2381                 /*
2382                  * If there might be a fatal signal pending on multiple
2383                  * threads, make sure we take it before changing the action.
2384                  */
2385                 spin_unlock_irq(&current->sighand->siglock);
2386                 return -ERESTARTNOINTR;
2387         }
2388
2389         if (oact)
2390                 *oact = *k;
2391
2392         if (act) {
2393                 /*
2394                  * POSIX 3.3.1.3:
2395                  *  "Setting a signal action to SIG_IGN for a signal that is
2396                  *   pending shall cause the pending signal to be discarded,
2397                  *   whether or not it is blocked."
2398                  *
2399                  *  "Setting a signal action to SIG_DFL for a signal that is
2400                  *   pending and whose default action is to ignore the signal
2401                  *   (for example, SIGCHLD), shall cause the pending signal to
2402                  *   be discarded, whether or not it is blocked"
2403                  */
2404                 if (act->sa.sa_handler == SIG_IGN ||
2405                     (act->sa.sa_handler == SIG_DFL &&
2406                      sig_kernel_ignore(sig))) {
2407                         /*
2408                          * This is a fairly rare case, so we only take the
2409                          * tasklist_lock once we're sure we'll need it.
2410                          * Now we must do this little unlock and relock
2411                          * dance to maintain the lock hierarchy.
2412                          */
2413                         struct task_struct *t = current;
2414                         spin_unlock_irq(&t->sighand->siglock);
2415                         read_lock(&tasklist_lock);
2416                         spin_lock_irq(&t->sighand->siglock);
2417                         *k = *act;
2418                         sigdelsetmask(&k->sa.sa_mask,
2419                                       sigmask(SIGKILL) | sigmask(SIGSTOP));
2420                         rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2421                         do {
2422                                 rm_from_queue(sigmask(sig), &t->pending);
2423                                 recalc_sigpending_tsk(t);
2424                                 t = next_thread(t);
2425                         } while (t != current);
2426                         spin_unlock_irq(&current->sighand->siglock);
2427                         read_unlock(&tasklist_lock);
2428                         return 0;
2429                 }
2430
2431                 *k = *act;
2432                 sigdelsetmask(&k->sa.sa_mask,
2433                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2434         }
2435
2436         spin_unlock_irq(&current->sighand->siglock);
2437         return 0;
2438 }
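
/*
 * Editorial illustration (not part of the original file): the POSIX
 * 3.3.1.3 behaviour implemented above: setting the action to SIG_IGN
 * discards an already-pending instance, even while it is blocked.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);				/* now pending and blocked */
	sigpending(&pending);
	printf("pending before SIG_IGN: %d\n", sigismember(&pending, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);		/* discards the pending instance */
	sigpending(&pending);
	printf("pending after  SIG_IGN: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
#endif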
2439
2440 int 
2441 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2442 {
2443         stack_t oss;
2444         int error;
2445
2446         if (uoss) {
2447                 oss.ss_sp = (void __user *) current->sas_ss_sp;
2448                 oss.ss_size = current->sas_ss_size;
2449                 oss.ss_flags = sas_ss_flags(sp);
2450         }
2451
2452         if (uss) {
2453                 void __user *ss_sp;
2454                 size_t ss_size;
2455                 int ss_flags;
2456
2457                 error = -EFAULT;
2458                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2459                     || __get_user(ss_sp, &uss->ss_sp)
2460                     || __get_user(ss_flags, &uss->ss_flags)
2461                     || __get_user(ss_size, &uss->ss_size))
2462                         goto out;
2463
2464                 error = -EPERM;
2465                 if (on_sig_stack(sp))
2466                         goto out;
2467
2468                 error = -EINVAL;
2469                 /*
2470                  *
2471                  * Note - this code used to test ss_flags incorrectly.
2472                  *        Old code may have been written using ss_flags==0
2473                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2474                  *        way that worked) - this fix preserves that older
2475                  *        mechanism.
2476                  */
2477                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2478                         goto out;
2479
2480                 if (ss_flags == SS_DISABLE) {
2481                         ss_size = 0;
2482                         ss_sp = NULL;
2483                 } else {
2484                         error = -ENOMEM;
2485                         if (ss_size < MINSIGSTKSZ)
2486                                 goto out;
2487                 }
2488
2489                 current->sas_ss_sp = (unsigned long) ss_sp;
2490                 current->sas_ss_size = ss_size;
2491         }
2492
2493         if (uoss) {
2494                 error = -EFAULT;
2495                 if (copy_to_user(uoss, &oss, sizeof(oss)))
2496                         goto out;
2497         }
2498
2499         error = 0;
2500 out:
2501         return error;
2502 }
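
/*
 * Editorial illustration (not part of the original file): registering an
 * alternate stack and a handler that runs on it, the usual consumer of
 * the sas_ss_sp / sas_ss_size fields managed above.
 */
#if 0	/* illustrative user-space example, not built with the kernel */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* runs on the alternate stack registered below */
	write(1, "caught SIGSEGV on the alternate stack\n", 38);
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);		/* must be at least MINSIGSTKSZ */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;		/* switch to the stack registered above */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);				/* stand-in for a real stack overflow */
	return 0;
}
#endif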
2503
2504 #ifdef __ARCH_WANT_SYS_SIGPENDING
2505
2506 asmlinkage long
2507 sys_sigpending(old_sigset_t __user *set)
2508 {
2509         return do_sigpending(set, sizeof(*set));
2510 }
2511
2512 #endif
2513
2514 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2515 /* Some platforms have their own version with special arguments; others
2516    support only sys_rt_sigprocmask.  */
2517
2518 asmlinkage long
2519 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2520 {
2521         int error;
2522         old_sigset_t old_set, new_set;
2523
2524         if (set) {
2525                 error = -EFAULT;
2526                 if (copy_from_user(&new_set, set, sizeof(*set)))
2527                         goto out;
2528                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2529
2530                 spin_lock_irq(&current->sighand->siglock);
2531                 old_set = current->blocked.sig[0];
2532
2533                 error = 0;
2534                 switch (how) {
2535                 default:
2536                         error = -EINVAL;
2537                         break;
2538                 case SIG_BLOCK:
2539                         sigaddsetmask(&current->blocked, new_set);
2540                         break;
2541                 case SIG_UNBLOCK:
2542                         sigdelsetmask(&current->blocked, new_set);
2543                         break;
2544                 case SIG_SETMASK:
2545                         current->blocked.sig[0] = new_set;
2546                         break;
2547                 }
2548
2549                 recalc_sigpending();
2550                 spin_unlock_irq(&current->sighand->siglock);
2551                 if (error)
2552                         goto out;
2553                 if (oset)
2554                         goto set_old;
2555         } else if (oset) {
2556                 old_set = current->blocked.sig[0];
2557         set_old:
2558                 error = -EFAULT;
2559                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2560                         goto out;
2561         }
2562         error = 0;
2563 out:
2564         return error;
2565 }
2566 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2567
2568 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2569 asmlinkage long
2570 sys_rt_sigaction(int sig,
2571                  const struct sigaction __user *act,
2572                  struct sigaction __user *oact,
2573                  size_t sigsetsize)
2574 {
2575         struct k_sigaction new_sa, old_sa;
2576         int ret = -EINVAL;
2577
2578         /* XXX: Don't preclude handling different sized sigset_t's.  */
2579         if (sigsetsize != sizeof(sigset_t))
2580                 goto out;
2581
2582         if (act) {
2583                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2584                         return -EFAULT;
2585         }
2586
2587         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2588
2589         if (!ret && oact) {
2590                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2591                         return -EFAULT;
2592         }
2593 out:
2594         return ret;
2595 }
2596 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2597
2598 #ifdef __ARCH_WANT_SYS_SGETMASK
2599
2600 /*
2601  * For backwards compatibility.  Functionality superseded by sigprocmask.
2602  */
2603 asmlinkage long
2604 sys_sgetmask(void)
2605 {
2606         /* SMP safe */
2607         return current->blocked.sig[0];
2608 }
2609
2610 asmlinkage long
2611 sys_ssetmask(int newmask)
2612 {
2613         int old;
2614
2615         spin_lock_irq(&current->sighand->siglock);
2616         old = current->blocked.sig[0];
2617
2618         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2619                                                   sigmask(SIGSTOP)));
2620         recalc_sigpending();
2621         spin_unlock_irq(&current->sighand->siglock);
2622
2623         return old;
2624 }
2625 #endif /* __ARCH_WANT_SYS_SGETMASK */
2626
2627 #ifdef __ARCH_WANT_SYS_SIGNAL
2628 /*
2629  * For backwards compatibility.  Functionality superseded by sigaction.
2630  */
2631 asmlinkage unsigned long
2632 sys_signal(int sig, __sighandler_t handler)
2633 {
2634         struct k_sigaction new_sa, old_sa;
2635         int ret;
2636
2637         new_sa.sa.sa_handler = handler;
2638         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2639
2640         ret = do_sigaction(sig, &new_sa, &old_sa);
2641
2642         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2643 }
2644 #endif /* __ARCH_WANT_SYS_SIGNAL */
2645
2646 #ifdef __ARCH_WANT_SYS_PAUSE
2647
2648 asmlinkage long
2649 sys_pause(void)
2650 {
2651         current->state = TASK_INTERRUPTIBLE;
2652         schedule();
2653         return -ERESTARTNOHAND;
2654 }
2655
2656 #endif
2657
2658 void __init signals_init(void)
2659 {
2660         sigqueue_cachep =
2661                 kmem_cache_create("sigqueue",
2662                                   sizeof(struct sigqueue),
2663                                   __alignof__(struct sigqueue),
2664                                   SLAB_PANIC, NULL, NULL);
2665 }