1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18#include <linux/sched.h>
19#include <linux/fs.h>
20#include <linux/tty.h>
21#include <linux/binfmts.h>
22#include <linux/security.h>
23#include <linux/syscalls.h>
24#include <linux/ptrace.h>
25#include <linux/posix-timers.h>
 26#include <linux/signal.h>
 27#include <linux/audit.h>
 28#include <linux/capability.h>
29#include <asm/param.h>
30#include <asm/uaccess.h>
31#include <asm/unistd.h>
32#include <asm/siginfo.h>
33
34/*
35 * SLAB caches for signal bits.
36 */
37
38static kmem_cache_t *sigqueue_cachep;
39
40/*
41 * In POSIX a signal is sent either to a specific thread (Linux task)
42 * or to the process as a whole (Linux thread group). How the signal
43 * is sent determines whether it's to one thread or the whole group,
44 * which determines which signal mask(s) are involved in blocking it
45 * from being delivered until later. When the signal is delivered,
46 * either it's caught or ignored by a user handler or it has a default
47 * effect that applies to the whole thread group (POSIX process).
48 *
49 * The possible effects an unblocked signal set to SIG_DFL can have are:
50 * ignore - Nothing Happens
51 * terminate - kill the process, i.e. all threads in the group,
52 * similar to exit_group. The group leader (only) reports
53 * WIFSIGNALED status to its parent.
54 * coredump - write a core dump file describing all threads using
55 * the same mm and then kill all those threads
56 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
57 *
58 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 59 * Other signals when not blocked and set to SIG_DFL behave as follows.
60 * The job control signals also have other special effects.
61 *
62 * +--------------------+------------------+
63 * | POSIX signal | default action |
64 * +--------------------+------------------+
65 * | SIGHUP | terminate |
66 * | SIGINT | terminate |
67 * | SIGQUIT | coredump |
68 * | SIGILL | coredump |
69 * | SIGTRAP | coredump |
70 * | SIGABRT/SIGIOT | coredump |
71 * | SIGBUS | coredump |
72 * | SIGFPE | coredump |
73 * | SIGKILL | terminate(+) |
74 * | SIGUSR1 | terminate |
75 * | SIGSEGV | coredump |
76 * | SIGUSR2 | terminate |
77 * | SIGPIPE | terminate |
78 * | SIGALRM | terminate |
79 * | SIGTERM | terminate |
80 * | SIGCHLD | ignore |
81 * | SIGCONT | ignore(*) |
82 * | SIGSTOP | stop(*)(+) |
83 * | SIGTSTP | stop(*) |
84 * | SIGTTIN | stop(*) |
85 * | SIGTTOU | stop(*) |
86 * | SIGURG | ignore |
87 * | SIGXCPU | coredump |
88 * | SIGXFSZ | coredump |
89 * | SIGVTALRM | terminate |
90 * | SIGPROF | terminate |
91 * | SIGPOLL/SIGIO | terminate |
92 * | SIGSYS/SIGUNUSED | coredump |
93 * | SIGSTKFLT | terminate |
94 * | SIGWINCH | ignore |
95 * | SIGPWR | terminate |
96 * | SIGRTMIN-SIGRTMAX | terminate |
97 * +--------------------+------------------+
98 * | non-POSIX signal | default action |
99 * +--------------------+------------------+
100 * | SIGEMT | coredump |
101 * +--------------------+------------------+
102 *
103 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104 * (*) Special job control effects:
105 * When SIGCONT is sent, it resumes the process (all threads in the group)
106 * from TASK_STOPPED state and also clears any pending/queued stop signals
107 * (any of those marked with "stop(*)"). This happens regardless of blocking,
108 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
109 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 110 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
111 * default action of stopping the process may happen later or never.
112 */
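/*
 * To make the two delivery modes above concrete, a user-space sketch
 * (not kernel code; "worker_tid" is a hypothetical pthread_t used only
 * for illustration):
 *
 *	kill(getpid(), SIGTERM);           // process-wide: any thread in the
 *	                                   // group may dequeue and handle it
 *	pthread_kill(worker_tid, SIGUSR1); // thread-directed: only that
 *	                                   // thread's pending set is used
 */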
113
114#ifdef SIGEMT
115#define M_SIGEMT M(SIGEMT)
116#else
117#define M_SIGEMT 0
118#endif
119
120#if SIGRTMIN > BITS_PER_LONG
121#define M(sig) (1ULL << ((sig)-1))
122#else
123#define M(sig) (1UL << ((sig)-1))
124#endif
125#define T(sig, mask) (M(sig) & (mask))
126
127#define SIG_KERNEL_ONLY_MASK (\
128 M(SIGKILL) | M(SIGSTOP) )
129
130#define SIG_KERNEL_STOP_MASK (\
131 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
132
133#define SIG_KERNEL_COREDUMP_MASK (\
134 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
135 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
136 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
137
138#define SIG_KERNEL_IGNORE_MASK (\
139 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
140
141#define sig_kernel_only(sig) \
142 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
143#define sig_kernel_coredump(sig) \
144 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
145#define sig_kernel_ignore(sig) \
146 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
147#define sig_kernel_stop(sig) \
148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
149
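/*
 * Reading the classification macros above with a couple of examples:
 * sig_kernel_stop(SIGTSTP) is nonzero because SIGTSTP is below SIGRTMIN
 * and its bit is set in SIG_KERNEL_STOP_MASK, while sig_kernel_stop(SIGRTMIN)
 * is 0 because real-time signals are never classified as stop/coredump/ignore
 * and so default to terminate, as the table above says.  M(sig) builds the
 * single-bit mask for a classic signal and T(sig, mask) tests it, so each
 * check is a constant-time bit test.
 */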
150#define sig_needs_tasklist(sig) \
151 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
152
153#define sig_user_defined(t, signr) \
154 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
155 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
156
157#define sig_fatal(t, signr) \
158 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
159 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
160
161static int sig_ignored(struct task_struct *t, int sig)
162{
163 void __user * handler;
164
165 /*
166 * Tracers always want to know about signals..
167 */
168 if (t->ptrace & PT_PTRACED)
169 return 0;
170
171 /*
172 * Blocked signals are never ignored, since the
173 * signal handler may change by the time it is
174 * unblocked.
175 */
176 if (sigismember(&t->blocked, sig))
177 return 0;
178
179 /* Is it explicitly or implicitly ignored? */
180 handler = t->sighand->action[sig-1].sa.sa_handler;
181 return handler == SIG_IGN ||
182 (handler == SIG_DFL && sig_kernel_ignore(sig));
183}
184
185/*
186 * Re-calculate pending state from the set of locally pending
187 * signals, globally pending signals, and blocked signals.
188 */
189static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
190{
191 unsigned long ready;
192 long i;
193
194 switch (_NSIG_WORDS) {
195 default:
196 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
197 ready |= signal->sig[i] &~ blocked->sig[i];
198 break;
199
200 case 4: ready = signal->sig[3] &~ blocked->sig[3];
201 ready |= signal->sig[2] &~ blocked->sig[2];
202 ready |= signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
204 break;
205
206 case 2: ready = signal->sig[1] &~ blocked->sig[1];
207 ready |= signal->sig[0] &~ blocked->sig[0];
208 break;
209
210 case 1: ready = signal->sig[0] &~ blocked->sig[0];
211 }
212 return ready != 0;
213}
214
215#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
216
217fastcall void recalc_sigpending_tsk(struct task_struct *t)
218{
219 if (t->signal->group_stop_count > 0 ||
 220 (freezing(t)) ||
221 PENDING(&t->pending, &t->blocked) ||
222 PENDING(&t->signal->shared_pending, &t->blocked))
223 set_tsk_thread_flag(t, TIF_SIGPENDING);
224 else
225 clear_tsk_thread_flag(t, TIF_SIGPENDING);
226}
227
228void recalc_sigpending(void)
229{
230 recalc_sigpending_tsk(current);
231}
232
233/* Given the mask, find the first available signal that should be serviced. */
234
235static int
236next_signal(struct sigpending *pending, sigset_t *mask)
237{
238 unsigned long i, *s, *m, x;
239 int sig = 0;
240
241 s = pending->signal.sig;
242 m = mask->sig;
243 switch (_NSIG_WORDS) {
244 default:
245 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
246 if ((x = *s &~ *m) != 0) {
247 sig = ffz(~x) + i*_NSIG_BPW + 1;
248 break;
249 }
250 break;
251
252 case 2: if ((x = s[0] &~ m[0]) != 0)
253 sig = 1;
254 else if ((x = s[1] &~ m[1]) != 0)
255 sig = _NSIG_BPW + 1;
256 else
257 break;
258 sig += ffz(~x);
259 break;
260
261 case 1: if ((x = *s &~ *m) != 0)
262 sig = ffz(~x) + 1;
263 break;
264 }
265
266 return sig;
267}
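/*
 * next_signal() leans on ffz(~x): the first zero bit of ~x is the lowest
 * set bit of x.  As a worked example (assumed values, purely for
 * illustration): if the first word of pending-and-not-blocked bits were
 * 0x90 (bits 4 and 7 set), ffz(~0x90) is 4 and the function returns 5,
 * the lowest-numbered deliverable signal in that word.
 */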
268
 269static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
270 int override_rlimit)
271{
272 struct sigqueue *q = NULL;
273
274 atomic_inc(&t->user->sigpending);
275 if (override_rlimit ||
276 atomic_read(&t->user->sigpending) <=
277 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
278 q = kmem_cache_alloc(sigqueue_cachep, flags);
279 if (unlikely(q == NULL)) {
280 atomic_dec(&t->user->sigpending);
281 } else {
282 INIT_LIST_HEAD(&q->list);
283 q->flags = 0;
284 q->user = get_uid(t->user);
285 }
286 return(q);
287}
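/*
 * Note the accounting order in __sigqueue_alloc(): user->sigpending is
 * bumped before the allocation is attempted and rolled back only if no
 * queue entry is created, so the per-user count never transiently
 * undercounts queued signals.  __sigqueue_free() below undoes both the
 * count and the uid reference for entries that really were allocated.
 */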
288
 289static void __sigqueue_free(struct sigqueue *q)
290{
291 if (q->flags & SIGQUEUE_PREALLOC)
292 return;
293 atomic_dec(&q->user->sigpending);
294 free_uid(q->user);
295 kmem_cache_free(sigqueue_cachep, q);
296}
297
298static void flush_sigqueue(struct sigpending *queue)
299{
300 struct sigqueue *q;
301
302 sigemptyset(&queue->signal);
303 while (!list_empty(&queue->list)) {
304 q = list_entry(queue->list.next, struct sigqueue , list);
305 list_del_init(&q->list);
306 __sigqueue_free(q);
307 }
308}
309
310/*
311 * Flush all pending signals for a task.
312 */
 313void flush_signals(struct task_struct *t)
314{
315 unsigned long flags;
316
317 spin_lock_irqsave(&t->sighand->siglock, flags);
318 clear_tsk_thread_flag(t,TIF_SIGPENDING);
319 flush_sigqueue(&t->pending);
320 flush_sigqueue(&t->signal->shared_pending);
321 spin_unlock_irqrestore(&t->sighand->siglock, flags);
322}
323
324/*
325 * This function expects the tasklist_lock write-locked.
326 */
327void __exit_signal(struct task_struct *tsk)
328{
329 struct signal_struct *sig = tsk->signal;
330 struct sighand_struct *sighand;
331
332 BUG_ON(!sig);
333 BUG_ON(!atomic_read(&sig->count));
 334
335 rcu_read_lock();
336 sighand = rcu_dereference(tsk->sighand);
 337 spin_lock(&sighand->siglock);
 338
 339 posix_cpu_timers_exit(tsk);
 340 if (atomic_dec_and_test(&sig->count))
 341 posix_cpu_timers_exit_group(tsk);
 342 else {
343 /*
344 * If there is any task waiting for the group exit
345 * then notify it:
346 */
347 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
348 wake_up_process(sig->group_exit_task);
349 sig->group_exit_task = NULL;
350 }
351 if (tsk == sig->curr_target)
352 sig->curr_target = next_thread(tsk);
353 /*
354 * Accumulate here the counters for all threads but the
355 * group leader as they die, so they can be added into
356 * the process-wide totals when those are taken.
357 * The group leader stays around as a zombie as long
358 * as there are other threads. When it gets reaped,
359 * the exit.c code will add its counts into these totals.
360 * We won't ever get here for the group leader, since it
361 * will have been the last reference on the signal_struct.
362 */
363 sig->utime = cputime_add(sig->utime, tsk->utime);
364 sig->stime = cputime_add(sig->stime, tsk->stime);
365 sig->min_flt += tsk->min_flt;
366 sig->maj_flt += tsk->maj_flt;
367 sig->nvcsw += tsk->nvcsw;
368 sig->nivcsw += tsk->nivcsw;
369 sig->sched_time += tsk->sched_time;
 370 sig = NULL; /* Marker for below. */
 371 }
372
373 tsk->signal = NULL;
 374 cleanup_sighand(tsk);
 375 spin_unlock(&sighand->siglock);
 376 rcu_read_unlock();
 377
378 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
379 flush_sigqueue(&tsk->pending);
380 if (sig) {
 381 flush_sigqueue(&sig->shared_pending);
 382 __cleanup_signal(sig);
383 }
384}
385
386/*
387 * Flush all handlers for a task.
388 */
389
390void
391flush_signal_handlers(struct task_struct *t, int force_default)
392{
393 int i;
394 struct k_sigaction *ka = &t->sighand->action[0];
395 for (i = _NSIG ; i != 0 ; i--) {
396 if (force_default || ka->sa.sa_handler != SIG_IGN)
397 ka->sa.sa_handler = SIG_DFL;
398 ka->sa.sa_flags = 0;
399 sigemptyset(&ka->sa.sa_mask);
400 ka++;
401 }
402}
403
404
405/* Notify the system that a driver wants to block all signals for this
406 * process, and wants to be notified if any signals at all were to be
407 * sent/acted upon. If the notifier routine returns non-zero, then the
408 * signal will be acted upon after all. If the notifier routine returns 0,
 409 * then the signal will be blocked. Only one block per process is
410 * allowed. priv is a pointer to private data that the notifier routine
411 * can use to determine if the signal should be blocked or not. */
412
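/*
 * A driver would typically use this interface along the following lines
 * (a minimal sketch; "my_notifier", "my_dev" and "my_sigmask" are
 * hypothetical names, not symbols defined in this file):
 *
 *	static sigset_t my_sigmask;
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	// nonzero: deliver after all
 *	}
 *
 *	sigfillset(&my_sigmask);
 *	block_all_signals(my_notifier, dev, &my_sigmask);
 *	...
 *	unblock_all_signals();
 *
 * The mask must stay valid for as long as the block is in effect, since
 * only a pointer to it is stored in the task.
 */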
413void
414block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
415{
416 unsigned long flags;
417
418 spin_lock_irqsave(&current->sighand->siglock, flags);
419 current->notifier_mask = mask;
420 current->notifier_data = priv;
421 current->notifier = notifier;
422 spin_unlock_irqrestore(&current->sighand->siglock, flags);
423}
424
425/* Notify the system that blocking has ended. */
426
427void
428unblock_all_signals(void)
429{
430 unsigned long flags;
431
432 spin_lock_irqsave(&current->sighand->siglock, flags);
433 current->notifier = NULL;
434 current->notifier_data = NULL;
435 recalc_sigpending();
436 spin_unlock_irqrestore(&current->sighand->siglock, flags);
437}
438
 439static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
440{
441 struct sigqueue *q, *first = NULL;
442 int still_pending = 0;
443
444 if (unlikely(!sigismember(&list->signal, sig)))
445 return 0;
446
447 /*
448 * Collect the siginfo appropriate to this signal. Check if
449 * there is another siginfo for the same signal.
450 */
451 list_for_each_entry(q, &list->list, list) {
452 if (q->info.si_signo == sig) {
453 if (first) {
454 still_pending = 1;
455 break;
456 }
457 first = q;
458 }
459 }
460 if (first) {
461 list_del_init(&first->list);
462 copy_siginfo(info, &first->info);
463 __sigqueue_free(first);
464 if (!still_pending)
465 sigdelset(&list->signal, sig);
466 } else {
467
468 /* Ok, it wasn't in the queue. This must be
469 a fast-pathed signal or we must have been
470 out of queue space. So zero out the info.
471 */
472 sigdelset(&list->signal, sig);
473 info->si_signo = sig;
474 info->si_errno = 0;
475 info->si_code = 0;
476 info->si_pid = 0;
477 info->si_uid = 0;
478 }
479 return 1;
480}
481
482static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
483 siginfo_t *info)
484{
485 int sig = 0;
486
 487 sig = next_signal(pending, mask);
488 if (sig) {
489 if (current->notifier) {
490 if (sigismember(current->notifier_mask, sig)) {
491 if (!(current->notifier)(current->notifier_data)) {
492 clear_thread_flag(TIF_SIGPENDING);
493 return 0;
494 }
495 }
496 }
497
498 if (!collect_signal(sig, pending, info))
499 sig = 0;
500
501 }
502 recalc_sigpending();
503
504 return sig;
505}
506
507/*
508 * Dequeue a signal and return the element to the caller, which is
509 * expected to free it.
510 *
511 * All callers have to hold the siglock.
512 */
513int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
514{
515 int signr = __dequeue_signal(&tsk->pending, mask, info);
516 if (!signr)
517 signr = __dequeue_signal(&tsk->signal->shared_pending,
518 mask, info);
519 if (signr && unlikely(sig_kernel_stop(signr))) {
520 /*
521 * Set a marker that we have dequeued a stop signal. Our
522 * caller might release the siglock and then the pending
523 * stop signal it is about to process is no longer in the
524 * pending bitmasks, but must still be cleared by a SIGCONT
525 * (and overruled by a SIGKILL). So those cases clear this
526 * shared flag after we've set it. Note that this flag may
527 * remain set after the signal we return is ignored or
528 * handled. That doesn't matter because its only purpose
529 * is to alert stop-signal processing code when another
530 * processor has come along and cleared the flag.
531 */
532 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
533 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
534 }
535 if ( signr &&
536 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
537 info->si_sys_private){
538 /*
539 * Release the siglock to ensure proper locking order
540 * of timer locks outside of siglocks. Note, we leave
541 * irqs disabled here, since the posix-timers code is
542 * about to disable them again anyway.
543 */
544 spin_unlock(&tsk->sighand->siglock);
545 do_schedule_next_timer(info);
546 spin_lock(&tsk->sighand->siglock);
547 }
548 return signr;
549}
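/*
 * A typical caller follows the locking rule above like this (sketch only;
 * get_signal_to_deliver() further down is the real consumer):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */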
550
551/*
552 * Tell a process that it has a new active signal..
553 *
554 * NOTE! we rely on the previous spin_lock to
555 * lock interrupts for us! We can only be called with
556 * "siglock" held, and the local interrupt must
557 * have been disabled when that got acquired!
558 *
559 * No need to set need_resched since signal event passing
560 * goes through ->blocked
561 */
562void signal_wake_up(struct task_struct *t, int resume)
563{
564 unsigned int mask;
565
566 set_tsk_thread_flag(t, TIF_SIGPENDING);
567
568 /*
569 * For SIGKILL, we want to wake it up in the stopped/traced case.
570 * We don't check t->state here because there is a race with it
 571 * executing on another processor and just now entering stopped state.
572 * By using wake_up_state, we ensure the process will wake up and
573 * handle its death signal.
574 */
575 mask = TASK_INTERRUPTIBLE;
576 if (resume)
577 mask |= TASK_STOPPED | TASK_TRACED;
578 if (!wake_up_state(t, mask))
579 kick_process(t);
580}
581
582/*
583 * Remove signals in mask from the pending set and queue.
584 * Returns 1 if any signals were found.
585 *
586 * All callers must be holding the siglock.
587 *
588 * This version takes a sigset mask and looks at all signals,
589 * not just those in the first mask word.
590 */
591static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
592{
593 struct sigqueue *q, *n;
594 sigset_t m;
595
596 sigandsets(&m, mask, &s->signal);
597 if (sigisemptyset(&m))
598 return 0;
599
600 signandsets(&s->signal, &s->signal, mask);
601 list_for_each_entry_safe(q, n, &s->list, list) {
602 if (sigismember(mask, q->info.si_signo)) {
603 list_del_init(&q->list);
604 __sigqueue_free(q);
605 }
606 }
607 return 1;
608}
609/*
610 * Remove signals in mask from the pending set and queue.
611 * Returns 1 if any signals were found.
612 *
613 * All callers must be holding the siglock.
614 */
615static int rm_from_queue(unsigned long mask, struct sigpending *s)
616{
617 struct sigqueue *q, *n;
618
619 if (!sigtestsetmask(&s->signal, mask))
620 return 0;
621
622 sigdelsetmask(&s->signal, mask);
623 list_for_each_entry_safe(q, n, &s->list, list) {
624 if (q->info.si_signo < SIGRTMIN &&
625 (mask & sigmask(q->info.si_signo))) {
626 list_del_init(&q->list);
627 __sigqueue_free(q);
628 }
629 }
630 return 1;
631}
632
633/*
634 * Bad permissions for sending the signal
635 */
636static int check_kill_permission(int sig, struct siginfo *info,
637 struct task_struct *t)
638{
639 int error = -EINVAL;
 640 if (!valid_signal(sig))
641 return error;
642 error = -EPERM;
 643 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
644 && ((sig != SIGCONT) ||
645 (current->signal->session != t->signal->session))
646 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
647 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
648 && !capable(CAP_KILL))
649 return error;
650
651 error = security_task_kill(t, info, sig);
652 if (!error)
653 audit_signal_info(sig, t); /* Let audit system see the signal */
654 return error;
655}
656
657/* forward decl */
658static void do_notify_parent_cldstop(struct task_struct *tsk,
 659 int to_self,
660 int why);
661
662/*
663 * Handle magic process-wide effects of stop/continue signals.
664 * Unlike the signal actions, these happen immediately at signal-generation
665 * time regardless of blocking, ignoring, or handling. This does the
666 * actual continuing for SIGCONT, but not the actual stopping for stop
667 * signals. The process stop is done as a signal action for SIG_DFL.
668 */
669static void handle_stop_signal(int sig, struct task_struct *p)
670{
671 struct task_struct *t;
672
 673 if (p->signal->flags & SIGNAL_GROUP_EXIT)
674 /*
675 * The process is in the middle of dying already.
676 */
677 return;
678
679 if (sig_kernel_stop(sig)) {
680 /*
681 * This is a stop signal. Remove SIGCONT from all queues.
682 */
683 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
684 t = p;
685 do {
686 rm_from_queue(sigmask(SIGCONT), &t->pending);
687 t = next_thread(t);
688 } while (t != p);
689 } else if (sig == SIGCONT) {
690 /*
691 * Remove all stop signals from all queues,
692 * and wake all threads.
693 */
694 if (unlikely(p->signal->group_stop_count > 0)) {
695 /*
696 * There was a group stop in progress. We'll
697 * pretend it finished before we got here. We are
698 * obliged to report it to the parent: if the
699 * SIGSTOP happened "after" this SIGCONT, then it
700 * would have cleared this pending SIGCONT. If it
701 * happened "before" this SIGCONT, then the parent
702 * got the SIGCHLD about the stop finishing before
703 * the continue happened. We do the notification
704 * now, and it's as if the stop had finished and
705 * the SIGCHLD was pending on entry to this kill.
706 */
707 p->signal->group_stop_count = 0;
708 p->signal->flags = SIGNAL_STOP_CONTINUED;
709 spin_unlock(&p->sighand->siglock);
 710 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
711 spin_lock(&p->sighand->siglock);
712 }
713 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
714 t = p;
715 do {
716 unsigned int state;
717 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
718
719 /*
720 * If there is a handler for SIGCONT, we must make
721 * sure that no thread returns to user mode before
722 * we post the signal, in case it was the only
723 * thread eligible to run the signal handler--then
724 * it must not do anything between resuming and
725 * running the handler. With the TIF_SIGPENDING
726 * flag set, the thread will pause and acquire the
727 * siglock that we hold now and until we've queued
728 * the pending signal.
729 *
730 * Wake up the stopped thread _after_ setting
731 * TIF_SIGPENDING
732 */
733 state = TASK_STOPPED;
734 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
735 set_tsk_thread_flag(t, TIF_SIGPENDING);
736 state |= TASK_INTERRUPTIBLE;
737 }
738 wake_up_state(t, state);
739
740 t = next_thread(t);
741 } while (t != p);
742
743 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
744 /*
745 * We were in fact stopped, and are now continued.
746 * Notify the parent with CLD_CONTINUED.
747 */
748 p->signal->flags = SIGNAL_STOP_CONTINUED;
749 p->signal->group_exit_code = 0;
750 spin_unlock(&p->sighand->siglock);
 751 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
752 spin_lock(&p->sighand->siglock);
753 } else {
754 /*
755 * We are not stopped, but there could be a stop
756 * signal in the middle of being processed after
757 * being removed from the queue. Clear that too.
758 */
759 p->signal->flags = 0;
760 }
761 } else if (sig == SIGKILL) {
762 /*
763 * Make sure that any pending stop signal already dequeued
764 * is undone by the wakeup for SIGKILL.
765 */
766 p->signal->flags = 0;
767 }
768}
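/*
 * Concretely, "kill -STOP <pid>" followed by "kill -CONT <pid>" exercises
 * both branches above: the stop signal flushes any queued SIGCONT, and the
 * SIGCONT flushes any queued stop signals, wakes the stopped threads and,
 * if the group really was stopped, notifies the parent with CLD_CONTINUED.
 */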
769
770static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
771 struct sigpending *signals)
772{
773 struct sigqueue * q = NULL;
774 int ret = 0;
775
776 /*
777 * fast-pathed signals for kernel-internal things like SIGSTOP
778 * or SIGKILL.
779 */
 780 if (info == SEND_SIG_FORCED)
781 goto out_set;
782
783 /* Real-time signals must be queued if sent by sigqueue, or
784 some other real-time mechanism. It is implementation
785 defined whether kill() does so. We attempt to do so, on
786 the principle of least surprise, but since kill is not
787 allowed to fail with EAGAIN when low on memory we just
788 make sure at least one signal gets delivered and don't
789 pass on the info struct. */
790
791 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
 792 (is_si_special(info) ||
793 info->si_code >= 0)));
794 if (q) {
795 list_add_tail(&q->list, &signals->list);
796 switch ((unsigned long) info) {
 797 case (unsigned long) SEND_SIG_NOINFO:
798 q->info.si_signo = sig;
799 q->info.si_errno = 0;
800 q->info.si_code = SI_USER;
801 q->info.si_pid = current->pid;
802 q->info.si_uid = current->uid;
803 break;
 804 case (unsigned long) SEND_SIG_PRIV:
805 q->info.si_signo = sig;
806 q->info.si_errno = 0;
807 q->info.si_code = SI_KERNEL;
808 q->info.si_pid = 0;
809 q->info.si_uid = 0;
810 break;
811 default:
812 copy_siginfo(&q->info, info);
813 break;
814 }
815 } else if (!is_si_special(info)) {
816 if (sig >= SIGRTMIN && info->si_code != SI_USER)
817 /*
818 * Queue overflow, abort. We may abort if the signal was rt
819 * and sent by user using something other than kill().
820 */
821 return -EAGAIN;
822 }
823
824out_set:
825 sigaddset(&signals->signal, sig);
826 return ret;
827}
828
829#define LEGACY_QUEUE(sigptr, sig) \
830 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
831
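/*
 * LEGACY_QUEUE() is what gives classic (non-real-time) signals their
 * "at most one pending instance" behaviour: if, say, SIGCHLD is already
 * set in the pending mask, a second SIGCHLD sent before the first is
 * dequeued is simply dropped.  Real-time signals (>= SIGRTMIN) skip this
 * test and queue every occurrence, subject to RLIMIT_SIGPENDING.
 */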
832
833static int
834specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
835{
836 int ret = 0;
837
838 if (!irqs_disabled())
839 BUG();
840 assert_spin_locked(&t->sighand->siglock);
841
842 /* Short-circuit ignored signals. */
843 if (sig_ignored(t, sig))
844 goto out;
845
846 /* Support queueing exactly one non-rt signal, so that we
847 can get more detailed information about the cause of
848 the signal. */
849 if (LEGACY_QUEUE(&t->pending, sig))
850 goto out;
851
852 ret = send_signal(sig, info, t, &t->pending);
853 if (!ret && !sigismember(&t->blocked, sig))
854 signal_wake_up(t, sig == SIGKILL);
855out:
856 return ret;
857}
858
859/*
860 * Force a signal that the process can't ignore: if necessary
861 * we unblock the signal and change any SIG_IGN to SIG_DFL.
862 */
863
864int
865force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
866{
867 unsigned long int flags;
868 int ret;
869
870 spin_lock_irqsave(&t->sighand->siglock, flags);
 871 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
 872 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
873 }
874 if (sigismember(&t->blocked, sig)) {
 875 sigdelset(&t->blocked, sig);
 876 }
 877 recalc_sigpending_tsk(t);
878 ret = specific_send_sig_info(sig, info, t);
879 spin_unlock_irqrestore(&t->sighand->siglock, flags);
880
881 return ret;
882}
883
884void
885force_sig_specific(int sig, struct task_struct *t)
886{
 887 force_sig_info(sig, SEND_SIG_FORCED, t);
888}
889
890/*
891 * Test if P wants to take SIG. After we've checked all threads with this,
892 * it's equivalent to finding no threads not blocking SIG. Any threads not
893 * blocking SIG were ruled out because they are not running and already
894 * have pending signals. Such threads will dequeue from the shared queue
895 * as soon as they're available, so putting the signal on the shared queue
896 * will be equivalent to sending it to one such thread.
897 */
898static inline int wants_signal(int sig, struct task_struct *p)
899{
900 if (sigismember(&p->blocked, sig))
901 return 0;
902 if (p->flags & PF_EXITING)
903 return 0;
904 if (sig == SIGKILL)
905 return 1;
906 if (p->state & (TASK_STOPPED | TASK_TRACED))
907 return 0;
908 return task_curr(p) || !signal_pending(p);
909}
910
911static void
912__group_complete_signal(int sig, struct task_struct *p)
913{
914 struct task_struct *t;
915
916 /*
917 * Now find a thread we can wake up to take the signal off the queue.
918 *
919 * If the main thread wants the signal, it gets first crack.
920 * Probably the least surprising to the average bear.
921 */
 922 if (wants_signal(sig, p))
923 t = p;
924 else if (thread_group_empty(p))
925 /*
926 * There is just one thread and it does not need to be woken.
927 * It will dequeue unblocked signals before it runs again.
928 */
929 return;
930 else {
931 /*
932 * Otherwise try to find a suitable thread.
933 */
934 t = p->signal->curr_target;
935 if (t == NULL)
936 /* restart balancing at this thread */
937 t = p->signal->curr_target = p;
938 BUG_ON(t->tgid != p->tgid);
939
 940 while (!wants_signal(sig, t)) {
941 t = next_thread(t);
942 if (t == p->signal->curr_target)
943 /*
944 * No thread needs to be woken.
945 * Any eligible threads will see
946 * the signal in the queue soon.
947 */
948 return;
949 }
950 p->signal->curr_target = t;
951 }
952
953 /*
954 * Found a killable thread. If the signal will be fatal,
955 * then start taking the whole group down immediately.
956 */
957 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
958 !sigismember(&t->real_blocked, sig) &&
959 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
960 /*
961 * This signal will be fatal to the whole group.
962 */
963 if (!sig_kernel_coredump(sig)) {
964 /*
965 * Start a group exit and wake everybody up.
966 * This way we don't have other threads
967 * running and doing things after a slower
968 * thread has the fatal signal pending.
969 */
970 p->signal->flags = SIGNAL_GROUP_EXIT;
971 p->signal->group_exit_code = sig;
972 p->signal->group_stop_count = 0;
973 t = p;
974 do {
975 sigaddset(&t->pending.signal, SIGKILL);
976 signal_wake_up(t, 1);
977 t = next_thread(t);
978 } while (t != p);
979 return;
980 }
981
982 /*
983 * There will be a core dump. We make all threads other
984 * than the chosen one go into a group stop so that nothing
985 * happens until it gets scheduled, takes the signal off
986 * the shared queue, and does the core dump. This is a
987 * little more complicated than strictly necessary, but it
988 * keeps the signal state that winds up in the core dump
989 * unchanged from the death state, e.g. which thread had
990 * the core-dump signal unblocked.
991 */
992 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
993 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
994 p->signal->group_stop_count = 0;
995 p->signal->group_exit_task = t;
996 t = p;
997 do {
998 p->signal->group_stop_count++;
999 signal_wake_up(t, 0);
1000 t = next_thread(t);
1001 } while (t != p);
1002 wake_up_process(p->signal->group_exit_task);
1003 return;
1004 }
1005
1006 /*
1007 * The signal is already in the shared-pending queue.
1008 * Tell the chosen thread to wake up and dequeue it.
1009 */
1010 signal_wake_up(t, sig == SIGKILL);
1011 return;
1012}
1013
1014int
1015__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1016{
1017 int ret = 0;
1018
1019 assert_spin_locked(&p->sighand->siglock);
1020 handle_stop_signal(sig, p);
1021
1022 /* Short-circuit ignored signals. */
1023 if (sig_ignored(p, sig))
1024 return ret;
1025
1026 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1027 /* This is a non-RT signal and we already have one queued. */
1028 return ret;
1029
1030 /*
1031 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1032 * We always use the shared queue for process-wide signals,
1033 * to avoid several races.
1034 */
1035 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1036 if (unlikely(ret))
1037 return ret;
1038
1039 __group_complete_signal(sig, p);
1040 return 0;
1041}
1042
1043/*
1044 * Nuke all other threads in the group.
1045 */
1046void zap_other_threads(struct task_struct *p)
1047{
1048 struct task_struct *t;
1049
1050 p->signal->flags = SIGNAL_GROUP_EXIT;
1051 p->signal->group_stop_count = 0;
1052
1053 if (thread_group_empty(p))
1054 return;
1055
1056 for (t = next_thread(p); t != p; t = next_thread(t)) {
1057 /*
1058 * Don't bother with already dead threads
1059 */
1060 if (t->exit_state)
1061 continue;
1062
1063 /*
1064 * We don't want to notify the parent, since we are
1065 * killed as part of a thread group due to another
1066 * thread doing an execve() or similar. So set the
1067 * exit signal to -1 to allow immediate reaping of
1068 * the process. But don't detach the thread group
1069 * leader.
1070 */
1071 if (t != p->group_leader)
1072 t->exit_signal = -1;
1073
 1074 /* SIGKILL will be handled before any pending SIGSTOP */
 1075 sigaddset(&t->pending.signal, SIGKILL);
1076 signal_wake_up(t, 1);
1077 }
1078}
1079
1080/*
 1081 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 1082 */
1083struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1084{
1085 struct sighand_struct *sighand;
1086
1087 for (;;) {
1088 sighand = rcu_dereference(tsk->sighand);
1089 if (unlikely(sighand == NULL))
1090 break;
1091
1092 spin_lock_irqsave(&sighand->siglock, *flags);
1093 if (likely(sighand == tsk->sighand))
1094 break;
1095 spin_unlock_irqrestore(&sighand->siglock, *flags);
1096 }
1097
1098 return sighand;
1099}
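/*
 * The retry loop above closes the race with ->sighand being switched or
 * released under us: the sighand_struct is only freed after an RCU grace
 * period (see the "rcu based delayed sighand destroy" comment in
 * send_sigqueue() below), so dereferencing it under rcu_read_lock() is
 * safe, and the re-check after taking siglock confirms we locked the one
 * the task is still using.  Callers pair this with unlock_task_sighand(),
 * as group_send_sig_info() below does.
 */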
1100
1101int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1102{
1103 unsigned long flags;
1104 int ret;
1105
1106 ret = check_kill_permission(sig, info, p);
1107
1108 if (!ret && sig) {
1109 ret = -ESRCH;
1110 if (lock_task_sighand(p, &flags)) {
1111 ret = __group_send_sig_info(sig, info, p);
1112 unlock_task_sighand(p, &flags);
 1113 }
1114 }
1115
1116 return ret;
1117}
1118
1119/*
1120 * kill_pg_info() sends a signal to a process group: this is what the tty
1121 * control characters do (^C, ^Z etc)
1122 */
1123
1124int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1125{
1126 struct task_struct *p = NULL;
1127 int retval, success;
1128
1129 if (pgrp <= 0)
1130 return -EINVAL;
1131
1132 success = 0;
1133 retval = -ESRCH;
1134 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1135 int err = group_send_sig_info(sig, info, p);
1136 success |= !err;
1137 retval = err;
1138 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1139 return success ? 0 : retval;
1140}
1141
1142int
1143kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1144{
1145 int retval;
1146
1147 read_lock(&tasklist_lock);
1148 retval = __kill_pg_info(sig, info, pgrp);
1149 read_unlock(&tasklist_lock);
1150
1151 return retval;
1152}
1153
1154int
1155kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1156{
1157 int error;
 1158 int acquired_tasklist_lock = 0;
1159 struct task_struct *p;
1160
 1161 rcu_read_lock();
 1162 if (unlikely(sig_needs_tasklist(sig))) {
1163 read_lock(&tasklist_lock);
1164 acquired_tasklist_lock = 1;
1165 }
1166 p = find_task_by_pid(pid);
1167 error = -ESRCH;
1168 if (p)
1169 error = group_send_sig_info(sig, info, p);
1170 if (unlikely(acquired_tasklist_lock))
1171 read_unlock(&tasklist_lock);
1172 rcu_read_unlock();
1173 return error;
1174}
1175
1176/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1177int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1178 uid_t uid, uid_t euid)
1179{
1180 int ret = -EINVAL;
1181 struct task_struct *p;
1182
1183 if (!valid_signal(sig))
1184 return ret;
1185
1186 read_lock(&tasklist_lock);
1187 p = find_task_by_pid(pid);
1188 if (!p) {
1189 ret = -ESRCH;
1190 goto out_unlock;
1191 }
 1192 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1193 && (euid != p->suid) && (euid != p->uid)
1194 && (uid != p->suid) && (uid != p->uid)) {
1195 ret = -EPERM;
1196 goto out_unlock;
1197 }
1198 if (sig && p->sighand) {
1199 unsigned long flags;
1200 spin_lock_irqsave(&p->sighand->siglock, flags);
1201 ret = __group_send_sig_info(sig, info, p);
1202 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1203 }
1204out_unlock:
1205 read_unlock(&tasklist_lock);
1206 return ret;
1207}
1208EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1209
1210/*
1211 * kill_something_info() interprets pid in interesting ways just like kill(2).
1212 *
1213 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1214 * is probably wrong. Should make it like BSD or SYSV.
1215 */
1216
1217static int kill_something_info(int sig, struct siginfo *info, int pid)
1218{
1219 if (!pid) {
1220 return kill_pg_info(sig, info, process_group(current));
1221 } else if (pid == -1) {
1222 int retval = 0, count = 0;
1223 struct task_struct * p;
1224
1225 read_lock(&tasklist_lock);
1226 for_each_process(p) {
1227 if (p->pid > 1 && p->tgid != current->tgid) {
1228 int err = group_send_sig_info(sig, info, p);
1229 ++count;
1230 if (err != -EPERM)
1231 retval = err;
1232 }
1233 }
1234 read_unlock(&tasklist_lock);
1235 return count ? retval : -ESRCH;
1236 } else if (pid < 0) {
1237 return kill_pg_info(sig, info, -pid);
1238 } else {
1239 return kill_proc_info(sig, info, pid);
1240 }
1241}
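/*
 * For reference, the pid argument above follows kill(2):
 *	pid > 0		signal the process with that pid,
 *	pid == 0	signal the caller's process group,
 *	pid == -1	signal everything except init and the caller's group,
 *	pid < -1	signal the process group -pid.
 */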
1242
1243/*
1244 * These are for backward compatibility with the rest of the kernel source.
1245 */
1246
1247/*
1248 * These two are the most common entry points. They send a signal
1249 * just to the specific thread.
1250 */
1251int
1252send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1253{
1254 int ret;
1255 unsigned long flags;
1256
1257 /*
1258 * Make sure legacy kernel users don't send in bad values
1259 * (normal paths check this in check_kill_permission).
1260 */
 1261 if (!valid_signal(sig))
1262 return -EINVAL;
1263
1264 /*
1265 * We need the tasklist lock even for the specific
1266 * thread case (when we don't need to follow the group
1267 * lists) in order to avoid races with "p->sighand"
1268 * going away or changing from under us.
1269 */
1270 read_lock(&tasklist_lock);
1271 spin_lock_irqsave(&p->sighand->siglock, flags);
1272 ret = specific_send_sig_info(sig, info, p);
1273 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1274 read_unlock(&tasklist_lock);
1275 return ret;
1276}
1277
1278#define __si_special(priv) \
1279 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1280
1281int
1282send_sig(int sig, struct task_struct *p, int priv)
1283{
 1284 return send_sig_info(sig, __si_special(priv), p);
1285}
1286
1287/*
1288 * This is the entry point for "process-wide" signals.
1289 * They will go to an appropriate thread in the thread group.
1290 */
1291int
1292send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1293{
1294 int ret;
1295 read_lock(&tasklist_lock);
1296 ret = group_send_sig_info(sig, info, p);
1297 read_unlock(&tasklist_lock);
1298 return ret;
1299}
1300
1301void
1302force_sig(int sig, struct task_struct *p)
1303{
 1304 force_sig_info(sig, SEND_SIG_PRIV, p);
1305}
1306
1307/*
1308 * When things go south during signal handling, we
1309 * will force a SIGSEGV. And if the signal that caused
1310 * the problem was already a SIGSEGV, we'll want to
1311 * make sure we don't even try to deliver the signal..
1312 */
1313int
1314force_sigsegv(int sig, struct task_struct *p)
1315{
1316 if (sig == SIGSEGV) {
1317 unsigned long flags;
1318 spin_lock_irqsave(&p->sighand->siglock, flags);
1319 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1320 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1321 }
1322 force_sig(SIGSEGV, p);
1323 return 0;
1324}
1325
1326int
1327kill_pg(pid_t pgrp, int sig, int priv)
1328{
 1329 return kill_pg_info(sig, __si_special(priv), pgrp);
1330}
1331
1332int
1333kill_proc(pid_t pid, int sig, int priv)
1334{
 1335 return kill_proc_info(sig, __si_special(priv), pid);
1336}
1337
1338/*
1339 * These functions support sending signals using preallocated sigqueue
1340 * structures. This is needed "because realtime applications cannot
1341 * afford to lose notifications of asynchronous events, like timer
1342 * expirations or I/O completions". In the case of Posix Timers
1343 * we allocate the sigqueue structure from the timer_create. If this
1344 * allocation fails we are able to report the failure to the application
1345 * with an EAGAIN error.
1346 */
1347
1348struct sigqueue *sigqueue_alloc(void)
1349{
1350 struct sigqueue *q;
1351
1352 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1353 q->flags |= SIGQUEUE_PREALLOC;
1354 return(q);
1355}
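/*
 * Rough lifecycle as the POSIX timer code is expected to use it, based on
 * the comment above (a sketch, not a verbatim call sequence):
 *
 *	q = sigqueue_alloc();		// at timer_create(); may fail -> EAGAIN
 *	...
 *	send_sigqueue(sig, q, task);	// at each expiry; never allocates
 *	...
 *	sigqueue_free(q);		// at timer deletion
 */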
1356
1357void sigqueue_free(struct sigqueue *q)
1358{
1359 unsigned long flags;
1360 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1361 /*
1362 * If the signal is still pending remove it from the
1363 * pending queue.
1364 */
1365 if (unlikely(!list_empty(&q->list))) {
1366 spinlock_t *lock = &current->sighand->siglock;
1367 read_lock(&tasklist_lock);
1368 spin_lock_irqsave(lock, flags);
1369 if (!list_empty(&q->list))
1370 list_del_init(&q->list);
 1371 spin_unlock_irqrestore(lock, flags);
1372 read_unlock(&tasklist_lock);
1373 }
1374 q->flags &= ~SIGQUEUE_PREALLOC;
1375 __sigqueue_free(q);
1376}
1377
1378int
1379send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1380{
1381 unsigned long flags;
1382 int ret = 0;
 1383 struct sighand_struct *sh;
 1384
 1385 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1386
1387 /*
1388 * The rcu based delayed sighand destroy makes it possible to
1389 * run this without tasklist lock held. The task struct itself
1390 * cannot go away as create_timer did get_task_struct().
1391 *
1392 * We return -1, when the task is marked exiting, so
1393 * posix_timer_event can redirect it to the group leader
1394 */
1395 rcu_read_lock();
1396
1397 if (unlikely(p->flags & PF_EXITING)) {
1398 ret = -1;
1399 goto out_err;
1400 }
1401
1402retry:
1403 sh = rcu_dereference(p->sighand);
1404
1405 spin_lock_irqsave(&sh->siglock, flags);
1406 if (p->sighand != sh) {
1407 /* We raced with exec() in a multithreaded process... */
1408 spin_unlock_irqrestore(&sh->siglock, flags);
1409 goto retry;
1410 }
1411
1412 /*
1413 * We do the check here again to handle the following scenario:
1414 *
1415 * CPU 0 CPU 1
1416 * send_sigqueue
1417 * check PF_EXITING
1418 * interrupt exit code running
1419 * __exit_signal
1420 * lock sighand->siglock
1421 * unlock sighand->siglock
1422 * lock sh->siglock
1423 * add(tsk->pending) flush_sigqueue(tsk->pending)
1424 *
1425 */
1426
1427 if (unlikely(p->flags & PF_EXITING)) {
1428 ret = -1;
1429 goto out;
1430 }
 1431
1432 if (unlikely(!list_empty(&q->list))) {
1433 /*
 1434 * If an SI_TIMER entry is already queued, just increment
1435 * the overrun count.
1436 */
1437 if (q->info.si_code != SI_TIMER)
1438 BUG();
1439 q->info.si_overrun++;
1440 goto out;
 1441 }
1442 /* Short-circuit ignored signals. */
1443 if (sig_ignored(p, sig)) {
1444 ret = 1;
1445 goto out;
1446 }
1447
1448 list_add_tail(&q->list, &p->pending.list);
1449 sigaddset(&p->pending.signal, sig);
1450 if (!sigismember(&p->blocked, sig))
1451 signal_wake_up(p, sig == SIGKILL);
1452
1453out:
 1454 spin_unlock_irqrestore(&sh->siglock, flags);
 1455out_err:
 1456 rcu_read_unlock();
1457
1458 return ret;
1459}
1460
1461int
1462send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1463{
1464 unsigned long flags;
1465 int ret = 0;
1466
1467 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 1468
 1469 read_lock(&tasklist_lock);
 1470 /* Since it_lock is held, p->sighand cannot be NULL. */
1471 spin_lock_irqsave(&p->sighand->siglock, flags);
1472 handle_stop_signal(sig, p);
1473
1474 /* Short-circuit ignored signals. */
1475 if (sig_ignored(p, sig)) {
1476 ret = 1;
1477 goto out;
1478 }
1479
1480 if (unlikely(!list_empty(&q->list))) {
1481 /*
 1482 * If an SI_TIMER entry is already queued, just increment
1483 * the overrun count. Other uses should not try to
1484 * send the signal multiple times.
1485 */
1486 if (q->info.si_code != SI_TIMER)
1487 BUG();
1488 q->info.si_overrun++;
1489 goto out;
1490 }
1491
1492 /*
1493 * Put this signal on the shared-pending queue.
1494 * We always use the shared queue for process-wide signals,
1495 * to avoid several races.
1496 */
1497 list_add_tail(&q->list, &p->signal->shared_pending.list);
1498 sigaddset(&p->signal->shared_pending.signal, sig);
1499
1500 __group_complete_signal(sig, p);
1501out:
1502 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1503 read_unlock(&tasklist_lock);
 1504 return ret;
1505}
1506
1507/*
1508 * Wake up any threads in the parent blocked in wait* syscalls.
1509 */
1510static inline void __wake_up_parent(struct task_struct *p,
1511 struct task_struct *parent)
1512{
1513 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1514}
1515
1516/*
1517 * Let a parent know about the death of a child.
1518 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1519 */
1520
1521void do_notify_parent(struct task_struct *tsk, int sig)
1522{
1523 struct siginfo info;
1524 unsigned long flags;
1525 struct sighand_struct *psig;
1526
1527 BUG_ON(sig == -1);
1528
1529 /* do_notify_parent_cldstop should have been called instead. */
1530 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1531
1532 BUG_ON(!tsk->ptrace &&
1533 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1534
1535 info.si_signo = sig;
1536 info.si_errno = 0;
1537 info.si_pid = tsk->pid;
1538 info.si_uid = tsk->uid;
1539
1540 /* FIXME: find out whether or not this is supposed to be c*time. */
1541 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1542 tsk->signal->utime));
1543 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1544 tsk->signal->stime));
1545
1546 info.si_status = tsk->exit_code & 0x7f;
1547 if (tsk->exit_code & 0x80)
1548 info.si_code = CLD_DUMPED;
1549 else if (tsk->exit_code & 0x7f)
1550 info.si_code = CLD_KILLED;
1551 else {
1552 info.si_code = CLD_EXITED;
1553 info.si_status = tsk->exit_code >> 8;
1554 }
1555
1556 psig = tsk->parent->sighand;
1557 spin_lock_irqsave(&psig->siglock, flags);
 1558 if (!tsk->ptrace && sig == SIGCHLD &&
1559 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1560 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1561 /*
1562 * We are exiting and our parent doesn't care. POSIX.1
1563 * defines special semantics for setting SIGCHLD to SIG_IGN
1564 * or setting the SA_NOCLDWAIT flag: we should be reaped
1565 * automatically and not left for our parent's wait4 call.
1566 * Rather than having the parent do it as a magic kind of
1567 * signal handler, we just set this to tell do_exit that we
1568 * can be cleaned up without becoming a zombie. Note that
1569 * we still call __wake_up_parent in this case, because a
1570 * blocked sys_wait4 might now return -ECHILD.
1571 *
1572 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1573 * is implementation-defined: we do (if you don't want
1574 * it, just use SIG_IGN instead).
1575 */
1576 tsk->exit_signal = -1;
1577 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1578 sig = 0;
1579 }
 1580 if (valid_signal(sig) && sig > 0)
1581 __group_send_sig_info(sig, &info, tsk->parent);
1582 __wake_up_parent(tsk, tsk->parent);
1583 spin_unlock_irqrestore(&psig->siglock, flags);
1584}
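/*
 * The exit_code decoding above mirrors the classic wait status layout:
 * bit 7 set means a core dump was written (CLD_DUMPED), a nonzero low
 * 7 bits mean death by signal (CLD_KILLED), and otherwise the normal
 * exit status lives in bits 8-15 (CLD_EXITED).
 */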
1585
 1586static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1587{
1588 struct siginfo info;
1589 unsigned long flags;
 1590 struct task_struct *parent;
1591 struct sighand_struct *sighand;
1592
1593 if (to_self)
1594 parent = tsk->parent;
1595 else {
1596 tsk = tsk->group_leader;
1597 parent = tsk->real_parent;
1598 }
1599
1600 info.si_signo = SIGCHLD;
1601 info.si_errno = 0;
1602 info.si_pid = tsk->pid;
1603 info.si_uid = tsk->uid;
1604
1605 /* FIXME: find out whether or not this is supposed to be c*time. */
1606 info.si_utime = cputime_to_jiffies(tsk->utime);
1607 info.si_stime = cputime_to_jiffies(tsk->stime);
1608
1609 info.si_code = why;
1610 switch (why) {
1611 case CLD_CONTINUED:
1612 info.si_status = SIGCONT;
1613 break;
1614 case CLD_STOPPED:
1615 info.si_status = tsk->signal->group_exit_code & 0x7f;
1616 break;
1617 case CLD_TRAPPED:
1618 info.si_status = tsk->exit_code & 0x7f;
1619 break;
1620 default:
1621 BUG();
1622 }
1623
1624 sighand = parent->sighand;
1625 spin_lock_irqsave(&sighand->siglock, flags);
1626 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1627 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1628 __group_send_sig_info(SIGCHLD, &info, parent);
1629 /*
1630 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1631 */
1632 __wake_up_parent(tsk, parent);
1633 spin_unlock_irqrestore(&sighand->siglock, flags);
1634}
1635
1636/*
1637 * This must be called with current->sighand->siglock held.
1638 *
1639 * This should be the path for all ptrace stops.
1640 * We always set current->last_siginfo while stopped here.
1641 * That makes it a way to test a stopped process for
1642 * being ptrace-stopped vs being job-control-stopped.
1643 *
1644 * If we actually decide not to stop at all because the tracer is gone,
1645 * we leave nostop_code in current->exit_code.
1646 */
1647static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1648{
1649 /*
1650 * If there is a group stop in progress,
1651 * we must participate in the bookkeeping.
1652 */
1653 if (current->signal->group_stop_count > 0)
1654 --current->signal->group_stop_count;
1655
1656 current->last_siginfo = info;
1657 current->exit_code = exit_code;
1658
1659 /* Let the debugger run. */
1660 set_current_state(TASK_TRACED);
1661 spin_unlock_irq(&current->sighand->siglock);
1662 read_lock(&tasklist_lock);
1663 if (likely(current->ptrace & PT_PTRACED) &&
1664 likely(current->parent != current->real_parent ||
1665 !(current->ptrace & PT_ATTACHED)) &&
1666 (likely(current->parent->signal != current->signal) ||
1667 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
 1668 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1669 read_unlock(&tasklist_lock);
1670 schedule();
1671 } else {
1672 /*
1673 * By the time we got the lock, our tracer went away.
1674 * Don't stop here.
1675 */
1676 read_unlock(&tasklist_lock);
1677 set_current_state(TASK_RUNNING);
1678 current->exit_code = nostop_code;
1679 }
1680
1681 /*
1682 * We are back. Now reacquire the siglock before touching
1683 * last_siginfo, so that we are sure to have synchronized with
1684 * any signal-sending on another CPU that wants to examine it.
1685 */
1686 spin_lock_irq(&current->sighand->siglock);
1687 current->last_siginfo = NULL;
1688
1689 /*
1690 * Queued signals ignored us while we were stopped for tracing.
1691 * So check for any that we should take before resuming user mode.
1692 */
1693 recalc_sigpending();
1694}
1695
1696void ptrace_notify(int exit_code)
1697{
1698 siginfo_t info;
1699
1700 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1701
1702 memset(&info, 0, sizeof info);
1703 info.si_signo = SIGTRAP;
1704 info.si_code = exit_code;
1705 info.si_pid = current->pid;
1706 info.si_uid = current->uid;
1707
1708 /* Let the debugger run. */
1709 spin_lock_irq(&current->sighand->siglock);
1710 ptrace_stop(exit_code, 0, &info);
1711 spin_unlock_irq(&current->sighand->siglock);
1712}
1713
1714static void
1715finish_stop(int stop_count)
1716{
1717 int to_self;
1718
1719 /*
1720 * If there are no other threads in the group, or if there is
1721 * a group stop in progress and we are the last to stop,
1722 * report to the parent. When ptraced, every thread reports itself.
1723 */
1724 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1725 to_self = 1;
1726 else if (stop_count == 0)
1727 to_self = 0;
1728 else
1729 goto out;
 1730
1731 read_lock(&tasklist_lock);
1732 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1733 read_unlock(&tasklist_lock);
1734
1735out:
1736 schedule();
1737 /*
1738 * Now we don't run again until continued.
1739 */
1740 current->exit_code = 0;
1741}
1742
1743/*
1744 * This performs the stopping for SIGSTOP and other stop signals.
1745 * We have to stop all threads in the thread group.
1746 * Returns nonzero if we've actually stopped and released the siglock.
1747 * Returns zero if we didn't stop and still hold the siglock.
1748 */
1749static int
1750do_signal_stop(int signr)
1751{
1752 struct signal_struct *sig = current->signal;
1753 struct sighand_struct *sighand = current->sighand;
1754 int stop_count = -1;
1755
1756 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1757 return 0;
1758
1759 if (sig->group_stop_count > 0) {
1760 /*
1761 * There is a group stop in progress. We don't need to
1762 * start another one.
1763 */
1764 signr = sig->group_exit_code;
1765 stop_count = --sig->group_stop_count;
1766 current->exit_code = signr;
1767 set_current_state(TASK_STOPPED);
1768 if (stop_count == 0)
1769 sig->flags = SIGNAL_STOP_STOPPED;
1770 spin_unlock_irq(&sighand->siglock);
1771 }
1772 else if (thread_group_empty(current)) {
1773 /*
1774 * Lock must be held through transition to stopped state.
1775 */
1776 current->exit_code = current->signal->group_exit_code = signr;
1777 set_current_state(TASK_STOPPED);
1778 sig->flags = SIGNAL_STOP_STOPPED;
1779 spin_unlock_irq(&sighand->siglock);
1780 }
1781 else {
1782 /*
1783 * There is no group stop already in progress.
1784 * We must initiate one now, but that requires
1785 * dropping siglock to get both the tasklist lock
1786 * and siglock again in the proper order. Note that
1787 * this allows an intervening SIGCONT to be posted.
1788 * We need to check for that and bail out if necessary.
1789 */
1790 struct task_struct *t;
1791
1792 spin_unlock_irq(&sighand->siglock);
1793
1794 /* signals can be posted during this window */
1795
1796 read_lock(&tasklist_lock);
1797 spin_lock_irq(&sighand->siglock);
1798
1799 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1800 /*
1801 * Another stop or continue happened while we
1802 * didn't have the lock. We can just swallow this
1803 * signal now. If we raced with a SIGCONT, that
1804 * should have just cleared it now. If we raced
1805 * with another processor delivering a stop signal,
1806 * then the SIGCONT that wakes us up should clear it.
1807 */
1808 read_unlock(&tasklist_lock);
1809 return 0;
1810 }
1811
1812 if (sig->group_stop_count == 0) {
1813 sig->group_exit_code = signr;
1814 stop_count = 0;
1815 for (t = next_thread(current); t != current;
1816 t = next_thread(t))
1817 /*
1818 * Setting state to TASK_STOPPED for a group
1819 * stop is always done with the siglock held,
1820 * so this check has no races.
1821 */
1822 if (!t->exit_state &&
1823 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1824 stop_count++;
1825 signal_wake_up(t, 0);
1826 }
1827 sig->group_stop_count = stop_count;
1828 }
1829 else {
1830 /* A race with another thread while unlocked. */
1831 signr = sig->group_exit_code;
1832 stop_count = --sig->group_stop_count;
1833 }
1834
1835 current->exit_code = signr;
1836 set_current_state(TASK_STOPPED);
1837 if (stop_count == 0)
1838 sig->flags = SIGNAL_STOP_STOPPED;
1839
1840 spin_unlock_irq(&sighand->siglock);
1841 read_unlock(&tasklist_lock);
1842 }
1843
1844 finish_stop(stop_count);
1845 return 1;
1846}
1847
1848/*
1849 * Do appropriate magic when group_stop_count > 0.
1850 * We return nonzero if we stopped, after releasing the siglock.
1851 * We return zero if we still hold the siglock and should look
1852 * for another signal without checking group_stop_count again.
1853 */
 1854static int handle_group_stop(void)
1855{
1856 int stop_count;
1857
1858 if (current->signal->group_exit_task == current) {
1859 /*
1860 * Group stop is so we can do a core dump,
1861 * We are the initiating thread, so get on with it.
1862 */
1863 current->signal->group_exit_task = NULL;
1864 return 0;
1865 }
1866
1867 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1868 /*
1869 * Group stop is so another thread can do a core dump,
1870 * or else we are racing against a death signal.
1871 * Just punt the stop so we can get the next signal.
1872 */
1873 return 0;
1874
1875 /*
1876 * There is a group stop in progress. We stop
1877 * without any associated signal being in our queue.
1878 */
1879 stop_count = --current->signal->group_stop_count;
1880 if (stop_count == 0)
1881 current->signal->flags = SIGNAL_STOP_STOPPED;
1882 current->exit_code = current->signal->group_exit_code;
1883 set_current_state(TASK_STOPPED);
1884 spin_unlock_irq(&current->sighand->siglock);
1885 finish_stop(stop_count);
1886 return 1;
1887}
1888
1889int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1890 struct pt_regs *regs, void *cookie)
1891{
1892 sigset_t *mask = &current->blocked;
1893 int signr = 0;
1894
fc558a74
RW
1895 try_to_freeze();
1896
1da177e4
LT
1897relock:
1898 spin_lock_irq(&current->sighand->siglock);
1899 for (;;) {
1900 struct k_sigaction *ka;
1901
1902 if (unlikely(current->signal->group_stop_count > 0) &&
1903 handle_group_stop())
1904 goto relock;
1905
1906 signr = dequeue_signal(current, mask, info);
1907
1908 if (!signr)
1909 break; /* will return 0 */
1910
1911 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1912 ptrace_signal_deliver(regs, cookie);
1913
1914 /* Let the debugger run. */
1915 ptrace_stop(signr, signr, info);
1916
30e0fca6 1917 /* We're back. Did the debugger cancel the sig or group_exit? */
1da177e4 1918 signr = current->exit_code;
30e0fca6 1919 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1da177e4
LT
1920 continue;
1921
1922 current->exit_code = 0;
1923
1924 /* Update the siginfo structure if the signal has
1925 changed. If the debugger wanted something
1926 specific in the siginfo structure then it should
1927 have updated *info via PTRACE_SETSIGINFO. */
1928 if (signr != info->si_signo) {
1929 info->si_signo = signr;
1930 info->si_errno = 0;
1931 info->si_code = SI_USER;
1932 info->si_pid = current->parent->pid;
1933 info->si_uid = current->parent->uid;
1934 }
1935
1936 /* If the (new) signal is now blocked, requeue it. */
1937 if (sigismember(&current->blocked, signr)) {
1938 specific_send_sig_info(signr, info, current);
1939 continue;
1940 }
1941 }
1942
1943 ka = &current->sighand->action[signr-1];
1944 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1945 continue;
1946 if (ka->sa.sa_handler != SIG_DFL) {
1947 /* Run the handler. */
1948 *return_ka = *ka;
1949
1950 if (ka->sa.sa_flags & SA_ONESHOT)
1951 ka->sa.sa_handler = SIG_DFL;
1952
1953 break; /* will return non-zero "signr" value */
1954 }
1955
1956 /*
1957 * Now we are doing the default action for this signal.
1958 */
1959 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1960 continue;
1961
1962 /* Init gets no signals it doesn't want. */
fef23e7f 1963 if (current == child_reaper)
1da177e4
LT
1964 continue;
1965
1966 if (sig_kernel_stop(signr)) {
1967 /*
1968 * The default action is to stop all threads in
1969 * the thread group. The job control signals
1970 * do nothing in an orphaned pgrp, but SIGSTOP
1971 * always works. Note that siglock needs to be
1972 * dropped during the call to is_orphaned_pgrp()
1973 * because of lock ordering with tasklist_lock.
1974 * This allows an intervening SIGCONT to be posted.
1975 * We need to check for that and bail out if necessary.
1976 */
1977 if (signr != SIGSTOP) {
1978 spin_unlock_irq(&current->sighand->siglock);
1979
1980 /* signals can be posted during this window */
1981
1982 if (is_orphaned_pgrp(process_group(current)))
1983 goto relock;
1984
1985 spin_lock_irq(&current->sighand->siglock);
1986 }
1987
1988 if (likely(do_signal_stop(signr))) {
1989 /* It released the siglock. */
1990 goto relock;
1991 }
1992
1993 /*
1994 * We didn't actually stop, due to a race
1995 * with SIGCONT or something like that.
1996 */
1997 continue;
1998 }
1999
2000 spin_unlock_irq(&current->sighand->siglock);
2001
2002 /*
2003 * Anything else is fatal, maybe with a core dump.
2004 */
2005 current->flags |= PF_SIGNALED;
2006 if (sig_kernel_coredump(signr)) {
2007 /*
2008 * If it was able to dump core, this kills all
2009 * other threads in the group and synchronizes with
2010 * their demise. If we lost the race with another
2011 * thread getting here, it set group_exit_code
2012 * first and our do_group_exit call below will use
2013 * that value and ignore the one we pass it.
2014 */
2015 do_coredump((long)signr, signr, regs);
2016 }
2017
2018 /*
2019 * Death signals, no core dump.
2020 */
2021 do_group_exit(signr);
2022 /* NOTREACHED */
2023 }
2024 spin_unlock_irq(&current->sighand->siglock);
2025 return signr;
2026}
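
/*
 * Usage sketch (illustrative only): get_signal_to_deliver() is meant to
 * be driven by each architecture's signal-delivery path.  Roughly, and
 * assuming arch-local names such as handle_signal(), regs and oldset
 * that do not appear in this file:
 *
 *	siginfo_t info;
 *	struct k_sigaction ka;
 *	int signr;
 *
 *	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0) {
 *		// arch-specific: set up the user-mode signal frame
 *		handle_signal(signr, &info, &ka, oldset, regs);
 *	}
 */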
2027
1da177e4
LT
2028EXPORT_SYMBOL(recalc_sigpending);
2029EXPORT_SYMBOL_GPL(dequeue_signal);
2030EXPORT_SYMBOL(flush_signals);
2031EXPORT_SYMBOL(force_sig);
2032EXPORT_SYMBOL(kill_pg);
2033EXPORT_SYMBOL(kill_proc);
2034EXPORT_SYMBOL(ptrace_notify);
2035EXPORT_SYMBOL(send_sig);
2036EXPORT_SYMBOL(send_sig_info);
2037EXPORT_SYMBOL(sigprocmask);
2038EXPORT_SYMBOL(block_all_signals);
2039EXPORT_SYMBOL(unblock_all_signals);
2040
2041
2042/*
2043 * System call entry points.
2044 */
2045
2046asmlinkage long sys_restart_syscall(void)
2047{
2048 struct restart_block *restart = &current_thread_info()->restart_block;
2049 return restart->fn(restart);
2050}
2051
2052long do_no_restart_syscall(struct restart_block *param)
2053{
2054 return -EINTR;
2055}
2056
2057/*
2058 * We don't need to get the kernel lock - this is all local to this
2059 * particular thread (and that's good, because this is _heavily_
2060 * used by various programs)
2061 */
2062
2063/*
2064 * This is also useful for kernel threads that want to temporarily
2065 * (or permanently) block certain signals.
2066 *
2067 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2068 * interface happily blocks "unblockable" signals like SIGKILL
2069 * and friends.
2070 */
2071int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2072{
2073 int error;
1da177e4
LT
2074
2075 spin_lock_irq(&current->sighand->siglock);
a26fd335
ON
2076 if (oldset)
2077 *oldset = current->blocked;
2078
1da177e4
LT
2079 error = 0;
2080 switch (how) {
2081 case SIG_BLOCK:
2082 sigorsets(&current->blocked, &current->blocked, set);
2083 break;
2084 case SIG_UNBLOCK:
2085 signandsets(&current->blocked, &current->blocked, set);
2086 break;
2087 case SIG_SETMASK:
2088 current->blocked = *set;
2089 break;
2090 default:
2091 error = -EINVAL;
2092 }
2093 recalc_sigpending();
2094 spin_unlock_irq(&current->sighand->siglock);
a26fd335 2095
1da177e4
LT
2096 return error;
2097}
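
/*
 * Sketch (illustrative only): as the comment above notes, kernel threads
 * may use this interface to shut out signals entirely; unlike the
 * user-mode call it will happily block SIGKILL as well.  Something like:
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */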
2098
2099asmlinkage long
2100sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2101{
2102 int error = -EINVAL;
2103 sigset_t old_set, new_set;
2104
2105 /* XXX: Don't preclude handling different sized sigset_t's. */
2106 if (sigsetsize != sizeof(sigset_t))
2107 goto out;
2108
2109 if (set) {
2110 error = -EFAULT;
2111 if (copy_from_user(&new_set, set, sizeof(*set)))
2112 goto out;
2113 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2114
2115 error = sigprocmask(how, &new_set, &old_set);
2116 if (error)
2117 goto out;
2118 if (oset)
2119 goto set_old;
2120 } else if (oset) {
2121 spin_lock_irq(&current->sighand->siglock);
2122 old_set = current->blocked;
2123 spin_unlock_irq(&current->sighand->siglock);
2124
2125 set_old:
2126 error = -EFAULT;
2127 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2128 goto out;
2129 }
2130 error = 0;
2131out:
2132 return error;
2133}
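
/*
 * Userspace view (illustrative sketch, not part of this file): the libc
 * sigprocmask(3) wrapper normally ends up in sys_rt_sigprocmask() above.
 * Blocking SIGINT, for example:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, old;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGINT);
 *		if (sigprocmask(SIG_BLOCK, &set, &old) != 0)
 *			perror("sigprocmask");
 *		// attempts to block SIGKILL/SIGSTOP are silently dropped,
 *		// as the sigdelsetmask() above shows
 *		return 0;
 *	}
 */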
2134
2135long do_sigpending(void __user *set, unsigned long sigsetsize)
2136{
2137 long error = -EINVAL;
2138 sigset_t pending;
2139
2140 if (sigsetsize > sizeof(sigset_t))
2141 goto out;
2142
2143 spin_lock_irq(&current->sighand->siglock);
2144 sigorsets(&pending, &current->pending.signal,
2145 &current->signal->shared_pending.signal);
2146 spin_unlock_irq(&current->sighand->siglock);
2147
2148 /* Outside the lock because only this thread touches it. */
2149 sigandsets(&pending, &current->blocked, &pending);
2150
2151 error = -EFAULT;
2152 if (!copy_to_user(set, &pending, sigsetsize))
2153 error = 0;
2154
2155out:
2156 return error;
2157}
2158
2159asmlinkage long
2160sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2161{
2162 return do_sigpending(set, sigsetsize);
2163}
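
/*
 * Userspace view (illustrative sketch): sigpending(2) reports the union
 * of per-thread and shared pending signals computed above, masked by the
 * caller's blocked set.  For example:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t block, pending;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);
 *
 *		raise(SIGUSR1);			// stays pending while blocked
 *
 *		sigpending(&pending);
 *		if (sigismember(&pending, SIGUSR1))
 *			printf("SIGUSR1 is pending\n");
 *		return 0;
 *	}
 */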
2164
2165#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2166
2167int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2168{
2169 int err;
2170
2171 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2172 return -EFAULT;
2173 if (from->si_code < 0)
2174 return __copy_to_user(to, from, sizeof(siginfo_t))
2175 ? -EFAULT : 0;
2176 /*
2177 * If you change the siginfo_t structure, please be sure
2178 * this code is fixed accordingly.
2179 * It must never copy any padding contained in the structure
2180 * (to avoid leaking kernel data), but must copy the generic
2181 * three ints plus the relevant union member.
2182 */
2183 err = __put_user(from->si_signo, &to->si_signo);
2184 err |= __put_user(from->si_errno, &to->si_errno);
2185 err |= __put_user((short)from->si_code, &to->si_code);
2186 switch (from->si_code & __SI_MASK) {
2187 case __SI_KILL:
2188 err |= __put_user(from->si_pid, &to->si_pid);
2189 err |= __put_user(from->si_uid, &to->si_uid);
2190 break;
2191 case __SI_TIMER:
2192 err |= __put_user(from->si_tid, &to->si_tid);
2193 err |= __put_user(from->si_overrun, &to->si_overrun);
2194 err |= __put_user(from->si_ptr, &to->si_ptr);
2195 break;
2196 case __SI_POLL:
2197 err |= __put_user(from->si_band, &to->si_band);
2198 err |= __put_user(from->si_fd, &to->si_fd);
2199 break;
2200 case __SI_FAULT:
2201 err |= __put_user(from->si_addr, &to->si_addr);
2202#ifdef __ARCH_SI_TRAPNO
2203 err |= __put_user(from->si_trapno, &to->si_trapno);
2204#endif
2205 break;
2206 case __SI_CHLD:
2207 err |= __put_user(from->si_pid, &to->si_pid);
2208 err |= __put_user(from->si_uid, &to->si_uid);
2209 err |= __put_user(from->si_status, &to->si_status);
2210 err |= __put_user(from->si_utime, &to->si_utime);
2211 err |= __put_user(from->si_stime, &to->si_stime);
2212 break;
2213 case __SI_RT: /* This is not generated by the kernel as of now. */
2214 case __SI_MESGQ: /* But this is */
2215 err |= __put_user(from->si_pid, &to->si_pid);
2216 err |= __put_user(from->si_uid, &to->si_uid);
2217 err |= __put_user(from->si_ptr, &to->si_ptr);
2218 break;
2219 default: /* this is just in case for now ... */
2220 err |= __put_user(from->si_pid, &to->si_pid);
2221 err |= __put_user(from->si_uid, &to->si_uid);
2222 break;
2223 }
2224 return err;
2225}
2226
2227#endif
2228
2229asmlinkage long
2230sys_rt_sigtimedwait(const sigset_t __user *uthese,
2231 siginfo_t __user *uinfo,
2232 const struct timespec __user *uts,
2233 size_t sigsetsize)
2234{
2235 int ret, sig;
2236 sigset_t these;
2237 struct timespec ts;
2238 siginfo_t info;
2239 long timeout = 0;
2240
2241 /* XXX: Don't preclude handling different sized sigset_t's. */
2242 if (sigsetsize != sizeof(sigset_t))
2243 return -EINVAL;
2244
2245 if (copy_from_user(&these, uthese, sizeof(these)))
2246 return -EFAULT;
2247
2248 /*
2249 * Invert the set of allowed signals to get those we
2250 * want to block.
2251 */
2252 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2253 signotset(&these);
2254
2255 if (uts) {
2256 if (copy_from_user(&ts, uts, sizeof(ts)))
2257 return -EFAULT;
2258 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2259 || ts.tv_sec < 0)
2260 return -EINVAL;
2261 }
2262
2263 spin_lock_irq(&current->sighand->siglock);
2264 sig = dequeue_signal(current, &these, &info);
2265 if (!sig) {
2266 timeout = MAX_SCHEDULE_TIMEOUT;
2267 if (uts)
2268 timeout = (timespec_to_jiffies(&ts)
2269 + (ts.tv_sec || ts.tv_nsec));
2270
2271 if (timeout) {
2272 /* None ready -- temporarily unblock those we're
2273 * interested in while we are sleeping, so that we'll
2274 * be awakened when they arrive. */
2275 current->real_blocked = current->blocked;
2276 sigandsets(&current->blocked, &current->blocked, &these);
2277 recalc_sigpending();
2278 spin_unlock_irq(&current->sighand->siglock);
2279
75bcc8c5 2280 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2281
1da177e4
LT
2282 spin_lock_irq(&current->sighand->siglock);
2283 sig = dequeue_signal(current, &these, &info);
2284 current->blocked = current->real_blocked;
2285 siginitset(&current->real_blocked, 0);
2286 recalc_sigpending();
2287 }
2288 }
2289 spin_unlock_irq(&current->sighand->siglock);
2290
2291 if (sig) {
2292 ret = sig;
2293 if (uinfo) {
2294 if (copy_siginfo_to_user(uinfo, &info))
2295 ret = -EFAULT;
2296 }
2297 } else {
2298 ret = -EAGAIN;
2299 if (timeout)
2300 ret = -EINTR;
2301 }
2302
2303 return ret;
2304}
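
/*
 * Userspace view (illustrative sketch): sigtimedwait(2) maps onto
 * sys_rt_sigtimedwait() above.  Waiting up to five seconds for a
 * blocked SIGTERM, for example:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec timeout = { 5, 0 };
 *		int sig;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *
 *		sig = sigtimedwait(&set, &info, &timeout);
 *		if (sig == SIGTERM)
 *			printf("got SIGTERM from pid %d\n", info.si_pid);
 *		else
 *			perror("sigtimedwait");		// EAGAIN on timeout
 *		return 0;
 *	}
 */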
2305
2306asmlinkage long
2307sys_kill(int pid, int sig)
2308{
2309 struct siginfo info;
2310
2311 info.si_signo = sig;
2312 info.si_errno = 0;
2313 info.si_code = SI_USER;
2314 info.si_pid = current->tgid;
2315 info.si_uid = current->uid;
2316
2317 return kill_something_info(sig, &info, pid);
2318}
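
/*
 * Userspace view (illustrative sketch): kill(2) lands in sys_kill()
 * above; a signal number of 0 performs only the permission and
 * existence checks, delivering nothing:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t target = getpid();	// probe ourselves for the demo
 *
 *		if (kill(target, 0) == 0)
 *			printf("pid %d exists and may be signalled\n",
 *			       (int)target);
 *		return 0;
 *	}
 */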
2319
6dd69f10 2320static int do_tkill(int tgid, int pid, int sig)
1da177e4 2321{
1da177e4 2322 int error;
6dd69f10 2323 struct siginfo info;
1da177e4
LT
2324 struct task_struct *p;
2325
6dd69f10 2326 error = -ESRCH;
1da177e4
LT
2327 info.si_signo = sig;
2328 info.si_errno = 0;
2329 info.si_code = SI_TKILL;
2330 info.si_pid = current->tgid;
2331 info.si_uid = current->uid;
2332
2333 read_lock(&tasklist_lock);
2334 p = find_task_by_pid(pid);
6dd69f10 2335 if (p && (tgid <= 0 || p->tgid == tgid)) {
1da177e4
LT
2336 error = check_kill_permission(sig, &info, p);
2337 /*
2338 * The null signal is a permissions and process existence
2339 * probe. No signal is actually delivered.
2340 */
2341 if (!error && sig && p->sighand) {
2342 spin_lock_irq(&p->sighand->siglock);
2343 handle_stop_signal(sig, p);
2344 error = specific_send_sig_info(sig, &info, p);
2345 spin_unlock_irq(&p->sighand->siglock);
2346 }
2347 }
2348 read_unlock(&tasklist_lock);
6dd69f10 2349
1da177e4
LT
2350 return error;
2351}
2352
6dd69f10
VL
2353/**
2354 * sys_tgkill - send signal to one specific thread
2355 * @tgid: the thread group ID of the thread
2356 * @pid: the PID of the thread
2357 * @sig: signal to be sent
2358 *
2359 * This syscall also checks the tgid and returns -ESRCH even if the PID
2360 * exists but no longer belongs to the target process. This
2361 * method solves the problem of threads exiting and their PIDs being reused.
2362 */
2363asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2364{
2365 /* This is only valid for single tasks */
2366 if (pid <= 0 || tgid <= 0)
2367 return -EINVAL;
2368
2369 return do_tkill(tgid, pid, sig);
2370}
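
/*
 * Userspace view (illustrative sketch): there is typically no dedicated
 * libc wrapper here, so callers go through syscall(2), assuming the
 * installed headers define SYS_tgkill:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t tgid = getpid();
 *		pid_t tid = tgid;		// single-threaded: tid == tgid
 *
 *		if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
 *			printf("thread %d in group %d exists\n",
 *			       (int)tid, (int)tgid);
 *		return 0;
 *	}
 */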
2371
1da177e4
LT
2372/*
2373 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2374 */
2375asmlinkage long
2376sys_tkill(int pid, int sig)
2377{
1da177e4
LT
2378 /* This is only valid for single tasks */
2379 if (pid <= 0)
2380 return -EINVAL;
2381
6dd69f10 2382 return do_tkill(0, pid, sig);
1da177e4
LT
2383}
2384
2385asmlinkage long
2386sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2387{
2388 siginfo_t info;
2389
2390 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2391 return -EFAULT;
2392
2393 /* Not even root can pretend to send signals from the kernel.
2394 Nor can they impersonate a kill(), which adds source info. */
2395 if (info.si_code >= 0)
2396 return -EPERM;
2397 info.si_signo = sig;
2398
2399 /* POSIX.1b doesn't mention process groups. */
2400 return kill_proc_info(sig, &info, pid);
2401}
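
/*
 * Userspace view (illustrative sketch): the usual entry point is
 * sigqueue(3), which fills in a negative si_code (SI_QUEUE) before
 * calling rt_sigqueueinfo; that is why non-negative codes from
 * userspace are rejected above:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		union sigval value = { .sival_int = 42 };
 *		sigset_t block;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, NULL);	// keep it pending
 *
 *		if (sigqueue(getpid(), SIGUSR1, value) != 0)
 *			perror("sigqueue");
 *		return 0;
 *	}
 */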
2402
2403int
9ac95f2f 2404do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4
LT
2405{
2406 struct k_sigaction *k;
71fabd5e 2407 sigset_t mask;
1da177e4 2408
7ed20e1a 2409 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2410 return -EINVAL;
2411
2412 k = &current->sighand->action[sig-1];
2413
2414 spin_lock_irq(&current->sighand->siglock);
2415 if (signal_pending(current)) {
2416 /*
2417 * If there might be a fatal signal pending on multiple
2418 * threads, make sure we take it before changing the action.
2419 */
2420 spin_unlock_irq(&current->sighand->siglock);
2421 return -ERESTARTNOINTR;
2422 }
2423
2424 if (oact)
2425 *oact = *k;
2426
2427 if (act) {
9ac95f2f
ON
2428 sigdelsetmask(&act->sa.sa_mask,
2429 sigmask(SIGKILL) | sigmask(SIGSTOP));
1da177e4
LT
2430 /*
2431 * POSIX 3.3.1.3:
2432 * "Setting a signal action to SIG_IGN for a signal that is
2433 * pending shall cause the pending signal to be discarded,
2434 * whether or not it is blocked."
2435 *
2436 * "Setting a signal action to SIG_DFL for a signal that is
2437 * pending and whose default action is to ignore the signal
2438 * (for example, SIGCHLD), shall cause the pending signal to
2439 * be discarded, whether or not it is blocked"
2440 */
2441 if (act->sa.sa_handler == SIG_IGN ||
2442 (act->sa.sa_handler == SIG_DFL &&
2443 sig_kernel_ignore(sig))) {
2444 /*
2445 * This is a fairly rare case, so we only take the
2446 * tasklist_lock once we're sure we'll need it.
2447 * Now we must do this little unlock and relock
2448 * dance to maintain the lock hierarchy.
2449 */
2450 struct task_struct *t = current;
2451 spin_unlock_irq(&t->sighand->siglock);
2452 read_lock(&tasklist_lock);
2453 spin_lock_irq(&t->sighand->siglock);
2454 *k = *act;
71fabd5e
GA
2455 sigemptyset(&mask);
2456 sigaddset(&mask, sig);
2457 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2458 do {
71fabd5e 2459 rm_from_queue_full(&mask, &t->pending);
1da177e4
LT
2460 recalc_sigpending_tsk(t);
2461 t = next_thread(t);
2462 } while (t != current);
2463 spin_unlock_irq(&current->sighand->siglock);
2464 read_unlock(&tasklist_lock);
2465 return 0;
2466 }
2467
2468 *k = *act;
1da177e4
LT
2469 }
2470
2471 spin_unlock_irq(&current->sighand->siglock);
2472 return 0;
2473}
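
/*
 * Userspace view (illustrative sketch): sigaction(2) reaches
 * do_sigaction() via sys_rt_sigaction() below.  Installing a SIGTERM
 * handler, for example:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void on_term(int sig)
 *	{
 *		// only async-signal-safe calls belong here
 *		write(STDOUT_FILENO, "caught SIGTERM\n", 15);
 *	}
 *
 *	int main(void)
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_handler = on_term;
 *		sigemptyset(&sa.sa_mask);	// SIGKILL/SIGSTOP are stripped
 *		sa.sa_flags = 0;		// by the kernel anyway, see above
 *		if (sigaction(SIGTERM, &sa, NULL) != 0)
 *			perror("sigaction");
 *		pause();			// wait for the signal
 *		return 0;
 *	}
 */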
2474
2475int
2476do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2477{
2478 stack_t oss;
2479 int error;
2480
2481 if (uoss) {
2482 oss.ss_sp = (void __user *) current->sas_ss_sp;
2483 oss.ss_size = current->sas_ss_size;
2484 oss.ss_flags = sas_ss_flags(sp);
2485 }
2486
2487 if (uss) {
2488 void __user *ss_sp;
2489 size_t ss_size;
2490 int ss_flags;
2491
2492 error = -EFAULT;
2493 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2494 || __get_user(ss_sp, &uss->ss_sp)
2495 || __get_user(ss_flags, &uss->ss_flags)
2496 || __get_user(ss_size, &uss->ss_size))
2497 goto out;
2498
2499 error = -EPERM;
2500 if (on_sig_stack(sp))
2501 goto out;
2502
2503 error = -EINVAL;
2504 /*
2505 *
2506 * Note - this code used to test ss_flags incorrectly:
2507 * old code may have been written using ss_flags==0
2508 * to mean ss_flags==SS_ONSTACK (as this was the only
2509 * way that worked), so this fix preserves that older
2510 * mechanism.
2511 */
2512 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2513 goto out;
2514
2515 if (ss_flags == SS_DISABLE) {
2516 ss_size = 0;
2517 ss_sp = NULL;
2518 } else {
2519 error = -ENOMEM;
2520 if (ss_size < MINSIGSTKSZ)
2521 goto out;
2522 }
2523
2524 current->sas_ss_sp = (unsigned long) ss_sp;
2525 current->sas_ss_size = ss_size;
2526 }
2527
2528 if (uoss) {
2529 error = -EFAULT;
2530 if (copy_to_user(uoss, &oss, sizeof(oss)))
2531 goto out;
2532 }
2533
2534 error = 0;
2535out:
2536 return error;
2537}
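
/*
 * Userspace view (illustrative sketch): sigaltstack(2) is serviced by
 * do_sigaltstack() above (the sp argument is supplied by the arch
 * wrapper).  Installing an alternate stack:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		stack_t ss;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;		// must be >= MINSIGSTKSZ
 *		ss.ss_flags = 0;
 *		if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) != 0)
 *			perror("sigaltstack");
 *		// handlers installed with SA_ONSTACK now run on this stack
 *		return 0;
 *	}
 */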
2538
2539#ifdef __ARCH_WANT_SYS_SIGPENDING
2540
2541asmlinkage long
2542sys_sigpending(old_sigset_t __user *set)
2543{
2544 return do_sigpending(set, sizeof(*set));
2545}
2546
2547#endif
2548
2549#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2550/* Some platforms have their own version with special arguments; others
2551 support only sys_rt_sigprocmask. */
2552
2553asmlinkage long
2554sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2555{
2556 int error;
2557 old_sigset_t old_set, new_set;
2558
2559 if (set) {
2560 error = -EFAULT;
2561 if (copy_from_user(&new_set, set, sizeof(*set)))
2562 goto out;
2563 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2564
2565 spin_lock_irq(&current->sighand->siglock);
2566 old_set = current->blocked.sig[0];
2567
2568 error = 0;
2569 switch (how) {
2570 default:
2571 error = -EINVAL;
2572 break;
2573 case SIG_BLOCK:
2574 sigaddsetmask(&current->blocked, new_set);
2575 break;
2576 case SIG_UNBLOCK:
2577 sigdelsetmask(&current->blocked, new_set);
2578 break;
2579 case SIG_SETMASK:
2580 current->blocked.sig[0] = new_set;
2581 break;
2582 }
2583
2584 recalc_sigpending();
2585 spin_unlock_irq(&current->sighand->siglock);
2586 if (error)
2587 goto out;
2588 if (oset)
2589 goto set_old;
2590 } else if (oset) {
2591 old_set = current->blocked.sig[0];
2592 set_old:
2593 error = -EFAULT;
2594 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2595 goto out;
2596 }
2597 error = 0;
2598out:
2599 return error;
2600}
2601#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2602
2603#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2604asmlinkage long
2605sys_rt_sigaction(int sig,
2606 const struct sigaction __user *act,
2607 struct sigaction __user *oact,
2608 size_t sigsetsize)
2609{
2610 struct k_sigaction new_sa, old_sa;
2611 int ret = -EINVAL;
2612
2613 /* XXX: Don't preclude handling different sized sigset_t's. */
2614 if (sigsetsize != sizeof(sigset_t))
2615 goto out;
2616
2617 if (act) {
2618 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2619 return -EFAULT;
2620 }
2621
2622 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2623
2624 if (!ret && oact) {
2625 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2626 return -EFAULT;
2627 }
2628out:
2629 return ret;
2630}
2631#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2632
2633#ifdef __ARCH_WANT_SYS_SGETMASK
2634
2635/*
2636 * For backwards compatibility. Functionality superseded by sigprocmask.
2637 */
2638asmlinkage long
2639sys_sgetmask(void)
2640{
2641 /* SMP safe */
2642 return current->blocked.sig[0];
2643}
2644
2645asmlinkage long
2646sys_ssetmask(int newmask)
2647{
2648 int old;
2649
2650 spin_lock_irq(&current->sighand->siglock);
2651 old = current->blocked.sig[0];
2652
2653 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2654 sigmask(SIGSTOP)));
2655 recalc_sigpending();
2656 spin_unlock_irq(&current->sighand->siglock);
2657
2658 return old;
2659}
2660#endif /* __ARCH_WANT_SYS_SGETMASK */
2661
2662#ifdef __ARCH_WANT_SYS_SIGNAL
2663/*
2664 * For backwards compatibility. Functionality superseded by sigaction.
2665 */
2666asmlinkage unsigned long
2667sys_signal(int sig, __sighandler_t handler)
2668{
2669 struct k_sigaction new_sa, old_sa;
2670 int ret;
2671
2672 new_sa.sa.sa_handler = handler;
2673 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2674 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
2675
2676 ret = do_sigaction(sig, &new_sa, &old_sa);
2677
2678 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2679}
2680#endif /* __ARCH_WANT_SYS_SIGNAL */
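
/*
 * Userspace view (illustrative sketch): whether a C library's signal(3)
 * reaches this syscall or is reimplemented on top of rt_sigaction
 * depends on the library; the one-shot, non-masking semantics above
 * (SA_ONESHOT | SA_NOMASK) are the historical System V behaviour:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static void once(int sig)
 *	{
 *		// under the semantics above, the disposition was already
 *		// reset to SIG_DFL before this handler ran
 *	}
 *
 *	int main(void)
 *	{
 *		if (signal(SIGUSR1, once) == SIG_ERR)
 *			perror("signal");
 *		raise(SIGUSR1);			// handled exactly once
 *		return 0;
 *	}
 */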
2681
2682#ifdef __ARCH_WANT_SYS_PAUSE
2683
2684asmlinkage long
2685sys_pause(void)
2686{
2687 current->state = TASK_INTERRUPTIBLE;
2688 schedule();
2689 return -ERESTARTNOHAND;
2690}
2691
2692#endif
2693
150256d8
DW
2694#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2695asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2696{
2697 sigset_t newset;
2698
2699 /* XXX: Don't preclude handling different sized sigset_t's. */
2700 if (sigsetsize != sizeof(sigset_t))
2701 return -EINVAL;
2702
2703 if (copy_from_user(&newset, unewset, sizeof(newset)))
2704 return -EFAULT;
2705 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2706
2707 spin_lock_irq(&current->sighand->siglock);
2708 current->saved_sigmask = current->blocked;
2709 current->blocked = newset;
2710 recalc_sigpending();
2711 spin_unlock_irq(&current->sighand->siglock);
2712
2713 current->state = TASK_INTERRUPTIBLE;
2714 schedule();
2715 set_thread_flag(TIF_RESTORE_SIGMASK);
2716 return -ERESTARTNOHAND;
2717}
2718#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
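
/*
 * Userspace view (illustrative sketch): sigsuspend(2) relies on the
 * saved_sigmask/TIF_RESTORE_SIGMASK dance above to make "unblock and
 * sleep" atomic.  The classic race-free wait looks like this (some
 * other task is assumed to send the SIGUSR1):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig)
 *	{
 *		got_usr1 = 1;
 *	}
 *
 *	int main(void)
 *	{
 *		sigset_t block, waitmask;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &waitmask);
 *
 *		while (!got_usr1)
 *			sigsuspend(&waitmask);	// atomically unblock + sleep
 *		printf("got SIGUSR1\n");
 *		return 0;
 *	}
 */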
2719
1da177e4
LT
2720void __init signals_init(void)
2721{
2722 sigqueue_cachep =
2723 kmem_cache_create("sigqueue",
2724 sizeof(struct sigqueue),
2725 __alignof__(struct sigqueue),
2726 SLAB_PANIC, NULL, NULL);
2727}