/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	handler = sig_handler(t, sig);
	if (!sig_handler_ignored(handler, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
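
/*
 * Worked example (illustrative, not from the original file): _NSIG_WORDS is
 * _NSIG / _NSIG_BPW, where _NSIG_BPW is the bit width of an unsigned long.
 * With the usual _NSIG of 64 that gives one word on 64-bit and two words on
 * 32-bit architectures, so the unrolled "case 2" and "case 1" branches cover
 * the common configurations and the loop is only the fallback.
 */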

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
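
/*
 * Worked example (illustrative, not from the original file): suppose SIGINT
 * (signal 2) is pending and unblocked.  Signal n is kept at bit n-1, so word
 * 0 has bit 1 set and x == 0x2.  ffz(~x) finds the first zero bit of ~x,
 * i.e. the first set bit of x, which is 1; the "case 1" branch then yields
 * sig = 1 + 1 = 2 = SIGINT.
 */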

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
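
/*
 * Illustrative sketch (not part of the original file): a kernel thread that
 * opted in to a signal with allow_signal() typically drains leftovers with
 * flush_signals() once it has acted on them, along the lines of
 *
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();			// hypothetical work loop
 *	flush_signals(current);
 *
 * allow_signal() is defined elsewhere in the kernel; do_work() is a
 * placeholder.
 */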

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig, handler);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
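
/*
 * Illustrative sketch (not part of the original file): a driver pairs these
 * calls around a region it must finish without signal interference.  The
 * callback and device structure here are hypothetical.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->signals_ok;		// non-zero: act on the signal
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	// ... critical region ...
 *	unblock_all_signals();
 */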

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
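
/*
 * Illustrative sketch (not part of the original file): a caller holds the
 * siglock across the dequeue, along the lines of
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * which is essentially what the dequeue_signal_lock() helper and the
 * signalfd read path do.
 */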

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
	    (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
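
/*
 * Note on the uid tests above (added for clarity): (a ^ b) is zero exactly
 * when a == b, so the condition reads "the sender's euid and uid match
 * neither the target's suid nor its uid, and the sender lacks CAP_KILL".
 * For example, with current->euid == 1000 signalling a task whose uid is
 * 1000, (current->euid ^ t->uid) == 0, the && chain is false and the
 * -EPERM branch is skipped.
 */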

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from finish_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	struct sigpending *pending;
	struct sigqueue *q;

	trace_sched_signal_send(sig, t);

	assert_spin_locked(&t->sighand->siglock);
	if (!prepare_signal(sig, t))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_pid_vnr(current);
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->ip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
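
/*
 * Usage note (illustrative): booting with "print-fatal-signals=1" on the
 * kernel command line enables the diagnostic above; kernels that provide
 * the corresponding sysctl can toggle it at run time as well.
 */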

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

int __fatal_signal_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL);
}
EXPORT_SYMBOL(__fatal_signal_pending);

struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
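
/*
 * Illustrative sketch (not part of the original file): the returned pointer
 * doubles as the success indicator, so callers typically write
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		// ... tsk->sighand is stable here ...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 *
 * The retry loop above copes with ->sighand changing (e.g. via exec's
 * de_thread()) between the RCU dereference and taking the lock.
 */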

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
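
/*
 * Summary of the pid cases above, mirroring kill(2):
 *
 *	pid >  0   signal the single process with that pid
 *	pid == 0   signal every process in the caller's process group
 *	pid <  -1  signal every process in the group with pgid == -pid
 *	pid == -1  signal every process the caller may signal, except
 *		   pid 1 and the caller's own thread group
 */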

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * The caller must ensure the task can't exit.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
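
/*
 * Usage note (illustrative): the priv flag chooses the siginfo the receiver
 * sees: send_sig(SIGTERM, p, 0) reports SI_USER as if a user had sent it,
 * while send_sig(SIGTERM, p, 1) reports SI_KERNEL, a signal generated by
 * the kernel itself.
 */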

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
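
/*
 * Illustrative sketch (not part of the original file): the tty layer
 * delivers job-control signals through these wrappers, e.g. on hangup
 * roughly
 *
 *	kill_pgrp(tty->pgrp, SIGHUP, 1);
 *	kill_pgrp(tty->pgrp, SIGCONT, 1);
 *
 * with priv set so the signal is treated as kernel-generated rather than
 * subject to the sender's uid checks.
 */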

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
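
/*
 * Illustrative sketch (not part of the original file): the POSIX timer code
 * is the main user of the preallocated path.  Roughly:
 *
 *	q = sigqueue_alloc();		// at timer_create(); may fail: -EAGAIN
 *	...
 *	send_sigqueue(q, tsk, group);	// at each timer expiry
 *	...
 *	sigqueue_free(q);		// at timer_delete()
 *
 * Preallocation is what lets expiry never fail for lack of memory.
 */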

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	struct task_cputime cputime;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	thread_group_cputime(tsk, &cputime);
	info.si_utime = cputime_to_jiffies(cputime.utime);
	info.si_stime = cputime_to_jiffies(cputime.stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	rcu_read_unlock();

	info.si_uid = tsk->uid;

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!(current->ptrace & PT_PTRACED))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = current->parent->uid;
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;
		spin_unlock_irq(&sighand->siglock);

		if (unlikely(!tracehook_notify_jctl(1, why)))
			goto relock;

		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader, why);
		read_unlock(&tasklist_lock);
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(signal->group_stop_count > 0) &&
		    do_signal_stop(0))
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !signal_group_exit(signal))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}
1885
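Every architecture's return-to-user path funnels into get_signal_to_deliver(). A schematic of the arch-side caller, where handle_signal() stands in for the arch-specific signal-frame setup (a sketch, not code from this tree):

static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Non-zero: build the user-mode signal frame. */
		handle_signal(signr, &info, &ka, regs);
		return;
	}

	/* Zero: nothing to deliver; restart any interrupted
	 * syscall and return to user mode. */
}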
d12619b5
ON
1886void exit_signals(struct task_struct *tsk)
1887{
1888 int group_stop = 0;
5dee1707 1889 struct task_struct *t;
d12619b5 1890
5dee1707
ON
1891 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1892 tsk->flags |= PF_EXITING;
1893 return;
d12619b5
ON
1894 }
1895
5dee1707 1896 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
1897 /*
1898	 * From now on this task is not visible to group-wide signals;
1899	 * see wants_signal() and do_signal_stop().
1900 */
1901 tsk->flags |= PF_EXITING;
5dee1707
ON
1902 if (!signal_pending(tsk))
1903 goto out;
1904
1905	/* It could be that __group_complete_signal() chose us to
1906	 * notify about a group-wide signal. Another thread should be
1907	 * woken now to take the signal, since we will not.
1908 */
1909 for (t = tsk; (t = next_thread(t)) != tsk; )
1910 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1911 recalc_sigpending_and_wake(t);
1912
1913 if (unlikely(tsk->signal->group_stop_count) &&
1914 !--tsk->signal->group_stop_count) {
1915 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1916 group_stop = 1;
1917 }
1918out:
d12619b5
ON
1919 spin_unlock_irq(&tsk->sighand->siglock);
1920
fa00b80b 1921 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
d12619b5
ON
1922 read_lock(&tasklist_lock);
1923 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1924 read_unlock(&tasklist_lock);
1925 }
1926}
1927
1da177e4
LT
1928EXPORT_SYMBOL(recalc_sigpending);
1929EXPORT_SYMBOL_GPL(dequeue_signal);
1930EXPORT_SYMBOL(flush_signals);
1931EXPORT_SYMBOL(force_sig);
1da177e4
LT
1932EXPORT_SYMBOL(send_sig);
1933EXPORT_SYMBOL(send_sig_info);
1934EXPORT_SYMBOL(sigprocmask);
1935EXPORT_SYMBOL(block_all_signals);
1936EXPORT_SYMBOL(unblock_all_signals);
1937
1938
1939/*
1940 * System call entry points.
1941 */
1942
1943asmlinkage long sys_restart_syscall(void)
1944{
1945 struct restart_block *restart = &current_thread_info()->restart_block;
1946 return restart->fn(restart);
1947}
1948
1949long do_no_restart_syscall(struct restart_block *param)
1950{
1951 return -EINTR;
1952}
1953
1954/*
1955 * We don't need to get the kernel lock - this is all local to this
1956	 * particular thread. (And that's good, because this is _heavily_
1957	 * used by various programs.)
1958 */
1959
1960/*
1961 * This is also useful for kernel threads that want to temporarily
1962 * (or permanently) block certain signals.
1963 *
1964 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1965 * interface happily blocks "unblockable" signals like SIGKILL
1966 * and friends.
1967 */
1968int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1969{
1970 int error;
1da177e4
LT
1971
1972 spin_lock_irq(&current->sighand->siglock);
a26fd335
ON
1973 if (oldset)
1974 *oldset = current->blocked;
1975
1da177e4
LT
1976 error = 0;
1977 switch (how) {
1978 case SIG_BLOCK:
1979 sigorsets(&current->blocked, &current->blocked, set);
1980 break;
1981 case SIG_UNBLOCK:
1982 signandsets(&current->blocked, &current->blocked, set);
1983 break;
1984 case SIG_SETMASK:
1985 current->blocked = *set;
1986 break;
1987 default:
1988 error = -EINVAL;
1989 }
1990 recalc_sigpending();
1991 spin_unlock_irq(&current->sighand->siglock);
a26fd335 1992
1da177e4
LT
1993 return error;
1994}
1995
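Since this kernel-side sigprocmask() happily masks SIGKILL, a kernel thread that wants to shut out everything can simply block the full set. A sketch using the kernel's own sigfillset() (allow_signal()/disallow_signal() are the more targeted helpers):

static void kthread_block_all_signals(void)
{
	sigset_t all;

	sigfillset(&all);
	/* Unlike userspace, this also blocks SIGKILL and SIGSTOP. */
	sigprocmask(SIG_BLOCK, &all, NULL);
}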
1996asmlinkage long
1997sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1998{
1999 int error = -EINVAL;
2000 sigset_t old_set, new_set;
2001
2002 /* XXX: Don't preclude handling different sized sigset_t's. */
2003 if (sigsetsize != sizeof(sigset_t))
2004 goto out;
2005
2006 if (set) {
2007 error = -EFAULT;
2008 if (copy_from_user(&new_set, set, sizeof(*set)))
2009 goto out;
2010 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2011
2012 error = sigprocmask(how, &new_set, &old_set);
2013 if (error)
2014 goto out;
2015 if (oset)
2016 goto set_old;
2017 } else if (oset) {
2018 spin_lock_irq(&current->sighand->siglock);
2019 old_set = current->blocked;
2020 spin_unlock_irq(&current->sighand->siglock);
2021
2022 set_old:
2023 error = -EFAULT;
2024 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2025 goto out;
2026 }
2027 error = 0;
2028out:
2029 return error;
2030}
2031
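Userspace reaches sys_rt_sigprocmask() through the libc sigprocmask() wrapper; for example, bracketing a critical region so SIGINT cannot interrupt it:

#include <signal.h>
#include <stddef.h>

void run_with_sigint_blocked(void (*critical)(void))
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	sigprocmask(SIG_BLOCK, &block, &old);	/* -> rt_sigprocmask */
	critical();
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore old mask */
}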
2032long do_sigpending(void __user *set, unsigned long sigsetsize)
2033{
2034 long error = -EINVAL;
2035 sigset_t pending;
2036
2037 if (sigsetsize > sizeof(sigset_t))
2038 goto out;
2039
2040 spin_lock_irq(&current->sighand->siglock);
2041 sigorsets(&pending, &current->pending.signal,
2042 &current->signal->shared_pending.signal);
2043 spin_unlock_irq(&current->sighand->siglock);
2044
2045 /* Outside the lock because only this thread touches it. */
2046 sigandsets(&pending, &current->blocked, &pending);
2047
2048 error = -EFAULT;
2049 if (!copy_to_user(set, &pending, sigsetsize))
2050 error = 0;
2051
2052out:
2053 return error;
2054}
2055
2056asmlinkage long
2057sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2058{
2059 return do_sigpending(set, sigsetsize);
2060}
2061
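The set reported above is (private | shared) & blocked, i.e. signals that have arrived but cannot yet be delivered; from userspace this is sigpending(2):

#include <signal.h>

/* Returns 1 if SIGTERM arrived while blocked, 0 if not, -1 on error. */
int sigterm_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) < 0)
		return -1;
	return sigismember(&pending, SIGTERM);
}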
2062#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2063
2064int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2065{
2066 int err;
2067
2068 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2069 return -EFAULT;
2070 if (from->si_code < 0)
2071 return __copy_to_user(to, from, sizeof(siginfo_t))
2072 ? -EFAULT : 0;
2073 /*
2074 * If you change siginfo_t structure, please be sure
2075 * this code is fixed accordingly.
fba2afaa
DL
2076 * Please remember to update the signalfd_copyinfo() function
2077 * inside fs/signalfd.c too, in case siginfo_t changes.
1da177e4
LT
2078 * It should never copy any pad contained in the structure
2079 * to avoid security leaks, but must copy the generic
2080 * 3 ints plus the relevant union member.
2081 */
2082 err = __put_user(from->si_signo, &to->si_signo);
2083 err |= __put_user(from->si_errno, &to->si_errno);
2084 err |= __put_user((short)from->si_code, &to->si_code);
2085 switch (from->si_code & __SI_MASK) {
2086 case __SI_KILL:
2087 err |= __put_user(from->si_pid, &to->si_pid);
2088 err |= __put_user(from->si_uid, &to->si_uid);
2089 break;
2090 case __SI_TIMER:
2091 err |= __put_user(from->si_tid, &to->si_tid);
2092 err |= __put_user(from->si_overrun, &to->si_overrun);
2093 err |= __put_user(from->si_ptr, &to->si_ptr);
2094 break;
2095 case __SI_POLL:
2096 err |= __put_user(from->si_band, &to->si_band);
2097 err |= __put_user(from->si_fd, &to->si_fd);
2098 break;
2099 case __SI_FAULT:
2100 err |= __put_user(from->si_addr, &to->si_addr);
2101#ifdef __ARCH_SI_TRAPNO
2102 err |= __put_user(from->si_trapno, &to->si_trapno);
2103#endif
2104 break;
2105 case __SI_CHLD:
2106 err |= __put_user(from->si_pid, &to->si_pid);
2107 err |= __put_user(from->si_uid, &to->si_uid);
2108 err |= __put_user(from->si_status, &to->si_status);
2109 err |= __put_user(from->si_utime, &to->si_utime);
2110 err |= __put_user(from->si_stime, &to->si_stime);
2111 break;
2112 case __SI_RT: /* This is not generated by the kernel as of now. */
2113	case __SI_MESGQ: /* But this one is. */
2114 err |= __put_user(from->si_pid, &to->si_pid);
2115 err |= __put_user(from->si_uid, &to->si_uid);
2116 err |= __put_user(from->si_ptr, &to->si_ptr);
2117 break;
2118 default: /* this is just in case for now ... */
2119 err |= __put_user(from->si_pid, &to->si_pid);
2120 err |= __put_user(from->si_uid, &to->si_uid);
2121 break;
2122 }
2123 return err;
2124}
2125
2126#endif
2127
2128asmlinkage long
2129sys_rt_sigtimedwait(const sigset_t __user *uthese,
2130 siginfo_t __user *uinfo,
2131 const struct timespec __user *uts,
2132 size_t sigsetsize)
2133{
2134 int ret, sig;
2135 sigset_t these;
2136 struct timespec ts;
2137 siginfo_t info;
2138 long timeout = 0;
2139
2140 /* XXX: Don't preclude handling different sized sigset_t's. */
2141 if (sigsetsize != sizeof(sigset_t))
2142 return -EINVAL;
2143
2144 if (copy_from_user(&these, uthese, sizeof(these)))
2145 return -EFAULT;
2146
2147 /*
2148 * Invert the set of allowed signals to get those we
2149 * want to block.
2150 */
2151 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2152 signotset(&these);
2153
2154 if (uts) {
2155 if (copy_from_user(&ts, uts, sizeof(ts)))
2156 return -EFAULT;
2157 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2158 || ts.tv_sec < 0)
2159 return -EINVAL;
2160 }
2161
2162 spin_lock_irq(&current->sighand->siglock);
2163 sig = dequeue_signal(current, &these, &info);
2164 if (!sig) {
2165 timeout = MAX_SCHEDULE_TIMEOUT;
2166 if (uts)
2167 timeout = (timespec_to_jiffies(&ts)
2168 + (ts.tv_sec || ts.tv_nsec));
2169
2170 if (timeout) {
2171 /* None ready -- temporarily unblock those we're
2172	 * interested in while we are sleeping, so that we'll
2173 * be awakened when they arrive. */
2174 current->real_blocked = current->blocked;
2175 sigandsets(&current->blocked, &current->blocked, &these);
2176 recalc_sigpending();
2177 spin_unlock_irq(&current->sighand->siglock);
2178
75bcc8c5 2179 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2180
1da177e4
LT
2181 spin_lock_irq(&current->sighand->siglock);
2182 sig = dequeue_signal(current, &these, &info);
2183 current->blocked = current->real_blocked;
2184 siginitset(&current->real_blocked, 0);
2185 recalc_sigpending();
2186 }
2187 }
2188 spin_unlock_irq(&current->sighand->siglock);
2189
2190 if (sig) {
2191 ret = sig;
2192 if (uinfo) {
2193 if (copy_siginfo_to_user(uinfo, &info))
2194 ret = -EFAULT;
2195 }
2196 } else {
2197 ret = -EAGAIN;
2198 if (timeout)
2199 ret = -EINTR;
2200 }
2201
2202 return ret;
2203}
2204
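This is the backend of sigtimedwait(2)/sigwaitinfo(2). A typical synchronous consumer blocks the signal first (otherwise it may be delivered asynchronously instead of being dequeued here) and then waits with a timeout:

#include <signal.h>
#include <time.h>

/* Wait up to two seconds for SIGUSR1; returns the signal number,
 * or -1 with errno == EAGAIN on timeout. */
int wait_for_sigusr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	return sigtimedwait(&set, &info, &ts);
}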
2205asmlinkage long
bc64efd2 2206sys_kill(pid_t pid, int sig)
1da177e4
LT
2207{
2208 struct siginfo info;
2209
2210 info.si_signo = sig;
2211 info.si_errno = 0;
2212 info.si_code = SI_USER;
b488893a 2213 info.si_pid = task_tgid_vnr(current);
1da177e4
LT
2214 info.si_uid = current->uid;
2215
2216 return kill_something_info(sig, &info, pid);
2217}
2218
bc64efd2 2219static int do_tkill(pid_t tgid, pid_t pid, int sig)
1da177e4 2220{
1da177e4 2221 int error;
6dd69f10 2222 struct siginfo info;
1da177e4 2223 struct task_struct *p;
3547ff3a 2224 unsigned long flags;
1da177e4 2225
6dd69f10 2226 error = -ESRCH;
1da177e4
LT
2227 info.si_signo = sig;
2228 info.si_errno = 0;
2229 info.si_code = SI_TKILL;
b488893a 2230 info.si_pid = task_tgid_vnr(current);
1da177e4
LT
2231 info.si_uid = current->uid;
2232
3547ff3a 2233 rcu_read_lock();
228ebcbe 2234 p = find_task_by_vpid(pid);
b488893a 2235 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
1da177e4
LT
2236 error = check_kill_permission(sig, &info, p);
2237 /*
2238 * The null signal is a permissions and process existence
2239 * probe. No signal is actually delivered.
3547ff3a
ON
2240 *
2241 * If lock_task_sighand() fails we pretend the task dies
2242 * after receiving the signal. The window is tiny, and the
2243 * signal is private anyway.
1da177e4 2244 */
3547ff3a 2245 if (!error && sig && lock_task_sighand(p, &flags)) {
1da177e4 2246 error = specific_send_sig_info(sig, &info, p);
3547ff3a 2247 unlock_task_sighand(p, &flags);
1da177e4
LT
2248 }
2249 }
3547ff3a 2250 rcu_read_unlock();
6dd69f10 2251
1da177e4
LT
2252 return error;
2253}
2254
6dd69f10
VL
2255/**
2256 * sys_tgkill - send signal to one specific thread
2257 * @tgid: the thread group ID of the thread
2258 * @pid: the PID of the thread
2259 * @sig: signal to be sent
2260 *
72fd4a35 2261 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
2262	 * exists but no longer belongs to the target process. This
2263 * method solves the problem of threads exiting and PIDs getting reused.
2264 */
bc64efd2 2265asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
6dd69f10
VL
2266{
2267 /* This is only valid for single tasks */
2268 if (pid <= 0 || tgid <= 0)
2269 return -EINVAL;
2270
2271 return do_tkill(tgid, pid, sig);
2272}
2273
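There is no libc wrapper for tgkill in this era, so callers go through syscall(2); a sketch, assuming SYS_tgkill is defined by <sys/syscall.h> on the target:

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Fails with ESRCH if the TID was recycled into another thread
 * group -- exactly the guarantee described above. */
static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}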
1da177e4
LT
2274/*
2275 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2276 */
2277asmlinkage long
bc64efd2 2278sys_tkill(pid_t pid, int sig)
1da177e4 2279{
1da177e4
LT
2280 /* This is only valid for single tasks */
2281 if (pid <= 0)
2282 return -EINVAL;
2283
6dd69f10 2284 return do_tkill(0, pid, sig);
1da177e4
LT
2285}
2286
2287asmlinkage long
bc64efd2 2288sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
1da177e4
LT
2289{
2290 siginfo_t info;
2291
2292 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2293 return -EFAULT;
2294
2295 /* Not even root can pretend to send signals from the kernel.
2296 Nor can they impersonate a kill(), which adds source info. */
2297 if (info.si_code >= 0)
2298 return -EPERM;
2299 info.si_signo = sig;
2300
2301 /* POSIX.1b doesn't mention process groups. */
2302 return kill_proc_info(sig, &info, pid);
2303}
2304
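The si_code >= 0 check is why userspace normally goes through sigqueue(3): the wrapper builds a siginfo with si_code = SI_QUEUE (negative) and a caller-chosen payload. For example:

#include <signal.h>
#include <sys/types.h>

/* Queue SIGUSR1 carrying an int; an SA_SIGINFO handler in the
 * receiver reads it back from info->si_value.sival_int. */
int send_tagged_usr1(pid_t pid, int tag)
{
	union sigval sv;

	sv.sival_int = tag;
	return sigqueue(pid, SIGUSR1, sv);
}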
88531f72 2305int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 2306{
93585eea 2307 struct task_struct *t = current;
1da177e4 2308 struct k_sigaction *k;
71fabd5e 2309 sigset_t mask;
1da177e4 2310
7ed20e1a 2311 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2312 return -EINVAL;
2313
93585eea 2314 k = &t->sighand->action[sig-1];
1da177e4
LT
2315
2316 spin_lock_irq(&current->sighand->siglock);
1da177e4
LT
2317 if (oact)
2318 *oact = *k;
2319
2320 if (act) {
9ac95f2f
ON
2321 sigdelsetmask(&act->sa.sa_mask,
2322 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 2323 *k = *act;
1da177e4
LT
2324 /*
2325 * POSIX 3.3.1.3:
2326 * "Setting a signal action to SIG_IGN for a signal that is
2327 * pending shall cause the pending signal to be discarded,
2328 * whether or not it is blocked."
2329 *
2330 * "Setting a signal action to SIG_DFL for a signal that is
2331 * pending and whose default action is to ignore the signal
2332 * (for example, SIGCHLD), shall cause the pending signal to
2333 * be discarded, whether or not it is blocked"
2334 */
35de254d 2335 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
71fabd5e
GA
2336 sigemptyset(&mask);
2337 sigaddset(&mask, sig);
2338 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2339 do {
71fabd5e 2340 rm_from_queue_full(&mask, &t->pending);
1da177e4
LT
2341 t = next_thread(t);
2342 } while (t != current);
1da177e4 2343 }
1da177e4
LT
2344 }
2345
2346 spin_unlock_irq(&current->sighand->siglock);
2347 return 0;
2348}
2349
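The POSIX discard rule implemented above is visible from userspace: switching a signal's action to SIG_IGN flushes it from the pending queues even while blocked. A sketch:

#include <signal.h>

/* Drop any pending SIGCHLD, blocked or not, per POSIX 3.3.1.3. */
void discard_pending_sigchld(void)
{
	struct sigaction sa;

	sa.sa_handler = SIG_IGN;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
}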
2350int
2351do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2352{
2353 stack_t oss;
2354 int error;
2355
2356 if (uoss) {
2357 oss.ss_sp = (void __user *) current->sas_ss_sp;
2358 oss.ss_size = current->sas_ss_size;
2359 oss.ss_flags = sas_ss_flags(sp);
2360 }
2361
2362 if (uss) {
2363 void __user *ss_sp;
2364 size_t ss_size;
2365 int ss_flags;
2366
2367 error = -EFAULT;
2368 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2369 || __get_user(ss_sp, &uss->ss_sp)
2370 || __get_user(ss_flags, &uss->ss_flags)
2371 || __get_user(ss_size, &uss->ss_size))
2372 goto out;
2373
2374 error = -EPERM;
2375 if (on_sig_stack(sp))
2376 goto out;
2377
2378 error = -EINVAL;
2379 /*
2380 *
2381	 * Note: this code used to test ss_flags incorrectly, and
2382	 * old code may have been written using ss_flags == 0
2383	 * to mean ss_flags == SS_ONSTACK (as this was the only
2384	 * way that worked), so this fix preserves that older
2385	 * mechanism.
2386 */
2387 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2388 goto out;
2389
2390 if (ss_flags == SS_DISABLE) {
2391 ss_size = 0;
2392 ss_sp = NULL;
2393 } else {
2394 error = -ENOMEM;
2395 if (ss_size < MINSIGSTKSZ)
2396 goto out;
2397 }
2398
2399 current->sas_ss_sp = (unsigned long) ss_sp;
2400 current->sas_ss_size = ss_size;
2401 }
2402
2403 if (uoss) {
2404 error = -EFAULT;
2405 if (copy_to_user(uoss, &oss, sizeof(oss)))
2406 goto out;
2407 }
2408
2409 error = 0;
2410out:
2411 return error;
2412}
2413
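do_sigaltstack() is the backend of sigaltstack(2); the usual pairing is an alternate stack plus SA_ONSTACK, so a handler can still run when the main stack has overflowed. A sketch:

#include <signal.h>

static char altstack[SIGSTKSZ];

void install_overflow_handler(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = altstack;
	ss.ss_size = sizeof(altstack);
	ss.ss_flags = 0;			/* enable the stack */
	sigaltstack(&ss, NULL);

	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;		/* use the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}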
2414#ifdef __ARCH_WANT_SYS_SIGPENDING
2415
2416asmlinkage long
2417sys_sigpending(old_sigset_t __user *set)
2418{
2419 return do_sigpending(set, sizeof(*set));
2420}
2421
2422#endif
2423
2424#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2425/* Some platforms have their own version with special arguments; others
2426   support only sys_rt_sigprocmask. */
2427
2428asmlinkage long
2429sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2430{
2431 int error;
2432 old_sigset_t old_set, new_set;
2433
2434 if (set) {
2435 error = -EFAULT;
2436 if (copy_from_user(&new_set, set, sizeof(*set)))
2437 goto out;
2438 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2439
2440 spin_lock_irq(&current->sighand->siglock);
2441 old_set = current->blocked.sig[0];
2442
2443 error = 0;
2444 switch (how) {
2445 default:
2446 error = -EINVAL;
2447 break;
2448 case SIG_BLOCK:
2449 sigaddsetmask(&current->blocked, new_set);
2450 break;
2451 case SIG_UNBLOCK:
2452 sigdelsetmask(&current->blocked, new_set);
2453 break;
2454 case SIG_SETMASK:
2455 current->blocked.sig[0] = new_set;
2456 break;
2457 }
2458
2459 recalc_sigpending();
2460 spin_unlock_irq(&current->sighand->siglock);
2461 if (error)
2462 goto out;
2463 if (oset)
2464 goto set_old;
2465 } else if (oset) {
2466 old_set = current->blocked.sig[0];
2467 set_old:
2468 error = -EFAULT;
2469 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2470 goto out;
2471 }
2472 error = 0;
2473out:
2474 return error;
2475}
2476#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2477
2478#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2479asmlinkage long
2480sys_rt_sigaction(int sig,
2481 const struct sigaction __user *act,
2482 struct sigaction __user *oact,
2483 size_t sigsetsize)
2484{
2485 struct k_sigaction new_sa, old_sa;
2486 int ret = -EINVAL;
2487
2488 /* XXX: Don't preclude handling different sized sigset_t's. */
2489 if (sigsetsize != sizeof(sigset_t))
2490 goto out;
2491
2492 if (act) {
2493 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2494 return -EFAULT;
2495 }
2496
2497 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2498
2499 if (!ret && oact) {
2500 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2501 return -EFAULT;
2502 }
2503out:
2504 return ret;
2505}
2506#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2507
2508#ifdef __ARCH_WANT_SYS_SGETMASK
2509
2510/*
2511 * For backwards compatibility. Functionality superseded by sigprocmask.
2512 */
2513asmlinkage long
2514sys_sgetmask(void)
2515{
2516 /* SMP safe */
2517 return current->blocked.sig[0];
2518}
2519
2520asmlinkage long
2521sys_ssetmask(int newmask)
2522{
2523 int old;
2524
2525 spin_lock_irq(&current->sighand->siglock);
2526 old = current->blocked.sig[0];
2527
2528 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2529 sigmask(SIGSTOP)));
2530 recalc_sigpending();
2531 spin_unlock_irq(&current->sighand->siglock);
2532
2533 return old;
2534}
2535#endif /* __ARCH_WANT_SYS_SGETMASK */
2536
2537#ifdef __ARCH_WANT_SYS_SIGNAL
2538/*
2539 * For backwards compatibility. Functionality superseded by sigaction.
2540 */
2541asmlinkage unsigned long
2542sys_signal(int sig, __sighandler_t handler)
2543{
2544 struct k_sigaction new_sa, old_sa;
2545 int ret;
2546
2547 new_sa.sa.sa_handler = handler;
2548 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2549 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
2550
2551 ret = do_sigaction(sig, &new_sa, &old_sa);
2552
2553 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2554}
2555#endif /* __ARCH_WANT_SYS_SIGNAL */
2556
2557#ifdef __ARCH_WANT_SYS_PAUSE
2558
2559asmlinkage long
2560sys_pause(void)
2561{
2562 current->state = TASK_INTERRUPTIBLE;
2563 schedule();
2564 return -ERESTARTNOHAND;
2565}
2566
2567#endif
2568
150256d8
DW
2569#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2570asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2571{
2572 sigset_t newset;
2573
2574 /* XXX: Don't preclude handling different sized sigset_t's. */
2575 if (sigsetsize != sizeof(sigset_t))
2576 return -EINVAL;
2577
2578 if (copy_from_user(&newset, unewset, sizeof(newset)))
2579 return -EFAULT;
2580 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2581
2582 spin_lock_irq(&current->sighand->siglock);
2583 current->saved_sigmask = current->blocked;
2584 current->blocked = newset;
2585 recalc_sigpending();
2586 spin_unlock_irq(&current->sighand->siglock);
2587
2588 current->state = TASK_INTERRUPTIBLE;
2589 schedule();
4e4c22c7 2590 set_restore_sigmask();
150256d8
DW
2591 return -ERESTARTNOHAND;
2592}
2593#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2594
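The saved_sigmask/restore dance above is what makes sigsuspend(2) atomic: the mask is swapped and the task sleeps without a window in which the awaited signal can be lost, closing the classic check-then-wait race. The canonical userspace pattern, assuming a SIGUSR1 handler elsewhere sets the flag:

#include <signal.h>

volatile sig_atomic_t got_usr1;		/* set by the SIGUSR1 handler */

void wait_for_usr1(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	/* SIGUSR1 can only arrive inside sigsuspend(), so it cannot
	 * slip in between the flag test and the sleep. */
	while (!got_usr1)
		sigsuspend(&old);	/* returns -1 with errno == EINTR */

	sigprocmask(SIG_SETMASK, &old, NULL);
}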
f269fdd1
DH
2595__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2596{
2597 return NULL;
2598}
2599
1da177e4
LT
2600void __init signals_init(void)
2601{
0a31bd5f 2602 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
1da177e4 2603}