/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}

void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/* we are using it only for SMP init */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation. */
		if (unlikely(p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	REMOVE_LINKS(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	SET_LINKS(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL ||
			current->policy == SCHED_BATCH)
				&& (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	down(&tty_sem);
	current->signal->tty = NULL;
	up(&tty_sem);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_namespace(current);
	current->namespace = init_task.namespace;
	get_namespace(current->namespace);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);
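
/*
 * Usage sketch (illustrative, not part of this file): a module's
 * kernel thread typically sheds its user-space context with
 * daemonize() and then re-enables the one signal it cares about.
 * "my_kthread" and do_work() below are made-up names:
 *
 *	static int my_kthread(void *unused)
 *	{
 *		daemonize("my_kthread");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current))
 *			do_work();
 *		return 0;
 *	}
 *
 * daemonize() blocks and flushes all signals, so without the
 * allow_signal() call the thread could never be told to stop.
 */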

static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fdset || i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt == &files->fdtab)
			fdt->free_files = files;
		else
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	if (mm != tsk->active_mm) BUG();
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}
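
/*
 * Editor's note on the lazy-TLB handoff above: exit_mm() clears
 * tsk->mm but bumps mm_count first, so the mm_struct stays valid
 * while this CPU keeps it as active_mm for lazy TLB until the final
 * schedule(); mmput() releases the user address space, while the
 * mm_struct itself lives on until the last reference is dropped.
 */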

static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
	p->real_parent = reaper;
}

static void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init. */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here. */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one. */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child. */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
 */
static void forget_original_parent(struct task_struct * father,
				   struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p,struct task_struct,sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper, the real parent is us */
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * if the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * zombie forever, since we prevented it from reaping itself
		 * while it was being traced by us, so that we could see it
		 * in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p,struct task_struct,ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p,struct task_struct,ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	tsk->flags |= PF_EXITING;

	/*
	 * Make sure we don't try to process any timer firings
	 * while we are already exiting.
	 */
	tsk->it_virt_expires = cputime_zero;
	tsk->it_prof_expires = cputime_zero;
	tsk->it_sched_expires = 0;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, current->pid,
				preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		acct_process(code);
	}
	exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
	 */
	mutex_debug_check_no_locks_held(tsk);

	/* PF_DEAD causes final put_task_struct after we schedule. */
	preempt_disable();
	BUG_ON(tsk->flags & PF_DEAD);
	tsk->flags |= PF_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return". */
	for (;;) ;
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}
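
/*
 * The (error_code & 0xff) << 8 above packs the exit code into the
 * classic wait status layout: for a normal exit the low 7 bits
 * (termination signal) are zero and bits 8..15 carry the exit code,
 * so e.g. exit(3) yields a wait status of 0x0300 and WEXITSTATUS()
 * in user space recovers 3.
 */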

task_t fastcall *next_thread(const task_t *p)
{
	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
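
/*
 * The pid argument above follows the wait4() convention:
 *	pid > 0		only the child with that exact pid
 *	pid == 0	any child in the caller's process group
 *	pid == -1	any child at all
 *	pid < -1	any child in process group -pid
 * The return value is 0 (skip this child), 1 (eligible), or 2
 * (eligible, but a group leader whose thread group is not yet
 * empty, so reaping must be delayed).
 */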

static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held. */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
			     struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}

static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * which we need to hold to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p,struct task_struct,sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child.  But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock. */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		if (tsk->signal != current->signal)
			BUG();
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit,&wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
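
/*
 * User-space view (illustrative): this syscall backs the POSIX
 * waitid() call, e.g.
 *
 *	siginfo_t info;
 *	waitid(P_PID, child_pid, &info, WEXITED | WNOWAIT);
 *
 * which, via the WNOWAIT paths above, reports the child's status
 * without reaping it, so a later wait4()/waitid() still succeeds.
 */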

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif