/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/signalfd.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);

static void exit_mm(struct task_struct * tsk);
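/*
 * Drop the task from all pid hashes and bookkeeping lists. For a
 * thread group leader this also detaches the process group and
 * session pids and takes the task off the global task list.
 * Called from __exit_signal() with tasklist_lock write-locked.
 */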
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);
	/*
	 * Notify that this sighand has been detached. This must
	 * be called with the tsk->sighand lock held. Also, this
	 * accesses tsk->sighand internally, so it must be called
	 * before tsk->sighand is reset.
	 */
	signalfd_detach_locked(tsk);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads. When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		__cleanup_signal(sig);
	}
}
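/*
 * Deferred put_task_struct(): runs as an RCU callback so the final
 * reference is only dropped after a grace period, and concurrent RCU
 * readers can never see the task_struct being freed under them.
 */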
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}
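/*
 * Detach a dead task from all kernel data structures and drop its
 * reference. If this was the last non-leader thread and the zombie
 * leader turns out to be self-reaping, loop around and release the
 * leader as well.
 */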
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process (if it wants notification).
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52. Orphaned process groups are not to be affected
 * by terminal-generated stop signals. Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}
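/*
 * Check whether the current task's process group is orphaned, taking
 * tasklist_lock on behalf of the caller.
 */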
int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
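/*
 * Return nonzero if the given process group has at least one stopped
 * member. Used when deciding whether a newly orphaned process group
 * must be sent SIGHUP and SIGCONT.
 */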
static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state fields, such as scheduling policy and priority,
 * may have been inherited from a user process, so we reset them to sane
 * values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->real_parent = current->parent = kthreadd_task;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}
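/*
 * Move the group leader into the given session and process group,
 * rehashing its pids as needed. The caller must hold tasklist_lock
 * write-locked; set_special_pids() below is the locked wrapper.
 */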
void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (process_session(curr) != session) {
		detach_pid(curr, PIDTYPE_SID);
		set_signal_session(curr->signal, session);
		attach_pid(curr, PIDTYPE_SID, find_pid(session));
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp));
	}
}

static void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
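/*
 * Illustrative use only (not taken from this file): a kernel thread
 * that wants to be killable would typically unblock one signal after
 * daemonize(), e.g.
 *
 *	daemonize("mythread");
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();
 *
 * where "mythread" and do_work() are hypothetical names.
 */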
EXPORT_SYMBOL(allow_signal);

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);
/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages. We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);

	exit_task_namespaces(current);
	current->nsproxy = init_task.nsproxy;
	get_task_namespaces(current);

	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);
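/*
 * Walk the fd table bitmap and close every file still open. Only
 * called when the last reference to the files_struct is being
 * dropped, so no locking is needed.
 */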
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}
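/*
 * Grab a counted reference to the task's files_struct, or NULL if the
 * task has already detached its file table.
 */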
struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

EXPORT_SYMBOL(put_files_struct);

void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);
/*
 * Detach the exiting task from its mm and turn it into a lazy TLB
 * process if it isn't one already.
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm. The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void
choose_new_parent(struct task_struct *p, struct task_struct *reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state);
	p->real_parent = reaper;
}
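/*
 * Hand the child p over to its new parent, preserving ptrace links
 * when requested, and apply the orphaned-pgrp rules when the move
 * severs a process group's last link to the outside. Runs with
 * tasklist_lock held.
 */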
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here. */
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	/* Move the child from its dying parent to the new one. */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child. */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/* If this is a threaded reparent, there is no need to
	 * notify anyone that anything has happened.
	 */
	if (p->real_parent->group_leader == father->group_leader)
		return;

	/* We don't want people slaying init. */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	/* If we'd notified the old parent about this child's death,
	 * also notify the new parent.
	 */
	if (!traced && p->exit_state == EXIT_ZOMBIE &&
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((task_pgrp(p) != task_pgrp(father)) &&
	    (task_session(p) == task_session(father))) {
		struct pid *pgrp = task_pgrp(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) &&
		    has_stopped_jobs(pgrp)) {
			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
		}
	}
}
/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give them to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static void
forget_original_parent(struct task_struct *father, struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper(father);
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* reparent with a reaper; the real father is us */
			choose_new_parent(p, reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink(p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1
		 * we must collect it before we exit, or it will remain
		 * a zombie forever, since we prevented it from reaping itself
		 * while it was being traced by us so that we could see it
		 * in wait4.
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper);
		reparent_thread(p, father, 1);
	}
}
/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;
	struct pid *pgrp;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up. It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING))
				recalc_sigpending_and_wake(t);
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A. Make init inherit all the child processes
	 * B. Check to see if any process groups have become orphaned
	 *    as a result of our exiting, and if they have any stopped
	 *    jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	pgrp = task_pgrp(tsk);
	if ((task_pgrp(t) != pgrp) &&
	    (task_session(t) == task_session(tsk)) &&
	    will_become_orphaned_pgrp(pgrp, tsk) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    (tsk->parent_exec_id != t->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;

	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal. exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);
}
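/*
 * The final path of a dying task: tear down every resource the task
 * owns (mm, files, fs, namespaces, ...), notify the parent, then
 * schedule away for the last time in state TASK_DEAD.
 */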
fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk == child_reaper(tsk))) {
		if (tsk->nsproxy->pid_ns != &init_pid_ns)
			tsk->nsproxy->pid_ns->child_reaper = init_pid_ns.child_reaper;
		else
			panic("Attempted to kill init!");
	}

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into wait-forever nirvana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		if (tsk->io_context)
			exit_io_context();
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	spin_lock_irq(&tsk->pi_lock);
	tsk->flags |= PF_EXITING;
	spin_unlock_irq(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
			current->comm, current->pid,
			preempt_count());

	acct_update_integrals(tsk);
	if (tsk->mm) {
		update_hiwater_rss(tsk->mm);
		update_hiwater_vm(tsk->mm);
	}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
	if (unlikely(tsk->robust_list))
		exit_robust_list(tsk);
#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
	if (unlikely(tsk->compat_robust_list))
		compat_exit_robust_list(tsk);
#endif
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	proc_exit_connector(tsk);
	exit_task_namespaces(tsk);
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
	/*
	 * This must happen late, after the PID is not
	 * hashed anymore:
	 */
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context();

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return". */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
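/*
 * The low byte of error_code is shifted into bits 8..15, which is
 * where wait(2) and friends expect a normal exit status to appear.
 */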
asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group. This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * This kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}
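/*
 * Decide whether wait should consider this child at all: match the
 * pid/pgid selector, honour __WCLONE/__WALL, and skip detached threads
 * and delayed group leaders. Returns 0 (skip), 1 (eligible), 2
 * (eligible but reaping must be delayed) or a security error.
 */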
static int eligible_child(pid_t pid, int options, struct task_struct *p)
{
	int err;

	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*. (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (delay_group_leader(p))
		return 2;

	err = security_task_wait(p);
	if (err)
		return err;

	return 1;
}
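/*
 * Fill in the siginfo and rusage for a WNOWAIT-style wait that leaves
 * the child unreaped, and drop the task reference taken by the caller.
 */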
static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;

	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}
/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct task_struct *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD;
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		struct signal_struct *psig;
		struct signal_struct *sig;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct. Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped. All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here. We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		psig = p->parent->signal;
		sig = p->signal;
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held. */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}
/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
			     int noreap, struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below. We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state & TASK_TRACED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it. It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died. Let the next iteration
		 * catch it in EXIT_ZOMBIE. Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock. We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning. Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}
/*
 * Handle do_wait work for one task in a live, non-stopped state. We hold
 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
 * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}
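/*
 * Return nonzero if p is a child we are ptracing, coping with the
 * window in which PTRACE_ATTACH has set the flags but has not yet
 * switched the parent links.
 */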
static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd. We should be seeing it only if
	 * we are the attacher. If we are the real parent, this is a race
	 * inside ptrace_attach. It is waiting for the tasklist_lock,
	 * under which we have to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}
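/*
 * The core of the wait family: scan our children (and, failing that,
 * our ptraced children) for anything reportable, sleeping
 * interruptibly on signal->wait_chldexit between scans until WNOHANG,
 * a match or a signal ends the loop.
 */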
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;
	int allowed, denied;

	add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	allowed = denied = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p, &tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			if (unlikely(ret < 0)) {
				denied = ret;
				continue;
			}
			allowed = 1;

			switch (p->state) {
			case TASK_TRACED:
				/*
				 * When we hit the race with PTRACE_ATTACH,
				 * we will not report this child. But the
				 * race means it has not yet been moved to
				 * our ptrace_children list, so we need to
				 * set the flag here to avoid a spurious ECHILD
				 * when the race happens with the only child.
				 */
				flag = 1;
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock. */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		BUG_ON(tsk->signal != current->signal);
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
	if (unlikely(denied) && !allowed)
		retval = denied;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit, &wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}
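/*
 * sys_waitid() translates its (which, pid) pair into the historical
 * convention used by do_wait(): -1 for any child, a positive pid for
 * a single process, or a negative value naming a process group.
 */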
asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif