/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->stack = ti;

	err = prop_local_init_single(&tsk->dirties);
	if (err) {
		free_thread_info(ti);
		free_task_struct(tsk);
		return NULL;
	}

	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->flags = (current->mm) ? current->mm->flags
				  : MMF_DUMP_FILTER_DEFAULT;
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

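/*
 * An mm_struct carries two reference counts: mm_users counts users of
 * the address space itself and is dropped with mmput() above, while
 * mm_count pins the struct (and pgd) for lazy-TLB threads and is
 * dropped with mmdrop(), which ends up in __mmdrop() once the last
 * reference goes away.
 */
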
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO).  Otherwise returns a reference to the
 * mm, after bumping up the use count.  The caller must release the mm
 * via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

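/*
 * Typical caller pattern (a sketch, not code from this file): take the
 * reference, inspect the mm, then drop it again:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		... examine mm ...
 *		mmput(mm);
 *	}
 */
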
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct:
 * error, success, whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one, because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	/*
	 * If we're exiting normally, clear a user-space tid field if
	 * requested.  We leave this alone when dying by signal, to leave
	 * the value intact in a core dump, and to save the unnecessary
	 * trouble otherwise.  Userland only wants this done for a sys_exit.
	 */
	if (tsk->clear_child_tid
	    && !(tsk->flags & PF_SIGNALED)
	    && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

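/*
 * The clear_child_tid store-and-wake above is what makes
 * CLONE_CHILD_CLEARTID (and sys_set_tid_address below) useful: a
 * thread library can register a per-thread TID slot and implement
 * join by sleeping in FUTEX_WAIT on that slot until thread exit
 * zeroes it and wakes the waiter.
 */
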
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	/* Initializing for Swap token stuff */
	mm->token_priority = 0;
	mm->last_interval = 0;

	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / (8 * sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i + 1) * 8 * sizeof(long);
	return i;
}

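/*
 * Worked example (assuming 64-bit longs): with max_fds == 256 and the
 * highest open fd being 9, only fds_bits[0] is non-zero, so the scan
 * stops at word 0 and count_open_files() returns (0 + 1) * 64 == 64.
 * The count is rounded up to a whole word's worth of bits, not the
 * exact number of open descriptors.
 */
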
static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files - 1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table:
		 * it may have grown a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
		old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
		old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		tsk->io_context = ioc_task_link(ioc);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (unlikely(!tsk->io_context))
			return -ENOMEM;

		tsk->io_context->ioprio = ioc->ioprio;
	}
#endif
	return 0;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = NULL;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->gtime = cputime_zero;
	sig->cgtime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
	sig->sum_sched_runtime = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	tty_audit_fork(sig);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~PF_SUPERPRIV;
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
	clear_freeze_flag(p);
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}

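/*
 * Note that sys_set_tid_address() only records the pointer; the store
 * of 0 and the FUTEX_WAKE happen in mm_release() above when the
 * thread exits.
 */
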
static void rt_mutex_init_task(struct task_struct *p)
{
	spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->user != current->nsproxy->user_ns->root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

#ifdef CONFIG_DETECT_SOFTLOCKUP
	p->last_switch_count = 0;
	p->last_switch_timestamp = 0;
#endif

#ifdef CONFIG_TASK_XACCT
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
#endif
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
	p->security = NULL;
#endif
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(task_active_pid_ns(p));
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* Our parent execution domain becomes current domain.
	   These must match for thread signalling to apply. */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->tty = current->signal->tty;
			set_task_pgrp(p, task_pgrp_nr(current));
			set_task_session(p, task_session_nr(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
				&init_struct_pid);
	if (!IS_ERR(task))
		init_idle(task, cpu);

	return task;
}

static int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			child_tidptr, NULL);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		/*
		 * calling task_pid_nr_ns() would be enough here, but
		 * this if improves optimisation of regular fork()
		 */
		nr = (clone_flags & CLONE_NEWPID) ?
			task_pid_nr_ns(p, current->nsproxy->pid_ns) :
			task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}

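/*
 * Architecture code supplies the actual fork/clone system call entry
 * points on top of do_fork().  As a sketch (i386-style, not from this
 * file):
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *	}
 */
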
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

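/*
 * Example of the forcing above: unshare(CLONE_THREAD) from a
 * multi-threaded caller ends up with CLONE_THREAD|CLONE_VM|CLONE_SIGHAND
 * all set, which unshare_thread() below then rejects with -EINVAL,
 * since unsharing CLONE_THREAD is not supported yet.
 */
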
/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
				CLONE_NEWNET))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_semundo;

	if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

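/*
 * Note that the cleanup labels below double as the normal exit path:
 * on success, the new_* variables now hold the old, swapped-out
 * structures (or NULL), so the same code drops whatever references
 * are left over.
 */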
bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}