/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 *  (see also entry.S and others).
 *  Fork is rather simple, once you get the hang of it, but the memory
 *  management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads; 		/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

EXPORT_SYMBOL(tasklist_lock);

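/*
 * Roughly how many processes exist system-wide: the sum of the
 * per-cpu fork counts. Read locklessly, so the result is only
 * approximate while forks and exits are in flight.
 */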
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

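/*
 * Boot-time initialization for the fork machinery: create the
 * task_struct slab (unless the architecture provides its own
 * allocator) and derive a default max_threads from the amount of
 * memory, then mirror it into init_task's RLIMIT_NPROC.
 */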
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

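/*
 * Duplicate the task_struct and thread_info of @orig for a new
 * child. The child starts as a byte-for-byte copy of the parent;
 * the caller (copy_process) then fixes up everything that must not
 * be shared. Returns NULL on allocation failure.
 */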
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->thread_info = ti;
	setup_thread_stack(tsk, orig);

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
	tsk->btrace_seq = 0;
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
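/*
 * Duplicate the parent's vma list and page tables into the new mm.
 * VM_DONTCOPY areas are skipped, accounted areas are charged, and
 * file-backed vmas are linked into the file's mapping tree.
 */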
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

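/*
 * Set up a freshly allocated (or copied) mm_struct: reference
 * counts, locks, list heads and the RSS counters, then allocate
 * its page directory. Returns the mm, or NULL if pgd allocation
 * fails (in which case the mm itself is freed too).
 */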
static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO). Otherwise bumps up the use count and
 * returns a reference to the mm. The caller must release the mm via
 * mmput() after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one, because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

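/*
 * Set up the child's mm for fork/clone: kernel threads get no mm,
 * CLONE_VM children share the parent's mm, and everything else
 * gets a full copy via dup_mm().
 */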
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

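/*
 * Scan the open-fd bitmap backwards for the highest open fd and
 * return the number of fd slots (rounded up to a whole word of
 * bits) that need copying.
 */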
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fdset;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

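/*
 * Allocate a files_struct with its embedded default fdtable,
 * sized for NR_OPEN_DEFAULT descriptors. Returns NULL on
 * allocation failure.
 */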
static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->free_files = NULL;
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, expand;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table:
		 * it may have grown a new, bigger fd table meanwhile, and
		 * we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

out:
	return newf;

out_release:
	free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	return NULL;
}

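/*
 * Set up the child's file table: share it for CLONE_FILES,
 * otherwise duplicate the parent's table with dup_fd(). A task
 * with no file table (a background/kernel process) needs nothing.
 */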
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race but the race causes us to copy when we don't
	   need to and drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

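/*
 * Share the signal handler table when CLONE_SIGHAND (or
 * CLONE_THREAD) is set; otherwise allocate a new sighand_struct
 * and copy the parent's handlers into it.
 */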
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

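/*
 * Share the signal_struct for CLONE_THREAD; otherwise allocate a
 * fresh one and initialize the shared pending queue, the interval
 * timer state, the CPU timer lists and a copy of the parent's
 * rlimits.
 */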
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

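/*
 * Derive the child's flags from the parent's: the child never
 * inherits PF_SUPERPRIV or PF_NOFREEZE, is marked PF_FORKNOEXEC,
 * and loses its ptrace linkage unless CLONE_PTRACE was requested.
 */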
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

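/*
 * set_tid_address(2): record where the kernel should write 0 and
 * wake a futex waiter when this thread exits (the
 * CLONE_CHILD_CLEARTID mechanism, consumed by mm_release() above).
 * Returns the caller's PID.
 */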
asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
	spin_lock_init(&p->pi_lock);
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cpuset;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

	rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/*
	 * Our parent execution domain becomes current domain.
	 * These must match for thread signalling to apply.
	 */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * the parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespace;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup:
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

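/*
 * Create an idle task for the given CPU. The new task shares the
 * caller's mm (CLONE_VM) and gets pid 0; it is never woken through
 * the normal fork path, init_idle() takes over from here.
 */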
struct task_struct * __devinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!task)
		return ERR_PTR(-ENOMEM);
	init_idle(task, cpu);

	return task;
}

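/*
 * Decide which ptrace event (if any) should be reported to a
 * tracing parent for this clone, based on the clone flags and
 * which PT_TRACE_* options the tracer enabled.
 */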
static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
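/*
 * Illustrative only (not part of this file): architecture code is
 * what actually calls do_fork() for the fork-family syscalls. On
 * i386, for example, the wrappers in arch/i386/kernel/process.c
 * look roughly like:
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *	}
 *
 * with vfork passing CLONE_VFORK | CLONE_VM | SIGCHLD instead.
 * Details differ per architecture.
 */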
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = nr;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&sighand->siglock);
}

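/*
 * Create the slab caches for the per-process structures copied at
 * fork time. sighand_cache is SLAB_DESTROY_BY_RCU so that lockless
 * readers under rcu_read_lock() never see a freed sighand_struct.
 */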
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread.
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
	struct namespace *ns = current->namespace;

	if ((unshare_flags & CLONE_NEWNS) &&
	    (ns && atomic_read(&ns->count) > 1)) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		*new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
		if (!*new_nsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) &&
	    (sigh && atomic_read(&sigh->count) > 1))
		return -EINVAL;
	else
		return 0;
}

/*
 * Unsharing a vm that is actually being shared with another task
 * is not supported: fail with -EINVAL if the mm is shared.
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone. copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
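/*
 * Illustrative only (not part of this file): a typical userspace
 * caller detaches its mount namespace before doing private mounts,
 * roughly:
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare");
 *
 * check_unshare_flags() pulls in CLONE_FS automatically in that
 * case.
 */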
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct namespace *ns, *new_ns = NULL;
	struct sighand_struct *sigh, *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			current->fs = new_fs;
			new_fs = fs;
		}

		if (new_ns) {
			ns = current->namespace;
			current->namespace = new_ns;
			new_ns = ns;
		}

		if (new_sigh) {
			sigh = current->sighand;
			rcu_assign_pointer(current->sighand, new_sigh);
			new_sigh = sigh;
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	/*
	 * On success the pointers were swapped above, so the new_*
	 * variables now hold the old structures and the fall-through
	 * below drops them; the labels double as the error unwind path.
	 */
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_ns:
	if (new_ns)
		put_namespace(new_ns);

bad_unshare_cleanup_fs:
	if (new_fs)
		put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}