1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/fs.h>
31 #include <linux/nsproxy.h>
32 #include <linux/capability.h>
33 #include <linux/cpu.h>
34 #include <linux/cgroup.h>
35 #include <linux/security.h>
36 #include <linux/hugetlb.h>
37 #include <linux/seccomp.h>
38 #include <linux/swap.h>
39 #include <linux/syscalls.h>
40 #include <linux/jiffies.h>
41 #include <linux/futex.h>
42 #include <linux/compat.h>
43 #include <linux/kthread.h>
44 #include <linux/task_io_accounting_ops.h>
45 #include <linux/rcupdate.h>
46 #include <linux/ptrace.h>
47 #include <linux/mount.h>
48 #include <linux/audit.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/proc_fs.h>
52 #include <linux/profile.h>
53 #include <linux/rmap.h>
54 #include <linux/ksm.h>
55 #include <linux/acct.h>
56 #include <linux/tsacct_kern.h>
57 #include <linux/cn_proc.h>
58 #include <linux/freezer.h>
59 #include <linux/delayacct.h>
60 #include <linux/taskstats_kern.h>
61 #include <linux/random.h>
62 #include <linux/tty.h>
63 #include <linux/blkdev.h>
64 #include <linux/fs_struct.h>
65 #include <linux/magic.h>
66 #include <linux/perf_event.h>
67 #include <linux/posix-timers.h>
68 #include <linux/user-return-notifier.h>
69 #include <linux/oom.h>
70 #include <linux/khugepaged.h>
71 #include <linux/signalfd.h>
72 #include <linux/uprobes.h>
73 #include <linux/aio.h>
74
75 #include <asm/pgtable.h>
76 #include <asm/pgalloc.h>
77 #include <asm/uaccess.h>
78 #include <asm/mmu_context.h>
79 #include <asm/cacheflush.h>
80 #include <asm/tlbflush.h>
81
82 #include <trace/events/sched.h>
83
84 #define CREATE_TRACE_POINTS
85 #include <trace/events/task.h>
86
87 /*
88  * Counters protected by write_lock_irq(&tasklist_lock)
89  */
90 unsigned long total_forks;      /* Handle normal Linux uptimes. */
91 int nr_threads;                 /* The idle threads do not count. */
92
93 int max_threads;                /* tunable limit on nr_threads */
94
95 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
96
97 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
98
99 #ifdef CONFIG_PROVE_RCU
100 int lockdep_tasklist_lock_is_held(void)
101 {
102         return lockdep_is_held(&tasklist_lock);
103 }
104 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
105 #endif /* #ifdef CONFIG_PROVE_RCU */
106
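/*
 * Return the total number of processes in the system by summing the
 * per-CPU process_counts over all possible CPUs.
 */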
107 int nr_processes(void)
108 {
109         int cpu;
110         int total = 0;
111
112         for_each_possible_cpu(cpu)
113                 total += per_cpu(process_counts, cpu);
114
115         return total;
116 }
117
118 void __weak arch_release_task_struct(struct task_struct *tsk)
119 {
120 }
121
122 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
123 static struct kmem_cache *task_struct_cachep;
124
125 static inline struct task_struct *alloc_task_struct_node(int node)
126 {
127         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
128 }
129
130 static inline void free_task_struct(struct task_struct *tsk)
131 {
132         kmem_cache_free(task_struct_cachep, tsk);
133 }
134 #endif
135
136 void __weak arch_release_thread_info(struct thread_info *ti)
137 {
138 }
139
140 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
141
142 /*
143  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
144  * kmem_cache-based allocator.
145  */
146 # if THREAD_SIZE >= PAGE_SIZE
147 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
148                                                   int node)
149 {
150         struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
151                                              THREAD_SIZE_ORDER);
152
153         return page ? page_address(page) : NULL;
154 }
155
156 static inline void free_thread_info(struct thread_info *ti)
157 {
158         free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
159 }
160 # else
161 static struct kmem_cache *thread_info_cache;
162
163 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
164                                                   int node)
165 {
166         return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
167 }
168
169 static void free_thread_info(struct thread_info *ti)
170 {
171         kmem_cache_free(thread_info_cache, ti);
172 }
173
174 void thread_info_cache_init(void)
175 {
176         thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
177                                               THREAD_SIZE, 0, NULL);
178         BUG_ON(thread_info_cache == NULL);
179 }
180 # endif
181 #endif
182
183 /* SLAB cache for signal_struct structures (tsk->signal) */
184 static struct kmem_cache *signal_cachep;
185
186 /* SLAB cache for sighand_struct structures (tsk->sighand) */
187 struct kmem_cache *sighand_cachep;
188
189 /* SLAB cache for files_struct structures (tsk->files) */
190 struct kmem_cache *files_cachep;
191
192 /* SLAB cache for fs_struct structures (tsk->fs) */
193 struct kmem_cache *fs_cachep;
194
195 /* SLAB cache for vm_area_struct structures */
196 struct kmem_cache *vm_area_cachep;
197
198 /* SLAB cache for mm_struct structures (tsk->mm) */
199 static struct kmem_cache *mm_cachep;
200
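/*
 * Account one thread_info-sized kernel stack to (account = 1) or from
 * (account = -1) the NR_KERNEL_STACK counter of the zone that backs it.
 */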
201 static void account_kernel_stack(struct thread_info *ti, int account)
202 {
203         struct zone *zone = page_zone(virt_to_page(ti));
204
205         mod_zone_page_state(zone, NR_KERNEL_STACK, account);
206 }
207
208 void free_task(struct task_struct *tsk)
209 {
210         account_kernel_stack(tsk->stack, -1);
211         arch_release_thread_info(tsk->stack);
212         free_thread_info(tsk->stack);
213         rt_mutex_debug_task_free(tsk);
214         ftrace_graph_exit_task(tsk);
215         put_seccomp_filter(tsk);
216         arch_release_task_struct(tsk);
217         free_task_struct(tsk);
218 }
219 EXPORT_SYMBOL(free_task);
220
221 static inline void free_signal_struct(struct signal_struct *sig)
222 {
223         taskstats_tgid_free(sig);
224         sched_autogroup_exit(sig);
225         kmem_cache_free(signal_cachep, sig);
226 }
227
228 static inline void put_signal_struct(struct signal_struct *sig)
229 {
230         if (atomic_dec_and_test(&sig->sigcnt))
231                 free_signal_struct(sig);
232 }
233
234 void __put_task_struct(struct task_struct *tsk)
235 {
236         WARN_ON(!tsk->exit_state);
237         WARN_ON(atomic_read(&tsk->usage));
238         WARN_ON(tsk == current);
239
240         security_task_free(tsk);
241         exit_creds(tsk);
242         delayacct_tsk_free(tsk);
243         put_signal_struct(tsk->signal);
244
245         if (!profile_handoff_task(tsk))
246                 free_task(tsk);
247 }
248 EXPORT_SYMBOL_GPL(__put_task_struct);
249
250 void __init __weak arch_task_cache_init(void) { }
251
252 void __init fork_init(unsigned long mempages)
253 {
254 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
255 #ifndef ARCH_MIN_TASKALIGN
256 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
257 #endif
258         /* create a slab on which task_structs can be allocated */
259         task_struct_cachep =
260                 kmem_cache_create("task_struct", sizeof(struct task_struct),
261                         ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
262 #endif
263
264         /* do the arch specific task caches init */
265         arch_task_cache_init();
266
267         /*
268          * The default maximum number of threads is set to a safe
269          * value: the thread structures can take up at most one
270          * eighth of memory (hence the division by 8 below).
271          */
272         max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
273
274         /*
275          * we need to allow at least 20 threads to boot a system
276          */
277         if (max_threads < 20)
278                 max_threads = 20;
279
280         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
281         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
282         init_task.signal->rlim[RLIMIT_SIGPENDING] =
283                 init_task.signal->rlim[RLIMIT_NPROC];
284 }
285
286 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
287                                                struct task_struct *src)
288 {
289         *dst = *src;
290         return 0;
291 }
292
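/*
 * Duplicate the task_struct and kernel stack/thread_info of @orig for a
 * new child.  The copy gets its own stack-end magic, stack canary and an
 * initial usage count of two (one for us, one for release_task()).
 */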
293 static struct task_struct *dup_task_struct(struct task_struct *orig)
294 {
295         struct task_struct *tsk;
296         struct thread_info *ti;
297         unsigned long *stackend;
298         int node = tsk_fork_get_node(orig);
299         int err;
300
301         tsk = alloc_task_struct_node(node);
302         if (!tsk)
303                 return NULL;
304
305         ti = alloc_thread_info_node(tsk, node);
306         if (!ti)
307                 goto free_tsk;
308
309         err = arch_dup_task_struct(tsk, orig);
310         if (err)
311                 goto free_ti;
312
313         tsk->stack = ti;
314
315         setup_thread_stack(tsk, orig);
316         clear_user_return_notifier(tsk);
317         clear_tsk_need_resched(tsk);
318         stackend = end_of_stack(tsk);
319         *stackend = STACK_END_MAGIC;    /* for overflow detection */
320
321 #ifdef CONFIG_CC_STACKPROTECTOR
322         tsk->stack_canary = get_random_int();
323 #endif
324
325         /*
326          * One for us, one for whoever does the "release_task()" (usually
327          * parent)
328          */
329         atomic_set(&tsk->usage, 2);
330 #ifdef CONFIG_BLK_DEV_IO_TRACE
331         tsk->btrace_seq = 0;
332 #endif
333         tsk->splice_pipe = NULL;
334         tsk->task_frag.page = NULL;
335
336         account_kernel_stack(ti, 1);
337
338         return tsk;
339
340 free_ti:
341         free_thread_info(ti);
342 free_tsk:
343         free_task_struct(tsk);
344         return NULL;
345 }
346
347 #ifdef CONFIG_MMU
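/*
 * Copy the parent's address-space layout into the new mm: duplicate each
 * vm_area_struct (honouring VM_DONTCOPY), link it into the VMA list and
 * rbtree, and copy the corresponding page table entries.
 */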
348 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
349 {
350         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
351         struct rb_node **rb_link, *rb_parent;
352         int retval;
353         unsigned long charge;
354         struct mempolicy *pol;
355
356         uprobe_start_dup_mmap();
357         down_write(&oldmm->mmap_sem);
358         flush_cache_dup_mm(oldmm);
359         uprobe_dup_mmap(oldmm, mm);
360         /*
361          * Not linked in yet - no deadlock potential:
362          */
363         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
364
365         mm->locked_vm = 0;
366         mm->mmap = NULL;
367         mm->mmap_cache = NULL;
368         mm->free_area_cache = oldmm->mmap_base;
369         mm->cached_hole_size = ~0UL;
370         mm->map_count = 0;
371         cpumask_clear(mm_cpumask(mm));
372         mm->mm_rb = RB_ROOT;
373         rb_link = &mm->mm_rb.rb_node;
374         rb_parent = NULL;
375         pprev = &mm->mmap;
376         retval = ksm_fork(mm, oldmm);
377         if (retval)
378                 goto out;
379         retval = khugepaged_fork(mm, oldmm);
380         if (retval)
381                 goto out;
382
383         prev = NULL;
384         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
385                 struct file *file;
386
387                 if (mpnt->vm_flags & VM_DONTCOPY) {
388                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
389                                                         -vma_pages(mpnt));
390                         continue;
391                 }
392                 charge = 0;
393                 if (mpnt->vm_flags & VM_ACCOUNT) {
394                         unsigned long len = vma_pages(mpnt);
395
396                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
397                                 goto fail_nomem;
398                         charge = len;
399                 }
400                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
401                 if (!tmp)
402                         goto fail_nomem;
403                 *tmp = *mpnt;
404                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
405                 pol = mpol_dup(vma_policy(mpnt));
406                 retval = PTR_ERR(pol);
407                 if (IS_ERR(pol))
408                         goto fail_nomem_policy;
409                 vma_set_policy(tmp, pol);
410                 tmp->vm_mm = mm;
411                 if (anon_vma_fork(tmp, mpnt))
412                         goto fail_nomem_anon_vma_fork;
413                 tmp->vm_flags &= ~VM_LOCKED;
414                 tmp->vm_next = tmp->vm_prev = NULL;
415                 file = tmp->vm_file;
416                 if (file) {
417                         struct inode *inode = file_inode(file);
418                         struct address_space *mapping = file->f_mapping;
419
420                         get_file(file);
421                         if (tmp->vm_flags & VM_DENYWRITE)
422                                 atomic_dec(&inode->i_writecount);
423                         mutex_lock(&mapping->i_mmap_mutex);
424                         if (tmp->vm_flags & VM_SHARED)
425                                 mapping->i_mmap_writable++;
426                         flush_dcache_mmap_lock(mapping);
427                         /* insert tmp into the share list, just after mpnt */
428                         if (unlikely(tmp->vm_flags & VM_NONLINEAR))
429                                 vma_nonlinear_insert(tmp,
430                                                 &mapping->i_mmap_nonlinear);
431                         else
432                                 vma_interval_tree_insert_after(tmp, mpnt,
433                                                         &mapping->i_mmap);
434                         flush_dcache_mmap_unlock(mapping);
435                         mutex_unlock(&mapping->i_mmap_mutex);
436                 }
437
438                 /*
439                  * Clear hugetlb-related page reserves for children. This only
440                  * affects MAP_PRIVATE mappings. Faults generated by the child
441                  * are not guaranteed to succeed, even if read-only
442                  */
443                 if (is_vm_hugetlb_page(tmp))
444                         reset_vma_resv_huge_pages(tmp);
445
446                 /*
447                  * Link in the new vma and copy the page table entries.
448                  */
449                 *pprev = tmp;
450                 pprev = &tmp->vm_next;
451                 tmp->vm_prev = prev;
452                 prev = tmp;
453
454                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
455                 rb_link = &tmp->vm_rb.rb_right;
456                 rb_parent = &tmp->vm_rb;
457
458                 mm->map_count++;
459                 retval = copy_page_range(mm, oldmm, mpnt);
460
461                 if (tmp->vm_ops && tmp->vm_ops->open)
462                         tmp->vm_ops->open(tmp);
463
464                 if (retval)
465                         goto out;
466         }
467         /* a new mm has just been created */
468         arch_dup_mmap(oldmm, mm);
469         retval = 0;
470 out:
471         up_write(&mm->mmap_sem);
472         flush_tlb_mm(oldmm);
473         up_write(&oldmm->mmap_sem);
474         uprobe_end_dup_mmap();
475         return retval;
476 fail_nomem_anon_vma_fork:
477         mpol_put(pol);
478 fail_nomem_policy:
479         kmem_cache_free(vm_area_cachep, tmp);
480 fail_nomem:
481         retval = -ENOMEM;
482         vm_unacct_memory(charge);
483         goto out;
484 }
485
486 static inline int mm_alloc_pgd(struct mm_struct *mm)
487 {
488         mm->pgd = pgd_alloc(mm);
489         if (unlikely(!mm->pgd))
490                 return -ENOMEM;
491         return 0;
492 }
493
494 static inline void mm_free_pgd(struct mm_struct *mm)
495 {
496         pgd_free(mm, mm->pgd);
497 }
498 #else
499 #define dup_mmap(mm, oldmm)     (0)
500 #define mm_alloc_pgd(mm)        (0)
501 #define mm_free_pgd(mm)
502 #endif /* CONFIG_MMU */
503
504 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
505
506 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
507 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
508
509 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
510
511 static int __init coredump_filter_setup(char *s)
512 {
513         default_dump_filter =
514                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
515                 MMF_DUMP_FILTER_MASK;
516         return 1;
517 }
518
519 __setup("coredump_filter=", coredump_filter_setup);
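/*
 * Boot-time example (illustrative value): "coredump_filter=0x23" on the
 * kernel command line seeds the default dump-filter bits that new mms
 * inherit when they are not forked from an existing user mm.
 */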
520
521 #include <linux/init_task.h>
522
523 static void mm_init_aio(struct mm_struct *mm)
524 {
525 #ifdef CONFIG_AIO
526         spin_lock_init(&mm->ioctx_lock);
527         INIT_HLIST_HEAD(&mm->ioctx_list);
528 #endif
529 }
530
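/*
 * Set up a freshly allocated (or memcpy'd) mm_struct: reference counts,
 * locks, lists, counters and the page directory.  Returns the mm on
 * success, or NULL after freeing it if pgd allocation fails.
 */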
531 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
532 {
533         atomic_set(&mm->mm_users, 1);
534         atomic_set(&mm->mm_count, 1);
535         init_rwsem(&mm->mmap_sem);
536         INIT_LIST_HEAD(&mm->mmlist);
537         mm->flags = (current->mm) ?
538                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
539         mm->core_state = NULL;
540         mm->nr_ptes = 0;
541         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
542         spin_lock_init(&mm->page_table_lock);
543         mm->free_area_cache = TASK_UNMAPPED_BASE;
544         mm->cached_hole_size = ~0UL;
545         mm_init_aio(mm);
546         mm_init_owner(mm, p);
547         clear_tlb_flush_pending(mm);
548
549         if (likely(!mm_alloc_pgd(mm))) {
550                 mm->def_flags = 0;
551                 mmu_notifier_mm_init(mm);
552                 return mm;
553         }
554
555         free_mm(mm);
556         return NULL;
557 }
558
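/*
 * Report any RSS counters that are still non-zero when the mm is torn
 * down; they indicate unbalanced accounting somewhere.
 */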
559 static void check_mm(struct mm_struct *mm)
560 {
561         int i;
562
563         for (i = 0; i < NR_MM_COUNTERS; i++) {
564                 long x = atomic_long_read(&mm->rss_stat.count[i]);
565
566                 if (unlikely(x))
567                         printk(KERN_ALERT "BUG: Bad rss-counter state "
568                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
569         }
570
571 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
572         VM_BUG_ON(mm->pmd_huge_pte);
573 #endif
574 }
575
576 /*
577  * Allocate and initialize an mm_struct.
578  */
579 struct mm_struct *mm_alloc(void)
580 {
581         struct mm_struct *mm;
582
583         mm = allocate_mm();
584         if (!mm)
585                 return NULL;
586
587         memset(mm, 0, sizeof(*mm));
588         mm_init_cpumask(mm);
589         return mm_init(mm, current);
590 }
591
592 /*
593  * Called when the last reference to the mm
594  * is dropped: either by a lazy thread or by
595  * mmput. Free the page directory and the mm.
596  */
597 void __mmdrop(struct mm_struct *mm)
598 {
599         BUG_ON(mm == &init_mm);
600         mm_free_pgd(mm);
601         destroy_context(mm);
602         mmu_notifier_mm_destroy(mm);
603         check_mm(mm);
604         free_mm(mm);
605 }
606 EXPORT_SYMBOL_GPL(__mmdrop);
607
608 /*
609  * Decrement the use count and release all resources for an mm.
610  */
611 void mmput(struct mm_struct *mm)
612 {
613         might_sleep();
614
615         if (atomic_dec_and_test(&mm->mm_users)) {
616                 uprobe_clear_state(mm);
617                 exit_aio(mm);
618                 ksm_exit(mm);
619                 khugepaged_exit(mm); /* must run before exit_mmap */
620                 exit_mmap(mm);
621                 set_mm_exe_file(mm, NULL);
622                 if (!list_empty(&mm->mmlist)) {
623                         spin_lock(&mmlist_lock);
624                         list_del(&mm->mmlist);
625                         spin_unlock(&mmlist_lock);
626                 }
627                 if (mm->binfmt)
628                         module_put(mm->binfmt->module);
629                 mmdrop(mm);
630         }
631 }
632 EXPORT_SYMBOL_GPL(mmput);
633
634 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
635 {
636         if (new_exe_file)
637                 get_file(new_exe_file);
638         if (mm->exe_file)
639                 fput(mm->exe_file);
640         mm->exe_file = new_exe_file;
641 }
642
643 struct file *get_mm_exe_file(struct mm_struct *mm)
644 {
645         struct file *exe_file;
646
647         /* We need mmap_sem to protect against races with removal of exe_file */
648         down_read(&mm->mmap_sem);
649         exe_file = mm->exe_file;
650         if (exe_file)
651                 get_file(exe_file);
652         up_read(&mm->mmap_sem);
653         return exe_file;
654 }
655
656 static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
657 {
658         /* It's safe to write the exe_file pointer without exe_file_lock because
659          * this is called during fork when the task is not yet in /proc */
660         newmm->exe_file = get_mm_exe_file(oldmm);
661 }
662
663 /**
664  * get_task_mm - acquire a reference to the task's mm
665  *
666  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
667  * this kernel workthread has only transiently adopted a user mm with
668  * use_mm, e.g. to do its AIO).  Otherwise returns a reference to the mm
669  * after bumping up the use count.  The caller must release the mm via
670  * mmput() after use.  Typically used by /proc and ptrace.
671  */
672 struct mm_struct *get_task_mm(struct task_struct *task)
673 {
674         struct mm_struct *mm;
675
676         task_lock(task);
677         mm = task->mm;
678         if (mm) {
679                 if (task->flags & PF_KTHREAD)
680                         mm = NULL;
681                 else
682                         atomic_inc(&mm->mm_users);
683         }
684         task_unlock(task);
685         return mm;
686 }
687 EXPORT_SYMBOL_GPL(get_task_mm);
688
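/*
 * Like get_task_mm(), but additionally checks that the caller is allowed
 * to ptrace @task in the given @mode; used for /proc files that expose
 * another task's memory.  Returns the mm, NULL, or an ERR_PTR.
 */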
689 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
690 {
691         struct mm_struct *mm;
692         int err;
693
694         err = mutex_lock_killable(&task->signal->cred_guard_mutex);
695         if (err)
696                 return ERR_PTR(err);
697
698         mm = get_task_mm(task);
699         if (mm && mm != current->mm &&
700                         !ptrace_may_access(task, mode)) {
701                 mmput(mm);
702                 mm = ERR_PTR(-EACCES);
703         }
704         mutex_unlock(&task->signal->cred_guard_mutex);
705
706         return mm;
707 }
708
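/*
 * Wake up a parent sleeping in vfork()/CLONE_VFORK by completing
 * tsk->vfork_done, if it is still set.
 */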
709 static void complete_vfork_done(struct task_struct *tsk)
710 {
711         struct completion *vfork;
712
713         task_lock(tsk);
714         vfork = tsk->vfork_done;
715         if (likely(vfork)) {
716                 tsk->vfork_done = NULL;
717                 complete(vfork);
718         }
719         task_unlock(tsk);
720 }
721
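/*
 * Used by the vfork() parent: wait (killably, and without counting
 * towards the freezer) until the child releases its borrowed mm.
 * Returns non-zero if the wait was interrupted by a fatal signal.
 */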
722 static int wait_for_vfork_done(struct task_struct *child,
723                                 struct completion *vfork)
724 {
725         int killed;
726
727         freezer_do_not_count();
728         killed = wait_for_completion_killable(vfork);
729         freezer_count();
730
731         if (killed) {
732                 task_lock(child);
733                 child->vfork_done = NULL;
734                 task_unlock(child);
735         }
736
737         put_task_struct(child);
738         return killed;
739 }
740
741 /* Please note the differences between mmput and mm_release.
742  * mmput is called whenever we stop holding onto a mm_struct,
743  * on error and success alike.
744  *
745  * mm_release is called after a mm_struct has been removed
746  * from the current process.
747  *
748  * This difference is important for error handling, when we
749  * only half set up a mm_struct for a new process and need to restore
750  * the old one: we mmput the new mm_struct before
751  * restoring the old one...
752  * Eric Biederman 10 January 1998
753  */
754 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
755 {
756         /* Get rid of any futexes when releasing the mm */
757 #ifdef CONFIG_FUTEX
758         if (unlikely(tsk->robust_list)) {
759                 exit_robust_list(tsk);
760                 tsk->robust_list = NULL;
761         }
762 #ifdef CONFIG_COMPAT
763         if (unlikely(tsk->compat_robust_list)) {
764                 compat_exit_robust_list(tsk);
765                 tsk->compat_robust_list = NULL;
766         }
767 #endif
768         if (unlikely(!list_empty(&tsk->pi_state_list)))
769                 exit_pi_state_list(tsk);
770 #endif
771
772         uprobe_free_utask(tsk);
773
774         /* Get rid of any cached register state */
775         deactivate_mm(tsk, mm);
776
777         /*
778          * If we're exiting normally, clear a user-space tid field if
779          * requested.  We leave this alone when dying by signal, to leave
780          * the value intact in a core dump, and to avoid unnecessary work:
781          * for example, a killed vfork parent shouldn't touch this mm.
782          * Userland only wants this done for a sys_exit.
783          */
784         if (tsk->clear_child_tid) {
785                 if (!(tsk->flags & PF_SIGNALED) &&
786                     atomic_read(&mm->mm_users) > 1) {
787                         /*
788                          * We don't check the error code - if userspace has
789                          * not set up a proper pointer then tough luck.
790                          */
791                         put_user(0, tsk->clear_child_tid);
792                         sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
793                                         1, NULL, NULL, 0);
794                 }
795                 tsk->clear_child_tid = NULL;
796         }
797
798         /*
799          * All done, finally we can wake up the parent and return this mm to it.
800          * Also kthread_stop() uses this completion for synchronization.
801          */
802         if (tsk->vfork_done)
803                 complete_vfork_done(tsk);
804 }
805
806 /*
807  * Allocate a new mm structure and copy contents from the
808  * mm structure of the passed in task structure.
809  */
810 struct mm_struct *dup_mm(struct task_struct *tsk)
811 {
812         struct mm_struct *mm, *oldmm = current->mm;
813         int err;
814
815         if (!oldmm)
816                 return NULL;
817
818         mm = allocate_mm();
819         if (!mm)
820                 goto fail_nomem;
821
822         memcpy(mm, oldmm, sizeof(*mm));
823         mm_init_cpumask(mm);
824
825 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
826         mm->pmd_huge_pte = NULL;
827 #endif
828 #ifdef CONFIG_NUMA_BALANCING
829         mm->first_nid = NUMA_PTE_SCAN_INIT;
830 #endif
831         if (!mm_init(mm, tsk))
832                 goto fail_nomem;
833
834         if (init_new_context(tsk, mm))
835                 goto fail_nocontext;
836
837         dup_mm_exe_file(oldmm, mm);
838
839         err = dup_mmap(mm, oldmm);
840         if (err)
841                 goto free_pt;
842
843         mm->hiwater_rss = get_mm_rss(mm);
844         mm->hiwater_vm = mm->total_vm;
845
846         if (mm->binfmt && !try_module_get(mm->binfmt->module))
847                 goto free_pt;
848
849         return mm;
850
851 free_pt:
852         /* don't put binfmt in mmput, we haven't taken the module reference yet */
853         mm->binfmt = NULL;
854         mmput(mm);
855
856 fail_nomem:
857         return NULL;
858
859 fail_nocontext:
860         /*
861          * If init_new_context() failed, we cannot use mmput() to free the mm
862          * because it calls destroy_context()
863          */
864         mm_free_pgd(mm);
865         free_mm(mm);
866         return NULL;
867 }
868
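/*
 * Give the child its mm: share the parent's mm for CLONE_VM, duplicate
 * it otherwise, and do nothing for kernel threads (which have no mm and
 * will borrow an active one later).
 */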
869 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
870 {
871         struct mm_struct *mm, *oldmm;
872         int retval;
873
874         tsk->min_flt = tsk->maj_flt = 0;
875         tsk->nvcsw = tsk->nivcsw = 0;
876 #ifdef CONFIG_DETECT_HUNG_TASK
877         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
878 #endif
879
880         tsk->mm = NULL;
881         tsk->active_mm = NULL;
882
883         /*
884          * Are we cloning a kernel thread?
885          *
886          * We need to steal an active VM for that.
887          */
888         oldmm = current->mm;
889         if (!oldmm)
890                 return 0;
891
892         if (clone_flags & CLONE_VM) {
893                 atomic_inc(&oldmm->mm_users);
894                 mm = oldmm;
895                 goto good_mm;
896         }
897
898         retval = -ENOMEM;
899         mm = dup_mm(tsk);
900         if (!mm)
901                 goto fail_nomem;
902
903 good_mm:
904         tsk->mm = mm;
905         tsk->active_mm = mm;
906         return 0;
907
908 fail_nomem:
909         return retval;
910 }
911
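/*
 * Share the parent's fs_struct (root, cwd, umask) for CLONE_FS, refusing
 * if an exec is in progress; otherwise give the child a private copy.
 */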
912 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
913 {
914         struct fs_struct *fs = current->fs;
915         if (clone_flags & CLONE_FS) {
916                 /* tsk->fs is already what we want */
917                 spin_lock(&fs->lock);
918                 if (fs->in_exec) {
919                         spin_unlock(&fs->lock);
920                         return -EAGAIN;
921                 }
922                 fs->users++;
923                 spin_unlock(&fs->lock);
924                 return 0;
925         }
926         tsk->fs = copy_fs_struct(fs);
927         if (!tsk->fs)
928                 return -ENOMEM;
929         return 0;
930 }
931
932 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
933 {
934         struct files_struct *oldf, *newf;
935         int error = 0;
936
937         /*
938          * A background process may not have any files ...
939          */
940         oldf = current->files;
941         if (!oldf)
942                 goto out;
943
944         if (clone_flags & CLONE_FILES) {
945                 atomic_inc(&oldf->count);
946                 goto out;
947         }
948
949         newf = dup_fd(oldf, &error);
950         if (!newf)
951                 goto out;
952
953         tsk->files = newf;
954         error = 0;
955 out:
956         return error;
957 }
958
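/*
 * Set up the child's block I/O context: share the parent's context for
 * CLONE_IO, otherwise inherit only the parent's I/O priority in a fresh
 * context.
 */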
959 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
960 {
961 #ifdef CONFIG_BLOCK
962         struct io_context *ioc = current->io_context;
963         struct io_context *new_ioc;
964
965         if (!ioc)
966                 return 0;
967         /*
968          * Share io context with parent, if CLONE_IO is set
969          */
970         if (clone_flags & CLONE_IO) {
971                 ioc_task_link(ioc);
972                 tsk->io_context = ioc;
973         } else if (ioprio_valid(ioc->ioprio)) {
974                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
975                 if (unlikely(!new_ioc))
976                         return -ENOMEM;
977
978                 new_ioc->ioprio = ioc->ioprio;
979                 put_io_context(new_ioc);
980         }
981 #endif
982         return 0;
983 }
984
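/*
 * Share the parent's signal handler table for CLONE_SIGHAND, otherwise
 * give the child its own copy of the sigaction array.
 */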
985 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
986 {
987         struct sighand_struct *sig;
988
989         if (clone_flags & CLONE_SIGHAND) {
990                 atomic_inc(&current->sighand->count);
991                 return 0;
992         }
993         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
994         rcu_assign_pointer(tsk->sighand, sig);
995         if (!sig)
996                 return -ENOMEM;
997         atomic_set(&sig->count, 1);
998         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
999         return 0;
1000 }
1001
1002 void __cleanup_sighand(struct sighand_struct *sighand)
1003 {
1004         if (atomic_dec_and_test(&sighand->count)) {
1005                 signalfd_cleanup(sighand);
1006                 kmem_cache_free(sighand_cachep, sighand);
1007         }
1008 }
1009
1010
1011 /*
1012  * Initialize POSIX timer handling for a thread group.
1013  */
1014 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1015 {
1016         unsigned long cpu_limit;
1017
1018         /* Thread group counters. */
1019         thread_group_cputime_init(sig);
1020
1021         cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1022         if (cpu_limit != RLIM_INFINITY) {
1023                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1024                 sig->cputimer.running = 1;
1025         }
1026
1027         /* The timer lists. */
1028         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1029         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1030         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1031 }
1032
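/*
 * Allocate and initialise the per-thread-group signal_struct; threads
 * created with CLONE_THREAD keep using the existing one.
 */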
1033 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1034 {
1035         struct signal_struct *sig;
1036
1037         if (clone_flags & CLONE_THREAD)
1038                 return 0;
1039
1040         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1041         tsk->signal = sig;
1042         if (!sig)
1043                 return -ENOMEM;
1044
1045         sig->nr_threads = 1;
1046         atomic_set(&sig->live, 1);
1047         atomic_set(&sig->sigcnt, 1);
1048         init_waitqueue_head(&sig->wait_chldexit);
1049         sig->curr_target = tsk;
1050         init_sigpending(&sig->shared_pending);
1051         INIT_LIST_HEAD(&sig->posix_timers);
1052
1053         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1054         sig->real_timer.function = it_real_fn;
1055
1056         task_lock(current->group_leader);
1057         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1058         task_unlock(current->group_leader);
1059
1060         posix_cpu_timers_init_group(sig);
1061
1062         tty_audit_fork(sig);
1063         sched_autogroup_fork(sig);
1064
1065 #ifdef CONFIG_CGROUPS
1066         init_rwsem(&sig->group_rwsem);
1067 #endif
1068
1069         sig->oom_score_adj = current->signal->oom_score_adj;
1070         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1071
1072         sig->has_child_subreaper = current->signal->has_child_subreaper ||
1073                                    current->signal->is_child_subreaper;
1074
1075         mutex_init(&sig->cred_guard_mutex);
1076
1077         return 0;
1078 }
1079
1080 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1081 {
1082         unsigned long new_flags = p->flags;
1083
1084         new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1085         new_flags |= PF_FORKNOEXEC;
1086         p->flags = new_flags;
1087 }
1088
1089 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1090 {
1091         current->clear_child_tid = tidptr;
1092
1093         return task_pid_vnr(current);
1094 }
1095
1096 static void rt_mutex_init_task(struct task_struct *p)
1097 {
1098         raw_spin_lock_init(&p->pi_lock);
1099 #ifdef CONFIG_RT_MUTEXES
1100         plist_head_init(&p->pi_waiters);
1101         p->pi_blocked_on = NULL;
1102 #endif
1103 }
1104
1105 #ifdef CONFIG_MM_OWNER
1106 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1107 {
1108         mm->owner = p;
1109 }
1110 #endif /* CONFIG_MM_OWNER */
1111
1112 /*
1113  * Initialize POSIX timer handling for a single task.
1114  */
1115 static void posix_cpu_timers_init(struct task_struct *tsk)
1116 {
1117         tsk->cputime_expires.prof_exp = 0;
1118         tsk->cputime_expires.virt_exp = 0;
1119         tsk->cputime_expires.sched_exp = 0;
1120         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1121         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1122         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1123 }
1124
1125 /*
1126  * This creates a new process as a copy of the old one,
1127  * but does not actually start it yet.
1128  *
1129  * It copies the registers, and all the appropriate
1130  * parts of the process environment (as per the clone
1131  * flags). The actual kick-off is left to the caller.
1132  */
1133 static struct task_struct *copy_process(unsigned long clone_flags,
1134                                         unsigned long stack_start,
1135                                         unsigned long stack_size,
1136                                         int __user *child_tidptr,
1137                                         struct pid *pid,
1138                                         int trace)
1139 {
1140         int retval;
1141         struct task_struct *p;
1142
1143         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1144                 return ERR_PTR(-EINVAL);
1145
1146         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1147                 return ERR_PTR(-EINVAL);
1148
1149         /*
1150          * Thread groups must share signals as well, and detached threads
1151          * can only be started up within the thread group.
1152          */
1153         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1154                 return ERR_PTR(-EINVAL);
1155
1156         /*
1157          * Shared signal handlers imply shared VM. By way of the above,
1158          * thread groups also imply shared VM. Blocking this case allows
1159          * for various simplifications in other code.
1160          */
1161         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1162                 return ERR_PTR(-EINVAL);
1163
1164         /*
1165          * Siblings of global init remain as zombies on exit since they are
1166          * not reaped by their parent (swapper). To solve this and to avoid
1167          * multi-rooted process trees, prevent global and container-inits
1168          * from creating siblings.
1169          */
1170         if ((clone_flags & CLONE_PARENT) &&
1171                                 current->signal->flags & SIGNAL_UNKILLABLE)
1172                 return ERR_PTR(-EINVAL);
1173
1174         /*
1175          * If the new process will be in a different pid namespace don't
1176          * allow it to share a thread group or signal handlers with the
1177          * forking task.
1178          */
1179         if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
1180             (task_active_pid_ns(current) != current->nsproxy->pid_ns))
1181                 return ERR_PTR(-EINVAL);
1182
1183         retval = security_task_create(clone_flags);
1184         if (retval)
1185                 goto fork_out;
1186
1187         retval = -ENOMEM;
1188         p = dup_task_struct(current);
1189         if (!p)
1190                 goto fork_out;
1191
1192         ftrace_graph_init_task(p);
1193         get_seccomp_filter(p);
1194
1195         rt_mutex_init_task(p);
1196
1197 #ifdef CONFIG_PROVE_LOCKING
1198         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1199         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1200 #endif
1201         retval = -EAGAIN;
1202         if (atomic_read(&p->real_cred->user->processes) >=
1203                         task_rlimit(p, RLIMIT_NPROC)) {
1204                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1205                     p->real_cred->user != INIT_USER)
1206                         goto bad_fork_free;
1207         }
1208         current->flags &= ~PF_NPROC_EXCEEDED;
1209
1210         retval = copy_creds(p, clone_flags);
1211         if (retval < 0)
1212                 goto bad_fork_free;
1213
1214         /*
1215          * If multiple threads are within copy_process(), then this check
1216          * triggers too late. This doesn't hurt, the check is only there
1217          * to stop root fork bombs.
1218          */
1219         retval = -EAGAIN;
1220         if (nr_threads >= max_threads)
1221                 goto bad_fork_cleanup_count;
1222
1223         if (!try_module_get(task_thread_info(p)->exec_domain->module))
1224                 goto bad_fork_cleanup_count;
1225
1226         p->did_exec = 0;
1227         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1228         copy_flags(clone_flags, p);
1229         INIT_LIST_HEAD(&p->children);
1230         INIT_LIST_HEAD(&p->sibling);
1231         rcu_copy_process(p);
1232         p->vfork_done = NULL;
1233         spin_lock_init(&p->alloc_lock);
1234
1235         init_sigpending(&p->pending);
1236
1237         p->utime = p->stime = p->gtime = 0;
1238         p->utimescaled = p->stimescaled = 0;
1239 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1240         p->prev_cputime.utime = p->prev_cputime.stime = 0;
1241 #endif
1242 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1243         seqlock_init(&p->vtime_seqlock);
1244         p->vtime_snap = 0;
1245         p->vtime_snap_whence = VTIME_SLEEPING;
1246 #endif
1247
1248 #if defined(SPLIT_RSS_COUNTING)
1249         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1250 #endif
1251
1252         p->default_timer_slack_ns = current->timer_slack_ns;
1253
1254         task_io_accounting_init(&p->ioac);
1255         acct_clear_integrals(p);
1256
1257         posix_cpu_timers_init(p);
1258
1259         do_posix_clock_monotonic_gettime(&p->start_time);
1260         p->real_start_time = p->start_time;
1261         monotonic_to_bootbased(&p->real_start_time);
1262         p->io_context = NULL;
1263         p->audit_context = NULL;
1264         if (clone_flags & CLONE_THREAD)
1265                 threadgroup_change_begin(current);
1266         cgroup_fork(p);
1267 #ifdef CONFIG_NUMA
1268         p->mempolicy = mpol_dup(p->mempolicy);
1269         if (IS_ERR(p->mempolicy)) {
1270                 retval = PTR_ERR(p->mempolicy);
1271                 p->mempolicy = NULL;
1272                 goto bad_fork_cleanup_cgroup;
1273         }
1274         mpol_fix_fork_child_flag(p);
1275 #endif
1276 #ifdef CONFIG_CPUSETS
1277         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1278         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1279         seqcount_init(&p->mems_allowed_seq);
1280 #endif
1281 #ifdef CONFIG_TRACE_IRQFLAGS
1282         p->irq_events = 0;
1283         p->hardirqs_enabled = 0;
1284         p->hardirq_enable_ip = 0;
1285         p->hardirq_enable_event = 0;
1286         p->hardirq_disable_ip = _THIS_IP_;
1287         p->hardirq_disable_event = 0;
1288         p->softirqs_enabled = 1;
1289         p->softirq_enable_ip = _THIS_IP_;
1290         p->softirq_enable_event = 0;
1291         p->softirq_disable_ip = 0;
1292         p->softirq_disable_event = 0;
1293         p->hardirq_context = 0;
1294         p->softirq_context = 0;
1295 #endif
1296 #ifdef CONFIG_LOCKDEP
1297         p->lockdep_depth = 0; /* no locks held yet */
1298         p->curr_chain_key = 0;
1299         p->lockdep_recursion = 0;
1300 #endif
1301
1302 #ifdef CONFIG_DEBUG_MUTEXES
1303         p->blocked_on = NULL; /* not blocked yet */
1304 #endif
1305 #ifdef CONFIG_MEMCG
1306         p->memcg_batch.do_batch = 0;
1307         p->memcg_batch.memcg = NULL;
1308 #endif
1309 #ifdef CONFIG_BCACHE
1310         p->sequential_io        = 0;
1311         p->sequential_io_avg    = 0;
1312 #endif
1313
1314         /* Perform scheduler related setup. Assign this task to a CPU. */
1315         sched_fork(p);
1316
1317         retval = perf_event_init_task(p);
1318         if (retval)
1319                 goto bad_fork_cleanup_policy;
1320         retval = audit_alloc(p);
1321         if (retval)
1322                 goto bad_fork_cleanup_policy;
1323         /* copy all the process information */
1324         retval = copy_semundo(clone_flags, p);
1325         if (retval)
1326                 goto bad_fork_cleanup_audit;
1327         retval = copy_files(clone_flags, p);
1328         if (retval)
1329                 goto bad_fork_cleanup_semundo;
1330         retval = copy_fs(clone_flags, p);
1331         if (retval)
1332                 goto bad_fork_cleanup_files;
1333         retval = copy_sighand(clone_flags, p);
1334         if (retval)
1335                 goto bad_fork_cleanup_fs;
1336         retval = copy_signal(clone_flags, p);
1337         if (retval)
1338                 goto bad_fork_cleanup_sighand;
1339         retval = copy_mm(clone_flags, p);
1340         if (retval)
1341                 goto bad_fork_cleanup_signal;
1342         retval = copy_namespaces(clone_flags, p);
1343         if (retval)
1344                 goto bad_fork_cleanup_mm;
1345         retval = copy_io(clone_flags, p);
1346         if (retval)
1347                 goto bad_fork_cleanup_namespaces;
1348         retval = copy_thread(clone_flags, stack_start, stack_size, p);
1349         if (retval)
1350                 goto bad_fork_cleanup_io;
1351
1352         if (pid != &init_struct_pid) {
1353                 retval = -ENOMEM;
1354                 pid = alloc_pid(p->nsproxy->pid_ns);
1355                 if (!pid)
1356                         goto bad_fork_cleanup_io;
1357         }
1358
1359         p->pid = pid_nr(pid);
1360         p->tgid = p->pid;
1361         if (clone_flags & CLONE_THREAD)
1362                 p->tgid = current->tgid;
1363
1364         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1365         /*
1366          * Clear TID on mm_release()?
1367          */
1368         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1369 #ifdef CONFIG_BLOCK
1370         p->plug = NULL;
1371 #endif
1372 #ifdef CONFIG_FUTEX
1373         p->robust_list = NULL;
1374 #ifdef CONFIG_COMPAT
1375         p->compat_robust_list = NULL;
1376 #endif
1377         INIT_LIST_HEAD(&p->pi_state_list);
1378         p->pi_state_cache = NULL;
1379 #endif
1380         uprobe_copy_process(p);
1381         /*
1382          * sigaltstack should be cleared when sharing the same VM
1383          */
1384         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1385                 p->sas_ss_sp = p->sas_ss_size = 0;
1386
1387         /*
1388          * Syscall tracing and stepping should be turned off in the
1389          * child regardless of CLONE_PTRACE.
1390          */
1391         user_disable_single_step(p);
1392         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1393 #ifdef TIF_SYSCALL_EMU
1394         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1395 #endif
1396         clear_all_latency_tracing(p);
1397
1398         /* ok, now we should be set up.. */
1399         if (clone_flags & CLONE_THREAD)
1400                 p->exit_signal = -1;
1401         else if (clone_flags & CLONE_PARENT)
1402                 p->exit_signal = current->group_leader->exit_signal;
1403         else
1404                 p->exit_signal = (clone_flags & CSIGNAL);
1405
1406         p->pdeath_signal = 0;
1407         p->exit_state = 0;
1408
1409         p->nr_dirtied = 0;
1410         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1411         p->dirty_paused_when = 0;
1412
1413         /*
1414          * Ok, make it visible to the rest of the system.
1415          * We don't wake it up yet.
1416          */
1417         p->group_leader = p;
1418         INIT_LIST_HEAD(&p->thread_group);
1419         p->task_works = NULL;
1420
1421         /* Need tasklist lock for parent etc handling! */
1422         write_lock_irq(&tasklist_lock);
1423
1424         /* CLONE_PARENT re-uses the old parent */
1425         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1426                 p->real_parent = current->real_parent;
1427                 p->parent_exec_id = current->parent_exec_id;
1428         } else {
1429                 p->real_parent = current;
1430                 p->parent_exec_id = current->self_exec_id;
1431         }
1432
1433         spin_lock(&current->sighand->siglock);
1434
1435         /*
1436          * Process group and session signals need to be delivered to just the
1437          * parent before the fork or both the parent and the child after the
1438          * fork. Restart if a signal comes in before we add the new process to
1439          * its process group.
1440          * A fatal signal pending means that current will exit, so the new
1441          * thread can't slip out of an OOM kill (or normal SIGKILL).
1442          */
1443         recalc_sigpending();
1444         if (signal_pending(current)) {
1445                 spin_unlock(&current->sighand->siglock);
1446                 write_unlock_irq(&tasklist_lock);
1447                 retval = -ERESTARTNOINTR;
1448                 goto bad_fork_free_pid;
1449         }
1450
1451         if (clone_flags & CLONE_THREAD) {
1452                 current->signal->nr_threads++;
1453                 atomic_inc(&current->signal->live);
1454                 atomic_inc(&current->signal->sigcnt);
1455                 p->group_leader = current->group_leader;
1456                 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1457         }
1458
1459         if (likely(p->pid)) {
1460                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1461
1462                 if (thread_group_leader(p)) {
1463                         if (is_child_reaper(pid)) {
1464                                 ns_of_pid(pid)->child_reaper = p;
1465                                 p->signal->flags |= SIGNAL_UNKILLABLE;
1466                         }
1467
1468                         p->signal->leader_pid = pid;
1469                         p->signal->tty = tty_kref_get(current->signal->tty);
1470                         attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1471                         attach_pid(p, PIDTYPE_SID, task_session(current));
1472                         list_add_tail(&p->sibling, &p->real_parent->children);
1473                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1474                         __this_cpu_inc(process_counts);
1475                 }
1476                 attach_pid(p, PIDTYPE_PID, pid);
1477                 nr_threads++;
1478         }
1479
1480         total_forks++;
1481         spin_unlock(&current->sighand->siglock);
1482         write_unlock_irq(&tasklist_lock);
1483         proc_fork_connector(p);
1484         cgroup_post_fork(p);
1485         if (clone_flags & CLONE_THREAD)
1486                 threadgroup_change_end(current);
1487         perf_event_fork(p);
1488
1489         trace_task_newtask(p, clone_flags);
1490
1491         return p;
1492
1493 bad_fork_free_pid:
1494         if (pid != &init_struct_pid)
1495                 free_pid(pid);
1496 bad_fork_cleanup_io:
1497         if (p->io_context)
1498                 exit_io_context(p);
1499 bad_fork_cleanup_namespaces:
1500         exit_task_namespaces(p);
1501 bad_fork_cleanup_mm:
1502         if (p->mm)
1503                 mmput(p->mm);
1504 bad_fork_cleanup_signal:
1505         if (!(clone_flags & CLONE_THREAD))
1506                 free_signal_struct(p->signal);
1507 bad_fork_cleanup_sighand:
1508         __cleanup_sighand(p->sighand);
1509 bad_fork_cleanup_fs:
1510         exit_fs(p); /* blocking */
1511 bad_fork_cleanup_files:
1512         exit_files(p); /* blocking */
1513 bad_fork_cleanup_semundo:
1514         exit_sem(p);
1515 bad_fork_cleanup_audit:
1516         audit_free(p);
1517 bad_fork_cleanup_policy:
1518         perf_event_free_task(p);
1519 #ifdef CONFIG_NUMA
1520         mpol_put(p->mempolicy);
1521 bad_fork_cleanup_cgroup:
1522 #endif
1523         if (clone_flags & CLONE_THREAD)
1524                 threadgroup_change_end(current);
1525         cgroup_exit(p, 0);
1526         delayacct_tsk_free(p);
1527         module_put(task_thread_info(p)->exec_domain->module);
1528 bad_fork_cleanup_count:
1529         atomic_dec(&p->cred->user->processes);
1530         exit_creds(p);
1531 bad_fork_free:
1532         free_task(p);
1533 fork_out:
1534         return ERR_PTR(retval);
1535 }
1536
1537 static inline void init_idle_pids(struct pid_link *links)
1538 {
1539         enum pid_type type;
1540
1541         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1542                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1543                 links[type].pid = &init_struct_pid;
1544         }
1545 }
1546
1547 struct task_struct * __cpuinit fork_idle(int cpu)
1548 {
1549         struct task_struct *task;
1550         task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
1551         if (!IS_ERR(task)) {
1552                 init_idle_pids(task->pids);
1553                 init_idle(task, cpu);
1554         }
1555
1556         return task;
1557 }
1558
1559 /*
1560  *  Ok, this is the main fork-routine.
1561  *
1562  * It copies the process, and if successful kick-starts
1563  * it and waits for it to finish using the VM if required.
1564  */
1565 long do_fork(unsigned long clone_flags,
1566               unsigned long stack_start,
1567               unsigned long stack_size,
1568               int __user *parent_tidptr,
1569               int __user *child_tidptr)
1570 {
1571         struct task_struct *p;
1572         int trace = 0;
1573         long nr;
1574
1575         /*
1576          * Do some preliminary argument and permissions checking before we
1577          * actually start allocating stuff
1578          */
1579         if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
1580                 if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
1581                         return -EINVAL;
1582         }
1583
1584         /*
1585          * Determine whether and which event to report to ptracer.  When
1586          * called from kernel_thread or CLONE_UNTRACED is explicitly
1587          * requested, no event is reported; otherwise, report if the event
1588          * for the type of forking is enabled.
1589          */
1590         if (!(clone_flags & CLONE_UNTRACED)) {
1591                 if (clone_flags & CLONE_VFORK)
1592                         trace = PTRACE_EVENT_VFORK;
1593                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1594                         trace = PTRACE_EVENT_CLONE;
1595                 else
1596                         trace = PTRACE_EVENT_FORK;
1597
1598                 if (likely(!ptrace_event_enabled(current, trace)))
1599                         trace = 0;
1600         }
1601
1602         p = copy_process(clone_flags, stack_start, stack_size,
1603                          child_tidptr, NULL, trace);
1604         /*
1605          * Do this prior to waking up the new thread - the thread pointer
1606          * might become invalid after that point, if the thread exits quickly.
1607          */
1608         if (!IS_ERR(p)) {
1609                 struct completion vfork;
1610
1611                 trace_sched_process_fork(current, p);
1612
1613                 nr = task_pid_vnr(p);
1614
1615                 if (clone_flags & CLONE_PARENT_SETTID)
1616                         put_user(nr, parent_tidptr);
1617
1618                 if (clone_flags & CLONE_VFORK) {
1619                         p->vfork_done = &vfork;
1620                         init_completion(&vfork);
1621                         get_task_struct(p);
1622                 }
1623
1624                 wake_up_new_task(p);
1625
1626                 /* forking complete and child started to run, tell ptracer */
1627                 if (unlikely(trace))
1628                         ptrace_event(trace, nr);
1629
1630                 if (clone_flags & CLONE_VFORK) {
1631                         if (!wait_for_vfork_done(p, &vfork))
1632                                 ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1633                 }
1634         } else {
1635                 nr = PTR_ERR(p);
1636         }
1637         return nr;
1638 }
1639
1640 /*
1641  * Create a kernel thread.
1642  */
1643 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
1644 {
1645         return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
1646                 (unsigned long)arg, NULL, NULL);
1647 }
1648
1649 #ifdef __ARCH_WANT_SYS_FORK
1650 SYSCALL_DEFINE0(fork)
1651 {
1652 #ifdef CONFIG_MMU
1653         return do_fork(SIGCHLD, 0, 0, NULL, NULL);
1654 #else
1655         /* cannot support in nommu mode */
1656         return -EINVAL;
1657 #endif
1658 }
1659 #endif
1660
1661 #ifdef __ARCH_WANT_SYS_VFORK
1662 SYSCALL_DEFINE0(vfork)
1663 {
1664         return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, NULL, NULL);
1666 }
1667 #endif
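
/*
 * Editorial sketch (not part of the original file): CLONE_VFORK makes
 * do_fork() wait on the vfork completion until the child execs or exits,
 * which is what lets the child safely borrow the parent's memory.  A
 * minimal userspace illustration follows; it is deliberately excluded
 * from the kernel build.
 */
#if 0
#include <unistd.h>

static void example_vfork_spawn(char *const argv[], char *const envp[])
{
        pid_t pid = vfork();

        if (pid == 0) {
                /* Child: may only exec or _exit while sharing the parent's VM */
                execve(argv[0], argv, envp);
                _exit(127);
        }
        /* Parent resumes here only after the child has exec'd or exited */
}
#endif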
1668
1669 #ifdef __ARCH_WANT_SYS_CLONE
1670 #ifdef CONFIG_CLONE_BACKWARDS
1671 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1672                  int __user *, parent_tidptr,
1673                  int, tls_val,
1674                  int __user *, child_tidptr)
1675 #elif defined(CONFIG_CLONE_BACKWARDS2)
1676 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1677                  int __user *, parent_tidptr,
1678                  int __user *, child_tidptr,
1679                  int, tls_val)
1680 #elif defined(CONFIG_CLONE_BACKWARDS3)
1681 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1682                 int, stack_size,
1683                 int __user *, parent_tidptr,
1684                 int __user *, child_tidptr,
1685                 int, tls_val)
1686 #else
1687 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1688                  int __user *, parent_tidptr,
1689                  int __user *, child_tidptr,
1690                  int, tls_val)
1691 #endif
1692 {
1693         return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
1694 }
1695 #endif
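
/*
 * Editorial illustration (not part of the original file): the variants above
 * differ only in argument order, selected per architecture through the
 * CONFIG_CLONE_BACKWARDS* options.  With the default ordering (used, for
 * example, by x86_64), a raw fork-like clone from userspace looks like the
 * sketch below; it is userspace code and deliberately not compiled here.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_raw_clone_fork(void)
{
        /* newsp == 0: the child keeps a copy-on-write copy of this stack */
        return syscall(SYS_clone, (unsigned long)SIGCHLD,   /* clone_flags */
                       0UL,                                 /* newsp       */
                       (int *)NULL,                         /* parent_tid  */
                       (int *)NULL,                         /* child_tid   */
                       0UL);                                /* tls         */
}
#endif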
1696
1697 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1698 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1699 #endif
1700
1701 static void sighand_ctor(void *data)
1702 {
1703         struct sighand_struct *sighand = data;
1704
1705         spin_lock_init(&sighand->siglock);
1706         init_waitqueue_head(&sighand->signalfd_wqh);
1707 }
1708
1709 void __init proc_caches_init(void)
1710 {
1711         sighand_cachep = kmem_cache_create("sighand_cache",
1712                         sizeof(struct sighand_struct), 0,
1713                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1714                         SLAB_NOTRACK, sighand_ctor);
1715         signal_cachep = kmem_cache_create("signal_cache",
1716                         sizeof(struct signal_struct), 0,
1717                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1718         files_cachep = kmem_cache_create("files_cache",
1719                         sizeof(struct files_struct), 0,
1720                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1721         fs_cachep = kmem_cache_create("fs_cache",
1722                         sizeof(struct fs_struct), 0,
1723                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1724         /*
1725          * FIXME! The "sizeof(struct mm_struct)" currently includes the
1726          * whole struct cpumask for the OFFSTACK case. We could change
1727          * this to *only* allocate as much of it as required by the
1728          * maximum number of CPUs we can ever have.  The cpumask_allocation
1729          * is at the end of the structure, exactly for that reason.
1730          */
1731         mm_cachep = kmem_cache_create("mm_struct",
1732                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1733                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1734         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1735         mmap_init();
1736         nsproxy_cache_init();
1737 }
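
/*
 * Editorial sketch (not in the original source): sighand_cache is created
 * above with SLAB_DESTROY_BY_RCU, which only guarantees that the memory
 * remains a sighand_struct while under rcu_read_lock(), not that it still
 * belongs to the same task.  Lockless users therefore re-check the pointer
 * after taking siglock, as __lock_task_sighand() in kernel/signal.c does.
 * The helper below is a hypothetical, simplified restatement of that
 * pattern.
 */
static __maybe_unused struct sighand_struct *
example_lock_sighand(struct task_struct *tsk, unsigned long *flags)
{
        struct sighand_struct *sighand;

        for (;;) {
                rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(!sighand)) {
                        rcu_read_unlock();
                        return NULL;            /* task is already exiting */
                }
                spin_lock_irqsave(&sighand->siglock, *flags);
                if (likely(sighand == tsk->sighand)) {
                        rcu_read_unlock();
                        return sighand;         /* still attached to tsk */
                }
                /* The slab object was recycled for another task; retry. */
                spin_unlock_irqrestore(&sighand->siglock, *flags);
                rcu_read_unlock();
        }
}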
1738
1739 /*
1740  * Check constraints on flags passed to the unshare system call.
1741  */
1742 static int check_unshare_flags(unsigned long unshare_flags)
1743 {
1744         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1745                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1746                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
1747                                 CLONE_NEWUSER|CLONE_NEWPID))
1748                 return -EINVAL;
1749         /*
1750          * Not implemented, but pretend it works if there is nothing to
1751          * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1752          * needs to unshare vm.
1753          */
1754         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1755                 /* FIXME: get_task_mm() increments ->mm_users */
1756                 if (atomic_read(&current->mm->mm_users) > 1)
1757                         return -EINVAL;
1758         }
1759
1760         return 0;
1761 }
1762
1763 /*
1764  * Unshare the filesystem structure if it is being shared
1765  */
1766 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1767 {
1768         struct fs_struct *fs = current->fs;
1769
1770         if (!(unshare_flags & CLONE_FS) || !fs)
1771                 return 0;
1772
1773         /* no lock needed here; in the worst case we'll do a useless copy */
1774         if (fs->users == 1)
1775                 return 0;
1776
1777         *new_fsp = copy_fs_struct(fs);
1778         if (!*new_fsp)
1779                 return -ENOMEM;
1780
1781         return 0;
1782 }
1783
1784 /*
1785  * Unshare file descriptor table if it is being shared
1786  */
1787 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1788 {
1789         struct files_struct *fd = current->files;
1790         int error = 0;
1791
1792         if ((unshare_flags & CLONE_FILES) &&
1793             (fd && atomic_read(&fd->count) > 1)) {
1794                 *new_fdp = dup_fd(fd, &error);
1795                 if (!*new_fdp)
1796                         return error;
1797         }
1798
1799         return 0;
1800 }
1801
1802 /*
1803  * unshare allows a process to 'unshare' part of the process
1804  * context which was originally shared using clone.  copy_*
1805  * functions used by do_fork() cannot be used here directly
1806  * because they modify an inactive task_struct that is being
1807  * constructed. Here we are modifying the current, active,
1808  * task_struct.
1809  */
1810 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1811 {
1812         struct fs_struct *fs, *new_fs = NULL;
1813         struct files_struct *fd, *new_fd = NULL;
1814         struct cred *new_cred = NULL;
1815         struct nsproxy *new_nsproxy = NULL;
1816         int do_sysvsem = 0;
1817         int err;
1818
1819         /*
1820          * If unsharing a user namespace, also unshare the thread group and fs.
1821          */
1822         if (unshare_flags & CLONE_NEWUSER)
1823                 unshare_flags |= CLONE_THREAD | CLONE_FS;
1824         /*
1825          * If unsharing a pid namespace, also unshare the thread group.
1826          */
1827         if (unshare_flags & CLONE_NEWPID)
1828                 unshare_flags |= CLONE_THREAD;
1829         /*
1830          * If unsharing a thread from a thread group, must also unshare vm.
1831          */
1832         if (unshare_flags & CLONE_THREAD)
1833                 unshare_flags |= CLONE_VM;
1834         /*
1835          * If unsharing vm, must also unshare signal handlers.
1836          */
1837         if (unshare_flags & CLONE_VM)
1838                 unshare_flags |= CLONE_SIGHAND;
1839         /*
1840          * If unsharing a mount namespace, also unshare filesystem information.
1841          */
1842         if (unshare_flags & CLONE_NEWNS)
1843                 unshare_flags |= CLONE_FS;
1844
1845         err = check_unshare_flags(unshare_flags);
1846         if (err)
1847                 goto bad_unshare_out;
1848         /*
1849          * CLONE_NEWIPC must also detach from the undolist: after switching
1850          * to a new ipc namespace, the semaphore arrays from the old
1851          * namespace are unreachable.
1852          */
1853         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1854                 do_sysvsem = 1;
1855         err = unshare_fs(unshare_flags, &new_fs);
1856         if (err)
1857                 goto bad_unshare_out;
1858         err = unshare_fd(unshare_flags, &new_fd);
1859         if (err)
1860                 goto bad_unshare_cleanup_fs;
1861         err = unshare_userns(unshare_flags, &new_cred);
1862         if (err)
1863                 goto bad_unshare_cleanup_fd;
1864         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1865                                          new_cred, new_fs);
1866         if (err)
1867                 goto bad_unshare_cleanup_cred;
1868
1869         if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
1870                 if (do_sysvsem) {
1871                         /*
1872                          * For the undo list, CLONE_SYSVSEM acts like sys_exit().
1873                          */
1874                         exit_sem(current);
1875                 }
1876
1877                 if (new_nsproxy)
1878                         switch_task_namespaces(current, new_nsproxy);
1879
1880                 task_lock(current);
1881
1882                 if (new_fs) {
1883                         fs = current->fs;
1884                         spin_lock(&fs->lock);
1885                         current->fs = new_fs;
1886                         if (--fs->users)
1887                                 new_fs = NULL;
1888                         else
1889                                 new_fs = fs;
1890                         spin_unlock(&fs->lock);
1891                 }
1892
1893                 if (new_fd) {
1894                         fd = current->files;
1895                         current->files = new_fd;
1896                         new_fd = fd;
1897                 }
1898
1899                 task_unlock(current);
1900
1901                 if (new_cred) {
1902                         /* Install the new user namespace */
1903                         commit_creds(new_cred);
1904                         new_cred = NULL;
1905                 }
1906         }
1907
1908 bad_unshare_cleanup_cred:
1909         if (new_cred)
1910                 put_cred(new_cred);
1911 bad_unshare_cleanup_fd:
1912         if (new_fd)
1913                 put_files_struct(new_fd);
1914
1915 bad_unshare_cleanup_fs:
1916         if (new_fs)
1917                 free_fs_struct(new_fs);
1918
1919 bad_unshare_out:
1920         return err;
1921 }
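
/*
 * Editorial illustration (not part of the original file): the implication
 * chain at the top of sys_unshare() can fan a single flag out.  For example,
 * unshare(CLONE_NEWUSER) becomes CLONE_NEWUSER | CLONE_THREAD | CLONE_FS |
 * CLONE_VM | CLONE_SIGHAND before check_unshare_flags() runs, so it is
 * rejected with -EINVAL for a multithreaded caller (mm_users > 1).  A
 * minimal userspace sketch of the common mount-namespace case follows; it
 * is deliberately not compiled into the kernel.
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>

static int example_private_mounts(void)
{
        /* CLONE_NEWNS implies CLONE_FS (see above); needs CAP_SYS_ADMIN */
        if (unshare(CLONE_NEWNS) == -1)
                return -1;
        /* Mount changes in this task no longer leak into the old namespace */
        return mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL);
}
#endif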
1922
1923 /*
1924  *      Helper to unshare the files of the current task.
1925  *      We don't want to expose copy_files internals to
1926  *      the exec layer of the kernel.
1927  */
1928
1929 int unshare_files(struct files_struct **displaced)
1930 {
1931         struct task_struct *task = current;
1932         struct files_struct *copy = NULL;
1933         int error;
1934
1935         error = unshare_fd(CLONE_FILES, &copy);
1936         if (error || !copy) {
1937                 *displaced = NULL;
1938                 return error;
1939         }
1940         *displaced = task->files;
1941         task_lock(task);
1942         task->files = copy;
1943         task_unlock(task);
1944         return 0;
1945 }
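
/*
 * Editorial note (not in the original source): the main user of
 * unshare_files() is the exec path, which needs a private file table even
 * when the old image shared one.  The function below is a hypothetical,
 * simplified sketch of that calling pattern.
 */
static __maybe_unused int example_exec_like_caller(void)
{
        struct files_struct *displaced;
        int ret;

        ret = unshare_files(&displaced);        /* get a private files_struct */
        if (ret)
                return ret;
        /* ... set up the new image using current->files ... */
        if (displaced)
                put_files_struct(displaced);    /* drop the old, shared table */
        return 0;
}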