Linux/kernel/fork.c

  1 /*
  2  *  linux/kernel/fork.c
  3  *
  4  *  Copyright (C) 1991, 1992  Linus Torvalds
  5  */
  6 
  7 /*
  8  *  'fork.c' contains the help-routines for the 'fork' system call
  9  * (see also entry.S and others).
 10  * Fork is rather simple, once you get the hang of it, but the memory
 11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 12  */
 13 
 14 #include <linux/slab.h>
 15 #include <linux/init.h>
 16 #include <linux/unistd.h>
 17 #include <linux/module.h>
 18 #include <linux/vmalloc.h>
 19 #include <linux/completion.h>
 20 #include <linux/personality.h>
 21 #include <linux/mempolicy.h>
 22 #include <linux/sem.h>
 23 #include <linux/file.h>
 24 #include <linux/fdtable.h>
 25 #include <linux/iocontext.h>
 26 #include <linux/key.h>
 27 #include <linux/binfmts.h>
 28 #include <linux/mman.h>
 29 #include <linux/mmu_notifier.h>
 30 #include <linux/fs.h>
 31 #include <linux/nsproxy.h>
 32 #include <linux/capability.h>
 33 #include <linux/cpu.h>
 34 #include <linux/cgroup.h>
 35 #include <linux/security.h>
 36 #include <linux/hugetlb.h>
 37 #include <linux/seccomp.h>
 38 #include <linux/swap.h>
 39 #include <linux/syscalls.h>
 40 #include <linux/jiffies.h>
 41 #include <linux/futex.h>
 42 #include <linux/compat.h>
 43 #include <linux/kthread.h>
 44 #include <linux/task_io_accounting_ops.h>
 45 #include <linux/rcupdate.h>
 46 #include <linux/ptrace.h>
 47 #include <linux/mount.h>
 48 #include <linux/audit.h>
 49 #include <linux/memcontrol.h>
 50 #include <linux/ftrace.h>
 51 #include <linux/proc_fs.h>
 52 #include <linux/profile.h>
 53 #include <linux/rmap.h>
 54 #include <linux/ksm.h>
 55 #include <linux/acct.h>
 56 #include <linux/tsacct_kern.h>
 57 #include <linux/cn_proc.h>
 58 #include <linux/freezer.h>
 59 #include <linux/delayacct.h>
 60 #include <linux/taskstats_kern.h>
 61 #include <linux/random.h>
 62 #include <linux/tty.h>
 63 #include <linux/blkdev.h>
 64 #include <linux/fs_struct.h>
 65 #include <linux/magic.h>
 66 #include <linux/perf_event.h>
 67 #include <linux/posix-timers.h>
 68 #include <linux/user-return-notifier.h>
 69 #include <linux/oom.h>
 70 #include <linux/khugepaged.h>
 71 #include <linux/signalfd.h>
 72 #include <linux/uprobes.h>
 73 #include <linux/aio.h>
 74 
 75 #include <asm/pgtable.h>
 76 #include <asm/pgalloc.h>
 77 #include <asm/uaccess.h>
 78 #include <asm/mmu_context.h>
 79 #include <asm/cacheflush.h>
 80 #include <asm/tlbflush.h>
 81 
 82 #include <trace/events/sched.h>
 83 
 84 #define CREATE_TRACE_POINTS
 85 #include <trace/events/task.h>
 86 
 87 /*
 88  * Counters protected by write_lock_irq(&tasklist_lock)
 89  */
 90 unsigned long total_forks;      /* Handle normal Linux uptimes. */
 91 int nr_threads;                 /* The idle threads do not count.. */
 92 
 93 int max_threads;                /* tunable limit on nr_threads */
 94 
 95 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 96 
 97 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 98 
 99 #ifdef CONFIG_PROVE_RCU
100 int lockdep_tasklist_lock_is_held(void)
101 {
102         return lockdep_is_held(&tasklist_lock);
103 }
104 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
105 #endif /* #ifdef CONFIG_PROVE_RCU */
106 
107 int nr_processes(void)
108 {
109         int cpu;
110         int total = 0;
111 
112         for_each_possible_cpu(cpu)
113                 total += per_cpu(process_counts, cpu);
114 
115         return total;
116 }
117 
118 void __weak arch_release_task_struct(struct task_struct *tsk)
119 {
120 }
121 
122 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
123 static struct kmem_cache *task_struct_cachep;
124 
125 static inline struct task_struct *alloc_task_struct_node(int node)
126 {
127         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
128 }
129 
130 static inline void free_task_struct(struct task_struct *tsk)
131 {
132         kmem_cache_free(task_struct_cachep, tsk);
133 }
134 #endif
135 
136 void __weak arch_release_thread_info(struct thread_info *ti)
137 {
138 }
139 
140 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
141 
142 /*
143  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
144  * kmem_cache-based allocator.
145  */
146 # if THREAD_SIZE >= PAGE_SIZE
147 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
148                                                   int node)
149 {
150         struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
151                                              THREAD_SIZE_ORDER);
152 
153         return page ? page_address(page) : NULL;
154 }
155 
156 static inline void free_thread_info(struct thread_info *ti)
157 {
158         free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
159 }
160 # else
161 static struct kmem_cache *thread_info_cache;
162 
163 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
164                                                   int node)
165 {
166         return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
167 }
168 
169 static void free_thread_info(struct thread_info *ti)
170 {
171         kmem_cache_free(thread_info_cache, ti);
172 }
173 
174 void thread_info_cache_init(void)
175 {
176         thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
177                                               THREAD_SIZE, 0, NULL);
178         BUG_ON(thread_info_cache == NULL);
179 }
180 # endif
181 #endif
182 
183 /* SLAB cache for signal_struct structures (tsk->signal) */
184 static struct kmem_cache *signal_cachep;
185 
186 /* SLAB cache for sighand_struct structures (tsk->sighand) */
187 struct kmem_cache *sighand_cachep;
188 
189 /* SLAB cache for files_struct structures (tsk->files) */
190 struct kmem_cache *files_cachep;
191 
192 /* SLAB cache for fs_struct structures (tsk->fs) */
193 struct kmem_cache *fs_cachep;
194 
195 /* SLAB cache for vm_area_struct structures */
196 struct kmem_cache *vm_area_cachep;
197 
198 /* SLAB cache for mm_struct structures (tsk->mm) */
199 static struct kmem_cache *mm_cachep;
200 
201 static void account_kernel_stack(struct thread_info *ti, int account)
202 {
203         struct zone *zone = page_zone(virt_to_page(ti));
204 
205         mod_zone_page_state(zone, NR_KERNEL_STACK, account);
206 }
207 
208 void free_task(struct task_struct *tsk)
209 {
210         account_kernel_stack(tsk->stack, -1);
211         arch_release_thread_info(tsk->stack);
212         free_thread_info(tsk->stack);
213         rt_mutex_debug_task_free(tsk);
214         ftrace_graph_exit_task(tsk);
215         put_seccomp_filter(tsk);
216         arch_release_task_struct(tsk);
217         free_task_struct(tsk);
218 }
219 EXPORT_SYMBOL(free_task);
220 
221 static inline void free_signal_struct(struct signal_struct *sig)
222 {
223         taskstats_tgid_free(sig);
224         sched_autogroup_exit(sig);
225         kmem_cache_free(signal_cachep, sig);
226 }
227 
228 static inline void put_signal_struct(struct signal_struct *sig)
229 {
230         if (atomic_dec_and_test(&sig->sigcnt))
231                 free_signal_struct(sig);
232 }
233 
234 void __put_task_struct(struct task_struct *tsk)
235 {
236         WARN_ON(!tsk->exit_state);
237         WARN_ON(atomic_read(&tsk->usage));
238         WARN_ON(tsk == current);
239 
240         security_task_free(tsk);
241         exit_creds(tsk);
242         delayacct_tsk_free(tsk);
243         put_signal_struct(tsk->signal);
244 
245         if (!profile_handoff_task(tsk))
246                 free_task(tsk);
247 }
248 EXPORT_SYMBOL_GPL(__put_task_struct);
249 
250 void __init __weak arch_task_cache_init(void) { }
251 
252 void __init fork_init(unsigned long mempages)
253 {
254 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
255 #ifndef ARCH_MIN_TASKALIGN
256 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
257 #endif
258         /* create a slab on which task_structs can be allocated */
259         task_struct_cachep =
260                 kmem_cache_create("task_struct", sizeof(struct task_struct),
261                         ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
262 #endif
263 
264         /* do the arch specific task caches init */
265         arch_task_cache_init();
266 
267         /*
268          * The default maximum number of threads is set to a safe
269          * value: the thread structures can take up at most one
270          * eighth of memory.
271          */
272         max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
273 
274         /*
275          * we need to allow at least 20 threads to boot a system
276          */
277         if (max_threads < 20)
278                 max_threads = 20;
279 
280         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
281         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
282         init_task.signal->rlim[RLIMIT_SIGPENDING] =
283                 init_task.signal->rlim[RLIMIT_NPROC];
284 }
285 
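For illustration only (not part of the kernel source): with the formula above, and assuming 4 GiB of RAM (mempages = 1048576 pages of 4 KiB) and THREAD_SIZE = 8 KiB, the default works out to

        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE)
                    = 1048576 / (8 * 8192 / 4096)
                    = 1048576 / 16
                    = 65536

so kernel stacks can consume at most 65536 * 8 KiB = 512 MiB, one eighth of memory, and init's RLIMIT_NPROC defaults to max_threads/2 = 32768.
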
286 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
287                                                struct task_struct *src)
288 {
289         *dst = *src;
290         return 0;
291 }
292 
293 static struct task_struct *dup_task_struct(struct task_struct *orig)
294 {
295         struct task_struct *tsk;
296         struct thread_info *ti;
297         unsigned long *stackend;
298         int node = tsk_fork_get_node(orig);
299         int err;
300 
301         tsk = alloc_task_struct_node(node);
302         if (!tsk)
303                 return NULL;
304 
305         ti = alloc_thread_info_node(tsk, node);
306         if (!ti)
307                 goto free_tsk;
308 
309         err = arch_dup_task_struct(tsk, orig);
310         if (err)
311                 goto free_ti;
312 
313         tsk->stack = ti;
314 
315         setup_thread_stack(tsk, orig);
316         clear_user_return_notifier(tsk);
317         clear_tsk_need_resched(tsk);
318         stackend = end_of_stack(tsk);
319         *stackend = STACK_END_MAGIC;    /* for overflow detection */
320 
321 #ifdef CONFIG_CC_STACKPROTECTOR
322         tsk->stack_canary = get_random_int();
323 #endif
324 
325         /*
326          * One for us, one for whoever does the "release_task()" (usually
327          * parent)
328          */
329         atomic_set(&tsk->usage, 2);
330 #ifdef CONFIG_BLK_DEV_IO_TRACE
331         tsk->btrace_seq = 0;
332 #endif
333         tsk->splice_pipe = NULL;
334         tsk->task_frag.page = NULL;
335 
336         account_kernel_stack(ti, 1);
337 
338         return tsk;
339 
340 free_ti:
341         free_thread_info(ti);
342 free_tsk:
343         free_task_struct(tsk);
344         return NULL;
345 }
346 
347 #ifdef CONFIG_MMU
348 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
349 {
350         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
351         struct rb_node **rb_link, *rb_parent;
352         int retval;
353         unsigned long charge;
354 
355         uprobe_start_dup_mmap();
356         down_write(&oldmm->mmap_sem);
357         flush_cache_dup_mm(oldmm);
358         uprobe_dup_mmap(oldmm, mm);
359         /*
360          * Not linked in yet - no deadlock potential:
361          */
362         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
363 
364         mm->locked_vm = 0;
365         mm->mmap = NULL;
366         mm->mmap_cache = NULL;
367         mm->map_count = 0;
368         cpumask_clear(mm_cpumask(mm));
369         mm->mm_rb = RB_ROOT;
370         rb_link = &mm->mm_rb.rb_node;
371         rb_parent = NULL;
372         pprev = &mm->mmap;
373         retval = ksm_fork(mm, oldmm);
374         if (retval)
375                 goto out;
376         retval = khugepaged_fork(mm, oldmm);
377         if (retval)
378                 goto out;
379 
380         prev = NULL;
381         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
382                 struct file *file;
383 
384                 if (mpnt->vm_flags & VM_DONTCOPY) {
385                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
386                                                         -vma_pages(mpnt));
387                         continue;
388                 }
389                 charge = 0;
390                 if (mpnt->vm_flags & VM_ACCOUNT) {
391                         unsigned long len = vma_pages(mpnt);
392 
393                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
394                                 goto fail_nomem;
395                         charge = len;
396                 }
397                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
398                 if (!tmp)
399                         goto fail_nomem;
400                 *tmp = *mpnt;
401                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
402                 retval = vma_dup_policy(mpnt, tmp);
403                 if (retval)
404                         goto fail_nomem_policy;
405                 tmp->vm_mm = mm;
406                 if (anon_vma_fork(tmp, mpnt))
407                         goto fail_nomem_anon_vma_fork;
408                 tmp->vm_flags &= ~VM_LOCKED;
409                 tmp->vm_next = tmp->vm_prev = NULL;
410                 file = tmp->vm_file;
411                 if (file) {
412                         struct inode *inode = file_inode(file);
413                         struct address_space *mapping = file->f_mapping;
414 
415                         get_file(file);
416                         if (tmp->vm_flags & VM_DENYWRITE)
417                                 atomic_dec(&inode->i_writecount);
418                         mutex_lock(&mapping->i_mmap_mutex);
419                         if (tmp->vm_flags & VM_SHARED)
420                                 mapping->i_mmap_writable++;
421                         flush_dcache_mmap_lock(mapping);
422                         /* insert tmp into the share list, just after mpnt */
423                         if (unlikely(tmp->vm_flags & VM_NONLINEAR))
424                                 vma_nonlinear_insert(tmp,
425                                                 &mapping->i_mmap_nonlinear);
426                         else
427                                 vma_interval_tree_insert_after(tmp, mpnt,
428                                                         &mapping->i_mmap);
429                         flush_dcache_mmap_unlock(mapping);
430                         mutex_unlock(&mapping->i_mmap_mutex);
431                 }
432 
433                 /*
434                  * Clear hugetlb-related page reserves for children. This only
435                  * affects MAP_PRIVATE mappings. Faults generated by the child
436                  * are not guaranteed to succeed, even if read-only
437                  */
438                 if (is_vm_hugetlb_page(tmp))
439                         reset_vma_resv_huge_pages(tmp);
440 
441                 /*
442                  * Link in the new vma and copy the page table entries.
443                  */
444                 *pprev = tmp;
445                 pprev = &tmp->vm_next;
446                 tmp->vm_prev = prev;
447                 prev = tmp;
448 
449                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
450                 rb_link = &tmp->vm_rb.rb_right;
451                 rb_parent = &tmp->vm_rb;
452 
453                 mm->map_count++;
454                 retval = copy_page_range(mm, oldmm, mpnt);
455 
456                 if (tmp->vm_ops && tmp->vm_ops->open)
457                         tmp->vm_ops->open(tmp);
458 
459                 if (retval)
460                         goto out;
461         }
462         /* a new mm has just been created */
463         arch_dup_mmap(oldmm, mm);
464         retval = 0;
465 out:
466         up_write(&mm->mmap_sem);
467         flush_tlb_mm(oldmm);
468         up_write(&oldmm->mmap_sem);
469         uprobe_end_dup_mmap();
470         return retval;
471 fail_nomem_anon_vma_fork:
472         mpol_put(vma_policy(tmp));
473 fail_nomem_policy:
474         kmem_cache_free(vm_area_cachep, tmp);
475 fail_nomem:
476         retval = -ENOMEM;
477         vm_unacct_memory(charge);
478         goto out;
479 }
480 
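To see the effect of dup_mmap() and copy_page_range() from user space, here is a minimal illustrative sketch (not part of fork.c): after fork(), writes in the child fault in private copy-on-write pages and stay invisible to the parent.

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
        int value = 42;
        pid_t pid = fork();

        if (pid < 0)
                return 1;
        if (pid == 0) {
                value = 1000;                   /* write faults in a private copy of the page */
                printf("child sees  %d\n", value);
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent sees %d\n", value);      /* still prints 42 */
        return 0;
}
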
481 static inline int mm_alloc_pgd(struct mm_struct *mm)
482 {
483         mm->pgd = pgd_alloc(mm);
484         if (unlikely(!mm->pgd))
485                 return -ENOMEM;
486         return 0;
487 }
488 
489 static inline void mm_free_pgd(struct mm_struct *mm)
490 {
491         pgd_free(mm, mm->pgd);
492 }
493 #else
494 #define dup_mmap(mm, oldmm)     (0)
495 #define mm_alloc_pgd(mm)        (0)
496 #define mm_free_pgd(mm)
497 #endif /* CONFIG_MMU */
498 
499 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
500 
501 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
502 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
503 
504 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
505 
506 static int __init coredump_filter_setup(char *s)
507 {
508         default_dump_filter =
509                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
510                 MMF_DUMP_FILTER_MASK;
511         return 1;
512 }
513 
514 __setup("coredump_filter=", coredump_filter_setup);
515 
516 #include <linux/init_task.h>
517 
518 static void mm_init_aio(struct mm_struct *mm)
519 {
520 #ifdef CONFIG_AIO
521         spin_lock_init(&mm->ioctx_lock);
522         mm->ioctx_table = NULL;
523 #endif
524 }
525 
526 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
527 {
528         atomic_set(&mm->mm_users, 1);
529         atomic_set(&mm->mm_count, 1);
530         init_rwsem(&mm->mmap_sem);
531         INIT_LIST_HEAD(&mm->mmlist);
532         mm->flags = (current->mm) ?
533                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
534         mm->core_state = NULL;
535         atomic_long_set(&mm->nr_ptes, 0);
536         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
537         spin_lock_init(&mm->page_table_lock);
538         mm_init_aio(mm);
539         mm_init_owner(mm, p);
540         clear_tlb_flush_pending(mm);
541 
542         if (likely(!mm_alloc_pgd(mm))) {
543                 mm->def_flags = 0;
544                 mmu_notifier_mm_init(mm);
545                 return mm;
546         }
547 
548         free_mm(mm);
549         return NULL;
550 }
551 
552 static void check_mm(struct mm_struct *mm)
553 {
554         int i;
555 
556         for (i = 0; i < NR_MM_COUNTERS; i++) {
557                 long x = atomic_long_read(&mm->rss_stat.count[i]);
558 
559                 if (unlikely(x))
560                         printk(KERN_ALERT "BUG: Bad rss-counter state "
561                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
562         }
563 
564 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
565         VM_BUG_ON(mm->pmd_huge_pte);
566 #endif
567 }
568 
569 /*
570  * Allocate and initialize an mm_struct.
571  */
572 struct mm_struct *mm_alloc(void)
573 {
574         struct mm_struct *mm;
575 
576         mm = allocate_mm();
577         if (!mm)
578                 return NULL;
579 
580         memset(mm, 0, sizeof(*mm));
581         mm_init_cpumask(mm);
582         return mm_init(mm, current);
583 }
584 
585 /*
586  * Called when the last reference to the mm
587  * is dropped: either by a lazy thread or by
588  * mmput. Free the page directory and the mm.
589  */
590 void __mmdrop(struct mm_struct *mm)
591 {
592         BUG_ON(mm == &init_mm);
593         mm_free_pgd(mm);
594         destroy_context(mm);
595         mmu_notifier_mm_destroy(mm);
596         check_mm(mm);
597         free_mm(mm);
598 }
599 EXPORT_SYMBOL_GPL(__mmdrop);
600 
601 /*
602  * Decrement the use count and release all resources for an mm.
603  */
604 void mmput(struct mm_struct *mm)
605 {
606         might_sleep();
607 
608         if (atomic_dec_and_test(&mm->mm_users)) {
609                 uprobe_clear_state(mm);
610                 exit_aio(mm);
611                 ksm_exit(mm);
612                 khugepaged_exit(mm); /* must run before exit_mmap */
613                 exit_mmap(mm);
614                 set_mm_exe_file(mm, NULL);
615                 if (!list_empty(&mm->mmlist)) {
616                         spin_lock(&mmlist_lock);
617                         list_del(&mm->mmlist);
618                         spin_unlock(&mmlist_lock);
619                 }
620                 if (mm->binfmt)
621                         module_put(mm->binfmt->module);
622                 mmdrop(mm);
623         }
624 }
625 EXPORT_SYMBOL_GPL(mmput);
626 
627 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
628 {
629         if (new_exe_file)
630                 get_file(new_exe_file);
631         if (mm->exe_file)
632                 fput(mm->exe_file);
633         mm->exe_file = new_exe_file;
634 }
635 
636 struct file *get_mm_exe_file(struct mm_struct *mm)
637 {
638         struct file *exe_file;
639 
640         /* We need mmap_sem to protect against races with removal of exe_file */
641         down_read(&mm->mmap_sem);
642         exe_file = mm->exe_file;
643         if (exe_file)
644                 get_file(exe_file);
645         up_read(&mm->mmap_sem);
646         return exe_file;
647 }
648 
649 static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
650 {
651         /* It's safe to write the exe_file pointer without exe_file_lock because
652          * this is called during fork when the task is not yet in /proc */
653         newmm->exe_file = get_mm_exe_file(oldmm);
654 }
655 
656 /**
657  * get_task_mm - acquire a reference to the task's mm
658  *
659  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
660  * this kernel thread has only transiently adopted a user mm with use_mm,
661  * e.g. to do AIO).  Otherwise returns a reference to the mm, after
662  * bumping up the use count.  The caller must release the mm via mmput()
663  * after use.  Typically used by /proc and ptrace.
664  */
665 struct mm_struct *get_task_mm(struct task_struct *task)
666 {
667         struct mm_struct *mm;
668 
669         task_lock(task);
670         mm = task->mm;
671         if (mm) {
672                 if (task->flags & PF_KTHREAD)
673                         mm = NULL;
674                 else
675                         atomic_inc(&mm->mm_users);
676         }
677         task_unlock(task);
678         return mm;
679 }
680 EXPORT_SYMBOL_GPL(get_task_mm);
681 
682 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
683 {
684         struct mm_struct *mm;
685         int err;
686 
687         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
688         if (err)
689                 return ERR_PTR(err);
690 
691         mm = get_task_mm(task);
692         if (mm && mm != current->mm &&
693                         !ptrace_may_access(task, mode)) {
694                 mmput(mm);
695                 mm = ERR_PTR(-EACCES);
696         }
697         mutex_unlock(&task->signal->cred_guard_mutex);
698 
699         return mm;
700 }
701 
702 static void complete_vfork_done(struct task_struct *tsk)
703 {
704         struct completion *vfork;
705 
706         task_lock(tsk);
707         vfork = tsk->vfork_done;
708         if (likely(vfork)) {
709                 tsk->vfork_done = NULL;
710                 complete(vfork);
711         }
712         task_unlock(tsk);
713 }
714 
715 static int wait_for_vfork_done(struct task_struct *child,
716                                 struct completion *vfork)
717 {
718         int killed;
719 
720         freezer_do_not_count();
721         killed = wait_for_completion_killable(vfork);
722         freezer_count();
723 
724         if (killed) {
725                 task_lock(child);
726                 child->vfork_done = NULL;
727                 task_unlock(child);
728         }
729 
730         put_task_struct(child);
731         return killed;
732 }
733 
734 /* Please note the differences between mmput and mm_release.
735  * mmput is called whenever we stop holding onto a mm_struct,
736  * whether on error or on success.
737  *
738  * mm_release is called after a mm_struct has been removed
739  * from the current process.
740  *
741  * This difference is important for error handling, when we
742  * only half set up a mm_struct for a new process and need to restore
743  * the old one.  Because we mmput the new mm_struct before
744  * restoring the old one. . .
745  * Eric Biederman 10 January 1998
746  */
747 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
748 {
749         /* Get rid of any futexes when releasing the mm */
750 #ifdef CONFIG_FUTEX
751         if (unlikely(tsk->robust_list)) {
752                 exit_robust_list(tsk);
753                 tsk->robust_list = NULL;
754         }
755 #ifdef CONFIG_COMPAT
756         if (unlikely(tsk->compat_robust_list)) {
757                 compat_exit_robust_list(tsk);
758                 tsk->compat_robust_list = NULL;
759         }
760 #endif
761         if (unlikely(!list_empty(&tsk->pi_state_list)))
762                 exit_pi_state_list(tsk);
763 #endif
764 
765         uprobe_free_utask(tsk);
766 
767         /* Get rid of any cached register state */
768         deactivate_mm(tsk, mm);
769 
770         /*
771          * If we're exiting normally, clear a user-space tid field if
772          * requested.  We leave this alone when dying by signal, to leave
773          * the value intact in a core dump, and to save the unnecessary
774          * trouble, say, a killed vfork parent shouldn't touch this mm.
775          * Userland only wants this done for a sys_exit.
776          */
777         if (tsk->clear_child_tid) {
778                 if (!(tsk->flags & PF_SIGNALED) &&
779                     atomic_read(&mm->mm_users) > 1) {
780                         /*
781                          * We don't check the error code - if userspace has
782                          * not set up a proper pointer then tough luck.
783                          */
784                         put_user(0, tsk->clear_child_tid);
785                         sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
786                                         1, NULL, NULL, 0);
787                 }
788                 tsk->clear_child_tid = NULL;
789         }
790 
791         /*
792          * All done, finally we can wake up parent and return this mm to him.
793          * Also kthread_stop() uses this completion for synchronization.
794          */
795         if (tsk->vfork_done)
796                 complete_vfork_done(tsk);
797 }
798 
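The clear_child_tid handling above is the kernel side of the CLONE_CHILD_CLEARTID contract that thread libraries rely on to implement join. Below is a minimal user-space sketch (illustrative only; it assumes glibc's clone() wrapper and a downward-growing stack): the parent futex-waits on the TID word, and on child exit mm_release() zeroes that word and wakes the waiter.

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile pid_t child_tid;

static int child_fn(void *arg)
{
        (void)arg;
        return 0;       /* exiting runs mm_release(): put_user(0, clear_child_tid) + FUTEX_WAKE */
}

int main(void)
{
        const size_t stack_size = 64 * 1024;
        char *stack = malloc(stack_size);
        if (!stack)
                return 1;

        /* CLONE_VM so the TID word is shared; CHILD_SETTID stores the child's
         * TID in it, CHILD_CLEARTID makes the kernel clear it and wake waiters
         * when the child exits. */
        pid_t tid = clone(child_fn, stack + stack_size,
                          CLONE_VM | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD,
                          NULL, NULL, NULL, (pid_t *)&child_tid);
        if (tid == -1)
                return 1;

        /* Wait until the kernel has cleared child_tid on our behalf. */
        while (child_tid != 0)
                syscall(SYS_futex, &child_tid, FUTEX_WAIT, tid, NULL, NULL, 0);

        waitpid(tid, NULL, 0);
        printf("child %d exited, child_tid is now %d\n", (int)tid, (int)child_tid);
        free(stack);
        return 0;
}
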
799 /*
800  * Allocate a new mm structure and copy contents from the
801  * mm structure of the passed in task structure.
802  */
803 static struct mm_struct *dup_mm(struct task_struct *tsk)
804 {
805         struct mm_struct *mm, *oldmm = current->mm;
806         int err;
807 
808         mm = allocate_mm();
809         if (!mm)
810                 goto fail_nomem;
811 
812         memcpy(mm, oldmm, sizeof(*mm));
813         mm_init_cpumask(mm);
814 
815 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
816         mm->pmd_huge_pte = NULL;
817 #endif
818         if (!mm_init(mm, tsk))
819                 goto fail_nomem;
820 
821         if (init_new_context(tsk, mm))
822                 goto fail_nocontext;
823 
824         dup_mm_exe_file(oldmm, mm);
825 
826         err = dup_mmap(mm, oldmm);
827         if (err)
828                 goto free_pt;
829 
830         mm->hiwater_rss = get_mm_rss(mm);
831         mm->hiwater_vm = mm->total_vm;
832 
833         if (mm->binfmt && !try_module_get(mm->binfmt->module))
834                 goto free_pt;
835 
836         return mm;
837 
838 free_pt:
839         /* don't put binfmt in mmput, we haven't got module yet */
840         mm->binfmt = NULL;
841         mmput(mm);
842 
843 fail_nomem:
844         return NULL;
845 
846 fail_nocontext:
847         /*
848          * If init_new_context() failed, we cannot use mmput() to free the mm
849          * because it calls destroy_context()
850          */
851         mm_free_pgd(mm);
852         free_mm(mm);
853         return NULL;
854 }
855 
856 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
857 {
858         struct mm_struct *mm, *oldmm;
859         int retval;
860 
861         tsk->min_flt = tsk->maj_flt = 0;
862         tsk->nvcsw = tsk->nivcsw = 0;
863 #ifdef CONFIG_DETECT_HUNG_TASK
864         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
865 #endif
866 
867         tsk->mm = NULL;
868         tsk->active_mm = NULL;
869 
870         /*
871          * Are we cloning a kernel thread?
872          *
873          * We need to steal an active VM for that.
874          */
875         oldmm = current->mm;
876         if (!oldmm)
877                 return 0;
878 
879         if (clone_flags & CLONE_VM) {
880                 atomic_inc(&oldmm->mm_users);
881                 mm = oldmm;
882                 goto good_mm;
883         }
884 
885         retval = -ENOMEM;
886         mm = dup_mm(tsk);
887         if (!mm)
888                 goto fail_nomem;
889 
890 good_mm:
891         tsk->mm = mm;
892         tsk->active_mm = mm;
893         return 0;
894 
895 fail_nomem:
896         return retval;
897 }
898 
899 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
900 {
901         struct fs_struct *fs = current->fs;
902         if (clone_flags & CLONE_FS) {
903                 /* tsk->fs is already what we want */
904                 spin_lock(&fs->lock);
905                 if (fs->in_exec) {
906                         spin_unlock(&fs->lock);
907                         return -EAGAIN;
908                 }
909                 fs->users++;
910                 spin_unlock(&fs->lock);
911                 return 0;
912         }
913         tsk->fs = copy_fs_struct(fs);
914         if (!tsk->fs)
915                 return -ENOMEM;
916         return 0;
917 }
918 
919 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
920 {
921         struct files_struct *oldf, *newf;
922         int error = 0;
923 
924         /*
925          * A background process may not have any files ...
926          */
927         oldf = current->files;
928         if (!oldf)
929                 goto out;
930 
931         if (clone_flags & CLONE_FILES) {
932                 atomic_inc(&oldf->count);
933                 goto out;
934         }
935 
936         newf = dup_fd(oldf, &error);
937         if (!newf)
938                 goto out;
939 
940         tsk->files = newf;
941         error = 0;
942 out:
943         return error;
944 }
945 
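A small user-space counterpart (illustrative only, not part of fork.c) of what dup_fd() above gives a plain fork() child: the descriptor table is copied, so a close() in the child does not invalidate the parent's descriptor, even though both entries initially refer to the same open file description. The file path is just an example; any readable file will do.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
        int fd = open("/etc/hostname", O_RDONLY);
        if (fd < 0)
                return 1;

        pid_t pid = fork();
        if (pid < 0)
                return 1;
        if (pid == 0) {
                close(fd);              /* removes only the child's table entry */
                _exit(0);
        }
        waitpid(pid, NULL, 0);

        char buf[64];
        ssize_t n = read(fd, buf, sizeof(buf));  /* the parent's descriptor is still open */
        printf("parent read %zd bytes after the child closed its copy\n", n);
        close(fd);
        return 0;
}
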
946 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
947 {
948 #ifdef CONFIG_BLOCK
949         struct io_context *ioc = current->io_context;
950         struct io_context *new_ioc;
951 
952         if (!ioc)
953                 return 0;
954         /*
955          * Share io context with parent, if CLONE_IO is set
956          */
957         if (clone_flags & CLONE_IO) {
958                 ioc_task_link(ioc);
959                 tsk->io_context = ioc;
960         } else if (ioprio_valid(ioc->ioprio)) {
961                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
962                 if (unlikely(!new_ioc))
963                         return -ENOMEM;
964 
965                 new_ioc->ioprio = ioc->ioprio;
966                 put_io_context(new_ioc);
967         }
968 #endif
969         return 0;
970 }
971 
972 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
973 {
974         struct sighand_struct *sig;
975 
976         if (clone_flags & CLONE_SIGHAND) {
977                 atomic_inc(&current->sighand->count);
978                 return 0;
979         }
980         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
981         rcu_assign_pointer(tsk->sighand, sig);
982         if (!sig)
983                 return -ENOMEM;
984         atomic_set(&sig->count, 1);
985         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
986         return 0;
987 }
988 
989 void __cleanup_sighand(struct sighand_struct *sighand)
990 {
991         if (atomic_dec_and_test(&sighand->count)) {
992                 signalfd_cleanup(sighand);
993                 kmem_cache_free(sighand_cachep, sighand);
994         }
995 }
996 
997 
998 /*
999  * Initialize POSIX timer handling for a thread group.
1000  */
1001 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1002 {
1003         unsigned long cpu_limit;
1004 
1005         /* Thread group counters. */
1006         thread_group_cputime_init(sig);
1007 
1008         cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1009         if (cpu_limit != RLIM_INFINITY) {
1010                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1011                 sig->cputimer.running = 1;
1012         }
1013 
1014         /* The timer lists. */
1015         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1016         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1017         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1018 }
1019 
1020 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1021 {
1022         struct signal_struct *sig;
1023 
1024         if (clone_flags & CLONE_THREAD)
1025                 return 0;
1026 
1027         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1028         tsk->signal = sig;
1029         if (!sig)
1030                 return -ENOMEM;
1031 
1032         sig->nr_threads = 1;
1033         atomic_set(&sig->live, 1);
1034         atomic_set(&sig->sigcnt, 1);
1035 
1036         /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1037         sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1038         tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1039 
1040         init_waitqueue_head(&sig->wait_chldexit);
1041         sig->curr_target = tsk;
1042         init_sigpending(&sig->shared_pending);
1043         INIT_LIST_HEAD(&sig->posix_timers);
1044 
1045         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1046         sig->real_timer.function = it_real_fn;
1047 
1048         task_lock(current->group_leader);
1049         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1050         task_unlock(current->group_leader);
1051 
1052         posix_cpu_timers_init_group(sig);
1053 
1054         tty_audit_fork(sig);
1055         sched_autogroup_fork(sig);
1056 
1057 #ifdef CONFIG_CGROUPS
1058         init_rwsem(&sig->group_rwsem);
1059 #endif
1060 
1061         sig->oom_score_adj = current->signal->oom_score_adj;
1062         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1063 
1064         sig->has_child_subreaper = current->signal->has_child_subreaper ||
1065                                    current->signal->is_child_subreaper;
1066 
1067         mutex_init(&sig->cred_guard_mutex);
1068 
1069         return 0;
1070 }
1071 
1072 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1073 {
1074         unsigned long new_flags = p->flags;
1075 
1076         new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1077         new_flags |= PF_FORKNOEXEC;
1078         p->flags = new_flags;
1079 }
1080 
1081 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1082 {
1083         current->clear_child_tid = tidptr;
1084 
1085         return task_pid_vnr(current);
1086 }
1087 
1088 static void rt_mutex_init_task(struct task_struct *p)
1089 {
1090         raw_spin_lock_init(&p->pi_lock);
1091 #ifdef CONFIG_RT_MUTEXES
1092         p->pi_waiters = RB_ROOT;
1093         p->pi_waiters_leftmost = NULL;
1094         p->pi_blocked_on = NULL;
1095         p->pi_top_task = NULL;
1096 #endif
1097 }
1098 
1099 #ifdef CONFIG_MM_OWNER
1100 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1101 {
1102         mm->owner = p;
1103 }
1104 #endif /* CONFIG_MM_OWNER */
1105 
1106 /*
1107  * Initialize POSIX timer handling for a single task.
1108  */
1109 static void posix_cpu_timers_init(struct task_struct *tsk)
1110 {
1111         tsk->cputime_expires.prof_exp = 0;
1112         tsk->cputime_expires.virt_exp = 0;
1113         tsk->cputime_expires.sched_exp = 0;
1114         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1115         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1116         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1117 }
1118 
1119 static inline void
1120 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1121 {
1122          task->pids[type].pid = pid;
1123 }
1124 
1125 /*
1126  * This creates a new process as a copy of the old one,
1127  * but does not actually start it yet.
1128  *
1129  * It copies the registers, and all the appropriate
1130  * parts of the process environment (as per the clone
1131  * flags). The actual kick-off is left to the caller.
1132  */
1133 static struct task_struct *copy_process(unsigned long clone_flags,
1134                                         unsigned long stack_start,
1135                                         unsigned long stack_size,
1136                                         int __user *child_tidptr,
1137                                         struct pid *pid,
1138                                         int trace)
1139 {
1140         int retval;
1141         struct task_struct *p;
1142 
1143         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1144                 return ERR_PTR(-EINVAL);
1145 
1146         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1147                 return ERR_PTR(-EINVAL);
1148 
1149         /*
1150          * Thread groups must share signals as well, and detached threads
1151          * can only be started up within the thread group.
1152          */
1153         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1154                 return ERR_PTR(-EINVAL);
1155 
1156         /*
1157          * Shared signal handlers imply shared VM. By way of the above,
1158          * thread groups also imply shared VM. Blocking this case allows
1159          * for various simplifications in other code.
1160          */
1161         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1162                 return ERR_PTR(-EINVAL);
1163 
1164         /*
1165          * Siblings of global init remain as zombies on exit since they are
1166          * not reaped by their parent (swapper). To solve this and to avoid
1167          * multi-rooted process trees, prevent global and container-inits
1168          * from creating siblings.
1169          */
1170         if ((clone_flags & CLONE_PARENT) &&
1171                                 current->signal->flags & SIGNAL_UNKILLABLE)
1172                 return ERR_PTR(-EINVAL);
1173 
1174         /*
1175          * If the new process will be in a different pid or user namespace
1176          * do not allow it to share a thread group or signal handlers or
1177          * parent with the forking task.
1178          */
1179         if (clone_flags & CLONE_SIGHAND) {
1180                 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1181                     (task_active_pid_ns(current) !=
1182                                 current->nsproxy->pid_ns_for_children))
1183                         return ERR_PTR(-EINVAL);
1184         }
1185 
1186         retval = security_task_create(clone_flags);
1187         if (retval)
1188                 goto fork_out;
1189 
1190         retval = -ENOMEM;
1191         p = dup_task_struct(current);
1192         if (!p)
1193                 goto fork_out;
1194 
1195         ftrace_graph_init_task(p);
1196         get_seccomp_filter(p);
1197 
1198         rt_mutex_init_task(p);
1199 
1200 #ifdef CONFIG_PROVE_LOCKING
1201         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1202         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1203 #endif
1204         retval = -EAGAIN;
1205         if (atomic_read(&p->real_cred->user->processes) >=
1206                         task_rlimit(p, RLIMIT_NPROC)) {
1207                 if (p->real_cred->user != INIT_USER &&
1208                     !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
1209                         goto bad_fork_free;
1210         }
1211         current->flags &= ~PF_NPROC_EXCEEDED;
1212 
1213         retval = copy_creds(p, clone_flags);
1214         if (retval < 0)
1215                 goto bad_fork_free;
1216 
1217         /*
1218          * If multiple threads are within copy_process(), then this check
1219          * triggers too late. This doesn't hurt, the check is only there
1220          * to stop root fork bombs.
1221          */
1222         retval = -EAGAIN;
1223         if (nr_threads >= max_threads)
1224                 goto bad_fork_cleanup_count;
1225 
1226         if (!try_module_get(task_thread_info(p)->exec_domain->module))
1227                 goto bad_fork_cleanup_count;
1228 
1229         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1230         copy_flags(clone_flags, p);
1231         INIT_LIST_HEAD(&p->children);
1232         INIT_LIST_HEAD(&p->sibling);
1233         rcu_copy_process(p);
1234         p->vfork_done = NULL;
1235         spin_lock_init(&p->alloc_lock);
1236 
1237         init_sigpending(&p->pending);
1238 
1239         p->utime = p->stime = p->gtime = 0;
1240         p->utimescaled = p->stimescaled = 0;
1241 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1242         p->prev_cputime.utime = p->prev_cputime.stime = 0;
1243 #endif
1244 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1245         seqlock_init(&p->vtime_seqlock);
1246         p->vtime_snap = 0;
1247         p->vtime_snap_whence = VTIME_SLEEPING;
1248 #endif
1249 
1250 #if defined(SPLIT_RSS_COUNTING)
1251         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1252 #endif
1253 
1254         p->default_timer_slack_ns = current->timer_slack_ns;
1255 
1256         task_io_accounting_init(&p->ioac);
1257         acct_clear_integrals(p);
1258 
1259         posix_cpu_timers_init(p);
1260 
1261         do_posix_clock_monotonic_gettime(&p->start_time);
1262         p->real_start_time = p->start_time;
1263         monotonic_to_bootbased(&p->real_start_time);
1264         p->io_context = NULL;
1265         p->audit_context = NULL;
1266         if (clone_flags & CLONE_THREAD)
1267                 threadgroup_change_begin(current);
1268         cgroup_fork(p);
1269 #ifdef CONFIG_NUMA
1270         p->mempolicy = mpol_dup(p->mempolicy);
1271         if (IS_ERR(p->mempolicy)) {
1272                 retval = PTR_ERR(p->mempolicy);
1273                 p->mempolicy = NULL;
1274                 goto bad_fork_cleanup_cgroup;
1275         }
1276         mpol_fix_fork_child_flag(p);
1277 #endif
1278 #ifdef CONFIG_CPUSETS
1279         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1280         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1281         seqcount_init(&p->mems_allowed_seq);
1282 #endif
1283 #ifdef CONFIG_TRACE_IRQFLAGS
1284         p->irq_events = 0;
1285         p->hardirqs_enabled = 0;
1286         p->hardirq_enable_ip = 0;
1287         p->hardirq_enable_event = 0;
1288         p->hardirq_disable_ip = _THIS_IP_;
1289         p->hardirq_disable_event = 0;
1290         p->softirqs_enabled = 1;
1291         p->softirq_enable_ip = _THIS_IP_;
1292         p->softirq_enable_event = 0;
1293         p->softirq_disable_ip = 0;
1294         p->softirq_disable_event = 0;
1295         p->hardirq_context = 0;
1296         p->softirq_context = 0;
1297 #endif
1298 #ifdef CONFIG_LOCKDEP
1299         p->lockdep_depth = 0; /* no locks held yet */
1300         p->curr_chain_key = 0;
1301         p->lockdep_recursion = 0;
1302 #endif
1303 
1304 #ifdef CONFIG_DEBUG_MUTEXES
1305         p->blocked_on = NULL; /* not blocked yet */
1306 #endif
1307 #ifdef CONFIG_MEMCG
1308         p->memcg_batch.do_batch = 0;
1309         p->memcg_batch.memcg = NULL;
1310 #endif
1311 #ifdef CONFIG_BCACHE
1312         p->sequential_io        = 0;
1313         p->sequential_io_avg    = 0;
1314 #endif
1315 
1316         /* Perform scheduler related setup. Assign this task to a CPU. */
1317         retval = sched_fork(clone_flags, p);
1318         if (retval)
1319                 goto bad_fork_cleanup_policy;
1320 
1321         retval = perf_event_init_task(p);
1322         if (retval)
1323                 goto bad_fork_cleanup_policy;
1324         retval = audit_alloc(p);
1325         if (retval)
1326                 goto bad_fork_cleanup_policy;
1327         /* copy all the process information */
1328         retval = copy_semundo(clone_flags, p);
1329         if (retval)
1330                 goto bad_fork_cleanup_audit;
1331         retval = copy_files(clone_flags, p);
1332         if (retval)
1333                 goto bad_fork_cleanup_semundo;
1334         retval = copy_fs(clone_flags, p);
1335         if (retval)
1336                 goto bad_fork_cleanup_files;
1337         retval = copy_sighand(clone_flags, p);
1338         if (retval)
1339                 goto bad_fork_cleanup_fs;
1340         retval = copy_signal(clone_flags, p);
1341         if (retval)
1342                 goto bad_fork_cleanup_sighand;
1343         retval = copy_mm(clone_flags, p);
1344         if (retval)
1345                 goto bad_fork_cleanup_signal;
1346         retval = copy_namespaces(clone_flags, p);
1347         if (retval)
1348                 goto bad_fork_cleanup_mm;
1349         retval = copy_io(clone_flags, p);
1350         if (retval)
1351                 goto bad_fork_cleanup_namespaces;
1352         retval = copy_thread(clone_flags, stack_start, stack_size, p);
1353         if (retval)
1354                 goto bad_fork_cleanup_io;
1355 
1356         if (pid != &init_struct_pid) {
1357                 retval = -ENOMEM;
1358                 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1359                 if (!pid)
1360                         goto bad_fork_cleanup_io;
1361         }
1362 
1363         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1364         /*
1365          * Clear TID on mm_release()?
1366          */
1367         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1368 #ifdef CONFIG_BLOCK
1369         p->plug = NULL;
1370 #endif
1371 #ifdef CONFIG_FUTEX
1372         p->robust_list = NULL;
1373 #ifdef CONFIG_COMPAT
1374         p->compat_robust_list = NULL;
1375 #endif
1376         INIT_LIST_HEAD(&p->pi_state_list);
1377         p->pi_state_cache = NULL;
1378 #endif
1379         /*
1380          * sigaltstack should be cleared when sharing the same VM
1381          */
1382         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1383                 p->sas_ss_sp = p->sas_ss_size = 0;
1384 
1385         /*
1386          * Syscall tracing and stepping should be turned off in the
1387          * child regardless of CLONE_PTRACE.
1388          */
1389         user_disable_single_step(p);
1390         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1391 #ifdef TIF_SYSCALL_EMU
1392         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1393 #endif
1394         clear_all_latency_tracing(p);
1395 
1396         /* ok, now we should be set up.. */
1397         p->pid = pid_nr(pid);
1398         if (clone_flags & CLONE_THREAD) {
1399                 p->exit_signal = -1;
1400                 p->group_leader = current->group_leader;
1401                 p->tgid = current->tgid;
1402         } else {
1403                 if (clone_flags & CLONE_PARENT)
1404                         p->exit_signal = current->group_leader->exit_signal;
1405                 else
1406                         p->exit_signal = (clone_flags & CSIGNAL);
1407                 p->group_leader = p;
1408                 p->tgid = p->pid;
1409         }
1410 
1411         p->nr_dirtied = 0;
1412         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1413         p->dirty_paused_when = 0;
1414 
1415         p->pdeath_signal = 0;
1416         INIT_LIST_HEAD(&p->thread_group);
1417         p->task_works = NULL;
1418 
1419         /*
1420          * Make it visible to the rest of the system, but don't wake it up yet.
1421          * Need tasklist lock for parent etc handling!
1422          */
1423         write_lock_irq(&tasklist_lock);
1424 
1425         /* CLONE_PARENT re-uses the old parent */
1426         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1427                 p->real_parent = current->real_parent;
1428                 p->parent_exec_id = current->parent_exec_id;
1429         } else {
1430                 p->real_parent = current;
1431                 p->parent_exec_id = current->self_exec_id;
1432         }
1433 
1434         spin_lock(&current->sighand->siglock);
1435 
1436         /*
1437          * Process group and session signals need to be delivered to just the
1438          * parent before the fork or both the parent and the child after the
1439          * fork. Restart if a signal comes in before we add the new process to
1440          * its process group.
1441          * A fatal signal pending means that current will exit, so the new
1442          * thread can't slip out of an OOM kill (or normal SIGKILL).
1443         */
1444         recalc_sigpending();
1445         if (signal_pending(current)) {
1446                 spin_unlock(&current->sighand->siglock);
1447                 write_unlock_irq(&tasklist_lock);
1448                 retval = -ERESTARTNOINTR;
1449                 goto bad_fork_free_pid;
1450         }
1451 
1452         if (likely(p->pid)) {
1453                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1454 
1455                 init_task_pid(p, PIDTYPE_PID, pid);
1456                 if (thread_group_leader(p)) {
1457                         init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
1458                         init_task_pid(p, PIDTYPE_SID, task_session(current));
1459 
1460                         if (is_child_reaper(pid)) {
1461                                 ns_of_pid(pid)->child_reaper = p;
1462                                 p->signal->flags |= SIGNAL_UNKILLABLE;
1463                         }
1464 
1465                         p->signal->leader_pid = pid;
1466                         p->signal->tty = tty_kref_get(current->signal->tty);
1467                         list_add_tail(&p->sibling, &p->real_parent->children);
1468                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1469                         attach_pid(p, PIDTYPE_PGID);
1470                         attach_pid(p, PIDTYPE_SID);
1471                         __this_cpu_inc(process_counts);
1472                 } else {
1473                         current->signal->nr_threads++;
1474                         atomic_inc(&current->signal->live);
1475                         atomic_inc(&current->signal->sigcnt);
1476                         list_add_tail_rcu(&p->thread_group,
1477                                           &p->group_leader->thread_group);
1478                         list_add_tail_rcu(&p->thread_node,
1479                                           &p->signal->thread_head);
1480                 }
1481                 attach_pid(p, PIDTYPE_PID);
1482                 nr_threads++;
1483         }
1484 
1485         total_forks++;
1486         spin_unlock(&current->sighand->siglock);
1487         write_unlock_irq(&tasklist_lock);
1488         proc_fork_connector(p);
1489         cgroup_post_fork(p);
1490         if (clone_flags & CLONE_THREAD)
1491                 threadgroup_change_end(current);
1492         perf_event_fork(p);
1493 
1494         trace_task_newtask(p, clone_flags);
1495         uprobe_copy_process(p, clone_flags);
1496 
1497         return p;
1498 
1499 bad_fork_free_pid:
1500         if (pid != &init_struct_pid)
1501                 free_pid(pid);
1502 bad_fork_cleanup_io:
1503         if (p->io_context)
1504                 exit_io_context(p);
1505 bad_fork_cleanup_namespaces:
1506         exit_task_namespaces(p);
1507 bad_fork_cleanup_mm:
1508         if (p->mm)
1509                 mmput(p->mm);
1510 bad_fork_cleanup_signal:
1511         if (!(clone_flags & CLONE_THREAD))
1512                 free_signal_struct(p->signal);
1513 bad_fork_cleanup_sighand:
1514         __cleanup_sighand(p->sighand);
1515 bad_fork_cleanup_fs:
1516         exit_fs(p); /* blocking */
1517 bad_fork_cleanup_files:
1518         exit_files(p); /* blocking */
1519 bad_fork_cleanup_semundo:
1520         exit_sem(p);
1521 bad_fork_cleanup_audit:
1522         audit_free(p);
1523 bad_fork_cleanup_policy:
1524         perf_event_free_task(p);
1525 #ifdef CONFIG_NUMA
1526         mpol_put(p->mempolicy);
1527 bad_fork_cleanup_cgroup:
1528 #endif
1529         if (clone_flags & CLONE_THREAD)
1530                 threadgroup_change_end(current);
1531         cgroup_exit(p, 0);
1532         delayacct_tsk_free(p);
1533         module_put(task_thread_info(p)->exec_domain->module);
1534 bad_fork_cleanup_count:
1535         atomic_dec(&p->cred->user->processes);
1536         exit_creds(p);
1537 bad_fork_free:
1538         free_task(p);
1539 fork_out:
1540         return ERR_PTR(retval);
1541 }
1542 
1543 static inline void init_idle_pids(struct pid_link *links)
1544 {
1545         enum pid_type type;
1546 
1547         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1548                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1549                 links[type].pid = &init_struct_pid;
1550         }
1551 }
1552 
1553 struct task_struct *fork_idle(int cpu)
1554 {
1555         struct task_struct *task;
1556         task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
1557         if (!IS_ERR(task)) {
1558                 init_idle_pids(task->pids);
1559                 init_idle(task, cpu);
1560         }
1561 
1562         return task;
1563 }
1564 
1565 /*
1566  *  Ok, this is the main fork-routine.
1567  *
1568  * It copies the process, and if successful kick-starts
1569  * it and waits for it to finish using the VM if required.
1570  */
1571 long do_fork(unsigned long clone_flags,
1572               unsigned long stack_start,
1573               unsigned long stack_size,
1574               int __user *parent_tidptr,
1575               int __user *child_tidptr)
1576 {
1577         struct task_struct *p;
1578         int trace = 0;
1579         long nr;
1580 
1581         /*
1582          * Determine whether and which event to report to ptracer.  When
1583          * called from kernel_thread or CLONE_UNTRACED is explicitly
1584          * requested, no event is reported; otherwise, report if the event
1585          * for the type of forking is enabled.
1586          */
1587         if (!(clone_flags & CLONE_UNTRACED)) {
1588                 if (clone_flags & CLONE_VFORK)
1589                         trace = PTRACE_EVENT_VFORK;
1590                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1591                         trace = PTRACE_EVENT_CLONE;
1592                 else
1593                         trace = PTRACE_EVENT_FORK;
1594 
1595                 if (likely(!ptrace_event_enabled(current, trace)))
1596                         trace = 0;
1597         }
1598 
1599         p = copy_process(clone_flags, stack_start, stack_size,
1600                          child_tidptr, NULL, trace);
1601         /*
1602          * Do this prior to waking up the new thread - the thread pointer
1603          * might get invalid after that point, if the thread exits quickly.
1604          */
1605         if (!IS_ERR(p)) {
1606                 struct completion vfork;
1607 
1608                 trace_sched_process_fork(current, p);
1609 
1610                 nr = task_pid_vnr(p);
1611 
1612                 if (clone_flags & CLONE_PARENT_SETTID)
1613                         put_user(nr, parent_tidptr);
1614 
1615                 if (clone_flags & CLONE_VFORK) {
1616                         p->vfork_done = &vfork;
1617                         init_completion(&vfork);
1618                         get_task_struct(p);
1619                 }
1620 
1621                 wake_up_new_task(p);
1622 
1623                 /* forking complete and child started to run, tell ptracer */
1624                 if (unlikely(trace))
1625                         ptrace_event(trace, nr);
1626 
1627                 if (clone_flags & CLONE_VFORK) {
1628                         if (!wait_for_vfork_done(p, &vfork))
1629                                 ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1630                 }
1631         } else {
1632                 nr = PTR_ERR(p);
1633         }
1634         return nr;
1635 }
1636 
1637 /*
1638  * Create a kernel thread.
1639  */
1640 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
1641 {
1642         return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
1643                 (unsigned long)arg, NULL, NULL);
1644 }
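
As a usage illustration (not part of this file): a kernel-side caller could start a thread of execution with kernel_thread(). The names thread_fn and start_helper are hypothetical, and most modern code would go through the kthread_create()/kthread_run() helpers rather than calling kernel_thread() directly.

        static int thread_fn(void *data)
        {
                /* do the work, then return; the return value becomes the exit code */
                return 0;
        }

        static void start_helper(void)
        {
                pid_t pid = kernel_thread(thread_fn, NULL, CLONE_FS | CLONE_FILES);

                if (pid < 0)
                        pr_err("kernel_thread() failed: %d\n", (int)pid);
        }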
1645 
1646 #ifdef __ARCH_WANT_SYS_FORK
1647 SYSCALL_DEFINE0(fork)
1648 {
1649 #ifdef CONFIG_MMU
1650         return do_fork(SIGCHLD, 0, 0, NULL, NULL);
1651 #else
1652         /* cannot be supported in nommu mode */
1653         return -EINVAL;
1654 #endif
1655 }
1656 #endif
1657 
1658 #ifdef __ARCH_WANT_SYS_VFORK
1659 SYSCALL_DEFINE0(vfork)
1660 {
1661         return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
1662                         0, NULL, NULL);
1663 }
1664 #endif
1665 
1666 #ifdef __ARCH_WANT_SYS_CLONE
1667 #ifdef CONFIG_CLONE_BACKWARDS
1668 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1669                  int __user *, parent_tidptr,
1670                  int, tls_val,
1671                  int __user *, child_tidptr)
1672 #elif defined(CONFIG_CLONE_BACKWARDS2)
1673 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1674                  int __user *, parent_tidptr,
1675                  int __user *, child_tidptr,
1676                  int, tls_val)
1677 #elif defined(CONFIG_CLONE_BACKWARDS3)
1678 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1679                 int, stack_size,
1680                 int __user *, parent_tidptr,
1681                 int __user *, child_tidptr,
1682                 int, tls_val)
1683 #else
1684 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1685                  int __user *, parent_tidptr,
1686                  int __user *, child_tidptr,
1687                  int, tls_val)
1688 #endif
1689 {
1690         return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
1691 }
1692 #endif
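
From user space these entry points are normally reached through the C library's clone() wrapper rather than a raw syscall. A self-contained sketch (assuming glibc; child_fn and the 64 KiB stack size are arbitrary illustrations) might look like this:

        #define _GNU_SOURCE
        #include <sched.h>
        #include <signal.h>
        #include <stdlib.h>
        #include <sys/wait.h>

        static int child_fn(void *arg)
        {
                return 0;               /* becomes the child's exit status */
        }

        int main(void)
        {
                size_t stack_size = 64 * 1024;
                char *stack = malloc(stack_size);
                pid_t pid;

                if (!stack)
                        return 1;
                /* glibc expects the top of the child stack on stack-grows-down machines */
                pid = clone(child_fn, stack + stack_size,
                            CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, NULL);
                if (pid == -1)
                        return 1;
                waitpid(pid, NULL, 0);
                free(stack);
                return 0;
        }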
1693 
1694 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1695 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1696 #endif
1697 
1698 static void sighand_ctor(void *data)
1699 {
1700         struct sighand_struct *sighand = data;
1701 
1702         spin_lock_init(&sighand->siglock);
1703         init_waitqueue_head(&sighand->signalfd_wqh);
1704 }
1705 
1706 void __init proc_caches_init(void)
1707 {
1708         sighand_cachep = kmem_cache_create("sighand_cache",
1709                         sizeof(struct sighand_struct), 0,
1710                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1711                         SLAB_NOTRACK, sighand_ctor);
1712         signal_cachep = kmem_cache_create("signal_cache",
1713                         sizeof(struct signal_struct), 0,
1714                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1715         files_cachep = kmem_cache_create("files_cache",
1716                         sizeof(struct files_struct), 0,
1717                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1718         fs_cachep = kmem_cache_create("fs_cache",
1719                         sizeof(struct fs_struct), 0,
1720                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1721         /*
1722          * FIXME! The "sizeof(struct mm_struct)" currently includes the
1723          * whole struct cpumask for the OFFSTACK case. We could change
1724          * this to *only* allocate as much of it as required by the
1725          * maximum number of CPUs we can ever have.  The cpumask_allocation
1726          * is at the end of the structure, exactly for that reason.
1727          */
1728         mm_cachep = kmem_cache_create("mm_struct",
1729                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1730                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1731         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1732         mmap_init();
1733         nsproxy_cache_init();
1734 }
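
To illustrate the allocator pattern used above (a sketch, not part of fork.c): a cache created with kmem_cache_create() hands out fixed-size objects via kmem_cache_alloc() and takes them back with kmem_cache_free(). The names my_obj and my_cachep are hypothetical.

        struct my_obj {
                int a;
                spinlock_t lock;
        };

        static struct kmem_cache *my_cachep;

        static int __init my_cache_init(void)
        {
                my_cachep = kmem_cache_create("my_cache", sizeof(struct my_obj),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
                return my_cachep ? 0 : -ENOMEM;
        }

        static struct my_obj *my_obj_alloc(void)
        {
                return kmem_cache_alloc(my_cachep, GFP_KERNEL);
        }

        static void my_obj_free(struct my_obj *obj)
        {
                kmem_cache_free(my_cachep, obj);
        }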
1735 
1736 /*
1737  * Check constraints on flags passed to the unshare system call.
1738  */
1739 static int check_unshare_flags(unsigned long unshare_flags)
1740 {
1741         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1742                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1743                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
1744                                 CLONE_NEWUSER|CLONE_NEWPID))
1745                 return -EINVAL;
1746         /*
1747          * Not implemented, but pretend it works if there is nothing to
1748          * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1749          * needs to unshare vm.
1750          */
1751         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1752                 /* FIXME: get_task_mm() increments ->mm_users */
1753                 if (atomic_read(&current->mm->mm_users) > 1)
1754                         return -EINVAL;
1755         }
1756 
1757         return 0;
1758 }
1759 
1760 /*
1761  * Unshare the filesystem structure if it is being shared
1762  */
1763 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1764 {
1765         struct fs_struct *fs = current->fs;
1766 
1767         if (!(unshare_flags & CLONE_FS) || !fs)
1768                 return 0;
1769 
1770         /* don't need the lock here; in the worst case we'll do a useless copy */
1771         if (fs->users == 1)
1772                 return 0;
1773 
1774         *new_fsp = copy_fs_struct(fs);
1775         if (!*new_fsp)
1776                 return -ENOMEM;
1777 
1778         return 0;
1779 }
1780 
1781 /*
1782  * Unshare file descriptor table if it is being shared
1783  */
1784 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1785 {
1786         struct files_struct *fd = current->files;
1787         int error = 0;
1788 
1789         if ((unshare_flags & CLONE_FILES) &&
1790             (fd && atomic_read(&fd->count) > 1)) {
1791                 *new_fdp = dup_fd(fd, &error);
1792                 if (!*new_fdp)
1793                         return error;
1794         }
1795 
1796         return 0;
1797 }
1798 
1799 /*
1800  * unshare allows a process to 'unshare' part of the process
1801  * context which was originally shared using clone.  copy_*
1802  * functions used by do_fork() cannot be used here directly
1803  * because they modify an inactive task_struct that is being
1804  * constructed. Here we are modifying the current, active,
1805  * task_struct.
1806  */
1807 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1808 {
1809         struct fs_struct *fs, *new_fs = NULL;
1810         struct files_struct *fd, *new_fd = NULL;
1811         struct cred *new_cred = NULL;
1812         struct nsproxy *new_nsproxy = NULL;
1813         int do_sysvsem = 0;
1814         int err;
1815 
1816         /*
1817          * If unsharing a user namespace, must also unshare the thread.
1818          */
1819         if (unshare_flags & CLONE_NEWUSER)
1820                 unshare_flags |= CLONE_THREAD | CLONE_FS;
1821         /*
1822          * If unsharing a thread from a thread group, must also unshare vm.
1823          */
1824         if (unshare_flags & CLONE_THREAD)
1825                 unshare_flags |= CLONE_VM;
1826         /*
1827          * If unsharing vm, must also unshare signal handlers.
1828          */
1829         if (unshare_flags & CLONE_VM)
1830                 unshare_flags |= CLONE_SIGHAND;
1831         /*
1832          * If unsharing a namespace, must also unshare filesystem information.
1833          */
1834         if (unshare_flags & CLONE_NEWNS)
1835                 unshare_flags |= CLONE_FS;
1836 
1837         err = check_unshare_flags(unshare_flags);
1838         if (err)
1839                 goto bad_unshare_out;
1840         /*
1841          * CLONE_NEWIPC must also detach from the undolist: after switching
1842          * to a new ipc namespace, the semaphore arrays from the old
1843          * namespace are unreachable.
1844          */
1845         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1846                 do_sysvsem = 1;
1847         err = unshare_fs(unshare_flags, &new_fs);
1848         if (err)
1849                 goto bad_unshare_out;
1850         err = unshare_fd(unshare_flags, &new_fd);
1851         if (err)
1852                 goto bad_unshare_cleanup_fs;
1853         err = unshare_userns(unshare_flags, &new_cred);
1854         if (err)
1855                 goto bad_unshare_cleanup_fd;
1856         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1857                                          new_cred, new_fs);
1858         if (err)
1859                 goto bad_unshare_cleanup_cred;
1860 
1861         if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
1862                 if (do_sysvsem) {
1863                         /*
1864                          * CLONE_SYSVSEM is equivalent to sys_exit().
1865                          */
1866                         exit_sem(current);
1867                 }
1868 
1869                 if (new_nsproxy)
1870                         switch_task_namespaces(current, new_nsproxy);
1871 
1872                 task_lock(current);
1873 
1874                 if (new_fs) {
1875                         fs = current->fs;
1876                         spin_lock(&fs->lock);
1877                         current->fs = new_fs;
1878                         if (--fs->users)
1879                                 new_fs = NULL;
1880                         else
1881                                 new_fs = fs;
1882                         spin_unlock(&fs->lock);
1883                 }
1884 
1885                 if (new_fd) {
1886                         fd = current->files;
1887                         current->files = new_fd;
1888                         new_fd = fd;
1889                 }
1890 
1891                 task_unlock(current);
1892 
1893                 if (new_cred) {
1894                         /* Install the new user namespace */
1895                         commit_creds(new_cred);
1896                         new_cred = NULL;
1897                 }
1898         }
1899 
1900 bad_unshare_cleanup_cred:
1901         if (new_cred)
1902                 put_cred(new_cred);
1903 bad_unshare_cleanup_fd:
1904         if (new_fd)
1905                 put_files_struct(new_fd);
1906 
1907 bad_unshare_cleanup_fs:
1908         if (new_fs)
1909                 free_fs_struct(new_fs);
1910 
1911 bad_unshare_out:
1912         return err;
1913 }
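
For reference, the corresponding user-space call; a minimal sketch (not part of this file) that detaches the caller into a private mount namespace, which normally requires CAP_SYS_ADMIN:

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>

        int main(void)
        {
                if (unshare(CLONE_NEWNS) == -1) {
                        perror("unshare(CLONE_NEWNS)");
                        return 1;
                }
                /* mount and umount operations from here on affect only this namespace */
                return 0;
        }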
1914 
1915 /*
1916  *      Helper to unshare the files of the current task.
1917  *      We don't want to expose copy_files internals to
1918  *      the exec layer of the kernel.
1919  */
1920 
1921 int unshare_files(struct files_struct **displaced)
1922 {
1923         struct task_struct *task = current;
1924         struct files_struct *copy = NULL;
1925         int error;
1926 
1927         error = unshare_fd(CLONE_FILES, &copy);
1928         if (error || !copy) {
1929                 *displaced = NULL;
1930                 return error;
1931         }
1932         *displaced = task->files;
1933         task_lock(task);
1934         task->files = copy;
1935         task_unlock(task);
1936         return 0;
1937 }
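
As a rough illustration of the intended calling pattern (a simplified sketch of how an exec-side caller might use it, not a copy of fs/exec.c):

        struct files_struct *displaced;
        int retval;

        retval = unshare_files(&displaced);
        if (retval)
                return retval;

        /* ... run the exec work with a file table private to this task ... */

        if (displaced)
                put_files_struct(displaced);
        return 0;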
1938 
