Merge branch 'linux-linaro-lsk' into linux-linaro-lsk-android
[firefly-linux-kernel-4.4.55.git] / kernel / fork.c
index 0aa1bb5c8d6edc7e60f41d9d84cfacec9cd37694..8011604b3317d23386615cbbebd5456e149b61b3 100644 (file)
@@ -327,6 +327,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto free_ti;
 
        tsk->stack = ti;
+#ifdef CONFIG_SECCOMP
+       /*
+        * We must handle setting up seccomp filters once we're under
+        * the sighand lock in case orig has changed between now and
+        * then. Until then, filter must be NULL to avoid messing up
+        * the usage counts on the error path calling free_task.
+        */
+       tsk->seccomp.filter = NULL;
+#endif
 
        setup_thread_stack(tsk, orig);
        clear_user_return_notifier(tsk);
@@ -1062,6 +1071,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
        atomic_set(&sig->sigcnt, 1);
+
+       /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
+       sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
+       tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
+
        init_waitqueue_head(&sig->wait_chldexit);
        sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
@@ -1103,6 +1117,39 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
        p->flags = new_flags;
 }
 
+static void copy_seccomp(struct task_struct *p)
+{
+#ifdef CONFIG_SECCOMP
+       /*
+        * Must be called with sighand->lock held, which is common to
+        * all threads in the group. Holding cred_guard_mutex is not
+        * needed because this new task is not yet running and cannot
+        * be racing exec.
+        */
+       BUG_ON(!spin_is_locked(&current->sighand->siglock));
+
+       /* Ref-count the new filter user, and assign it. */
+       get_seccomp_filter(current);
+       p->seccomp = current->seccomp;
+
+       /*
+        * Explicitly enable no_new_privs here in case it got set
+        * between the task_struct being duplicated and holding the
+        * sighand lock. The seccomp state and nnp must be in sync.
+        */
+       if (task_no_new_privs(current))
+               task_set_no_new_privs(p);
+
+       /*
+        * If the parent gained a seccomp mode after copying thread
+        * flags and before we took the sighand lock, we have to
+        * manually enable the seccomp thread flag here.
+        */
+       if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
+               set_tsk_thread_flag(p, TIF_SECCOMP);
+#endif
+}
+
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
        current->clear_child_tid = tidptr;
@@ -1207,7 +1254,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto fork_out;
 
        ftrace_graph_init_task(p);
-       get_seccomp_filter(p);
 
        rt_mutex_init_task(p);
 
@@ -1336,7 +1382,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_policy;
        retval = audit_alloc(p);
        if (retval)
-               goto bad_fork_cleanup_policy;
+               goto bad_fork_cleanup_perf;
        /* copy all the process information */
        retval = copy_semundo(clone_flags, p);
        if (retval)
@@ -1449,6 +1495,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        spin_lock(&current->sighand->siglock);
 
+       /*
+        * Copy seccomp details explicitly here, in case they were changed
+        * before holding sighand lock.
+        */
+       copy_seccomp(p);
+
        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
@@ -1465,14 +1517,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_free_pid;
        }
 
-       if (clone_flags & CLONE_THREAD) {
-               current->signal->nr_threads++;
-               atomic_inc(&current->signal->live);
-               atomic_inc(&current->signal->sigcnt);
-               p->group_leader = current->group_leader;
-               list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-       }
-
        if (likely(p->pid)) {
                ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
@@ -1489,6 +1533,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __this_cpu_inc(process_counts);
+               } else {
+                       current->signal->nr_threads++;
+                       atomic_inc(&current->signal->live);
+                       atomic_inc(&current->signal->sigcnt);
+                       p->group_leader = current->group_leader;
+                       list_add_tail_rcu(&p->thread_group,
+                                         &p->group_leader->thread_group);
+                       list_add_tail_rcu(&p->thread_node,
+                                         &p->signal->thread_head);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
@@ -1496,7 +1549,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        total_forks++;
        spin_unlock(&current->sighand->siglock);
+       syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
+
        proc_fork_connector(p);
        cgroup_post_fork(p);
        if (clone_flags & CLONE_THREAD)
@@ -1531,8 +1586,9 @@ bad_fork_cleanup_semundo:
        exit_sem(p);
 bad_fork_cleanup_audit:
        audit_free(p);
-bad_fork_cleanup_policy:
+bad_fork_cleanup_perf:
        perf_event_free_task(p);
+bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
@@ -1624,10 +1680,12 @@ long do_fork(unsigned long clone_flags,
         */
        if (!IS_ERR(p)) {
                struct completion vfork;
+               struct pid *pid;
 
                trace_sched_process_fork(current, p);
 
-               nr = task_pid_vnr(p);
+               pid = get_task_pid(p, PIDTYPE_PID);
+               nr = pid_vnr(pid);
 
                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);
@@ -1642,12 +1700,14 @@ long do_fork(unsigned long clone_flags,
 
                /* forking complete and child started to run, tell ptracer */
                if (unlikely(trace))
-                       ptrace_event(trace, nr);
+                       ptrace_event_pid(trace, pid);
 
                if (clone_flags & CLONE_VFORK) {
                        if (!wait_for_vfork_done(p, &vfork))
-                               ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+                               ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
                }
+
+               put_pid(pid);
        } else {
                nr = PTR_ERR(p);
        }