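This diff adds a cpus_requested mask to struct cpuset, kept alongside cpus_allowed. Writes to a cpuset's CPU list are parsed into cpus_requested and checked against cpu_present_mask, while cpus_allowed becomes the intersection of cpus_requested with cpu_active_mask; reads of the CPU list report cpus_requested, and the CPU-hotplug path rebuilds a cpuset's effective CPUs from cpus_requested, so CPUs that go offline and later come back are restored to the cpuset instead of staying lost.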
[firefly-linux-kernel-4.4.55.git]
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b271353d5202ba4fa2e8f9d469e1efd4307cf609..3f9db31c5d043287b7e566fdaace6446ee46e6a1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -98,6 +98,7 @@ struct cpuset {
 
        /* user-configured CPUs and Memory Nodes allow to tasks */
        cpumask_var_t cpus_allowed;
+       cpumask_var_t cpus_requested;
        nodemask_t mems_allowed;
 
        /* effective CPUs and Memory Nodes allow to tasks */
@@ -397,7 +398,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-       return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+       return  cpumask_subset(p->cpus_requested, q->cpus_requested) &&
                nodes_subset(p->mems_allowed, q->mems_allowed) &&
                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -497,7 +498,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
        cpuset_for_each_child(c, css, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
-                   cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+                   cpumask_intersects(trial->cpus_requested, c->cpus_requested))
                        goto out;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
@@ -956,17 +957,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (!*buf) {
                cpumask_clear(trialcs->cpus_allowed);
        } else {
-               retval = cpulist_parse(buf, trialcs->cpus_allowed);
+               retval = cpulist_parse(buf, trialcs->cpus_requested);
                if (retval < 0)
                        return retval;
 
-               if (!cpumask_subset(trialcs->cpus_allowed,
-                                   top_cpuset.cpus_allowed))
+               if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
                        return -EINVAL;
+
+               cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
        }
 
        /* Nothing to do if the cpus didn't change */
-       if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+       if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
                return 0;
 
        retval = validate_change(cs, trialcs);
@@ -975,6 +977,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
        spin_lock_irq(&callback_lock);
        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+       cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->cpus_allowed as a temp variable */
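The core of the change is in update_cpumask(): the list written by the user is parsed into cpus_requested, rejected with -EINVAL if it names CPUs that are not present, and cpus_allowed is then derived as the intersection of cpus_requested with cpu_active_mask; the "nothing changed" check and the copy under callback_lock are likewise switched to cover both masks. Below is a minimal user-space model of that relationship using plain 64-bit bitmasks instead of the kernel cpumask API; the variable names and example values are illustrative only, not taken from the patch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t requested = 0xF0; /* user asked for CPUs 4-7 */
        uint64_t present   = 0xFF; /* CPUs 0-7 exist in this system */
        uint64_t active    = 0x3F; /* only CPUs 0-5 are online right now */

        /* as in update_cpumask(): reject CPUs that are not present at all */
        if (requested & ~present) {
            fprintf(stderr, "requested CPUs are not present: -EINVAL\n");
            return 1;
        }

        /* cpus_allowed = cpus_requested & cpu_active_mask */
        uint64_t allowed = requested & active;
        printf("requested %#llx, allowed %#llx\n",
               (unsigned long long)requested, (unsigned long long)allowed);

        /* When CPUs 6-7 come back online, recomputing from 'requested'
         * restores them -- the reason the two masks are kept apart. */
        active = 0xFF;
        allowed = requested & active;
        printf("after hotplug, allowed %#llx\n", (unsigned long long)allowed);
        return 0;
    }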
@@ -1765,7 +1768,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 
        switch (type) {
        case FILE_CPULIST:
-               seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+               seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
                break;
        case FILE_MEMLIST:
                seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1954,11 +1957,14 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
                return ERR_PTR(-ENOMEM);
        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
                goto free_cs;
+       if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+               goto free_allowed;
        if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-               goto free_cpus;
+               goto free_requested;
 
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
+       cpumask_clear(cs->cpus_requested);
        nodes_clear(cs->mems_allowed);
        cpumask_clear(cs->effective_cpus);
        nodes_clear(cs->effective_mems);
@@ -1967,7 +1973,9 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 
        return &cs->css;
 
-free_cpus:
+free_requested:
+       free_cpumask_var(cs->cpus_requested);
+free_allowed:
        free_cpumask_var(cs->cpus_allowed);
 free_cs:
        kfree(cs);
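cpuset_css_alloc() now allocates a third cpumask, so its error unwinding grows one more goto label: a failed effective_cpus allocation jumps to free_requested, which frees cpus_requested and falls through to the existing labels. A small user-space sketch of the same unwind shape, with made-up names standing in for the three cpumask allocations (a pattern illustration, not code from the patch):

    #include <stdlib.h>

    struct ctx { void *allowed, *requested, *effective; };

    static struct ctx *ctx_alloc(void)
    {
        struct ctx *ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
            return NULL;
        if (!(ctx->allowed = malloc(64)))
            goto free_ctx;
        if (!(ctx->requested = malloc(64)))   /* the new allocation gets */
            goto free_allowed;                /* its own unwind label    */
        if (!(ctx->effective = malloc(64)))
            goto free_requested;              /* later failures free it too */
        return ctx;

    free_requested:
        free(ctx->requested);
    free_allowed:
        free(ctx->allowed);
    free_ctx:
        free(ctx);
        return NULL;
    }

    int main(void)
    {
        struct ctx *ctx = ctx_alloc();
        if (!ctx)
            return 1;
        free(ctx->effective);
        free(ctx->requested);
        free(ctx->allowed);
        free(ctx);
        return 0;
    }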
@@ -2030,6 +2038,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
        cs->mems_allowed = parent->mems_allowed;
        cs->effective_mems = parent->mems_allowed;
        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+       cpumask_copy(cs->cpus_requested, parent->cpus_requested);
        cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
        spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2064,6 +2073,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 
        free_cpumask_var(cs->effective_cpus);
        free_cpumask_var(cs->cpus_allowed);
+       free_cpumask_var(cs->cpus_requested);
        kfree(cs);
 }
 
@@ -2128,8 +2138,11 @@ int __init cpuset_init(void)
                BUG();
        if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
                BUG();
+       if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+               BUG();
 
        cpumask_setall(top_cpuset.cpus_allowed);
+       cpumask_setall(top_cpuset.cpus_requested);
        nodes_setall(top_cpuset.mems_allowed);
        cpumask_setall(top_cpuset.effective_cpus);
        nodes_setall(top_cpuset.effective_mems);
@@ -2263,7 +2276,7 @@ retry:
                goto retry;
        }
 
-       cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+       cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
        nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
        cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
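Finally, the hotplug update path recomputes a cpuset's CPUs from cpus_requested rather than from the (possibly already trimmed) cpus_allowed, so CPUs that were taken offline and later return are handed back to the cpuset, and cpuset_common_seq_show() reports the requested list back to user space. A hedged usage sketch from user space follows; it assumes a legacy (v1) cpuset hierarchy mounted at /sys/fs/cgroup/cpuset and a pre-existing child group named "demo", neither of which this patch creates or guarantees.

    #include <stdio.h>

    static int write_file(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");
        if (!f)
            return -1;
        int ok = fputs(val, f) >= 0;
        return (fclose(f) == 0 && ok) ? 0 : -1;
    }

    int main(void)
    {
        const char *cpus = "/sys/fs/cgroup/cpuset/demo/cpuset.cpus";
        char buf[64] = "";

        /* Ask for CPUs 4-7 for the group. */
        if (write_file(cpus, "4-7") != 0) {
            perror("write cpuset.cpus");
            return 1;
        }

        /* With this patch the read reports the requested list ("4-7") even
         * while some of those CPUs are offline; previously the stored list
         * was trimmed on hot-unplug and never grew back. */
        FILE *f = fopen(cpus, "r");
        if (f && fgets(buf, sizeof(buf), f))
            printf("cpuset.cpus = %s", buf);
        if (f)
            fclose(f);
        return f ? 0 : 1;
    }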