Merge tag 'v3.10.52' into linux-linaro-lsk
author    Alex Shi <alex.shi@linaro.org>
          Fri, 8 Aug 2014 05:34:23 +0000 (13:34 +0800)
committer Alex Shi <alex.shi@linaro.org>
          Fri, 8 Aug 2014 05:34:23 +0000 (13:34 +0800)
This is the 3.10.52 stable release

arch/x86/Kconfig
kernel/sched/core.c

diff --combined arch/x86/Kconfig
index 93148e26acad5adfe2083507de29d44da4d9d3a4,a649cb686692bddbb082cacdd51589921926d8df..78592b17a259bdb1eea4bdb3cd81093dce08df8a
@@@ -208,12 -208,6 +208,12 @@@ config ARCH_HIBERNATION_POSSIBLE
  config ARCH_SUSPEND_POSSIBLE
        def_bool y
  
 +config ARCH_WANT_HUGE_PMD_SHARE
 +      def_bool y
 +
 +config ARCH_WANT_GENERAL_HUGETLB
 +      def_bool y
 +
  config ZONE_DMA32
        bool
        default X86_64
@@@ -958,10 -952,27 +958,27 @@@ config VM86
        default y
        depends on X86_32
        ---help---
-         This option is required by programs like DOSEMU to run 16-bit legacy
-         code on X86 processors. It also may be needed by software like
-         XFree86 to initialize some video cards via BIOS. Disabling this
-         option saves about 6k.
+         This option is required by programs like DOSEMU to run
+         16-bit real mode legacy code on x86 processors. It also may
+         be needed by software like XFree86 to initialize some video
+         cards via BIOS. Disabling this option saves about 6K.
+ 
+ config X86_16BIT
+       bool "Enable support for 16-bit segments" if EXPERT
+       default y
+       ---help---
+         This option is required by programs like Wine to run 16-bit
+         protected mode legacy code on x86 processors.  Disabling
+         this option saves about 300 bytes on i386, or around 6K text
+         plus 16K runtime memory on x86-64.
+ 
+ config X86_ESPFIX32
+       def_bool y
+       depends on X86_16BIT && X86_32
+ 
+ config X86_ESPFIX64
+       def_bool y
+       depends on X86_16BIT && X86_64
  
  config TOSHIBA
        tristate "Toshiba Laptop support"
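The two espfix entries added above are derived symbols: "def_bool y" combined with "depends on" means the option is enabled exactly when its dependencies hold, so X86_ESPFIX32 and X86_ESPFIX64 follow mechanically from X86_16BIT and the word size (X86_32 is simply the 32-bit build on x86). A minimal standalone sketch of that derivation — ordinary userspace C, not kernel code, just the boolean logic for the four possible builds:

/*
 * Sketch only: how the new espfix symbols follow from X86_16BIT and the
 * word size under the "def_bool y" + "depends on" rules shown above.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int x86_16bit = 0; x86_16bit <= 1; x86_16bit++) {
		for (int x86_64 = 0; x86_64 <= 1; x86_64++) {
			bool espfix32 = x86_16bit && !x86_64; /* X86_16BIT && X86_32 */
			bool espfix64 = x86_16bit && x86_64;  /* X86_16BIT && X86_64 */

			printf("X86_16BIT=%d 64BIT=%d -> X86_ESPFIX32=%d X86_ESPFIX64=%d\n",
			       x86_16bit, x86_64, espfix32, espfix64);
		}
	}
	return 0;
}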
diff --combined kernel/sched/core.c
index 6299bb47de74c270bbbd8e85d66b244c924a11b7,c771f2547bef323328759caea593bf8883a99484..f44c01b9d713a1416552e8fb372a5331ebd857d3
@@@ -1235,7 -1235,7 +1235,7 @@@ out:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                       printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
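The only change in this hunk is the rename of printk_sched() to printk_deferred(), which 3.10.52 picks up from upstream: both variants buffer the message and push the actual console output out of the current context, which is what makes the call usable from scheduler code such as select_fallback_rq(), where a plain printk() could recurse into locks the scheduler or timekeeping code already holds. A hedged sketch, not code from this tree (fallback_rs and report_lost_affinity() are made-up names), showing the same diagnostic with a per-callsite rate limit instead of the global printk_ratelimit():

/*
 * Illustrative kernel-style sketch only; it assumes the printk_deferred()
 * and ratelimit APIs present in this release.
 */
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>

static DEFINE_RATELIMIT_STATE(fallback_rs, 5 * HZ, 10);

static void report_lost_affinity(struct task_struct *p, int cpu)
{
	/* Console output is deferred, so this is usable under scheduler locks. */
	if (p->mm && __ratelimit(&fallback_rs))
		printk_deferred("process %d (%s) no longer affine to cpu%d\n",
				task_pid_nr(p), p->comm, cpu);
}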
@@@ -1407,11 -1407,7 +1407,11 @@@ void scheduler_ipi(void)
  {
        if (llist_empty(&this_rq()->wake_list)
                        && !tick_nohz_full_cpu(smp_processor_id())
 -                      && !got_nohz_idle_kick())
 +                      && !got_nohz_idle_kick()
 +#ifdef CONFIG_SCHED_HMP
 +                      && !this_rq()->wake_for_idle_pull
 +#endif
 +                      )
                return;
  
        /*
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
 +#ifdef CONFIG_SCHED_HMP
 +      else if (unlikely(this_rq()->wake_for_idle_pull))
 +              raise_softirq_irqoff(SCHED_SOFTIRQ);
 +#endif
 +
        irq_exit();
  }
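In the Linaro MP (CONFIG_SCHED_HMP) code carried by this branch, wake_for_idle_pull is the handshake for "idle pull" migrations: a CPU that wants to pull work sets the flag on the busy CPU's runqueue and sends it a reschedule IPI, and the hunk above makes scheduler_ipi() on the busy CPU raise SCHED_SOFTIRQ for that flag even when its wake list is empty and no nohz idle kick is pending. Roughly, the requesting side looks like the sketch below; only rq->wake_for_idle_pull and smp_send_reschedule() are taken from the tree, while hmp_request_idle_pull() and the surrounding details are illustrative assumptions — the real logic lives in kernel/sched/fair.c:

/*
 * Illustrative sketch of the requesting side; hmp_request_idle_pull() is a
 * made-up name, and the real HMP code does more (target selection, locking,
 * active balancing).
 */
static void hmp_request_idle_pull(int busy_cpu)
{
	struct rq *rq = cpu_rq(busy_cpu);

	rq->wake_for_idle_pull = 1;     /* consumed by scheduler_ipi() above */
	smp_send_reschedule(busy_cpu);  /* IPI -> scheduler_ipi() -> SCHED_SOFTIRQ */
}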
  
@@@ -1632,20 -1623,6 +1632,20 @@@ static void __sched_fork(struct task_struct *p)
  #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
        p->se.avg.runnable_avg_period = 0;
        p->se.avg.runnable_avg_sum = 0;
 +#ifdef CONFIG_SCHED_HMP
 +      /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 +#define LOAD_AVG_MAX 47742
 +      if (p->mm) {
 +              p->se.avg.hmp_last_up_migration = 0;
 +              p->se.avg.hmp_last_down_migration = 0;
 +              p->se.avg.load_avg_ratio = 1023;
 +              p->se.avg.load_avg_contrib =
 +                              (1023 * scale_load_down(p->se.load.weight));
 +              p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
 +              p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
 +              p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
 +      }
 +#endif
  #endif
  #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
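The HMP block above seeds a newly forked task's per-entity load tracking at its ceiling: runnable_avg_sum and runnable_avg_period both start at LOAD_AVG_MAX (47742, the limit of the decaying series used by fair.c, hence the "keep in sync" comment), and load_avg_ratio starts at the maximum 1023, so a fresh task that owns an mm immediately looks fully loaded and can begin life on a fast CPU instead of ramping up from zero. A rough arithmetic check, assuming (as in the MP load-tracking code) that the ratio is the sum/period fraction scaled to 0..1023:

/*
 * Rough arithmetic sketch only; the real ratio update lives in
 * kernel/sched/fair.c.  This just shows why sum == period == LOAD_AVG_MAX
 * corresponds to the maximum ratio of 1023 set above.
 */
#include <stdio.h>

#define LOAD_AVG_MAX 47742	/* from the hunk above; kept in sync with fair.c */

int main(void)
{
	unsigned int sum = LOAD_AVG_MAX;
	unsigned int period = LOAD_AVG_MAX;
	unsigned int ratio = (sum * 1023u) / period;	/* -> 1023, i.e. "fully busy" */

	printf("initial load_avg_ratio = %u (max 1023)\n", ratio);
	return 0;
}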
@@@ -3848,8 -3825,6 +3848,8 @@@ static struct task_struct *find_process_by_pid(pid_t pid)
        return pid ? find_task_by_vpid(pid) : current;
  }
  
 +extern struct cpumask hmp_slow_cpu_mask;
 +
  /* Actually do priority change: must hold rq lock. */
  static void
  __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
        p->normal_prio = normal_prio(p);
        /* we are holding p->pi_lock already */
        p->prio = rt_mutex_getprio(p);
 -      if (rt_prio(p->prio))
 +      if (rt_prio(p->prio)) {
                p->sched_class = &rt_sched_class;
 +#ifdef CONFIG_SCHED_HMP
 +              if (!cpumask_empty(&hmp_slow_cpu_mask))
 +                      if (cpumask_equal(&p->cpus_allowed, cpu_all_mask)) {
 +                              p->nr_cpus_allowed =
 +                                      cpumask_weight(&hmp_slow_cpu_mask);
 +                              do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
 +                      }
 +#endif
 +      }
        else
                p->sched_class = &fair_sched_class;
        set_load_weight(p);
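The effect of the hook above: when a task switches to a real-time class while its affinity is still the default cpu_all_mask, its allowed CPUs are narrowed to hmp_slow_cpu_mask (the LITTLE cluster), so RT tasks stay off the big CPUs unless they were given an explicit affinity beforehand. On a kernel built with CONFIG_SCHED_HMP and a non-empty slow-CPU mask, one would expect the following userspace check (a sketch; run as root) to report only the LITTLE CPUs after switching to SCHED_FIFO:

/*
 * Userspace sketch to observe the HMP hook above.  Assumes CONFIG_SCHED_HMP
 * and a non-empty hmp_slow_cpu_mask; on other kernels the affinity simply
 * stays unchanged.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t mask;

	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	if (sched_getaffinity(0, sizeof(mask), &mask)) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("CPUs allowed after SCHED_FIFO: %d\n", CPU_COUNT(&mask));
	return 0;
}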