Merge remote-tracking branch 'lsk/v3.10/topic/arm64-topology' into lsk-v3.10-arm64-hmp
author		Mark Brown <broonie@linaro.org>	Fri, 9 May 2014 21:09:24 +0000 (22:09 +0100)
committer	Mark Brown <broonie@linaro.org>	Fri, 9 May 2014 21:27:03 +0000 (22:27 +0100)
Conflicts:
arch/arm64/Kconfig
arch/arm64/include/asm/topology.h
arch/arm64/kernel/smp.c
arch/arm64/kernel/topology.c

arch/arm64/Kconfig                   Simple merge
arch/arm64/include/asm/topology.h    Simple merge
arch/arm64/kernel/Makefile           Simple merge
arch/arm64/kernel/smp.c              Simple merge
arch/arm64/kernel/topology.c         combined diff below

index 971064a0c6b42a8f99748e8bb0ba2696417d547f,d450a6d3dad8364ab7c7e5c57eddc6ac33ca2690..49c94ff29479066c8004814536e447fe388ab6f3
  #include <linux/sched.h>
  #include <linux/slab.h>
  
- #include <asm/cputype.h>
 +#include <asm/smp_plat.h>
  #include <asm/topology.h>
  
- /*
-  * cpu power scale management
-  */
  /*
   * cpu power table
   * This per cpu data structure describes the relative capacity of each core.
@@@ -347,175 -388,15 +390,163 @@@ static void update_siblings_masks(unsig
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
-       smp_wmb();
- }
- void store_cpu_topology(unsigned int cpuid)
- {
-       struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
-       /* DT should have been parsed by the time we get here */
-       if (cpuid_topo->core_id == -1)
-               pr_info("CPU%u: No topology information configured\n", cpuid);
-       else
-               update_siblings_masks(cpuid);
-       update_cpu_power(cpuid);
  }
  
- /*
-  * init_cpu_topology is called at boot when only one cpu is running
-  * which prevent simultaneous write access to cpu_topology array
-  */
- void __init init_cpu_topology(void)
 +#ifdef CONFIG_SCHED_HMP
 +
 +/*
 + * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
 + *  - mpidr: MPIDR[23:0] to be used for the look-up
 + *
 + * Returns the cpu logical index or -EINVAL on look-up error
 + */
 +static inline int get_logical_index(u32 mpidr)
 +{
 +      int cpu;
 +      for (cpu = 0; cpu < nr_cpu_ids; cpu++)
 +              if (cpu_logical_map(cpu) == mpidr)
 +                      return cpu;
 +      return -EINVAL;
 +}
 +
 +static const char * const little_cores[] = {
 +      "arm,cortex-a53",
 +      NULL,
 +};
 +
 +static bool is_little_cpu(struct device_node *cn)
 +{
 +      const char * const *lc;
 +      for (lc = little_cores; *lc; lc++)
 +              if (of_device_is_compatible(cn, *lc))
 +                      return true;
 +      return false;
 +}
 +
 +void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
 +                                      struct cpumask *slow)
 +{
 +      struct device_node *cn = NULL;
 +      int cpu;
 +
 +      cpumask_clear(fast);
 +      cpumask_clear(slow);
 +
 +      /*
 +       * Use the config options if they are given. This helps testing
 +       * HMP scheduling on systems without a big.LITTLE architecture.
 +       */
 +      if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
 +              if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
 +                      WARN(1, "Failed to parse HMP fast cpu mask!\n");
 +              if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
 +                      WARN(1, "Failed to parse HMP slow cpu mask!\n");
 +              return;
 +      }
 +
 +      /*
 +       * Else, parse device tree for little cores.
 +       */
 +      while ((cn = of_find_node_by_type(cn, "cpu"))) {
 +
 +              const u32 *mpidr;
 +              int len;
 +
 +              mpidr = of_get_property(cn, "reg", &len);
 +              if (!mpidr || len != 8) {
 +                      pr_err("%s missing reg property\n", cn->full_name);
 +                      continue;
 +              }
 +
 +              cpu = get_logical_index(be32_to_cpup(mpidr+1));
 +              if (cpu == -EINVAL) {
 +                      pr_err("couldn't get logical index for mpidr %x\n",
 +                                                      be32_to_cpup(mpidr+1));
 +                      break;
 +              }
 +
 +              if (is_little_cpu(cn))
 +                      cpumask_set_cpu(cpu, slow);
 +              else
 +                      cpumask_set_cpu(cpu, fast);
 +      }
 +
 +      if (!cpumask_empty(fast) && !cpumask_empty(slow))
 +              return;
 +
 +      /*
 +       * We didn't find both big and little cores so let's call all cores
 +       * fast as this will keep the system running, with all cores being
 +       * treated equal.
 +       */
 +      cpumask_setall(fast);
 +      cpumask_clear(slow);
 +}
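/*
 * Illustrative sketch, not part of the merge above: on arm64 a cpu
 * node's "reg" property is two 32-bit cells carrying the 64-bit MPIDR
 * affinity value, which is why the loop above requires len == 8 and
 * reads the second cell (mpidr + 1) before the logical-index look-up.
 * The helper name below is hypothetical.
 */
static int __init of_cpu_to_logical(struct device_node *cn)
{
        const __be32 *reg;
        int len;

        reg = of_get_property(cn, "reg", &len);
        if (!reg || len != 8)
                return -EINVAL;         /* malformed or missing reg */

        /* low 32 bits of the reg value hold MPIDR[23:0] */
        return get_logical_index(be32_to_cpup(reg + 1));
}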
 +
 +struct cpumask hmp_slow_cpu_mask;
 +
 +void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
 +{
 +      struct cpumask hmp_fast_cpu_mask;
 +      struct hmp_domain *domain;
 +
 +      arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
 +
 +      /*
 +       * Initialize hmp_domains
 +       * Must be ordered with respect to compute capacity.
 +       * Fastest domain at head of list.
 +       */
 +      if(!cpumask_empty(&hmp_slow_cpu_mask)) {
 +              domain = (struct hmp_domain *)
 +                      kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
 +              cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
 +              cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
 +              list_add(&domain->hmp_domains, hmp_domains_list);
 +      }
 +      domain = (struct hmp_domain *)
 +              kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
 +      cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
 +      cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
 +      list_add(&domain->hmp_domains, hmp_domains_list);
 +}
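/*
 * Illustrative sketch, not part of the merge above: the fast domain is
 * list_add()ed last, so it sits at the head of hmp_domains_list and a
 * plain list walk visits domains from highest to lowest compute
 * capacity.  Field names match struct hmp_domain as used above; the
 * helper name is hypothetical.
 */
static void hmp_dump_domains(struct list_head *hmp_domains_list)
{
        struct hmp_domain *domain;

        list_for_each_entry(domain, hmp_domains_list, hmp_domains)
                pr_info("hmp: domain with %u possible cpus (%u online)\n",
                        cpumask_weight(&domain->possible_cpus),
                        cpumask_weight(&domain->cpus));
}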
 +#endif /* CONFIG_SCHED_HMP */
 +
 +/*
 + * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
 + * @socket_id:                cluster HW identifier
 + * @cluster_mask:     the cpumask location to be initialized, modified by the
 + *                    function only if return value == 0
 + *
 + * Return:
 + *
 + * 0 on success
 + * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 + */
 +int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
 +{
 +      int cpu;
 +
 +      if (!cluster_mask)
 +              return -EINVAL;
 +
 +      for_each_online_cpu(cpu) {
 +              if (socket_id == topology_physical_package_id(cpu)) {
 +                      cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
 +                      return 0;
 +              }
 +      }
 +
 +      return -EINVAL;
 +}
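/*
 * Illustrative sketch, not part of the merge above: a hypothetical
 * caller of cluster_to_logical_mask().  Per the kerneldoc comment,
 * cluster_mask is only written on a 0 return, so the result must be
 * checked before the mask is used.
 */
static int count_cpus_in_cluster(unsigned int socket_id)
{
        cpumask_t mask;

        if (cluster_to_logical_mask(socket_id, &mask))
                return -EINVAL;         /* no online CPU in that cluster */

        return cpumask_weight(&mask);
}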
 +
+ void store_cpu_topology(unsigned int cpuid)
+ {
+       update_siblings_masks(cpuid);
+       update_cpu_power(cpuid);
+ }
+ static void __init reset_cpu_topology(void)
  {
        unsigned int cpu;