Merge remote-tracking branch 'lsk/v3.10/topic/arm64-topology' into lsk-v3.10-arm64-hmp
[firefly-linux-kernel-4.4.55.git] arch/arm64/kernel/topology.c
index d450a6d3dad8364ab7c7e5c57eddc6ac33ca2690..49c94ff29479066c8004814536e447fe388ab6f3 100644
@@ -13,6 +13,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/node.h>
@@ -21,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
+#include <asm/smp_plat.h>
 #include <asm/topology.h>
 
 /*
@@ -390,6 +392,154 @@ static void update_siblings_masks(unsigned int cpuid)
        }
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
+ *  - mpidr: MPIDR[23:0] to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u32 mpidr)
+{
+       int cpu;
+
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+               if (cpu_logical_map(cpu) == mpidr)
+                       return cpu;
+       return -EINVAL;
+}
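
For context, cpu_logical_map() is populated during SMP init from the device tree, pairing each logical CPU number with its MPIDR affinity bits. A minimal usage sketch (the value 0x100, i.e. cluster 1/core 0, is purely illustrative):

        int cpu = get_logical_index(0x100);     /* cluster 1, core 0 */

        if (cpu < 0)
                pr_warn("no logical cpu with MPIDR[23:0] == 0x100\n");
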
+
+static const char * const little_cores[] = {
+       "arm,cortex-a53",
+       NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+       const char * const *lc;
+
+       for (lc = little_cores; *lc; lc++)
+               if (of_device_is_compatible(cn, *lc))
+                       return true;
+       return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+                                       struct cpumask *slow)
+{
+       struct device_node *cn = NULL;
+       int cpu;
+
+       cpumask_clear(fast);
+       cpumask_clear(slow);
+
+       /*
+        * Use the config options if they are given. This helps when
+        * testing HMP scheduling on systems without a big.LITTLE
+        * architecture.
+        */
+       if (strlen(CONFIG_HMP_FAST_CPU_MASK) &&
+           strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+               if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+                       WARN(1, "Failed to parse HMP fast cpu mask!\n");
+               if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+                       WARN(1, "Failed to parse HMP slow cpu mask!\n");
+               return;
+       }
+
+       /*
+        * Otherwise, parse the device tree: each cpu node is classified
+        * as little or big by its compatible string.
+        */
+       while ((cn = of_find_node_by_type(cn, "cpu"))) {
+               const u32 *mpidr;
+               int len;
+
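+               /*
+                * The cpu node's "reg" property is expected to hold two
+                * 32-bit cells (#address-cells = <2>); the MPIDR affinity
+                * bits live in the second cell, hence the len != 8 check
+                * and the mpidr + 1 reads below.
+                */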
+               mpidr = of_get_property(cn, "reg", &len);
+               if (!mpidr || len != 8) {
+                       pr_err("%s missing reg property\n", cn->full_name);
+                       continue;
+               }
+
+               cpu = get_logical_index(be32_to_cpup(mpidr + 1));
+               if (cpu == -EINVAL) {
+                       pr_err("couldn't get logical index for mpidr %x\n",
+                              be32_to_cpup(mpidr + 1));
+                       /* drop the reference taken by of_find_node_by_type() */
+                       of_node_put(cn);
+                       break;
+               }
+
+               if (is_little_cpu(cn))
+                       cpumask_set_cpu(cpu, slow);
+               else
+                       cpumask_set_cpu(cpu, fast);
+       }
+
+       if (!cpumask_empty(fast) && !cpumask_empty(slow))
+               return;
+
+       /*
+        * We didn't find both big and little cores, so mark all cores
+        * as fast; this keeps the system running, with all cores
+        * treated equally.
+        */
+       cpumask_setall(fast);
+       cpumask_clear(slow);
+}
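
A minimal sketch of how the resulting masks could be dumped for debugging, assuming the v3.10-era cpulist_scnprintf() helper (the buffer size and message text are arbitrary):

        struct cpumask fast, slow;
        char buf[64];

        arch_get_fast_and_slow_cpus(&fast, &slow);
        cpulist_scnprintf(buf, sizeof(buf), &fast);
        pr_info("HMP: fast cpus: %s\n", buf);
        cpulist_scnprintf(buf, sizeof(buf), &slow);
        pr_info("HMP: slow cpus: %s\n", buf);
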
+
+struct cpumask hmp_slow_cpu_mask;
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+       struct cpumask hmp_fast_cpu_mask;
+       struct hmp_domain *domain;
+
+       arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+       /*
+        * Initialize hmp_domains. The list must be ordered by compute
+        * capacity, fastest domain at the head, so the slow domain is
+        * added first and the fast domain is prepended after it.
+        */
+       if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+               domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+               if (!domain)
+                       return;
+               cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+               cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+               list_add(&domain->hmp_domains, hmp_domains_list);
+       }
+       domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return;
+       cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+       cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+       list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
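
For reference, a consumer on the scheduler side walks the list built above with the standard list iterators. A rough sketch (print_hmp_domains() is a hypothetical debug helper; the hmp_domain fields are the ones used above):

        static void print_hmp_domains(struct list_head *hmp_domains_list)
        {
                struct hmp_domain *d;
                char buf[64];

                /* The fast domain sits at the head: it was list_add()ed last. */
                list_for_each_entry(d, hmp_domains_list, hmp_domains) {
                        cpulist_scnprintf(buf, sizeof(buf), &d->possible_cpus);
                        pr_info("hmp_domain: possible cpus %s\n", buf);
                }
        }
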
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id:         cluster HW identifier
+ * @cluster_mask:      the cpumask location to be initialized, modified by the
+ *                     function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+       int cpu;
+
+       if (!cluster_mask)
+               return -EINVAL;
+
+       for_each_online_cpu(cpu) {
+               if (socket_id == topology_physical_package_id(cpu)) {
+                       cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
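
A usage sketch (cluster id 0 is illustrative; per the comment above, the mask is only written on success):

        cpumask_t cluster_cpus;

        if (!cluster_to_logical_mask(0, &cluster_cpus))
                pr_info("cluster 0: %u cpus\n",
                        cpumask_weight(&cluster_cpus));
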
+
 void store_cpu_topology(unsigned int cpuid)
 {
        update_siblings_masks(cpuid);