/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/smp_plat.h>
#include <asm/topology.h>

/*
 * cpu power scale management
 */

/*
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler
 * can take it into account during load balancing. A per cpu structure is
 * preferred because each CPU updates its own cpu_power field during load
 * balancing, except for idle cores: one idle core is selected to run
 * rebalance_domains for all idle cores, and cpu_power can be updated
 * during that sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
	per_cpu(cpu_scale, cpu) = power;
}

#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};

/*
 * Table of relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *	0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_POWER_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{ "arm,cortex-a57", 3891 },
	{ "arm,cortex-a53", 2048 },
	{ NULL, },
};
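
/*
 * Illustrative bound, not from the original source (assuming
 * SCHED_POWER_SCALE == 1024): cpu_scale must stay below
 * 3 * 1024 / 2 = 1536 so that DIV_ROUND_CLOSEST(cpu_scale,
 * SCHED_POWER_SCALE) never exceeds 1. With the values above, a
 * Cortex-A57 scaled against an A57/A53 'average' lands around 1342,
 * inside that window.
 */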

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)	__cpu_capacity[cpu]

static unsigned long middle_capacity = 1;
static int cluster_id;

static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node) {
		pr_crit("%s: Unable to parse CPU phandle\n", node->full_name);
		return -1;
	}

	for_each_possible_cpu(cpu)
		if (of_get_cpu_node(cpu, NULL) == cpu_node)
			return cpu;

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
	return -1;
}

static void __init parse_core(struct device_node *core, int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				pr_info("CPU%d: socket %d core %d thread %d\n",
					cpu, cluster_id, core_id, i);
				cpu_topology[cpu].socket_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return;
		}

		pr_info("CPU%d: socket %d core %d\n",
			cpu, cluster_id, core_id);
		cpu_topology[cpu].socket_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
	}
}

static void __init parse_cluster(struct device_node *cluster)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			parse_cluster(c);
			of_node_put(c);
			leaf = false;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (leaf)
				parse_core(c, core_id++);
			else
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);

			of_node_put(c);
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;
}
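
/*
 * For reference, a minimal cpu-map that the parser above accepts (a
 * sketch following the DT topology binding, not taken from this file):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&CPU0>; };
 *			core1 { cpu = <&CPU1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&CPU2>; };
 *		};
 *	};
 *
 * Nested clusterN nodes are flattened, and only leaf clusters may carry
 * coreN children; each leaf cluster gets its own socket_id.
 */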

/*
 * Iterate all CPUs' descriptors in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency,
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static void __init parse_dt_topology(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	unsigned long min_capacity = (unsigned long)(-1);
	unsigned long max_capacity = 0;
	unsigned long capacity = 0;
	int alloc_size, cpu;

	alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
	__cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return;
	}

	/*
	 * If topology is provided as a cpu-map it is essentially a root
	 * cluster with restricted subnodes.
	 */
	cn = of_find_node_by_name(cn, "cpu-map");
	if (cn)
		parse_cluster(cn);

	for_each_possible_cpu(cpu) {
		const u32 *rate;
		int len;

		/* Too early to use cpu->of_node */
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn) {
			pr_err("Missing device node for CPU %d\n", cpu);
			continue;
		}

		/* check if the cpu is marked as "disabled", if so ignore */
		if (!of_device_is_available(cn))
			continue;

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL) {
			pr_warn("%s: Unknown CPU type\n", cn->full_name);
			continue;
		}

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s: Missing clock-frequency property\n",
			       cn->full_name);
			continue;
		}

		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
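
		/*
		 * Illustrative note: a 1 GHz clock gives 1000000000 >> 20
		 * = 953, so the shift compresses clock-frequency to a
		 * small integer whose product with a 20-bit efficiency
		 * still fits comfortably in 32 bits.
		 */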

		/* Save min capacity of the system */
		if (capacity < min_capacity)
			min_capacity = capacity;

		/* Save max capacity of the system */
		if (capacity > max_capacity)
			max_capacity = capacity;

		cpu_capacity(cpu) = capacity;
	}

	/*
	 * If min and max capacities are equal we bypass the update of the
	 * cpu_scale because all CPUs have the same capacity. Otherwise, we
	 * compute a middle_capacity factor that will ensure that the
	 * capacity of an 'average' CPU of the system will be as close as
	 * possible to SCHED_POWER_SCALE, which is the default value, but
	 * with the constraint explained near table_efficiency[].
	 */
	if (min_capacity == max_capacity)
		return;
	else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
		middle_capacity = (min_capacity + max_capacity)
				>> (SCHED_POWER_SHIFT + 1);
	else
		middle_capacity = ((max_capacity / 3)
				>> (SCHED_POWER_SHIFT - 1)) + 1;
}
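
/*
 * Worked example (illustrative numbers, assuming SCHED_POWER_SHIFT == 10
 * and 1 GHz clocks): an A57/A53 pair gives max_capacity = 953 * 3891 =
 * 3708123 and min_capacity = 953 * 2048 = 1951744. Since 4 * max_capacity
 * < 3 * (max_capacity + min_capacity), middle_capacity = (1951744 +
 * 3708123) >> 11 = 2763, and update_cpu_power() below yields a cpu_power
 * of 3708123 / 2763 = 1342 for the A57 and 1951744 / 2763 = 706 for the
 * A53.
 */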

/*
 * Look for a custom capacity of a CPU in the cpu_topo_data table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system
 * but the function returns directly for an SMP system.
 */
static void update_cpu_power(unsigned int cpu)
{
	if (!cpu_capacity(cpu))
		return;

	set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);

	pr_info("CPU%u: update cpu_power %lu\n",
		cpu, arch_scale_freq_power(NULL, cpu));
}

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_power(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
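
/*
 * Example of the resulting masks (illustrative): with CPUs 0-1 in socket
 * 0, CPUs 2-3 in socket 1 and no SMT, core_sibling of CPU0 ends up as
 * {0,1} while its thread_sibling stays {0}; CPUs 2 and 3 pair up the
 * same way within socket 1.
 */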

void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];

	/* DT should have been parsed by the time we get here */
	if (cpuid_topo->core_id == -1)
		pr_info("CPU%u: No topology information configured\n", cpuid);
	else
		update_siblings_masks(cpuid);

	update_cpu_power(cpuid);
}

#ifdef CONFIG_SCHED_HMP

/*
 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
 *  - mpidr: MPIDR[23:0] to be used for the look-up
 *
 * Returns the cpu logical index or -EINVAL on look-up error
 */
static inline int get_logical_index(u32 mpidr)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_logical_map(cpu) == mpidr)
			return cpu;
	return -EINVAL;
}

static const char * const little_cores[] = {
	"arm,cortex-a53",
	NULL,
};

static bool is_little_cpu(struct device_node *cn)
{
	const char * const *lc;

	for (lc = little_cores; *lc; lc++)
		if (of_device_is_compatible(cn, *lc))
			return true;
	return false;
}

void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Else, parse the device tree for little cores.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 8) {
			pr_err("%s missing reg property\n", cn->full_name);
			continue;
		}

		cpu = get_logical_index(be32_to_cpup(mpidr + 1));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
			       be32_to_cpup(mpidr + 1));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores so let's call all cores
	 * fast as this will keep the system running, with all cores being
	 * treated equal.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}
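
/*
 * For example (hypothetical values, not from this file): building with
 * CONFIG_HMP_FAST_CPU_MASK="0-1" and CONFIG_HMP_SLOW_CPU_MASK="2-3"
 * exercises the HMP paths on a four-core SMP board; leaving both strings
 * empty falls back to the device-tree parse above.
 */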

struct cpumask hmp_slow_cpu_mask;

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct cpumask hmp_fast_cpu_mask;
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */

/**
 * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
 * @socket_id:		cluster HW identifier
 * @cluster_mask:	the cpumask location to be initialized, modified by the
 *			function only if return value == 0
 *
 * Return:
 *
 * 0 on success
 * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
{
	int cpu;

	if (!cluster_mask)
		return -EINVAL;

	for_each_online_cpu(cpu) {
		if (socket_id == topology_physical_package_id(cpu)) {
			cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and power */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;
		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);

		set_power_scale(cpu, SCHED_POWER_SCALE);
	}
	smp_wmb();

	parse_dt_topology();
}