/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>
#include <asm/smp_plat.h>

/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler
 * can take it into account during load balancing. A per cpu structure is
 * preferred because each CPU updates its own cpu_power field during load
 * balancing, except for idle cores: one idle core is selected to run
 * rebalance_domains for all idle cores, and cpu_power can be updated
 * during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

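/*
 * cpu_scale is expressed in SCHED_POWER_SCALE units: SCHED_POWER_SCALE
 * (1024) denotes a CPU of 'average' capacity. reset_cpu_power() initialises
 * every CPU to this default and update_cpu_power() later rescales it using
 * the efficiency and clock-frequency information parsed from the DT.
 */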
unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
        per_cpu(cpu_scale, cpu) = power;
}

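/*
 * get_cpu_for_node - map a cpu-map node's "cpu" phandle to a logical CPU.
 * Returns the logical CPU number whose device node matches the phandle
 * target, or -1 if the phandle is missing or does not refer to a possible
 * CPU.
 */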
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        for_each_possible_cpu(cpu) {
                if (of_get_cpu_node(cpu, NULL) == cpu_node) {
                        of_node_put(cpu_node);
                        return cpu;
                }
        }

        pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

        of_node_put(cpu_node);
        return -1;
}

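/*
 * parse_core - parse a single coreN node of the cpu-map.
 * A leaf core carries a "cpu" phandle directly; an SMT core instead has
 * threadN children, each with its own "cpu" phandle. A core node that has
 * both is rejected as malformed.
 */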
static int __init parse_core(struct device_node *core, int cluster_id,
                             int core_id)
{
        char name[10];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].cluster_id = cluster_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%s: Can't get CPU for thread\n",
                                       t->full_name);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%s: Core has both threads and CPU\n",
                               core->full_name);
                        return -EINVAL;
                }

                cpu_topology[cpu].cluster_id = cluster_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
                return -EINVAL;
        }

        return 0;
}

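/*
 * parse_cluster - recursively parse a clusterN node of the cpu-map.
 * Nested clusters are flattened into a single cluster_id space; leaf
 * clusters contain coreN children which are handed to parse_core().
 *
 * For illustration only (the node names below are made up), this walks a
 * cpu-map shaped like:
 *
 *      cpu-map {
 *              cluster0 {
 *                      core0 { cpu = <&cpu0>; };
 *                      core1 { cpu = <&cpu1>; };
 *              };
 *              cluster1 {
 *                      core0 { cpu = <&cpu2>; };
 *              };
 *      };
 */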
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[10];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int cluster_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%s: cpu-map children should be clusters\n",
                                       c->full_name);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, cluster_id, core_id++);
                        } else {
                                pr_err("%s: Non-leaf cluster with core %s\n",
                                       cluster->full_name, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%s: empty cluster\n", cluster->full_name);

        if (leaf)
                cluster_id++;

        return 0;
}

struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor type.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table use the default
 * SCHED_POWER_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        { "arm,cortex-a57", 3891 },
        { "arm,cortex-a53", 2048 },
        { NULL, },
};
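/*
 * Only the ratio between these entries matters: with the values above a
 * Cortex-A57 is treated as roughly 1.9x more efficient per clock than a
 * Cortex-A53.
 */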

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;

/*
 * parse_dt_topology() below walks the cpu-map to establish each CPU's
 * cluster/core/thread IDs. parse_dt_cpu_power() then iterates over all CPU
 * descriptors in the DT and computes each CPU's efficiency (as per
 * table_efficiency), along with a middle efficiency as close as possible to
 * (max{eff_i} + min{eff_i}) / 2. This is later used to scale the cpu_power
 * field such that an 'average' CPU is of middle power. Also see the
 * comments near table_efficiency[] and update_cpu_power().
 */
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu) {
                if (cpu_topology[cpu].cluster_id == -1) {
                        pr_err("CPU%d: No topology information specified\n",
                               cpu);
                        ret = -EINVAL;
                }
        }

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}

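/*
 * Worked example with assumed clock rates (illustrative only, not taken
 * from any particular DT): a Cortex-A57 at 1.8 GHz gives
 *      capacity = (1800000000 >> 20) * 3891 = 1716 * 3891 = 6676956
 * and a Cortex-A53 at 1.4 GHz gives
 *      capacity = (1400000000 >> 20) * 2048 = 1335 * 2048 = 2734080.
 * Since 4 * max < 3 * (max + min), middle_capacity becomes
 *      (6676956 + 2734080) >> 11 = 4595,
 * so update_cpu_power() ends up setting cpu_power to ~1453 for the A57 and
 * ~595 for the A53, straddling SCHED_POWER_SCALE (1024) as intended.
 */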
static void __init parse_dt_cpu_power(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* Too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("Missing device node for CPU %d\n", cpu);
                        continue;
                }

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL) {
                        pr_warn("%s: Unknown CPU type\n", cn->full_name);
                        continue;
                }

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s: Missing clock-frequency property\n",
                                cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_POWER_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
        if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_POWER_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_POWER_SHIFT-1)) + 1;
}

/*
 * Look up the custom capacity of a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system but
 * the function returns directly for homogeneous (SMP) systems.
 */
static void update_cpu_power(unsigned int cpu)
{
        if (!cpu_capacity(cpu))
                return;

        set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_power %lu\n",
                cpu, arch_scale_freq_power(NULL, cpu));
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

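/*
 * update_siblings_masks - rebuild core_sibling and thread_sibling for
 * @cpuid. core_sibling covers every CPU in the same cluster; thread_sibling
 * covers CPUs sharing both cluster_id and core_id. The masks are kept
 * symmetric, so the corresponding entries of the other CPUs are updated too.
 */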
static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        if (cpuid_topo->cluster_id == -1) {
                /*
                 * DT does not contain topology information for this cpu.
                 */
                pr_debug("CPU%u: No topology information configured\n", cpuid);
                return;
        }

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

#ifdef CONFIG_SCHED_HMP

/*
 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
 *  - mpidr: MPIDR[23:0] to be used for the look-up
 *
 * Returns the cpu logical index or -EINVAL on look-up error
 */
static inline int get_logical_index(u32 mpidr)
{
        int cpu;
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpu_logical_map(cpu) == mpidr)
                        return cpu;
        return -EINVAL;
}

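/*
 * CPU types classified as "little" for HMP scheduling. Any CPU whose node
 * does not match one of these compatible strings is treated as a big core
 * by arch_get_fast_and_slow_cpus() below.
 */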
static const char * const little_cores[] = {
        "arm,cortex-a53",
        NULL,
};

static bool is_little_cpu(struct device_node *cn)
{
        const char * const *lc;
        for (lc = little_cores; *lc; lc++)
                if (of_device_is_compatible(cn, *lc))
                        return true;
        return false;
}

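/*
 * arch_get_fast_and_slow_cpus - populate the HMP fast and slow cpumasks.
 * Preference order: the CONFIG_HMP_FAST/SLOW_CPU_MASK strings if both are
 * non-empty, then DT classification via little_cores[], and finally an
 * all-fast fallback so the system keeps running even when no big.LITTLE
 * split can be determined.
 */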
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
                                        struct cpumask *slow)
{
        struct device_node *cn = NULL;
        int cpu;

        cpumask_clear(fast);
        cpumask_clear(slow);

        /*
         * Use the config options if they are given. This helps testing
         * HMP scheduling on systems without a big.LITTLE architecture.
         */
        if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
                if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
                        WARN(1, "Failed to parse HMP fast cpu mask!\n");
                if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
                        WARN(1, "Failed to parse HMP slow cpu mask!\n");
                return;
        }

        /*
         * Else, parse device tree for little cores.
         */
        while ((cn = of_find_node_by_type(cn, "cpu"))) {

                const u32 *mpidr;
                int len;

                mpidr = of_get_property(cn, "reg", &len);
                if (!mpidr || len != 8) {
                        pr_err("%s missing reg property\n", cn->full_name);
                        continue;
                }

                cpu = get_logical_index(be32_to_cpup(mpidr+1));
                if (cpu == -EINVAL) {
                        pr_err("couldn't get logical index for mpidr %x\n",
                                                        be32_to_cpup(mpidr+1));
                        break;
                }

                if (is_little_cpu(cn))
                        cpumask_set_cpu(cpu, slow);
                else
                        cpumask_set_cpu(cpu, fast);
        }

        if (!cpumask_empty(fast) && !cpumask_empty(slow))
                return;

        /*
         * We didn't find both big and little cores so let's call all cores
         * fast as this will keep the system running, with all cores being
         * treated equal.
         */
        cpumask_setall(fast);
        cpumask_clear(slow);
}

struct cpumask hmp_slow_cpu_mask;

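/*
 * Build the list of HMP domains consumed by the scheduler. When both a fast
 * and a slow mask are available, the slow domain is added first and the
 * fast domain is then inserted ahead of it, so the fastest domain always
 * sits at the head of the list.
 */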
void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
        struct cpumask hmp_fast_cpu_mask;
        struct hmp_domain *domain;

        arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

        /*
         * Initialize hmp_domains
         * Must be ordered with respect to compute capacity.
         * Fastest domain at head of list.
         */
        if (!cpumask_empty(&hmp_slow_cpu_mask)) {
                domain = (struct hmp_domain *)
                        kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
                cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
                cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
                list_add(&domain->hmp_domains, hmp_domains_list);
        }
        domain = (struct hmp_domain *)
                kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
        cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
        cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
        list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */

/*
 * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
 * @socket_id:          cluster HW identifier
 * @cluster_mask:       the cpumask location to be initialized, modified by the
 *                      function only if return value == 0
 *
 * Return:
 *
 * 0 on success
 * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
{
        int cpu;

        if (!cluster_mask)
                return -EINVAL;

        for_each_online_cpu(cpu) {
                if (socket_id == topology_physical_package_id(cpu)) {
                        cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
                        return 0;
                }
        }

        return -EINVAL;
}

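/*
 * store_cpu_topology - per-CPU hook called as each CPU is brought up. The
 * cluster/core/thread IDs themselves come from the DT via
 * parse_dt_topology(), so only the sibling masks and cpu_power need to be
 * refreshed here.
 */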
void store_cpu_topology(unsigned int cpuid)
{
        update_siblings_masks(cpuid);
        update_cpu_power(cpuid);
}

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = 0;
                cpu_topo->cluster_id = -1;

                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
        }
}

static void __init reset_cpu_power(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                set_power_scale(cpu, SCHED_POWER_SCALE);
}

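/*
 * init_cpu_topology - boot-time entry point: reset the topology table and
 * per-CPU power to their defaults, then fill both in from the device tree.
 */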
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (parse_dt_topology())
                reset_cpu_topology();

        reset_cpu_power();
        parse_dt_cpu_power();
}