Merge remote-tracking branch 'lsk/v3.10/topic/gator' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / arch/arm64/kernel/topology.c
/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot, in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>
#include <asm/smp_plat.h>


/*
 * cpu power table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_power field so the scheduler can
 * take this difference into account during load balancing. A per cpu
 * structure is preferred because each CPU updates its own cpu_power field
 * during load balancing except for idle cores. One idle core is selected to
 * run the rebalance_domains for all idle cores and the cpu_power can be
 * updated during this sequence.
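 * The default cpu_scale value is SCHED_POWER_SCALE, set for every CPU by
 * reset_cpu_power(); it is only adjusted when the DT supplies the capacity
 * information parsed in parse_dt_cpu_power().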
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_power_scale(unsigned int cpu, unsigned long power)
{
        per_cpu(cpu_scale, cpu) = power;
}

static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        for_each_possible_cpu(cpu) {
                if (of_get_cpu_node(cpu, NULL) == cpu_node) {
                        of_node_put(cpu_node);
                        return cpu;
                }
        }

        pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

        of_node_put(cpu_node);
        return -1;
}

static int __init parse_core(struct device_node *core, int cluster_id,
                             int core_id)
{
        char name[10];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].cluster_id = cluster_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%s: Can't get CPU for thread\n",
                                       t->full_name);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%s: Core has both threads and CPU\n",
                               core->full_name);
                        return -EINVAL;
                }

                cpu_topology[cpu].cluster_id = cluster_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
                return -EINVAL;
        }

        return 0;
}

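/*
 * For illustration only: parse_cluster() below expects the DT cpu-map to be
 * built from "clusterN", "coreN" and (on SMT systems) "threadN" nodes, each
 * leaf pointing at a cpu node through a "cpu" phandle. A two-cluster
 * big.LITTLE system might therefore describe its topology roughly as:
 *
 *      cpu-map {
 *              cluster0 {
 *                      core0 { cpu = <&CPU0>; };
 *                      core1 { cpu = <&CPU1>; };
 *              };
 *              cluster1 {
 *                      core0 { cpu = <&CPU2>; };
 *                      core1 { cpu = <&CPU3>; };
 *              };
 *      };
 *
 * The CPU0..CPU3 labels here stand in for the actual cpu node phandles.
 */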
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[10];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int cluster_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%s: cpu-map children should be clusters\n",
                                       c->full_name);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, cluster_id, core_id++);
                        } else {
                                pr_err("%s: Non-leaf cluster with core %s\n",
                                       cluster->full_name, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%s: empty cluster\n", cluster->full_name);

        if (leaf)
                cluster_id++;

        return 0;
}

struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of the relative efficiency of each processor type.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_POWER_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not listed in the table use the default
 * SCHED_POWER_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        { "arm,cortex-a57", 3891 },
        { "arm,cortex-a53", 2048 },
        { NULL, },
};

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;

/*
 * Iterate over all CPU descriptors in the DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency, as close
 * as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_power field such that an
 * 'average' CPU is of middle power. Also see the comments near
 * table_efficiency[] and update_cpu_power().
 */
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided, cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu) {
                if (cpu_topology[cpu].cluster_id == -1) {
                        pr_err("CPU%d: No topology information specified\n",
                               cpu);
                        ret = -EINVAL;
                }
        }

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}

static void __init parse_dt_cpu_power(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* Too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("Missing device node for CPU %d\n", cpu);
                        continue;
                }

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL) {
                        pr_warn("%s: Unknown CPU type\n", cn->full_name);
                        continue;
                }

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s: Missing clock-frequency property\n",
                                cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /* If min and max capacities are equal we bypass the update of the
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_POWER_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
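        /*
         * Worked example (illustrative figures only, assuming
         * SCHED_POWER_SHIFT is 10): with Cortex-A57 cores at 1.8 GHz and
         * Cortex-A53 cores at 1.4 GHz, the loop above gives capacities of
         * (1800000000 >> 20) * 3891 = 6676956 and
         * (1400000000 >> 20) * 2048 = 2734080. Here
         * 4 * max_capacity < 3 * (max_capacity + min_capacity), so
         * middle_capacity = (2734080 + 6676956) >> 11 = 4595, and
         * update_cpu_power() later sets cpu_scale to about 1453 for the A57s
         * and 595 for the A53s, straddling SCHED_POWER_SCALE (1024).
         */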
        if (min_capacity == max_capacity)
                return;
        else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_POWER_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_POWER_SHIFT-1)) + 1;
}

/*
 * Look up the custom capacity of a CPU in the __cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but
 * the function returns immediately for SMP systems.
 */
static void update_cpu_power(unsigned int cpu)
{
        if (!cpu_capacity(cpu))
                return;

        set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_power %lu\n",
                cpu, arch_scale_freq_power(NULL, cpu));
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        if (cpuid_topo->cluster_id == -1) {
                /*
                 * DT does not contain topology information for this cpu.
                 */
                pr_debug("CPU%u: No topology information configured\n", cpuid);
                return;
        }

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

#ifdef CONFIG_SCHED_HMP

/*
 * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
 *  - mpidr: MPIDR[23:0] to be used for the look-up
 *
 * Returns the cpu logical index or -EINVAL on look-up error
 */
static inline int get_logical_index(u32 mpidr)
{
        int cpu;
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                if (cpu_logical_map(cpu) == mpidr)
                        return cpu;
        return -EINVAL;
}

static const char * const little_cores[] = {
        "arm,cortex-a53",
        NULL,
};

static bool is_little_cpu(struct device_node *cn)
{
        const char * const *lc;
        for (lc = little_cores; *lc; lc++)
                if (of_device_is_compatible(cn, *lc))
                        return true;
        return false;
}

void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
                                        struct cpumask *slow)
{
        struct device_node *cn = NULL;
        int cpu;

        cpumask_clear(fast);
        cpumask_clear(slow);

        /*
         * Use the config options if they are given. This helps testing
         * HMP scheduling on systems without a big.LITTLE architecture.
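         * The masks use the kernel's cpulist format, e.g. "0-3" or "0,2,4-7".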
         */
        if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
                if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
                        WARN(1, "Failed to parse HMP fast cpu mask!\n");
                if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
                        WARN(1, "Failed to parse HMP slow cpu mask!\n");
                return;
        }

        /*
         * Else, parse the device tree for little cores.
         */
        while ((cn = of_find_node_by_type(cn, "cpu"))) {

                const u32 *mpidr;
                int len;

                mpidr = of_get_property(cn, "reg", &len);
                if (!mpidr || len != 8) {
                        pr_err("%s missing reg property\n", cn->full_name);
                        continue;
                }

                cpu = get_logical_index(be32_to_cpup(mpidr+1));
                if (cpu == -EINVAL) {
                        pr_err("couldn't get logical index for mpidr %x\n",
                                                        be32_to_cpup(mpidr+1));
                        break;
                }

                if (is_little_cpu(cn))
                        cpumask_set_cpu(cpu, slow);
                else
                        cpumask_set_cpu(cpu, fast);
        }

        if (!cpumask_empty(fast) && !cpumask_empty(slow))
                return;

        /*
         * We didn't find both big and little cores, so let's call all cores
         * fast; this will keep the system running, with all cores being
         * treated equally.
         */
        cpumask_setall(fast);
        cpumask_clear(slow);
}

struct cpumask hmp_slow_cpu_mask;

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
        struct cpumask hmp_fast_cpu_mask;
        struct hmp_domain *domain;

        arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

        /*
         * Initialize hmp_domains
         * Must be ordered with respect to compute capacity.
         * Fastest domain at head of list.
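         * (list_add() prepends, so the slow domain is added first and the
         * fast domain last, which leaves the fastest at the head.)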
         */
        if (!cpumask_empty(&hmp_slow_cpu_mask)) {
                domain = (struct hmp_domain *)
                        kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
                cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
                cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
                list_add(&domain->hmp_domains, hmp_domains_list);
        }
        domain = (struct hmp_domain *)
                kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
        cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
        cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
        list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */

/*
 * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
 * @socket_id:          cluster HW identifier
 * @cluster_mask:       the cpumask location to be initialized, modified by the
 *                      function only if return value == 0
 *
 * Return:
 *
 * 0 on success
 * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
 */
int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
{
        int cpu;

        if (!cluster_mask)
                return -EINVAL;

        for_each_online_cpu(cpu) {
                if (socket_id == topology_physical_package_id(cpu)) {
                        cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
                        return 0;
                }
        }

        return -EINVAL;
}

void store_cpu_topology(unsigned int cpuid)
{
        update_siblings_masks(cpuid);
        update_cpu_power(cpuid);
}

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = 0;
                cpu_topo->cluster_id = -1;

                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
        }
}

static void __init reset_cpu_power(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                set_power_scale(cpu, SCHED_POWER_SCALE);
}

void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (parse_dt_topology())
                reset_cpu_topology();

        reset_cpu_power();
        parse_dt_cpu_power();
}