/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>	/* cpufreq_scale_max_freq_capacity() */
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched_energy.h>

#include <asm/cputype.h>
#include <asm/topology.h>

static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

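/*
 * Return the capacity of @cpu as seen by the scheduler: the per-cpu
 * cpu_scale value, further scaled by cpufreq's max-frequency capacity
 * factor when CONFIG_CPU_FREQ is enabled.
 */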
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
#ifdef CONFIG_CPU_FREQ
	unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);

	return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
#else
	return per_cpu(cpu_scale, cpu);
#endif
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

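/*
 * Resolve the "cpu" phandle of a cpu-map node to a logical CPU number;
 * returns -1 if the referenced node matches no possible CPU.
 */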
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

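/*
 * Parse a coreN node of the cpu-map: either a leaf holding a single CPU
 * phandle, or a container of threadN nodes on SMT systems. Records the
 * cluster/core/thread ids of every CPU found.
 */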
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

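/*
 * Recursively parse a clusterN node (or the cpu-map root at depth 0).
 * Nested clusters are flattened to a single cluster id per leaf; coreN
 * children are only valid below the root.
 */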
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

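/*
 * Build the topology table from the /cpus/cpu-map device tree node.
 * Returns 0 when no map is present, and -EINVAL when the map is
 * malformed or leaves some possible CPU without a cluster id.
 */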
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

/* sd energy functions */
static inline
const struct sched_group_energy * const cpu_cluster_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];

	if (!sge) {
		pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
		return NULL;
	}

	return sge;
}

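/*
 * Per-CPU (SD_LEVEL0) energy model entry for @cpu, or NULL if none has
 * been registered.
 */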
static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];

	if (!sge) {
		pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
		return NULL;
	}

	return sge;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN |
	       SD_SHARE_CAP_STATES;
}

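/*
 * arm64 sched_domain hierarchy: with CONFIG_SCHED_MC, an MC level spans
 * each cluster's core siblings (which share package resources, a power
 * domain and capacity states); the DIE level spans all CPUs. Each level
 * carries its energy model accessor.
 */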
static struct sched_domain_topology_level arm64_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
	{ NULL, },
};

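/*
 * Set cpu_scale for @cpu from the highest capacity state of its energy
 * model, keeping the default SCHED_CAPACITY_SCALE when no model exists.
 */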
static void update_cpu_capacity(unsigned int cpu)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	if (cpu_core_energy(cpu)) {
		int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;

		capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
	}

	set_capacity_scale(cpu, capacity);

	pr_info("CPU%d: update cpu_capacity %lu\n",
		cpu, arch_scale_cpu_capacity(NULL, cpu));
}

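/*
 * Mark @cpuid and every CPU sharing its cluster id as core siblings,
 * and those also sharing its core id as thread siblings.
 */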
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

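/*
 * Record the topology of @cpuid and refresh its sibling masks and
 * capacity. When the device tree has not already populated the entry,
 * decode the MPIDR affinity fields: Aff0 is the thread id on SMT parts,
 * otherwise the core id.
 */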
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
	update_cpu_capacity(cpuid);
}

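/*
 * Reset every CPU to an unknown topology: no cluster, core 0, no
 * thread, with each sibling mask containing only the CPU itself.
 */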
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

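/*
 * Early boot: reset the table, parse the device tree topology, and only
 * install the arm64 sched_domain hierarchy if parsing succeeded; a
 * partial parse is discarded so the scheduler never sees stale data.
 */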
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
	else
		set_sched_topology(arm64_topology);

	init_sched_energy_costs();
}