Merge branch develop-3.10 into develop-3.10-next
[firefly-linux-kernel-4.4.55.git] / drivers / base / cpu.c
1 /*
2  * CPU subsystem support
3  */
4
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/sched.h>
9 #include <linux/cpu.h>
10 #include <linux/topology.h>
11 #include <linux/device.h>
12 #include <linux/node.h>
13 #include <linux/gfp.h>
14 #include <linux/slab.h>
15 #include <linux/percpu.h>
16 #include <linux/acpi.h>
17 #include <linux/of.h>
18 #include <linux/cpufeature.h>
19
20 #include "base.h"
21
/* Bus type backing /sys/devices/system/cpu; per-cpu devices attach to it. */
struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);
27
/* Per-cpu pointer to the registered cpu device; NULL while unregistered. */
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
29
30 #ifdef CONFIG_HOTPLUG_CPU
31 static void change_cpu_under_node(struct cpu *cpu,
32                         unsigned int from_nid, unsigned int to_nid)
33 {
34         int cpuid = cpu->dev.id;
35         unregister_cpu_under_node(cpuid, from_nid);
36         register_cpu_under_node(cpuid, to_nid);
37         cpu->node_id = to_nid;
38 }
39
40 static ssize_t show_online(struct device *dev,
41                            struct device_attribute *attr,
42                            char *buf)
43 {
44         struct cpu *cpu = container_of(dev, struct cpu, dev);
45
46         return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
47 }
48
/*
 * sysfs "online" write handler: "1" brings the cpu up, "0" takes it down.
 * Returns @count on success, a negative errno from cpu_up()/cpu_down()
 * on failure, or -EINVAL for any other input.
 */
static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;
	int from_nid, to_nid;
	ssize_t ret;

	/* serialize against arch cpu probe/release operations */
	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpuid);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		from_nid = cpu_to_node(cpuid);
		ret = cpu_up(cpuid);

		/*
		 * When hot adding memory to memoryless node and enabling a cpu
		 * on the node, node number of the cpu may internally change.
		 */
		to_nid = cpu_to_node(cpuid);
		if (from_nid != to_nid)
			change_cpu_under_node(cpu, from_nid, to_nid);

		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	/* cpu_up()/cpu_down() return 0 on success; report the full count */
	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);
90
91 static void __cpuinit register_cpu_control(struct cpu *cpu)
92 {
93         device_create_file(&cpu->dev, &dev_attr_online);
94 }
95 void unregister_cpu(struct cpu *cpu)
96 {
97         int logical_cpu = cpu->dev.id;
98
99         unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
100
101         device_remove_file(&cpu->dev, &dev_attr_online);
102
103         device_unregister(&cpu->dev);
104         per_cpu(cpu_sys_devices, logical_cpu) = NULL;
105         return;
106 }
107
108 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
/* Forward a write to the subsystem "probe" file to the arch hook. */
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}
116
/* Forward a write to the subsystem "release" file to the arch hook. */
static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

/* Root-level, write-only control files; see cpu_root_attrs below. */
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
127 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
128
129 #else /* ... !CONFIG_HOTPLUG_CPU */
/* Without CPU hotplug there is no "online" control file to create. */
static inline void register_cpu_control(struct cpu *cpu)
{
}
133 #endif /* CONFIG_HOTPLUG_CPU */
134
135 #ifdef CONFIG_KEXEC
136 #include <linux/kexec.h>
137
138 static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
139                                 char *buf)
140 {
141         struct cpu *cpu = container_of(dev, struct cpu, dev);
142         ssize_t rc;
143         unsigned long long addr;
144         int cpunum;
145
146         cpunum = cpu->dev.id;
147
148         /*
149          * Might be reading other cpu's data based on which cpu read thread
150          * has been scheduled. But cpu data (memory) is allocated once during
151          * boot up and this data does not change there after. Hence this
152          * operation should be safe. No locking required.
153          */
154         addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
155         rc = sprintf(buf, "%Lx\n", addr);
156         return rc;
157 }
158 static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
159
160 static ssize_t show_crash_notes_size(struct device *dev,
161                                      struct device_attribute *attr,
162                                      char *buf)
163 {
164         ssize_t rc;
165
166         rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
167         return rc;
168 }
169 static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
170 #endif
171
172 /*
173  * Print cpu online, possible, present, and system maps
174  */
175
/*
 * Pairs a sysfs attribute with the cpumask it prints.  map holds the
 * address of the mask pointer (e.g. &cpu_online_mask), not the mask
 * itself, so the current mask is always dereferenced at read time.
 */
struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const * const map;
};
180
181 static ssize_t show_cpus_attr(struct device *dev,
182                               struct device_attribute *attr,
183                               char *buf)
184 {
185         struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
186         int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));
187
188         buf[n++] = '\n';
189         buf[n] = '\0';
190         return n;
191 }
192
/* Build a read-only (0444) cpu_attr that prints the given cpumask. */
#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};
202
203 /*
204  * Print values for NR_CPUS and offlined cpus
205  */
206 static ssize_t print_cpus_kernel_max(struct device *dev,
207                                      struct device_attribute *attr, char *buf)
208 {
209         int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
210         return n;
211 }
212 static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
213
214 /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
215 unsigned int total_cpus;
216
217 static ssize_t print_cpus_offline(struct device *dev,
218                                   struct device_attribute *attr, char *buf)
219 {
220         int n = 0, len = PAGE_SIZE-2;
221         cpumask_var_t offline;
222
223         /* display offline cpus < nr_cpu_ids */
224         if (!alloc_cpumask_var(&offline, GFP_KERNEL))
225                 return -ENOMEM;
226         cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
227         n = cpulist_scnprintf(buf, len, offline);
228         free_cpumask_var(offline);
229
230         /* display offline cpus >= nr_cpu_ids */
231         if (total_cpus && nr_cpu_ids < total_cpus) {
232                 if (n && n < len)
233                         buf[n++] = ',';
234
235                 if (nr_cpu_ids == total_cpus-1)
236                         n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
237                 else
238                         n += snprintf(&buf[n], len - n, "%d-%d",
239                                                       nr_cpu_ids, total_cpus-1);
240         }
241
242         n += snprintf(&buf[n], len - n, "\n");
243         return n;
244 }
245 static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
246
/* Deliberately empty ->release for statically allocated cpu devices. */
static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publically ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}
265
266 #ifdef CONFIG_HAVE_CPU_AUTOPROBE
267 #ifdef CONFIG_GENERIC_CPU_AUTOPROBE
268 static ssize_t print_cpu_modalias(struct device *dev,
269                                   struct device_attribute *attr,
270                                   char *buf)
271 {
272         ssize_t n;
273         u32 i;
274
275         n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
276                     CPU_FEATURE_TYPEVAL);
277
278         for (i = 0; i < MAX_CPU_FEATURES; i++)
279                 if (cpu_have_feature(i)) {
280                         if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
281                                 WARN(1, "CPU features overflow page\n");
282                                 break;
283                         }
284                         n += sprintf(&buf[n], ",%04X", i);
285                 }
286         buf[n++] = '\n';
287         return n;
288 }
289 #else
290 #define print_cpu_modalias      arch_print_cpu_modalias
291 #endif
292
293 static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
294 {
295         char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
296         if (buf) {
297                 print_cpu_modalias(NULL, NULL, buf);
298                 add_uevent_var(env, "MODALIAS=%s", buf);
299                 kfree(buf);
300         }
301         return 0;
302 }
303 #endif
304
305 /*
306  * register_cpu - Setup a sysfs device for a CPU.
307  * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
308  *        sysfs for this CPU.
309  * @num - CPU number to use when creating the device.
310  *
311  * Initialize and register the CPU device.
312  */
313 int __cpuinit register_cpu(struct cpu *cpu, int num)
314 {
315         int error;
316
317         cpu->node_id = cpu_to_node(num);
318         memset(&cpu->dev, 0x00, sizeof(struct device));
319         cpu->dev.id = num;
320         cpu->dev.bus = &cpu_subsys;
321         cpu->dev.release = cpu_device_release;
322 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
323         cpu->dev.bus->uevent = cpu_uevent;
324 #endif
325         error = device_register(&cpu->dev);
326         if (!error && cpu->hotpluggable)
327                 register_cpu_control(cpu);
328         if (!error)
329                 per_cpu(cpu_sys_devices, num) = &cpu->dev;
330         if (!error)
331                 register_cpu_under_node(num, cpu_to_node(num));
332
333 #ifdef CONFIG_KEXEC
334         if (!error)
335                 error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
336         if (!error)
337                 error = device_create_file(&cpu->dev,
338                                            &dev_attr_crash_notes_size);
339 #endif
340         return error;
341 }
342
343 struct device *get_cpu_device(unsigned cpu)
344 {
345         if (cpu < nr_cpu_ids && cpu_possible(cpu))
346                 return per_cpu(cpu_sys_devices, cpu);
347         else
348                 return NULL;
349 }
350 EXPORT_SYMBOL_GPL(get_cpu_device);
351
352 #ifdef CONFIG_HAVE_CPU_AUTOPROBE
353 static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
354 #endif
355
/* Attributes exposed at the cpu subsystem root (/sys/devices/system/cpu). */
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,	/* online */
	&cpu_attrs[1].attr.attr,	/* possible */
	&cpu_attrs[2].attr.attr,	/* present */
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
#ifdef CONFIG_HAVE_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

/* NULL-terminated group list handed to subsys_system_register(). */
static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};
380
381 bool cpu_is_hotpluggable(unsigned cpu)
382 {
383         struct device *dev = get_cpu_device(cpu);
384         return dev && container_of(dev, struct cpu, dev)->hotpluggable;
385 }
386 EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
387
388 #ifdef CONFIG_GENERIC_CPU_DEVICES
389 static DEFINE_PER_CPU(struct cpu, cpu_devices);
390 #endif
391
392 static void __init cpu_dev_register_generic(void)
393 {
394 #ifdef CONFIG_GENERIC_CPU_DEVICES
395         int i;
396
397         for_each_possible_cpu(i) {
398                 if (register_cpu(&per_cpu(cpu_devices, i), i))
399                         panic("Failed to register CPU device");
400         }
401 #endif
402 }
403
404 void __init cpu_dev_init(void)
405 {
406         if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
407                 panic("Failed to register CPU subsystem");
408
409         cpu_dev_register_generic();
410 }