ACPI / processor: Make it possible to get local x2apic id via _MAT
[firefly-linux-kernel-4.4.55.git] / drivers / acpi / processor_core.c
1 /*
2  * Copyright (C) 2005 Intel Corporation
3  * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
4  *
5  *      Alex Chiang <achiang@hp.com>
6  *      - Unified x86/ia64 implementations
7  *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
8  *      - Added _PDC for platforms with Intel CPUs
9  */
10 #include <linux/export.h>
11 #include <linux/dmi.h>
12 #include <linux/slab.h>
13 #include <linux/acpi.h>
14 #include <acpi/processor.h>
15
16 #include "internal.h"
17
18 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
19 ACPI_MODULE_NAME("processor_core");
20
21 static int map_lapic_id(struct acpi_subtable_header *entry,
22                  u32 acpi_id, int *apic_id)
23 {
24         struct acpi_madt_local_apic *lapic =
25                 (struct acpi_madt_local_apic *)entry;
26
27         if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
28                 return -ENODEV;
29
30         if (lapic->processor_id != acpi_id)
31                 return -EINVAL;
32
33         *apic_id = lapic->id;
34         return 0;
35 }
36
37 static int map_x2apic_id(struct acpi_subtable_header *entry,
38                          int device_declaration, u32 acpi_id, int *apic_id)
39 {
40         struct acpi_madt_local_x2apic *apic =
41                 (struct acpi_madt_local_x2apic *)entry;
42
43         if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
44                 return -ENODEV;
45
46         if (device_declaration && (apic->uid == acpi_id)) {
47                 *apic_id = apic->local_apic_id;
48                 return 0;
49         }
50
51         return -EINVAL;
52 }
53
54 static int map_lsapic_id(struct acpi_subtable_header *entry,
55                 int device_declaration, u32 acpi_id, int *apic_id)
56 {
57         struct acpi_madt_local_sapic *lsapic =
58                 (struct acpi_madt_local_sapic *)entry;
59
60         if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
61                 return -ENODEV;
62
63         if (device_declaration) {
64                 if ((entry->length < 16) || (lsapic->uid != acpi_id))
65                         return -EINVAL;
66         } else if (lsapic->processor_id != acpi_id)
67                 return -EINVAL;
68
69         *apic_id = (lsapic->id << 8) | lsapic->eid;
70         return 0;
71 }
72
73 static int map_madt_entry(int type, u32 acpi_id)
74 {
75         unsigned long madt_end, entry;
76         static struct acpi_table_madt *madt;
77         static int read_madt;
78         int apic_id = -1;
79
80         if (!read_madt) {
81                 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
82                                         (struct acpi_table_header **)&madt)))
83                         madt = NULL;
84                 read_madt++;
85         }
86
87         if (!madt)
88                 return apic_id;
89
90         entry = (unsigned long)madt;
91         madt_end = entry + madt->header.length;
92
93         /* Parse all entries looking for a match. */
94
95         entry += sizeof(struct acpi_table_madt);
96         while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
97                 struct acpi_subtable_header *header =
98                         (struct acpi_subtable_header *)entry;
99                 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
100                         if (!map_lapic_id(header, acpi_id, &apic_id))
101                                 break;
102                 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
103                         if (!map_x2apic_id(header, type, acpi_id, &apic_id))
104                                 break;
105                 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
106                         if (!map_lsapic_id(header, type, acpi_id, &apic_id))
107                                 break;
108                 }
109                 entry += header->length;
110         }
111         return apic_id;
112 }
113
114 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
115 {
116         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
117         union acpi_object *obj;
118         struct acpi_subtable_header *header;
119         int apic_id = -1;
120
121         if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
122                 goto exit;
123
124         if (!buffer.length || !buffer.pointer)
125                 goto exit;
126
127         obj = buffer.pointer;
128         if (obj->type != ACPI_TYPE_BUFFER ||
129             obj->buffer.length < sizeof(struct acpi_subtable_header)) {
130                 goto exit;
131         }
132
133         header = (struct acpi_subtable_header *)obj->buffer.pointer;
134         if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
135                 map_lapic_id(header, acpi_id, &apic_id);
136         } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
137                 map_lsapic_id(header, type, acpi_id, &apic_id);
138         } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
139                 map_x2apic_id(header, type, acpi_id, &apic_id);
140         }
141
142 exit:
143         kfree(buffer.pointer);
144         return apic_id;
145 }
146
147 int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
148 {
149         int apic_id;
150
151         apic_id = map_mat_entry(handle, type, acpi_id);
152         if (apic_id == -1)
153                 apic_id = map_madt_entry(type, acpi_id);
154
155         return apic_id;
156 }
157
/*
 * Map an APIC id (as returned by acpi_get_apicid(), -1 on failure) to a
 * logical CPU number. Returns the CPU number, or -1 if no CPU matches.
 */
int acpi_map_cpuid(int apic_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
	int i;
#endif

	if (apic_id == -1) {
		/*
		 * On UP processor, there is no _MAT or MADT table.
		 * So above apic_id is always set to -1.
		 *
		 * BIOS may define multiple CPU handles even for UP processor.
		 * For example,
		 *
		 * Scope (_PR)
		 * {
		 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
		 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
		 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
		 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
		 * }
		 *
		 * Ignores apic_id and always returns 0 for the processor
		 * handle with acpi id 0 if nr_cpu_ids is 1.
		 * This should be the case if SMP tables are not found.
		 * Return -1 for other CPU's handle.
		 */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return acpi_id;	/* acpi_id is 0 here, i.e. CPU 0 */
		else
			return apic_id;	/* apic_id is -1 here, i.e. failure */
	}

#ifdef CONFIG_SMP
	/* Find the logical CPU whose physical (APIC) id matches. */
	for_each_possible_cpu(i) {
		if (cpu_physical_id(i) == apic_id)
			return i;
	}
#else
	/* In UP kernel, only processor 0 is valid */
	if (apic_id == 0)
		return apic_id;
#endif
	return -1;
}
203
204 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
205 {
206         int apic_id;
207
208         apic_id = acpi_get_apicid(handle, type, acpi_id);
209
210         return acpi_map_cpuid(apic_id, acpi_id);
211 }
212 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
213
214 static bool __init processor_physically_present(acpi_handle handle)
215 {
216         int cpuid, type;
217         u32 acpi_id;
218         acpi_status status;
219         acpi_object_type acpi_type;
220         unsigned long long tmp;
221         union acpi_object object = { 0 };
222         struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
223
224         status = acpi_get_type(handle, &acpi_type);
225         if (ACPI_FAILURE(status))
226                 return false;
227
228         switch (acpi_type) {
229         case ACPI_TYPE_PROCESSOR:
230                 status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
231                 if (ACPI_FAILURE(status))
232                         return false;
233                 acpi_id = object.processor.proc_id;
234                 break;
235         case ACPI_TYPE_DEVICE:
236                 status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
237                 if (ACPI_FAILURE(status))
238                         return false;
239                 acpi_id = tmp;
240                 break;
241         default:
242                 return false;
243         }
244
245         type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
246         cpuid = acpi_get_cpuid(handle, type, acpi_id);
247
248         if (cpuid == -1)
249                 return false;
250
251         return true;
252 }
253
/*
 * Fill the 3-dword _PDC capability buffer pointed to by @buf:
 * buf[0] = revision, buf[1] = dword count, buf[2] = capability bits.
 * @buf must hold at least 12 bytes (see acpi_processor_alloc_pdc()).
 */
static void acpi_set_pdc_bits(u32 *buf)
{
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;		/* one capability dword follows */

	/* Enable coordination with firmware's _TSD info */
	buf[2] = ACPI_PDC_SMP_T_SWCOORD;

	/* Twiddle arch-specific bits needed for _PDC */
	arch_acpi_set_pdc_bits(buf);
}
265
266 static struct acpi_object_list *acpi_processor_alloc_pdc(void)
267 {
268         struct acpi_object_list *obj_list;
269         union acpi_object *obj;
270         u32 *buf;
271
272         /* allocate and initialize pdc. It will be used later. */
273         obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
274         if (!obj_list) {
275                 printk(KERN_ERR "Memory allocation error\n");
276                 return NULL;
277         }
278
279         obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
280         if (!obj) {
281                 printk(KERN_ERR "Memory allocation error\n");
282                 kfree(obj_list);
283                 return NULL;
284         }
285
286         buf = kmalloc(12, GFP_KERNEL);
287         if (!buf) {
288                 printk(KERN_ERR "Memory allocation error\n");
289                 kfree(obj);
290                 kfree(obj_list);
291                 return NULL;
292         }
293
294         acpi_set_pdc_bits(buf);
295
296         obj->type = ACPI_TYPE_BUFFER;
297         obj->buffer.length = 12;
298         obj->buffer.pointer = (u8 *) buf;
299         obj_list->count = 1;
300         obj_list->pointer = obj;
301
302         return obj_list;
303 }
304
305 /*
306  * _PDC is required for a BIOS-OS handshake for most of the newer
307  * ACPI processor features.
308  */
309 static acpi_status
310 acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
311 {
312         acpi_status status = AE_OK;
313
314         if (boot_option_idle_override == IDLE_NOMWAIT) {
315                 /*
316                  * If mwait is disabled for CPU C-states, the C2C3_FFH access
317                  * mode will be disabled in the parameter of _PDC object.
318                  * Of course C1_FFH access mode will also be disabled.
319                  */
320                 union acpi_object *obj;
321                 u32 *buffer = NULL;
322
323                 obj = pdc_in->pointer;
324                 buffer = (u32 *)(obj->buffer.pointer);
325                 buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
326
327         }
328         status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
329
330         if (ACPI_FAILURE(status))
331                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
332                     "Could not evaluate _PDC, using legacy perf. control.\n"));
333
334         return status;
335 }
336
337 void acpi_processor_set_pdc(acpi_handle handle)
338 {
339         struct acpi_object_list *obj_list;
340
341         if (arch_has_acpi_pdc() == false)
342                 return;
343
344         obj_list = acpi_processor_alloc_pdc();
345         if (!obj_list)
346                 return;
347
348         acpi_processor_eval_pdc(handle, obj_list);
349
350         kfree(obj_list->pointer->buffer.pointer);
351         kfree(obj_list->pointer);
352         kfree(obj_list);
353 }
354
355 static acpi_status __init
356 early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
357 {
358         if (processor_physically_present(handle) == false)
359                 return AE_OK;
360
361         acpi_processor_set_pdc(handle);
362         return AE_OK;
363 }
364
365 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
 * DMI callback: disable mwait-based C-states on systems matched by
 * processor_idle_dmi_table (known-broken firmware).
 */
static int __init set_no_mwait(const struct dmi_system_id *id)
{
	pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
		  id->ident);
	boot_option_idle_override = IDLE_NOMWAIT;
	return 0;
}
373
374 static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
375         {
376         set_no_mwait, "Extensa 5220", {
377         DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
378         DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
379         DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
380         DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
381         {},
382 };
383
static void __init processor_dmi_check(void)
{
	/*
	 * Check whether the system matches an entry in the DMI table.
	 * If yes, OSPM should not use mwait for CPU C-states.
	 */
	dmi_check_system(processor_idle_dmi_table);
}
392 #else
393 static inline void processor_dmi_check(void) {}
394 #endif
395
/*
 * Early boot: apply DMI quirks, then evaluate _PDC for every processor,
 * covering both Processor() declarations (namespace walk) and
 * Device() declarations with the processor _HID (device scan).
 */
void __init acpi_early_processor_set_pdc(void)
{
	processor_dmi_check();

	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX,
			    early_init_pdc, NULL, NULL, NULL);
	acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
}