ACPI: Add interfaces for ioremapping/iounmapping ACPI registers
[firefly-linux-kernel-4.4.55.git] / drivers / acpi / osl.c
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53
54 #define _COMPONENT              ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX          "ACPI: "
57 struct acpi_os_dpc {
58         acpi_osd_exec_callback function;
59         void *context;
60         struct work_struct work;
61         int wait;
62 };
63
64 #ifdef CONFIG_ACPI_CUSTOM_DSDT
65 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
66 #endif
67
68 #ifdef ENABLE_DEBUGGER
69 #include <linux/kdb.h>
70
71 /* stuff for debugger support */
72 int acpi_in_debugger;
73 EXPORT_SYMBOL(acpi_in_debugger);
74
75 extern char line_buf[80];
76 #endif                          /*ENABLE_DEBUGGER */
77
78 static unsigned int acpi_irq_irq;
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 static struct workqueue_struct *kacpi_hotplug_wq;
84
85 struct acpi_res_list {
86         resource_size_t start;
87         resource_size_t end;
88         acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
89         char name[5];   /* ACPI names are at most 4 chars; keep a copy here
90                            instead of res->name, so no kmalloc is needed */
91         struct list_head resource_list;
92         int count;
93 };
94
95 static LIST_HEAD(resource_list_head);
96 static DEFINE_SPINLOCK(acpi_res_lock);
97
98 /*
99  * This list of permanent mappings is for memory that may be accessed from
100  * interrupt context, where we can't do the ioremap().
101  */
102 struct acpi_ioremap {
103         struct list_head list;
104         void __iomem *virt;
105         acpi_physical_address phys;
106         acpi_size size;
107 };
108
109 static LIST_HEAD(acpi_ioremaps);
110 static DEFINE_SPINLOCK(acpi_ioremap_lock);
111
112 #define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
113 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
114
115 /*
116  * The story of _OSI(Linux)
117  *
118  * From pre-history through Linux-2.6.22,
119  * Linux responded TRUE upon a BIOS OSI(Linux) query.
120  *
121  * Unfortunately, reference BIOS writers got wind of this
122  * and put OSI(Linux) in their example code, quickly exposing
123  * this string as ill-conceived and opening the door to
124  * an unbounded number of BIOS incompatibilities.
125  *
126  * For example, OSI(Linux) was used on resume to re-POST a
127  * video card on one system, because Linux at that time
128  * could not do a speedy restore in its native driver.
129  * But even after Linux gained quick native restore capability,
130  * it had no way to tell the BIOS to skip the time-consuming
131  * POST -- putting Linux at a permanent performance disadvantage.
132  * On another system, the BIOS writer used OSI(Linux)
133  * to infer native OS support for IPMI!  On other systems,
134  * OSI(Linux) simply got in the way of Linux claiming to
135  * be compatible with other operating systems, exposing
136  * BIOS issues such as skipped device initialization.
137  *
138  * So "Linux" turned out to be a really poor chose of
139  * OSI string, and from Linux-2.6.23 onward we respond FALSE.
140  *
141  * BIOS writers should NOT query _OSI(Linux) on future systems.
142  * Linux will complain on the console when it sees it, and return FALSE.
143  * Getting Linux to return TRUE for your system requires either
144  * a kernel source update to add a DMI entry,
145  * or booting with "acpi_osi=Linux"
146  */
147
148 static struct osi_linux {
149         unsigned int    enable:1;
150         unsigned int    dmi:1;
151         unsigned int    cmdline:1;
152         unsigned int    known:1;
153 } osi_linux = { 0, 0, 0, 0};
154
155 static void __init acpi_request_region (struct acpi_generic_address *addr,
156         unsigned int length, char *desc)
157 {
158         if (!addr->address || !length)
159                 return;
160
161         /* Resources are never freed */
162         if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
163                 request_region(addr->address, length, desc);
164         else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
165                 request_mem_region(addr->address, length, desc);
166 }
167
168 static int __init acpi_reserve_resources(void)
169 {
170         acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
171                 "ACPI PM1a_EVT_BLK");
172
173         acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
174                 "ACPI PM1b_EVT_BLK");
175
176         acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
177                 "ACPI PM1a_CNT_BLK");
178
179         acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
180                 "ACPI PM1b_CNT_BLK");
181
182         if (acpi_gbl_FADT.pm_timer_length == 4)
183                 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
184
185         acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
186                 "ACPI PM2_CNT_BLK");
187
188         /* Length of GPE blocks must be a non-negative multiple of 2 */
189
190         if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
191                 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
192                                acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
193
194         if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
195                 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
196                                acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
197
198         return 0;
199 }
200 device_initcall(acpi_reserve_resources);
201
202 acpi_status __init acpi_os_initialize(void)
203 {
204         return AE_OK;
205 }
206
207 acpi_status acpi_os_initialize1(void)
208 {
209         kacpid_wq = create_workqueue("kacpid");
210         kacpi_notify_wq = create_workqueue("kacpi_notify");
211         kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
212         BUG_ON(!kacpid_wq);
213         BUG_ON(!kacpi_notify_wq);
214         BUG_ON(!kacpi_hotplug_wq);
215         return AE_OK;
216 }
217
218 acpi_status acpi_os_terminate(void)
219 {
220         if (acpi_irq_handler) {
221                 acpi_os_remove_interrupt_handler(acpi_irq_irq,
222                                                  acpi_irq_handler);
223         }
224
225         destroy_workqueue(kacpid_wq);
226         destroy_workqueue(kacpi_notify_wq);
227         destroy_workqueue(kacpi_hotplug_wq);
228
229         return AE_OK;
230 }
231
232 void acpi_os_printf(const char *fmt, ...)
233 {
234         va_list args;
235         va_start(args, fmt);
236         acpi_os_vprintf(fmt, args);
237         va_end(args);
238 }
239
240 void acpi_os_vprintf(const char *fmt, va_list args)
241 {
242         static char buffer[512];
243
244         vsprintf(buffer, fmt, args);
245
246 #ifdef ENABLE_DEBUGGER
247         if (acpi_in_debugger) {
248                 kdb_printf("%s", buffer);
249         } else {
250                 printk(KERN_CONT "%s", buffer);
251         }
252 #else
253         printk(KERN_CONT "%s", buffer);
254 #endif
255 }
256
257 acpi_physical_address __init acpi_os_get_root_pointer(void)
258 {
259         if (efi_enabled) {
260                 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
261                         return efi.acpi20;
262                 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
263                         return efi.acpi;
264                 else {
265                         printk(KERN_ERR PREFIX
266                                "System description tables not found\n");
267                         return 0;
268                 }
269         } else {
270                 acpi_physical_address pa = 0;
271
272                 acpi_find_root_pointer(&pa);
273                 return pa;
274         }
275 }
276
277 /* Must be called with 'acpi_ioremap_lock' lock held. */
278 static void __iomem *
279 acpi_map_vaddr_lookup(acpi_physical_address phys, acpi_size size)
280 {
281         struct acpi_ioremap *map;
282
283         list_for_each_entry(map, &acpi_ioremaps, list)
284                 if (map->phys <= phys &&
285                     phys + size <= map->phys + map->size)
286                         return map->virt + (phys - map->phys);
287
288         return NULL;
289 }
290
291 /* Must be called with 'acpi_ioremap_lock' lock held. */
292 static struct acpi_ioremap *
293 acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
294 {
295         struct acpi_ioremap *map;
296
297         list_for_each_entry(map, &acpi_ioremaps, list)
298                 if (map->virt == virt && map->size == size)
299                         return map;
300
301         return NULL;
302 }
303
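/*
 * Map a physical address range for ACPI use.  Before acpi_gbl_permanent_mmap
 * is set (i.e. during early boot) this simply defers to __acpi_map_table().
 * Afterwards the range is ioremap()ed and recorded on the acpi_ioremaps list,
 * so the mapping can be found again later -- in particular from contexts
 * where calling ioremap() is not possible.
 */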
304 void __iomem *__init_refok
305 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
306 {
307         struct acpi_ioremap *map;
308         unsigned long flags;
309         void __iomem *virt;
310
311         if (phys > ULONG_MAX) {
312                 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
313                 return NULL;
314         }
315
316         if (!acpi_gbl_permanent_mmap)
317                 return __acpi_map_table((unsigned long)phys, size);
318
319         map = kzalloc(sizeof(*map), GFP_KERNEL);
320         if (!map)
321                 return NULL;
322
323         virt = ioremap(phys, size);
324         if (!virt) {
325                 kfree(map);
326                 return NULL;
327         }
328
329         INIT_LIST_HEAD(&map->list);
330         map->virt = virt;
331         map->phys = phys;
332         map->size = size;
333
334         spin_lock_irqsave(&acpi_ioremap_lock, flags);
335         list_add_tail(&map->list, &acpi_ioremaps);
336         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
337
338         return virt;
339 }
340 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
341
342 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
343 {
344         struct acpi_ioremap *map;
345         unsigned long flags;
346
347         if (!acpi_gbl_permanent_mmap) {
348                 __acpi_unmap_table(virt, size);
349                 return;
350         }
351
352         spin_lock_irqsave(&acpi_ioremap_lock, flags);
353         map = acpi_map_lookup_virt(virt, size);
354         if (!map) {
355                 spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
356                 printk(KERN_ERR PREFIX "%s: bad address %p\n", __func__, virt);
357                 dump_stack();
358                 return;
359         }
360
361         list_del(&map->list);
362         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
363
364         iounmap(map->virt);
365         kfree(map);
366 }
367 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
368
369 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
370 {
371         if (!acpi_gbl_permanent_mmap)
372                 __acpi_unmap_table(virt, size);
373 }
374
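/*
 * Pre-map the register described by a Generic Address Structure.  Only
 * SystemMemory registers need this; the mapping is created up front and
 * tracked in acpi_ioremaps, so later accesses (e.g. acpi_os_read_memory()
 * from interrupt context) can reuse it instead of a temporary ioremap.
 *
 * Minimal usage sketch (hypothetical caller, using a FADT GAS as example):
 *
 *	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
 *	...
 *	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
 */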
375 int acpi_os_map_generic_address(struct acpi_generic_address *addr)
376 {
377         void __iomem *virt;
378
379         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
380                 return 0;
381
382         if (!addr->address || !addr->bit_width)
383                 return -EINVAL;
384
385         virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
386         if (!virt)
387                 return -EIO;
388
389         return 0;
390 }
391 EXPORT_SYMBOL_GPL(acpi_os_map_generic_address);
392
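/*
 * Undo acpi_os_map_generic_address(): look up the mapping tracked for this
 * GAS and release it via acpi_os_unmap_memory().
 */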
393 void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
394 {
395         void __iomem *virt;
396         unsigned long flags;
397         acpi_size size = addr->bit_width / 8;
398
399         if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
400                 return;
401
402         if (!addr->address || !addr->bit_width)
403                 return;
404
405         spin_lock_irqsave(&acpi_ioremap_lock, flags);
406         virt = acpi_map_vaddr_lookup(addr->address, size);
407         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
408
409         acpi_os_unmap_memory(virt, size);
410 }
411 EXPORT_SYMBOL_GPL(acpi_os_unmap_generic_address);
412
413 #ifdef ACPI_FUTURE_USAGE
414 acpi_status
415 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
416 {
417         if (!phys || !virt)
418                 return AE_BAD_PARAMETER;
419
420         *phys = virt_to_phys(virt);
421
422         return AE_OK;
423 }
424 #endif
425
426 #define ACPI_MAX_OVERRIDE_LEN 100
427
428 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
429
430 acpi_status
431 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
432                             acpi_string * new_val)
433 {
434         if (!init_val || !new_val)
435                 return AE_BAD_PARAMETER;
436
437         *new_val = NULL;
438         if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
439                 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
440                        acpi_os_name);
441                 *new_val = acpi_os_name;
442         }
443
444         return AE_OK;
445 }
446
447 acpi_status
448 acpi_os_table_override(struct acpi_table_header * existing_table,
449                        struct acpi_table_header ** new_table)
450 {
451         if (!existing_table || !new_table)
452                 return AE_BAD_PARAMETER;
453
454         *new_table = NULL;
455
456 #ifdef CONFIG_ACPI_CUSTOM_DSDT
457         if (strncmp(existing_table->signature, "DSDT", 4) == 0)
458                 *new_table = (struct acpi_table_header *)AmlCode;
459 #endif
460         if (*new_table != NULL) {
461                 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
462                            "this is unsafe: tainting kernel\n",
463                        existing_table->signature,
464                        existing_table->oem_table_id);
465                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
466         }
467         return AE_OK;
468 }
469
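/*
 * SCI interrupt handler: dispatch to the handler that ACPICA registered via
 * acpi_os_install_interrupt_handler() and update the handled/not-handled
 * interrupt statistics.
 */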
470 static irqreturn_t acpi_irq(int irq, void *dev_id)
471 {
472         u32 handled;
473
474         handled = (*acpi_irq_handler) (acpi_irq_context);
475
476         if (handled) {
477                 acpi_irq_handled++;
478                 return IRQ_HANDLED;
479         } else {
480                 acpi_irq_not_handled++;
481                 return IRQ_NONE;
482         }
483 }
484
485 acpi_status
486 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
487                                   void *context)
488 {
489         unsigned int irq;
490
491         acpi_irq_stats_init();
492
493         /*
494          * Ignore the GSI from the core, and use the value in our copy of the
495          * FADT. It may not be the same if an interrupt source override exists
496          * for the SCI.
497          */
498         gsi = acpi_gbl_FADT.sci_interrupt;
499         if (acpi_gsi_to_irq(gsi, &irq) < 0) {
500                 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
501                        gsi);
502                 return AE_OK;
503         }
504
505         acpi_irq_handler = handler;
506         acpi_irq_context = context;
507         if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
508                 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
509                 return AE_NOT_ACQUIRED;
510         }
511         acpi_irq_irq = irq;
512
513         return AE_OK;
514 }
515
516 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
517 {
518         if (irq) {
519                 free_irq(irq, acpi_irq);
520                 acpi_irq_handler = NULL;
521                 acpi_irq_irq = 0;
522         }
523
524         return AE_OK;
525 }
526
527 /*
528  * Running in interpreter thread context, safe to sleep
529  */
530
531 void acpi_os_sleep(u64 ms)
532 {
533         schedule_timeout_interruptible(msecs_to_jiffies(ms));
534 }
535
536 void acpi_os_stall(u32 us)
537 {
538         while (us) {
539                 u32 delay = 1000;
540
541                 if (delay > us)
542                         delay = us;
543                 udelay(delay);
544                 touch_nmi_watchdog();
545                 us -= delay;
546         }
547 }
548
549 /*
550  * Support ACPI 3.0 AML Timer operand
551  * Returns 64-bit free-running, monotonically increasing timer
552  * with 100ns granularity
553  */
554 u64 acpi_os_get_timer(void)
555 {
556         static u64 t;
557
558 #ifdef  CONFIG_HPET
559         /* TBD: use HPET if available */
560 #endif
561
562 #ifdef  CONFIG_X86_PM_TIMER
563         /* TBD: default to PM timer if HPET was not available */
564 #endif
565         if (!t)
566                 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
567
568         return ++t;
569 }
570
571 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
572 {
573         u32 dummy;
574
575         if (!value)
576                 value = &dummy;
577
578         *value = 0;
579         if (width <= 8) {
580                 *(u8 *) value = inb(port);
581         } else if (width <= 16) {
582                 *(u16 *) value = inw(port);
583         } else if (width <= 32) {
584                 *(u32 *) value = inl(port);
585         } else {
586                 BUG();
587         }
588
589         return AE_OK;
590 }
591
592 EXPORT_SYMBOL(acpi_os_read_port);
593
594 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
595 {
596         if (width <= 8) {
597                 outb(value, port);
598         } else if (width <= 16) {
599                 outw(value, port);
600         } else if (width <= 32) {
601                 outl(value, port);
602         } else {
603                 BUG();
604         }
605
606         return AE_OK;
607 }
608
609 EXPORT_SYMBOL(acpi_os_write_port);
610
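/*
 * Read a SystemMemory location on behalf of ACPICA.  If the address lies
 * within one of the tracked permanent mappings it is used directly (which
 * also makes the access safe in interrupt context); otherwise a temporary
 * ioremap/iounmap is done around the read.  acpi_os_write_memory() below is
 * the mirror image for writes.
 */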
611 acpi_status
612 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
613 {
614         u32 dummy;
615         void __iomem *virt_addr;
616         int size = width / 8, unmap = 0;
617         unsigned long flags;
618
619         spin_lock_irqsave(&acpi_ioremap_lock, flags);
620         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
621         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
622         if (!virt_addr) {
623                 virt_addr = ioremap(phys_addr, size);
624                 unmap = 1;
625         }
626         if (!value)
627                 value = &dummy;
628
629         switch (width) {
630         case 8:
631                 *(u8 *) value = readb(virt_addr);
632                 break;
633         case 16:
634                 *(u16 *) value = readw(virt_addr);
635                 break;
636         case 32:
637                 *(u32 *) value = readl(virt_addr);
638                 break;
639         default:
640                 BUG();
641         }
642
643         if (unmap)
644                 iounmap(virt_addr);
645
646         return AE_OK;
647 }
648
649 acpi_status
650 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
651 {
652         void __iomem *virt_addr;
653         int size = width / 8, unmap = 0;
654         unsigned long flags;
655
656         spin_lock_irqsave(&acpi_ioremap_lock, flags);
657         virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
658         spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
659         if (!virt_addr) {
660                 virt_addr = ioremap(phys_addr, size);
661                 unmap = 1;
662         }
663
664         switch (width) {
665         case 8:
666                 writeb(value, virt_addr);
667                 break;
668         case 16:
669                 writew(value, virt_addr);
670                 break;
671         case 32:
672                 writel(value, virt_addr);
673                 break;
674         default:
675                 BUG();
676         }
677
678         if (unmap)
679                 iounmap(virt_addr);
680
681         return AE_OK;
682 }
683
684 acpi_status
685 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
686                                u32 *value, u32 width)
687 {
688         int result, size;
689
690         if (!value)
691                 return AE_BAD_PARAMETER;
692
693         switch (width) {
694         case 8:
695                 size = 1;
696                 break;
697         case 16:
698                 size = 2;
699                 break;
700         case 32:
701                 size = 4;
702                 break;
703         default:
704                 return AE_ERROR;
705         }
706
707         result = raw_pci_read(pci_id->segment, pci_id->bus,
708                                 PCI_DEVFN(pci_id->device, pci_id->function),
709                                 reg, size, value);
710
711         return (result ? AE_ERROR : AE_OK);
712 }
713
714 acpi_status
715 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
716                                 u64 value, u32 width)
717 {
718         int result, size;
719
720         switch (width) {
721         case 8:
722                 size = 1;
723                 break;
724         case 16:
725                 size = 2;
726                 break;
727         case 32:
728                 size = 4;
729                 break;
730         default:
731                 return AE_ERROR;
732         }
733
734         result = raw_pci_write(pci_id->segment, pci_id->bus,
735                                 PCI_DEVFN(pci_id->device, pci_id->function),
736                                 reg, size, value);
737
738         return (result ? AE_ERROR : AE_OK);
739 }
740
741 /* TODO: Change code to take advantage of driver model more */
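/*
 * Recurse from rhandle down to chandle, evaluating _ADR at each device node
 * to obtain PCI device/function numbers.  Whenever a node turns out to be a
 * PCI-to-PCI or CardBus bridge (header type 1 or 2 at config offset 0x0e),
 * its secondary bus number (config offset 0x19) is remembered and used as
 * the bus for the devices below it.
 */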
742 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound  */
743                                     acpi_handle chandle,        /* current node */
744                                     struct acpi_pci_id **id,
745                                     int *is_bridge, u8 * bus_number)
746 {
747         acpi_handle handle;
748         struct acpi_pci_id *pci_id = *id;
749         acpi_status status;
750         unsigned long long temp;
751         acpi_object_type type;
752
753         acpi_get_parent(chandle, &handle);
754         if (handle != rhandle) {
755                 acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
756                                         bus_number);
757
758                 status = acpi_get_type(handle, &type);
759                 if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
760                         return;
761
762                 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
763                                           &temp);
764                 if (ACPI_SUCCESS(status)) {
765                         u32 val;
766                         pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
767                         pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
768
769                         if (*is_bridge)
770                                 pci_id->bus = *bus_number;
771
772                         /* any nicer way to get bus number of bridge ? */
773                         status =
774                             acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
775                                                            8);
776                         if (ACPI_SUCCESS(status)
777                             && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
778                                 status =
779                                     acpi_os_read_pci_configuration(pci_id, 0x18,
780                                                                    &val, 8);
781                                 if (!ACPI_SUCCESS(status)) {
782                                         /* Certainly broken...  FIX ME */
783                                         return;
784                                 }
785                                 *is_bridge = 1;
786                                 pci_id->bus = val;
787                                 status =
788                                     acpi_os_read_pci_configuration(pci_id, 0x19,
789                                                                    &val, 8);
790                                 if (ACPI_SUCCESS(status)) {
791                                         *bus_number = val;
792                                 }
793                         } else
794                                 *is_bridge = 0;
795                 }
796         }
797 }
798
799 void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound  */
800                            acpi_handle chandle, /* current node */
801                            struct acpi_pci_id **id)
802 {
803         int is_bridge = 1;
804         u8 bus_number = (*id)->bus;
805
806         acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
807 }
808
809 static void acpi_os_execute_deferred(struct work_struct *work)
810 {
811         struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
812
813         if (dpc->wait)
814                 acpi_os_wait_events_complete(NULL);
815
816         dpc->function(dpc->context);
817         kfree(dpc);
818 }
819
820 /*******************************************************************************
821  *
822  * FUNCTION:    acpi_os_execute
823  *
824  * PARAMETERS:  Type               - Type of the callback
825  *              Function           - Function to be executed
826  *              Context            - Function parameters
827  *
828  * RETURN:      Status
829  *
830  * DESCRIPTION: Depending on type, either queues function for deferred execution or
831  *              immediately executes function on a separate thread.
832  *
833  ******************************************************************************/
834
835 static acpi_status __acpi_os_execute(acpi_execute_type type,
836         acpi_osd_exec_callback function, void *context, int hp)
837 {
838         acpi_status status = AE_OK;
839         struct acpi_os_dpc *dpc;
840         struct workqueue_struct *queue;
841         int ret;
842         ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
843                           "Scheduling function [%p(%p)] for deferred execution.\n",
844                           function, context));
845
846         /*
847          * Allocate/initialize DPC structure.  Note that this memory will be
848  * freed by the callee.  The kernel handles the work_struct list in a
849          * way that allows us to also free its memory inside the callee.
850          * Because we may want to schedule several tasks with different
851          * parameters we can't use the approach some kernel code uses of
852          * having a static work_struct.
853          */
854
855         dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
856         if (!dpc)
857                 return AE_NO_MEMORY;
858
859         dpc->function = function;
860         dpc->context = context;
861
862         /*
863          * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
864          * because the hotplug code may call driver .remove() functions,
865          * which invoke flush_scheduled_work/acpi_os_wait_events_complete
866          * to flush these workqueues.
867          */
868         queue = hp ? kacpi_hotplug_wq :
869                 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
870         dpc->wait = hp ? 1 : 0;
871
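        /*
         * Each INIT_WORK() call site gets its own static lockdep key, which
         * keeps the three workqueues in separate lockdep classes; that is
         * presumably why the identical-looking calls below are not collapsed
         * into a single INIT_WORK().
         */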
872         if (queue == kacpi_hotplug_wq)
873                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
874         else if (queue == kacpi_notify_wq)
875                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
876         else
877                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
878
879         /*
880          * On some machines, a software-initiated SMI causes corruption unless
881          * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
882          * typically it's done in GPE-related methods that are run via
883          * workqueues, so we can avoid the known corruption cases by always
884          * queueing on CPU 0.
885          */
886         ret = queue_work_on(0, queue, &dpc->work);
887
888         if (!ret) {
889                 printk(KERN_ERR PREFIX
890                           "Call to queue_work() failed.\n");
891                 status = AE_ERROR;
892                 kfree(dpc);
893         }
894         return status;
895 }
896
897 acpi_status acpi_os_execute(acpi_execute_type type,
898                             acpi_osd_exec_callback function, void *context)
899 {
900         return __acpi_os_execute(type, function, context, 0);
901 }
902 EXPORT_SYMBOL(acpi_os_execute);
903
904 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
905         void *context)
906 {
907         return __acpi_os_execute(0, function, context, 1);
908 }
909
910 void acpi_os_wait_events_complete(void *context)
911 {
912         flush_workqueue(kacpid_wq);
913         flush_workqueue(kacpi_notify_wq);
914 }
915
916 EXPORT_SYMBOL(acpi_os_wait_events_complete);
917
918 /*
919  * Allocate the memory for a spinlock and initialize it.
920  */
921 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
922 {
923         spin_lock_init(*handle);
924
925         return AE_OK;
926 }
927
928 /*
929  * Deallocate the memory for a spinlock.
930  */
931 void acpi_os_delete_lock(acpi_spinlock handle)
932 {
933         return;
934 }
935
936 acpi_status
937 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
938 {
939         struct semaphore *sem = NULL;
940
941         sem = acpi_os_allocate(sizeof(struct semaphore));
942         if (!sem)
943                 return AE_NO_MEMORY;
944         memset(sem, 0, sizeof(struct semaphore));
945
946         sema_init(sem, initial_units);
947
948         *handle = (acpi_handle *) sem;
949
950         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
951                           *handle, initial_units));
952
953         return AE_OK;
954 }
955
956 /*
957  * TODO: A better way to delete semaphores?  Linux doesn't have a
958  * 'delete_semaphore()' function -- may result in an invalid
959  * pointer dereference for non-synchronized consumers.  Should
960  * we at least check for blocked threads and signal/cancel them?
961  */
962
963 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
964 {
965         struct semaphore *sem = (struct semaphore *)handle;
966
967         if (!sem)
968                 return AE_BAD_PARAMETER;
969
970         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
971
972         BUG_ON(!list_empty(&sem->wait_list));
973         kfree(sem);
974         sem = NULL;
975
976         return AE_OK;
977 }
978
979 /*
980  * TODO: Support for units > 1?
981  */
982 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
983 {
984         acpi_status status = AE_OK;
985         struct semaphore *sem = (struct semaphore *)handle;
986         long jiffies;
987         int ret = 0;
988
989         if (!sem || (units < 1))
990                 return AE_BAD_PARAMETER;
991
992         if (units > 1)
993                 return AE_SUPPORT;
994
995         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
996                           handle, units, timeout));
997
998         if (timeout == ACPI_WAIT_FOREVER)
999                 jiffies = MAX_SCHEDULE_TIMEOUT;
1000         else
1001                 jiffies = msecs_to_jiffies(timeout);
1002         
1003         ret = down_timeout(sem, jiffies);
1004         if (ret)
1005                 status = AE_TIME;
1006
1007         if (ACPI_FAILURE(status)) {
1008                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1009                                   "Failed to acquire semaphore[%p|%d|%d], %s",
1010                                   handle, units, timeout,
1011                                   acpi_format_exception(status)));
1012         } else {
1013                 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
1014                                   "Acquired semaphore[%p|%d|%d]", handle,
1015                                   units, timeout));
1016         }
1017
1018         return status;
1019 }
1020
1021 /*
1022  * TODO: Support for units > 1?
1023  */
1024 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
1025 {
1026         struct semaphore *sem = (struct semaphore *)handle;
1027
1028         if (!sem || (units < 1))
1029                 return AE_BAD_PARAMETER;
1030
1031         if (units > 1)
1032                 return AE_SUPPORT;
1033
1034         ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
1035                           units));
1036
1037         up(sem);
1038
1039         return AE_OK;
1040 }
1041
1042 #ifdef ACPI_FUTURE_USAGE
1043 u32 acpi_os_get_line(char *buffer)
1044 {
1045
1046 #ifdef ENABLE_DEBUGGER
1047         if (acpi_in_debugger) {
1048                 u32 chars;
1049
1050                 kdb_read(buffer, sizeof(line_buf));
1051
1052                 /* remove the CR kdb includes */
1053                 chars = strlen(buffer) - 1;
1054                 buffer[chars] = '\0';
1055         }
1056 #endif
1057
1058         return 0;
1059 }
1060 #endif                          /*  ACPI_FUTURE_USAGE  */
1061
1062 acpi_status acpi_os_signal(u32 function, void *info)
1063 {
1064         switch (function) {
1065         case ACPI_SIGNAL_FATAL:
1066                 printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1067                 break;
1068         case ACPI_SIGNAL_BREAKPOINT:
1069                 /*
1070                  * AML Breakpoint
1071                  * ACPI spec. says to treat it as a NOP unless
1072                  * you are debugging.  So if/when we integrate
1073                  * AML debugger into the kernel debugger its
1074                  * hook will go here.  But until then it is
1075                  * not useful to print anything on breakpoints.
1076                  */
1077                 break;
1078         default:
1079                 break;
1080         }
1081
1082         return AE_OK;
1083 }
1084
1085 static int __init acpi_os_name_setup(char *str)
1086 {
1087         char *p = acpi_os_name;
1088         int count = ACPI_MAX_OVERRIDE_LEN - 1;
1089
1090         if (!str || !*str)
1091                 return 0;
1092
1093         for (; count-- && str && *str; str++) {
1094                 if (isalnum(*str) || *str == ' ' || *str == ':')
1095                         *p++ = *str;
1096                 else if (*str == '\'' || *str == '"')
1097                         continue;
1098                 else
1099                         break;
1100         }
1101         *p = 0;
1102
1103         return 1;
1104
1105 }
1106
1107 __setup("acpi_os_name=", acpi_os_name_setup);
1108
1109 static void __init set_osi_linux(unsigned int enable)
1110 {
1111         if (osi_linux.enable != enable) {
1112                 osi_linux.enable = enable;
1113                 printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1114                         enable ? "Add": "Delet");
1115         }
1116         return;
1117 }
1118
1119 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1120 {
1121         osi_linux.cmdline = 1;  /* cmdline set the default */
1122         set_osi_linux(enable);
1123
1124         return;
1125 }
1126
1127 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1128 {
1129         osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
1130
1131         printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1132
1133         if (enable == -1)
1134                 return;
1135
1136         osi_linux.known = 1;    /* DMI knows which OSI(Linux) default needed */
1137
1138         set_osi_linux(enable);
1139
1140         return;
1141 }
1142
1143 /*
1144  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1145  *
1146  * empty string disables _OSI
1147  * string starting with '!' disables that string
1148  * otherwise string is added to list, augmenting built-in strings
1149  */
1150 int __init acpi_osi_setup(char *str)
1151 {
1152         if (str == NULL || *str == '\0') {
1153                 printk(KERN_INFO PREFIX "_OSI method disabled\n");
1154                 acpi_gbl_create_osi_method = FALSE;
1155         } else if (!strcmp("!Linux", str)) {
1156                 acpi_cmdline_osi_linux(0);      /* !enable */
1157         } else if (*str == '!') {
1158                 if (acpi_osi_invalidate(++str) == AE_OK)
1159                         printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1160         } else if (!strcmp("Linux", str)) {
1161                 acpi_cmdline_osi_linux(1);      /* enable */
1162         } else if (*osi_additional_string == '\0') {
1163                 strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1164                 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1165         }
1166
1167         return 1;
1168 }
1169
1170 __setup("acpi_osi=", acpi_osi_setup);
1171
1172 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1173 static int __init acpi_serialize_setup(char *str)
1174 {
1175         printk(KERN_INFO PREFIX "serialize enabled\n");
1176
1177         acpi_gbl_all_methods_serialized = TRUE;
1178
1179         return 1;
1180 }
1181
1182 __setup("acpi_serialize", acpi_serialize_setup);
1183
1184 /* Check for resource interference between native drivers and ACPI
1185  * OperationRegions (SystemIO and SystemMemory only).
1186  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1187  * in arbitrary AML code and can interfere with legacy drivers.
1188  * acpi_enforce_resources= can be set to:
1189  *
1190  *   - strict (default) (2)
1191  *     -> a driver trying to access such a resource will not load
1192  *   - lax              (1)
1193  *     -> a driver trying to access such a resource will load, but a
1194  *        warning message is printed that something might go wrong...
1195  *
1196  *   - no               (0)
1197  *     -> ACPI OperationRegion resources will not be registered at all
1198  *
1199  */
1200 #define ENFORCE_RESOURCES_STRICT 2
1201 #define ENFORCE_RESOURCES_LAX    1
1202 #define ENFORCE_RESOURCES_NO     0
1203
1204 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1205
1206 static int __init acpi_enforce_resources_setup(char *str)
1207 {
1208         if (str == NULL || *str == '\0')
1209                 return 0;
1210
1211         if (!strcmp("strict", str))
1212                 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1213         else if (!strcmp("lax", str))
1214                 acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1215         else if (!strcmp("no", str))
1216                 acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1217
1218         return 1;
1219 }
1220
1221 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1222
1223 /* Check for resource conflicts between ACPI OperationRegions and native
1224  * drivers */
1225 int acpi_check_resource_conflict(const struct resource *res)
1226 {
1227         struct acpi_res_list *res_list_elem;
1228         int ioport;
1229         int clash = 0;
1230
1231         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1232                 return 0;
1233         if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1234                 return 0;
1235
1236         ioport = res->flags & IORESOURCE_IO;
1237
1238         spin_lock(&acpi_res_lock);
1239         list_for_each_entry(res_list_elem, &resource_list_head,
1240                             resource_list) {
1241                 if (ioport && (res_list_elem->resource_type
1242                                != ACPI_ADR_SPACE_SYSTEM_IO))
1243                         continue;
1244                 if (!ioport && (res_list_elem->resource_type
1245                                 != ACPI_ADR_SPACE_SYSTEM_MEMORY))
1246                         continue;
1247
1248                 if (res->end < res_list_elem->start
1249                     || res_list_elem->end < res->start)
1250                         continue;
1251                 clash = 1;
1252                 break;
1253         }
1254         spin_unlock(&acpi_res_lock);
1255
1256         if (clash) {
1257                 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1258                         printk(KERN_WARNING "ACPI: resource %s %pR"
1259                                " conflicts with ACPI region %s %pR\n",
1260                                res->name, res, res_list_elem->name,
1261                                res_list_elem);
1262                         if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1263                                 printk(KERN_NOTICE "ACPI: This conflict may"
1264                                        " cause random problems and system"
1265                                        " instability\n");
1266                         printk(KERN_INFO "ACPI: If an ACPI driver is available"
1267                                " for this device, you should use it instead of"
1268                                " the native driver\n");
1269                 }
1270                 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1271                         return -EBUSY;
1272         }
1273         return 0;
1274 }
1275 EXPORT_SYMBOL(acpi_check_resource_conflict);
1276
1277 int acpi_check_region(resource_size_t start, resource_size_t n,
1278                       const char *name)
1279 {
1280         struct resource res = {
1281                 .start = start,
1282                 .end   = start + n - 1,
1283                 .name  = name,
1284                 .flags = IORESOURCE_IO,
1285         };
1286
1287         return acpi_check_resource_conflict(&res);
1288 }
1289 EXPORT_SYMBOL(acpi_check_region);
1290
1291 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1292                       const char *name)
1293 {
1294         struct resource res = {
1295                 .start = start,
1296                 .end   = start + n - 1,
1297                 .name  = name,
1298                 .flags = IORESOURCE_MEM,
1299         };
1300
1301         return acpi_check_resource_conflict(&res);
1302
1303 }
1304 EXPORT_SYMBOL(acpi_check_mem_region);
1305
1306 /*
1307  * Let drivers know whether the resource checks are effective
1308  */
1309 int acpi_resources_are_enforced(void)
1310 {
1311         return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1312 }
1313 EXPORT_SYMBOL(acpi_resources_are_enforced);
1314
1315 /*
1316  * Acquire a spinlock.
1317  *
1318  * handle is a pointer to the spinlock_t.
1319  */
1320
1321 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1322 {
1323         acpi_cpu_flags flags;
1324         spin_lock_irqsave(lockp, flags);
1325         return flags;
1326 }
1327
1328 /*
1329  * Release a spinlock. See above.
1330  */
1331
1332 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1333 {
1334         spin_unlock_irqrestore(lockp, flags);
1335 }
1336
1337 #ifndef ACPI_USE_LOCAL_CACHE
1338
1339 /*******************************************************************************
1340  *
1341  * FUNCTION:    acpi_os_create_cache
1342  *
1343  * PARAMETERS:  name      - Ascii name for the cache
1344  *              size      - Size of each cached object
1345  *              depth     - Maximum depth of the cache (in objects) <ignored>
1346  *              cache     - Where the new cache object is returned
1347  *
1348  * RETURN:      status
1349  *
1350  * DESCRIPTION: Create a cache object
1351  *
1352  ******************************************************************************/
1353
1354 acpi_status
1355 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1356 {
1357         *cache = kmem_cache_create(name, size, 0, 0, NULL);
1358         if (*cache == NULL)
1359                 return AE_ERROR;
1360         else
1361                 return AE_OK;
1362 }
1363
1364 /*******************************************************************************
1365  *
1366  * FUNCTION:    acpi_os_purge_cache
1367  *
1368  * PARAMETERS:  Cache           - Handle to cache object
1369  *
1370  * RETURN:      Status
1371  *
1372  * DESCRIPTION: Free all objects within the requested cache.
1373  *
1374  ******************************************************************************/
1375
1376 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1377 {
1378         kmem_cache_shrink(cache);
1379         return (AE_OK);
1380 }
1381
1382 /*******************************************************************************
1383  *
1384  * FUNCTION:    acpi_os_delete_cache
1385  *
1386  * PARAMETERS:  Cache           - Handle to cache object
1387  *
1388  * RETURN:      Status
1389  *
1390  * DESCRIPTION: Free all objects within the requested cache and delete the
1391  *              cache object.
1392  *
1393  ******************************************************************************/
1394
1395 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1396 {
1397         kmem_cache_destroy(cache);
1398         return (AE_OK);
1399 }
1400
1401 /*******************************************************************************
1402  *
1403  * FUNCTION:    acpi_os_release_object
1404  *
1405  * PARAMETERS:  Cache       - Handle to cache object
1406  *              Object      - The object to be released
1407  *
1408  * RETURN:      None
1409  *
1410  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1411  *              the object is deleted.
1412  *
1413  ******************************************************************************/
1414
1415 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1416 {
1417         kmem_cache_free(cache, object);
1418         return (AE_OK);
1419 }
1420
1421 /******************************************************************************
1422  *
1423  * FUNCTION:    acpi_os_validate_interface
1424  *
1425  * PARAMETERS:  interface           - Requested interface to be validated
1426  *
1427  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1428  *
1429  * DESCRIPTION: Match an interface string to the interfaces supported by the
1430  *              host. Strings originate from an AML call to the _OSI method.
1431  *
1432  *****************************************************************************/
1433
1434 acpi_status
1435 acpi_os_validate_interface (char *interface)
1436 {
1437         if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1438                 return AE_OK;
1439         if (!strcmp("Linux", interface)) {
1440
1441                 printk(KERN_NOTICE PREFIX
1442                         "BIOS _OSI(Linux) query %s%s\n",
1443                         osi_linux.enable ? "honored" : "ignored",
1444                         osi_linux.cmdline ? " via cmdline" :
1445                         osi_linux.dmi ? " via DMI" : "");
1446
1447                 if (osi_linux.enable)
1448                         return AE_OK;
1449         }
1450         return AE_SUPPORT;
1451 }
1452
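/*
 * The two helpers below must be called with acpi_res_lock held.  They
 * maintain a refcounted list of SystemIO/SystemMemory regions declared by
 * AML operation regions, which acpi_check_resource_conflict() consults.
 */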
1453 static inline int acpi_res_list_add(struct acpi_res_list *res)
1454 {
1455         struct acpi_res_list *res_list_elem;
1456
1457         list_for_each_entry(res_list_elem, &resource_list_head,
1458                             resource_list) {
1459
1460                 if (res->resource_type == res_list_elem->resource_type &&
1461                     res->start == res_list_elem->start &&
1462                     res->end == res_list_elem->end) {
1463
1464                         /*
1465                          * The Region(addr,len) already exist in the list,
1466                          * just increase the count
1467                          */
1468
1469                         res_list_elem->count++;
1470                         return 0;
1471                 }
1472         }
1473
1474         res->count = 1;
1475         list_add(&res->resource_list, &resource_list_head);
1476         return 1;
1477 }
1478
1479 static inline void acpi_res_list_del(struct acpi_res_list *res)
1480 {
1481         struct acpi_res_list *res_list_elem;
1482
1483         list_for_each_entry(res_list_elem, &resource_list_head,
1484                             resource_list) {
1485
1486                 if (res->resource_type == res_list_elem->resource_type &&
1487                     res->start == res_list_elem->start &&
1488                     res->end == res_list_elem->end) {
1489
1490                         /*
1491                          * If the res count is decreased to 0,
1492                          * remove and free it
1493                          */
1494
1495                         if (--res_list_elem->count == 0) {
1496                                 list_del(&res_list_elem->resource_list);
1497                                 kfree(res_list_elem);
1498                         }
1499                         return;
1500                 }
1501         }
1502 }
1503
1504 acpi_status
1505 acpi_os_invalidate_address(
1506     u8                   space_id,
1507     acpi_physical_address   address,
1508     acpi_size               length)
1509 {
1510         struct acpi_res_list res;
1511
1512         switch (space_id) {
1513         case ACPI_ADR_SPACE_SYSTEM_IO:
1514         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1515                 /* Only interference checks against SystemIO and SystemMemory
1516                    are needed */
1517                 res.start = address;
1518                 res.end = address + length - 1;
1519                 res.resource_type = space_id;
1520                 spin_lock(&acpi_res_lock);
1521                 acpi_res_list_del(&res);
1522                 spin_unlock(&acpi_res_lock);
1523                 break;
1524         case ACPI_ADR_SPACE_PCI_CONFIG:
1525         case ACPI_ADR_SPACE_EC:
1526         case ACPI_ADR_SPACE_SMBUS:
1527         case ACPI_ADR_SPACE_CMOS:
1528         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1529         case ACPI_ADR_SPACE_DATA_TABLE:
1530         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1531                 break;
1532         }
1533         return AE_OK;
1534 }
1535
1536 /******************************************************************************
1537  *
1538  * FUNCTION:    acpi_os_validate_address
1539  *
1540  * PARAMETERS:  space_id             - ACPI space ID
1541  *              address             - Physical address
1542  *              length              - Address length
1543  *
1544  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1545  *              should return AE_AML_ILLEGAL_ADDRESS.
1546  *
1547  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1548  *              the addresses accessed by AML operation regions.
1549  *
1550  *****************************************************************************/
1551
1552 acpi_status
1553 acpi_os_validate_address (
1554     u8                   space_id,
1555     acpi_physical_address   address,
1556     acpi_size               length,
1557     char *name)
1558 {
1559         struct acpi_res_list *res;
1560         int added;
1561         if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1562                 return AE_OK;
1563
1564         switch (space_id) {
1565         case ACPI_ADR_SPACE_SYSTEM_IO:
1566         case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1567                 /* Only interference checks against SystemIO and SystemMemory
1568                    are needed */
1569                 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1570                 if (!res)
1571                         return AE_OK;
1572                 /* ACPI names are fixed at 4 bytes; use strlcpy to be safe */
1573                 strlcpy(res->name, name, 5);
1574                 res->start = address;
1575                 res->end = address + length - 1;
1576                 res->resource_type = space_id;
1577                 spin_lock(&acpi_res_lock);
1578                 added = acpi_res_list_add(res);
1579                 spin_unlock(&acpi_res_lock);
1580                 pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
1581                          "name: %s\n", added ? "Added" : "Already exists",
1582                          (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1583                          ? "SystemIO" : "System Memory",
1584                          (unsigned long long)res->start,
1585                          (unsigned long long)res->end,
1586                          res->name);
1587                 if (!added)
1588                         kfree(res);
1589                 break;
1590         case ACPI_ADR_SPACE_PCI_CONFIG:
1591         case ACPI_ADR_SPACE_EC:
1592         case ACPI_ADR_SPACE_SMBUS:
1593         case ACPI_ADR_SPACE_CMOS:
1594         case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1595         case ACPI_ADR_SPACE_DATA_TABLE:
1596         case ACPI_ADR_SPACE_FIXED_HARDWARE:
1597                 break;
1598         }
1599         return AE_OK;
1600 }
1601
1602 #endif