drivers/iommu/dmar.c (firefly-linux-kernel-4.4.55.git, commit ee4cb1906e45d8b221c3a6807b84629abe3c4ac6)
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
43
44 #include "irq_remapping.h"
45
46 /* No locks are needed, as the DMA remapping hardware unit
47  * list is constructed at boot time and hotplug of
48  * these units is not supported by the architecture.
49  */
50 LIST_HEAD(dmar_drhd_units);
51
52 struct acpi_table_header * __initdata dmar_tbl;
53 static acpi_size dmar_tbl_size;
54
55 static int alloc_iommu(struct dmar_drhd_unit *drhd);
56
57 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
58 {
59         /*
60          * Add INCLUDE_ALL units at the tail, so that a scan of the
61          * list will find them at the very end.
62          */
63         if (drhd->include_all)
64                 list_add_tail(&drhd->list, &dmar_drhd_units);
65         else
66                 list_add(&drhd->list, &dmar_drhd_units);
67 }
68
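/*
 * Resolve a single ACPI device-scope entry to a pci_dev.  Walk the
 * (device, function) path entries from the scope's start bus down
 * through any bridges.  Devices that the BIOS lists but that do not
 * exist are tolerated (*dev is left NULL and 0 is returned); a scope
 * whose type (endpoint vs. bridge) does not match the device that was
 * found is rejected with -EINVAL.  On success *dev holds a referenced
 * pci_dev.
 */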
69 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
70                                            struct pci_dev **dev, u16 segment)
71 {
72         struct pci_bus *bus;
73         struct pci_dev *pdev = NULL;
74         struct acpi_dmar_pci_path *path;
75         int count;
76
77         bus = pci_find_bus(segment, scope->bus);
78         path = (struct acpi_dmar_pci_path *)(scope + 1);
79         count = (scope->length - sizeof(struct acpi_dmar_device_scope))
80                 / sizeof(struct acpi_dmar_pci_path);
81
82         while (count) {
83                 if (pdev)
84                         pci_dev_put(pdev);
85                 /*
86                  * Some BIOSes list non-existent devices in the DMAR table;
87                  * just ignore them.
88                  */
89                 if (!bus) {
90                         pr_warn("Device scope bus [%d] not found\n", scope->bus);
91                         break;
92                 }
93                 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
94                 if (!pdev) {
95                         /* warning will be printed below */
96                         break;
97                 }
98                 path++;
99                 count--;
100                 bus = pdev->subordinate;
101         }
102         if (!pdev) {
103                 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
104                         segment, scope->bus, path->device, path->function);
105                 return 0;
106         }
107         if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
108                         pdev->subordinate) || (scope->entry_type ==
109                         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
110                 pr_warn("Device scope type does not match for %s\n",
111                         pci_name(pdev));
112                 pci_dev_put(pdev);
113                 return -EINVAL;
114         }
115         *dev = pdev;
116         return 0;
117 }
118
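/*
 * Parse all device-scope entries between @start and @end.  The first
 * pass counts the PCI endpoint/bridge entries and allocates the
 * *devices array; the second pass resolves each entry to a pci_dev.
 * IOAPIC and HPET scopes are skipped here; any other type triggers a
 * warning.  On failure the partially filled array is freed again.
 */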
119 int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
120                                 struct pci_dev ***devices, u16 segment)
121 {
122         struct acpi_dmar_device_scope *scope;
123         void *tmp = start;
124         int index;
125         int ret;
126
127         *cnt = 0;
128         while (start < end) {
129                 scope = start;
130                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
131                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
132                         (*cnt)++;
133                 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
134                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
135                         pr_warn("Unsupported device scope\n");
136                 }
137                 start += scope->length;
138         }
139         if (*cnt == 0)
140                 return 0;
141
142         *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
143         if (!*devices)
144                 return -ENOMEM;
145
146         start = tmp;
147         index = 0;
148         while (start < end) {
149                 scope = start;
150                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
151                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
152                         ret = dmar_parse_one_dev_scope(scope,
153                                 &(*devices)[index], segment);
154                         if (ret) {
155                                 dmar_free_dev_scope(devices, cnt);
156                                 return ret;
157                         }
158                         index++;
159                 }
160                 start += scope->length;
161         }
162
163         return 0;
164 }
165
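/*
 * Drop the device references taken by dmar_parse_dev_scope() and free
 * the device array itself.
 */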
166 void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
167 {
168         if (*devices && *cnt) {
169                 while (--*cnt >= 0)
170                         pci_dev_put((*devices)[*cnt]);
171                 kfree(*devices);
172                 *devices = NULL;
173                 *cnt = 0;
174         }
175 }
176
177 /**
178  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
179  * structure, which uniquely represents one DMA remapping hardware unit
180  * present in the platform
181  */
182 static int __init
183 dmar_parse_one_drhd(struct acpi_dmar_header *header)
184 {
185         struct acpi_dmar_hardware_unit *drhd;
186         struct dmar_drhd_unit *dmaru;
187         int ret = 0;
188
189         drhd = (struct acpi_dmar_hardware_unit *)header;
190         dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
191         if (!dmaru)
192                 return -ENOMEM;
193
194         dmaru->hdr = header;
195         dmaru->reg_base_addr = drhd->address;
196         dmaru->segment = drhd->segment;
197         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
198
199         ret = alloc_iommu(dmaru);
200         if (ret) {
201                 kfree(dmaru);
202                 return ret;
203         }
204         dmar_register_drhd_unit(dmaru);
205         return 0;
206 }
207
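/*
 * Resolve the device scope of one DRHD unit.  INCLUDE_ALL units have
 * no explicit scope and need no work.  On failure the unit is removed
 * from dmar_drhd_units and freed.
 */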
208 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
209 {
210         struct acpi_dmar_hardware_unit *drhd;
211         int ret = 0;
212
213         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
214
215         if (dmaru->include_all)
216                 return 0;
217
218         ret = dmar_parse_dev_scope((void *)(drhd + 1),
219                                 ((void *)drhd) + drhd->header.length,
220                                 &dmaru->devices_cnt, &dmaru->devices,
221                                 drhd->segment);
222         if (ret) {
223                 list_del(&dmaru->list);
224                 kfree(dmaru);
225         }
226         return ret;
227 }
228
229 #ifdef CONFIG_ACPI_NUMA
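/*
 * An RHSA (Remapping Hardware Static Affinity) structure associates a
 * DRHD unit with a NUMA proximity domain.  Match it to its DRHD by
 * register base address and record the node on the IOMMU; warn and
 * taint if the firmware references a DRHD that does not exist.
 */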
230 static int __init
231 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
232 {
233         struct acpi_dmar_rhsa *rhsa;
234         struct dmar_drhd_unit *drhd;
235
236         rhsa = (struct acpi_dmar_rhsa *)header;
237         for_each_drhd_unit(drhd) {
238                 if (drhd->reg_base_addr == rhsa->base_address) {
239                         int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
240
241                         if (!node_online(node))
242                                 node = -1;
243                         drhd->iommu->node = node;
244                         return 0;
245                 }
246         }
247         WARN_TAINT(
248                 1, TAINT_FIRMWARE_WORKAROUND,
249                 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
250                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
251                 rhsa->base_address,
252                 dmi_get_system_info(DMI_BIOS_VENDOR),
253                 dmi_get_system_info(DMI_BIOS_VERSION),
254                 dmi_get_system_info(DMI_PRODUCT_VERSION));
255
256         return 0;
257 }
258 #endif
259
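/* Log a one-line summary for each type of DMAR structure we parse. */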
260 static void __init
261 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
262 {
263         struct acpi_dmar_hardware_unit *drhd;
264         struct acpi_dmar_reserved_memory *rmrr;
265         struct acpi_dmar_atsr *atsr;
266         struct acpi_dmar_rhsa *rhsa;
267
268         switch (header->type) {
269         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
270                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
271                                     header);
272                 pr_info("DRHD base: %#016Lx flags: %#x\n",
273                         (unsigned long long)drhd->address, drhd->flags);
274                 break;
275         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
276                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
277                                     header);
278                 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
279                         (unsigned long long)rmrr->base_address,
280                         (unsigned long long)rmrr->end_address);
281                 break;
282         case ACPI_DMAR_TYPE_ATSR:
283                 atsr = container_of(header, struct acpi_dmar_atsr, header);
284                 pr_info("ATSR flags: %#x\n", atsr->flags);
285                 break;
286         case ACPI_DMAR_HARDWARE_AFFINITY:
287                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
288                 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
289                        (unsigned long long)rhsa->base_address,
290                        rhsa->proximity_domain);
291                 break;
292         }
293 }
294
295 /**
296  * dmar_table_detect - checks to see if the platform supports DMAR devices
297  */
298 static int __init dmar_table_detect(void)
299 {
300         acpi_status status = AE_OK;
301
302         /* If we can find the DMAR table, then there are DMAR devices. */
303         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
304                                 (struct acpi_table_header **)&dmar_tbl,
305                                 &dmar_tbl_size);
306
307         if (ACPI_SUCCESS(status) && !dmar_tbl) {
308                 pr_warn("Unable to map DMAR\n");
309                 status = AE_NOT_FOUND;
310         }
311
312         return (ACPI_SUCCESS(status) ? 1 : 0);
313 }
314
315 /**
316  * parse_dmar_table - parses the DMA reporting table
317  */
318 static int __init
319 parse_dmar_table(void)
320 {
321         struct acpi_table_dmar *dmar;
322         struct acpi_dmar_header *entry_header;
323         int ret = 0;
324         int drhd_count = 0;
325
326         /*
327          * Do the detection again; the earlier dmar_tbl mapping may have
328          * been made through the early fixed map, which is gone by now.
329          */
330         dmar_table_detect();
331
332         /*
333          * ACPI tables may not be DMA protected by tboot, so use the DMAR
334          * copy that SINIT saved in SinitMleData in the TXT heap (DMA protected).
335          */
336         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
337
338         dmar = (struct acpi_table_dmar *)dmar_tbl;
339         if (!dmar)
340                 return -ENODEV;
341
342         if (dmar->width < PAGE_SHIFT - 1) {
343                 pr_warn("Invalid DMAR haw\n");
344                 return -EINVAL;
345         }
346
347         pr_info("Host address width %d\n", dmar->width + 1);
348
349         entry_header = (struct acpi_dmar_header *)(dmar + 1);
350         while (((unsigned long)entry_header) <
351                         (((unsigned long)dmar) + dmar_tbl->length)) {
352                 /* Avoid looping forever on bad ACPI tables */
353                 if (entry_header->length == 0) {
354                         pr_warn("Invalid 0-length structure\n");
355                         ret = -EINVAL;
356                         break;
357                 }
358
359                 dmar_table_print_dmar_entry(entry_header);
360
361                 switch (entry_header->type) {
362                 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
363                         drhd_count++;
364                         ret = dmar_parse_one_drhd(entry_header);
365                         break;
366                 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
367                         ret = dmar_parse_one_rmrr(entry_header);
368                         break;
369                 case ACPI_DMAR_TYPE_ATSR:
370                         ret = dmar_parse_one_atsr(entry_header);
371                         break;
372                 case ACPI_DMAR_HARDWARE_AFFINITY:
373 #ifdef CONFIG_ACPI_NUMA
374                         ret = dmar_parse_one_rhsa(entry_header);
375 #endif
376                         break;
377                 default:
378                         pr_warn("Unknown DMAR structure type %d\n",
379                                 entry_header->type);
380                         ret = 0; /* for forward compatibility */
381                         break;
382                 }
383                 if (ret)
384                         break;
385
386                 entry_header = ((void *)entry_header + entry_header->length);
387         }
388         if (drhd_count == 0)
389                 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
390         return ret;
391 }
392
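/*
 * Return 1 if @dev, or any PCI bridge upstream of it, appears in the
 * devices[] array of a device scope; 0 otherwise.
 */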
393 static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
394                           struct pci_dev *dev)
395 {
396         int index;
397
398         while (dev) {
399                 for (index = 0; index < cnt; index++)
400                         if (dev == devices[index])
401                                 return 1;
402
403                 /* Check our parent */
404                 dev = dev->bus->self;
405         }
406
407         return 0;
408 }
409
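/*
 * Find the DRHD unit responsible for @dev: either a unit whose device
 * scope contains the device (or one of its parent bridges), or the
 * INCLUDE_ALL unit for the device's PCI segment.  For SR-IOV virtual
 * functions the lookup is done on the physical function.
 */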
410 struct dmar_drhd_unit *
411 dmar_find_matched_drhd_unit(struct pci_dev *dev)
412 {
413         struct dmar_drhd_unit *dmaru = NULL;
414         struct acpi_dmar_hardware_unit *drhd;
415
416         dev = pci_physfn(dev);
417
418         for_each_drhd_unit(dmaru) {
419                 drhd = container_of(dmaru->hdr,
420                                     struct acpi_dmar_hardware_unit,
421                                     header);
422
423                 if (dmaru->include_all &&
424                     drhd->segment == pci_domain_nr(dev->bus))
425                         return dmaru;
426
427                 if (dmar_pci_device_match(dmaru->devices,
428                                           dmaru->devices_cnt, dev))
429                         return dmaru;
430         }
431
432         return NULL;
433 }
434
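/*
 * Resolve the device scopes of all DRHD, RMRR and ATSR structures.
 * Runs only once; the outcome is cached in a static and returned on
 * subsequent calls.
 */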
435 int __init dmar_dev_scope_init(void)
436 {
437         static int dmar_dev_scope_initialized;
438         struct dmar_drhd_unit *drhd, *drhd_n;
439         int ret = -ENODEV;
440
441         if (dmar_dev_scope_initialized)
442                 return dmar_dev_scope_initialized;
443
444         if (list_empty(&dmar_drhd_units))
445                 goto fail;
446
447         list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
448                 ret = dmar_parse_dev(drhd);
449                 if (ret)
450                         goto fail;
451         }
452
453         ret = dmar_parse_rmrr_atsr_dev();
454         if (ret)
455                 goto fail;
456
457         dmar_dev_scope_initialized = 1;
458         return 0;
459
460 fail:
461         dmar_dev_scope_initialized = ret;
462         return ret;
463 }
464
465
466 int __init dmar_table_init(void)
467 {
468         static int dmar_table_initialized;
469         int ret;
470
471         if (dmar_table_initialized)
472                 return 0;
473
474         dmar_table_initialized = 1;
475
476         ret = parse_dmar_table();
477         if (ret) {
478                 if (ret != -ENODEV)
479                         pr_info("Parsing DMAR table failed.\n");
480                 return ret;
481         }
482
483         if (list_empty(&dmar_drhd_units)) {
484                 pr_info("No DMAR devices found\n");
485                 return -ENODEV;
486         }
487
488         return 0;
489 }
490
491 static void warn_invalid_dmar(u64 addr, const char *message)
492 {
493         WARN_TAINT_ONCE(
494                 1, TAINT_FIRMWARE_WORKAROUND,
495                 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
496                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
497                 addr, message,
498                 dmi_get_system_info(DMI_BIOS_VENDOR),
499                 dmi_get_system_info(DMI_BIOS_VERSION),
500                 dmi_get_system_info(DMI_PRODUCT_VERSION));
501 }
502
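/*
 * Sanity-check every DRHD entry before trusting the table: a register
 * base address of zero, registers that cannot be mapped, or capability
 * registers that read back as all ones indicate broken firmware.
 * Returns 1 if all units look sane, 0 otherwise.
 */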
503 static int __init check_zero_address(void)
504 {
505         struct acpi_table_dmar *dmar;
506         struct acpi_dmar_header *entry_header;
507         struct acpi_dmar_hardware_unit *drhd;
508
509         dmar = (struct acpi_table_dmar *)dmar_tbl;
510         entry_header = (struct acpi_dmar_header *)(dmar + 1);
511
512         while (((unsigned long)entry_header) <
513                         (((unsigned long)dmar) + dmar_tbl->length)) {
514                 /* Avoid looping forever on bad ACPI tables */
515                 if (entry_header->length == 0) {
516                         pr_warn("Invalid 0-length structure\n");
517                         return 0;
518                 }
519
520                 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
521                         void __iomem *addr;
522                         u64 cap, ecap;
523
524                         drhd = (void *)entry_header;
525                         if (!drhd->address) {
526                                 warn_invalid_dmar(0, "");
527                                 goto failed;
528                         }
529
530                         addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
531                         if (!addr) {
532                                 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
533                                 goto failed;
534                         }
535                         cap = dmar_readq(addr + DMAR_CAP_REG);
536                         ecap = dmar_readq(addr + DMAR_ECAP_REG);
537                         early_iounmap(addr, VTD_PAGE_SIZE);
538                         if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
539                                 warn_invalid_dmar(drhd->address,
540                                                   " returns all ones");
541                                 goto failed;
542                         }
543                 }
544
545                 entry_header = ((void *)entry_header + entry_header->length);
546         }
547         return 1;
548
549 failed:
550         return 0;
551 }
552
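/*
 * Early detection of VT-d hardware: map the DMAR table, sanity-check
 * the DRHD register addresses, and, unless the IOMMU has been disabled,
 * mark it as detected and (on x86) hook intel_iommu_init() into the
 * IOMMU init path.  The early table mapping is dropped before
 * returning; returns 1 if a usable DMAR table was found, -ENODEV
 * otherwise.
 */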
553 int __init detect_intel_iommu(void)
554 {
555         int ret;
556
557         ret = dmar_table_detect();
558         if (ret)
559                 ret = check_zero_address();
560 
561         if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
562                 iommu_detected = 1;
563                 /* Make sure ACS will be enabled */
564                 pci_request_acs();
565         }
566 
567 #ifdef CONFIG_X86
568         if (ret)
569                 x86_init.iommu.iommu_init = intel_iommu_init;
570 #endif
571 
572         early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
573         dmar_tbl = NULL;
574
575         return ret ? 1 : -ENODEV;
576 }
577
578
579 static void unmap_iommu(struct intel_iommu *iommu)
580 {
581         iounmap(iommu->reg);
582         release_mem_region(iommu->reg_phys, iommu->reg_size);
583 }
584
585 /**
586  * map_iommu: map the iommu's registers
587  * @iommu: the iommu to map
588  * @phys_addr: the physical address of the base register
589  *
590  * Memory map the iommu's registers.  Start with a single page, and
591  * possibly expand if that turns out to be insufficient.
592  */
593 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
594 {
595         int map_size, err = 0;
596
597         iommu->reg_phys = phys_addr;
598         iommu->reg_size = VTD_PAGE_SIZE;
599
600         if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
601                 pr_err("IOMMU: can't reserve memory\n");
602                 err = -EBUSY;
603                 goto out;
604         }
605
606         iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
607         if (!iommu->reg) {
608                 pr_err("IOMMU: can't map the region\n");
609                 err = -ENOMEM;
610                 goto release;
611         }
612
613         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
614         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
615
616         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
617                 err = -EINVAL;
618                 warn_invalid_dmar(phys_addr, " returns all ones");
619                 goto unmap;
620         }
621
622         /* the registers might be more than one page */
623         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
624                          cap_max_fault_reg_offset(iommu->cap));
625         map_size = VTD_PAGE_ALIGN(map_size);
626         if (map_size > iommu->reg_size) {
627                 iounmap(iommu->reg);
628                 release_mem_region(iommu->reg_phys, iommu->reg_size);
629                 iommu->reg_size = map_size;
630                 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
631                                         iommu->name)) {
632                         pr_err("IOMMU: can't reserve memory\n");
633                         err = -EBUSY;
634                         goto out;
635                 }
636                 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
637                 if (!iommu->reg) {
638                         pr_err("IOMMU: can't map the region\n");
639                         err = -ENOMEM;
640                         goto release;
641                 }
642         }
643         err = 0;
644         goto out;
645
646 unmap:
647         iounmap(iommu->reg);
648 release:
649         release_mem_region(iommu->reg_phys, iommu->reg_size);
650 out:
651         return err;
652 }
653
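/*
 * Allocate and initialize the struct intel_iommu for one DRHD unit:
 * map its registers, read the capability registers, compute the
 * supported AGAW values, and mirror any translation/IR/QI enable bits
 * already set by firmware or a previous kernel into iommu->gcmd.
 */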
654 static int alloc_iommu(struct dmar_drhd_unit *drhd)
655 {
656         struct intel_iommu *iommu;
657         u32 ver, sts;
658         static int iommu_allocated = 0;
659         int agaw = 0;
660         int msagaw = 0;
661         int err;
662
663         if (!drhd->reg_base_addr) {
664                 warn_invalid_dmar(0, "");
665                 return -EINVAL;
666         }
667
668         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
669         if (!iommu)
670                 return -ENOMEM;
671
672         iommu->seq_id = iommu_allocated++;
673         sprintf(iommu->name, "dmar%d", iommu->seq_id);
674
675         err = map_iommu(iommu, drhd->reg_base_addr);
676         if (err) {
677                 pr_err("IOMMU: failed to map %s\n", iommu->name);
678                 goto error;
679         }
680
681         err = -EINVAL;
682         agaw = iommu_calculate_agaw(iommu);
683         if (agaw < 0) {
684                 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
685                         iommu->seq_id);
686                 goto err_unmap;
687         }
688         msagaw = iommu_calculate_max_sagaw(iommu);
689         if (msagaw < 0) {
690                 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
691                         iommu->seq_id);
692                 goto err_unmap;
693         }
694         iommu->agaw = agaw;
695         iommu->msagaw = msagaw;
696
697         iommu->node = -1;
698
699         ver = readl(iommu->reg + DMAR_VER_REG);
700         pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
701                 iommu->seq_id,
702                 (unsigned long long)drhd->reg_base_addr,
703                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
704                 (unsigned long long)iommu->cap,
705                 (unsigned long long)iommu->ecap);
706
707         /* Reflect status in gcmd */
708         sts = readl(iommu->reg + DMAR_GSTS_REG);
709         if (sts & DMA_GSTS_IRES)
710                 iommu->gcmd |= DMA_GCMD_IRE;
711         if (sts & DMA_GSTS_TES)
712                 iommu->gcmd |= DMA_GCMD_TE;
713         if (sts & DMA_GSTS_QIES)
714                 iommu->gcmd |= DMA_GCMD_QIE;
715
716         raw_spin_lock_init(&iommu->register_lock);
717
718         drhd->iommu = iommu;
719         return 0;
720
721  err_unmap:
722         unmap_iommu(iommu);
723  error:
724         kfree(iommu);
725         return err;
726 }
727
728 void free_iommu(struct intel_iommu *iommu)
729 {
730         if (!iommu)
731                 return;
732
733         free_dmar_iommu(iommu);
734
735         if (iommu->reg)
736                 unmap_iommu(iommu);
737
738         kfree(iommu);
739 }
740
741 /*
742  * Reclaim all the submitted descriptors which have completed their work.
743  */
744 static inline void reclaim_free_desc(struct q_inval *qi)
745 {
746         while (qi->desc_status[qi->free_tail] == QI_DONE ||
747                qi->desc_status[qi->free_tail] == QI_ABORT) {
748                 qi->desc_status[qi->free_tail] = QI_FREE;
749                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
750                 qi->free_cnt++;
751         }
752 }
753
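/*
 * Called while waiting for a queued wait descriptor to complete.
 * Check the fault status register for invalidation queue errors: on an
 * IQE the offending descriptor is overwritten with the wait descriptor
 * and -EINVAL is returned; on an ITE pending wait descriptors are
 * marked aborted, and -EAGAIN is returned if ours was among them so
 * the caller can resubmit; an ICE is simply cleared.
 */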
754 static int qi_check_fault(struct intel_iommu *iommu, int index)
755 {
756         u32 fault;
757         int head, tail;
758         struct q_inval *qi = iommu->qi;
759         int wait_index = (index + 1) % QI_LENGTH;
760
761         if (qi->desc_status[wait_index] == QI_ABORT)
762                 return -EAGAIN;
763
764         fault = readl(iommu->reg + DMAR_FSTS_REG);
765
766         /*
767          * If IQE happens, the head points to the descriptor associated
768          * with the error. No new descriptors are fetched until the IQE
769          * is cleared.
770          */
771         if (fault & DMA_FSTS_IQE) {
772                 head = readl(iommu->reg + DMAR_IQH_REG);
773                 if ((head >> DMAR_IQ_SHIFT) == index) {
774                         pr_err("VT-d detected invalid descriptor: "
775                                 "low=%llx, high=%llx\n",
776                                 (unsigned long long)qi->desc[index].low,
777                                 (unsigned long long)qi->desc[index].high);
778                         memcpy(&qi->desc[index], &qi->desc[wait_index],
779                                         sizeof(struct qi_desc));
780                         __iommu_flush_cache(iommu, &qi->desc[index],
781                                         sizeof(struct qi_desc));
782                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
783                         return -EINVAL;
784                 }
785         }
786
787         /*
788          * If ITE happens, all pending wait_desc commands are aborted.
789          * No new descriptors are fetched until the ITE is cleared.
790          */
791         if (fault & DMA_FSTS_ITE) {
792                 head = readl(iommu->reg + DMAR_IQH_REG);
793                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
794                 head |= 1;
795                 tail = readl(iommu->reg + DMAR_IQT_REG);
796                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
797
798                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
799
800                 do {
801                         if (qi->desc_status[head] == QI_IN_USE)
802                                 qi->desc_status[head] = QI_ABORT;
803                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
804                 } while (head != tail);
805
806                 if (qi->desc_status[wait_index] == QI_ABORT)
807                         return -EAGAIN;
808         }
809
810         if (fault & DMA_FSTS_ICE)
811                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
812
813         return 0;
814 }
815
816 /*
817  * Submit the queued invalidation descriptor to the remapping
818  * hardware unit and wait for its completion.
819  */
820 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
821 {
822         int rc;
823         struct q_inval *qi = iommu->qi;
824         struct qi_desc *hw, wait_desc;
825         int wait_index, index;
826         unsigned long flags;
827
828         if (!qi)
829                 return 0;
830
831         hw = qi->desc;
832
833 restart:
834         rc = 0;
835
836         raw_spin_lock_irqsave(&qi->q_lock, flags);
837         while (qi->free_cnt < 3) {
838                 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
839                 cpu_relax();
840                 raw_spin_lock_irqsave(&qi->q_lock, flags);
841         }
842
843         index = qi->free_head;
844         wait_index = (index + 1) % QI_LENGTH;
845
846         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
847
848         hw[index] = *desc;
849
850         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
851                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
852         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
853
854         hw[wait_index] = wait_desc;
855
856         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
857         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
858
859         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
860         qi->free_cnt -= 2;
861
862         /*
863          * update the HW tail register indicating the presence of
864          * new descriptors.
865          */
866         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
867
868         while (qi->desc_status[wait_index] != QI_DONE) {
869                 /*
870                  * We will leave the interrupts disabled, to prevent interrupt
871                  * context from queueing another cmd while a cmd is already
872                  * submitted and waiting for completion on this cpu. This is
873                  * to avoid a deadlock where the interrupt context can wait
874                  * indefinitely for free slots in the queue.
875                  */
876                 rc = qi_check_fault(iommu, index);
877                 if (rc)
878                         break;
879
880                 raw_spin_unlock(&qi->q_lock);
881                 cpu_relax();
882                 raw_spin_lock(&qi->q_lock);
883         }
884
885         qi->desc_status[index] = QI_DONE;
886
887         reclaim_free_desc(qi);
888         raw_spin_unlock_irqrestore(&qi->q_lock, flags);
889
890         if (rc == -EAGAIN)
891                 goto restart;
892
893         return rc;
894 }
895
896 /*
897  * Flush the global interrupt entry cache.
898  */
899 void qi_global_iec(struct intel_iommu *iommu)
900 {
901         struct qi_desc desc;
902
903         desc.low = QI_IEC_TYPE;
904         desc.high = 0;
905
906         /* should never fail */
907         qi_submit_sync(&desc, iommu);
908 }
909
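/*
 * Flush context-cache entries for the given domain/source-id at the
 * granularity selected by @type.
 */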
910 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
911                       u64 type)
912 {
913         struct qi_desc desc;
914
915         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
916                         | QI_CC_GRAN(type) | QI_CC_TYPE;
917         desc.high = 0;
918
919         qi_submit_sync(&desc, iommu);
920 }
921
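/*
 * Flush IOTLB entries for @did at the given address/size and
 * granularity, requesting read/write draining when the hardware
 * supports it.
 */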
922 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
923                     unsigned int size_order, u64 type)
924 {
925         u8 dw = 0, dr = 0;
926
927         struct qi_desc desc;
928         int ih = 0;
929
930         if (cap_write_drain(iommu->cap))
931                 dw = 1;
932
933         if (cap_read_drain(iommu->cap))
934                 dr = 1;
935
936         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
937                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
938         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
939                 | QI_IOTLB_AM(size_order);
940
941         qi_submit_sync(&desc, iommu);
942 }
943
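/*
 * Flush the device IOTLB (ATS translation cache) of the endpoint
 * identified by @sid.  @mask selects how many low address bits are
 * wildcarded for a multi-page invalidation.
 */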
944 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
945                         u64 addr, unsigned mask)
946 {
947         struct qi_desc desc;
948
949         if (mask) {
950                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
951                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
952                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
953         } else
954                 desc.high = QI_DEV_IOTLB_ADDR(addr);
955
956         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
957                 qdep = 0;
958
959         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
960                    QI_DIOTLB_TYPE;
961
962         qi_submit_sync(&desc, iommu);
963 }
964
965 /*
966  * Disable Queued Invalidation interface.
967  */
968 void dmar_disable_qi(struct intel_iommu *iommu)
969 {
970         unsigned long flags;
971         u32 sts;
972         cycles_t start_time = get_cycles();
973
974         if (!ecap_qis(iommu->ecap))
975                 return;
976
977         raw_spin_lock_irqsave(&iommu->register_lock, flags);
978
979         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
980         if (!(sts & DMA_GSTS_QIES))
981                 goto end;
982
983         /*
984          * Give the HW a chance to complete the pending invalidation requests.
985          */
986         while ((readl(iommu->reg + DMAR_IQT_REG) !=
987                 readl(iommu->reg + DMAR_IQH_REG)) &&
988                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
989                 cpu_relax();
990
991         iommu->gcmd &= ~DMA_GCMD_QIE;
992         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
993
994         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
995                       !(sts & DMA_GSTS_QIES), sts);
996 end:
997         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
998 }
999
1000 /*
1001  * Enable queued invalidation.
1002  */
1003 static void __dmar_enable_qi(struct intel_iommu *iommu)
1004 {
1005         u32 sts;
1006         unsigned long flags;
1007         struct q_inval *qi = iommu->qi;
1008
1009         qi->free_head = qi->free_tail = 0;
1010         qi->free_cnt = QI_LENGTH;
1011
1012         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1013
1014         /* write zero to the tail reg */
1015         writel(0, iommu->reg + DMAR_IQT_REG);
1016
1017         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1018
1019         iommu->gcmd |= DMA_GCMD_QIE;
1020         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1021
1022         /* Make sure hardware completes it */
1023         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1024
1025         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1026 }
1027
1028 /*
1029  * Enable Queued Invalidation interface. This is a must to support
1030  * interrupt-remapping. Also used by DMA-remapping, which replaces
1031  * register based IOTLB invalidation.
1032  */
1033 int dmar_enable_qi(struct intel_iommu *iommu)
1034 {
1035         struct q_inval *qi;
1036         struct page *desc_page;
1037
1038         if (!ecap_qis(iommu->ecap))
1039                 return -ENOENT;
1040
1041         /*
1042          * queued invalidation is already set up and enabled.
1043          */
1044         if (iommu->qi)
1045                 return 0;
1046
1047         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1048         if (!iommu->qi)
1049                 return -ENOMEM;
1050
1051         qi = iommu->qi;
1052
1053
1054         desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1055         if (!desc_page) {
1056                 kfree(qi);
1057                 iommu->qi = NULL;
1058                 return -ENOMEM;
1059         }
1060
1061         qi->desc = page_address(desc_page);
1062
1063         qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1064         if (!qi->desc_status) {
1065                 free_page((unsigned long) qi->desc);
1066                 kfree(qi);
1067                 iommu->qi = NULL;
1068                 return -ENOMEM;
1069         }
1070
1071         qi->free_head = qi->free_tail = 0;
1072         qi->free_cnt = QI_LENGTH;
1073
1074         raw_spin_lock_init(&qi->q_lock);
1075
1076         __dmar_enable_qi(iommu);
1077
1078         return 0;
1079 }
1080
1081 /* iommu interrupt handling. Most of it is MSI-like. */
1082
1083 enum faulttype {
1084         DMA_REMAP,
1085         INTR_REMAP,
1086         UNKNOWN,
1087 };
1088
1089 static const char *dma_remap_fault_reasons[] =
1090 {
1091         "Software",
1092         "Present bit in root entry is clear",
1093         "Present bit in context entry is clear",
1094         "Invalid context entry",
1095         "Access beyond MGAW",
1096         "PTE Write access is not set",
1097         "PTE Read access is not set",
1098         "Next page table ptr is invalid",
1099         "Root table address invalid",
1100         "Context table ptr is invalid",
1101         "non-zero reserved fields in RTP",
1102         "non-zero reserved fields in CTP",
1103         "non-zero reserved fields in PTE",
1104         "PCE for translation request specifies blocking",
1105 };
1106
1107 static const char *irq_remap_fault_reasons[] =
1108 {
1109         "Detected reserved fields in the decoded interrupt-remapped request",
1110         "Interrupt index exceeded the interrupt-remapping table size",
1111         "Present field in the IRTE entry is clear",
1112         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1113         "Detected reserved fields in the IRTE entry",
1114         "Blocked a compatibility format interrupt request",
1115         "Blocked an interrupt request due to source-id verification failure",
1116 };
1117
1118 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1119 {
1120         if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1121                                         ARRAY_SIZE(irq_remap_fault_reasons))) {
1122                 *fault_type = INTR_REMAP;
1123                 return irq_remap_fault_reasons[fault_reason - 0x20];
1124         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1125                 *fault_type = DMA_REMAP;
1126                 return dma_remap_fault_reasons[fault_reason];
1127         } else {
1128                 *fault_type = UNKNOWN;
1129                 return "Unknown";
1130         }
1131 }
1132
1133 void dmar_msi_unmask(struct irq_data *data)
1134 {
1135         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1136         unsigned long flag;
1137
1138         /* unmask it */
1139         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1140         writel(0, iommu->reg + DMAR_FECTL_REG);
1141         /* Read a reg to force flush the post write */
1142         readl(iommu->reg + DMAR_FECTL_REG);
1143         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1144 }
1145
1146 void dmar_msi_mask(struct irq_data *data)
1147 {
1148         unsigned long flag;
1149         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1150
1151         /* mask it */
1152         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1153         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1154         /* Read a reg to force flush the post write */
1155         readl(iommu->reg + DMAR_FECTL_REG);
1156         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1157 }
1158
1159 void dmar_msi_write(int irq, struct msi_msg *msg)
1160 {
1161         struct intel_iommu *iommu = irq_get_handler_data(irq);
1162         unsigned long flag;
1163
1164         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1165         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1166         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1167         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1168         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1169 }
1170
1171 void dmar_msi_read(int irq, struct msi_msg *msg)
1172 {
1173         struct intel_iommu *iommu = irq_get_handler_data(irq);
1174         unsigned long flag;
1175
1176         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1177         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1178         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1179         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1180         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1181 }
1182
1183 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1184                 u8 fault_reason, u16 source_id, unsigned long long addr)
1185 {
1186         const char *reason;
1187         int fault_type;
1188
1189         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1190
1191         if (fault_type == INTR_REMAP)
1192                 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1193                        "fault index %llx\n"
1194                         "INTR-REMAP:[fault reason %02d] %s\n",
1195                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1196                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1197                         fault_reason, reason);
1198         else
1199                 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1200                        "fault addr %llx\n"
1201                        "DMAR:[fault reason %02d] %s\n",
1202                        (type ? "DMA Read" : "DMA Write"),
1203                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1204                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1205         return 0;
1206 }
1207
1208 #define PRIMARY_FAULT_REG_LEN (16)
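/*
 * Fault interrupt handler: starting from the index reported in the
 * fault status register, walk the fault recording registers, log and
 * clear each recorded fault, then clear the overflow/pending bits.
 */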
1209 irqreturn_t dmar_fault(int irq, void *dev_id)
1210 {
1211         struct intel_iommu *iommu = dev_id;
1212         int reg, fault_index;
1213         u32 fault_status;
1214         unsigned long flag;
1215
1216         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1217         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1218         if (fault_status)
1219                 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1220
1221         /* TBD: ignore advanced fault log currently */
1222         if (!(fault_status & DMA_FSTS_PPF))
1223                 goto unlock_exit;
1224
1225         fault_index = dma_fsts_fault_record_index(fault_status);
1226         reg = cap_fault_reg_offset(iommu->cap);
1227         while (1) {
1228                 u8 fault_reason;
1229                 u16 source_id;
1230                 u64 guest_addr;
1231                 int type;
1232                 u32 data;
1233
1234                 /* highest 32 bits */
1235                 data = readl(iommu->reg + reg +
1236                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1237                 if (!(data & DMA_FRCD_F))
1238                         break;
1239
1240                 fault_reason = dma_frcd_fault_reason(data);
1241                 type = dma_frcd_type(data);
1242
1243                 data = readl(iommu->reg + reg +
1244                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1245                 source_id = dma_frcd_source_id(data);
1246
1247                 guest_addr = dmar_readq(iommu->reg + reg +
1248                                 fault_index * PRIMARY_FAULT_REG_LEN);
1249                 guest_addr = dma_frcd_page_addr(guest_addr);
1250                 /* clear the fault */
1251                 writel(DMA_FRCD_F, iommu->reg + reg +
1252                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1253
1254                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1255
1256                 dmar_fault_do_one(iommu, type, fault_reason,
1257                                 source_id, guest_addr);
1258
1259                 fault_index++;
1260                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1261                         fault_index = 0;
1262                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1263         }
1264
1265         writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1266
1267 unlock_exit:
1268         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1269         return IRQ_HANDLED;
1270 }
1271
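/*
 * Allocate an interrupt vector for the IOMMU's fault events, program
 * the MSI registers via the architecture hook, and install dmar_fault()
 * as the handler.  Does nothing if the interrupt is already set up.
 */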
1272 int dmar_set_interrupt(struct intel_iommu *iommu)
1273 {
1274         int irq, ret;
1275
1276         /*
1277          * Check if the fault interrupt is already initialized.
1278          */
1279         if (iommu->irq)
1280                 return 0;
1281
1282         irq = create_irq();
1283         if (!irq) {
1284                 pr_err("IOMMU: no free vectors\n");
1285                 return -EINVAL;
1286         }
1287
1288         irq_set_handler_data(irq, iommu);
1289         iommu->irq = irq;
1290
1291         ret = arch_setup_dmar_msi(irq);
1292         if (ret) {
1293                 irq_set_handler_data(irq, NULL);
1294                 iommu->irq = 0;
1295                 destroy_irq(irq);
1296                 return ret;
1297         }
1298
1299         ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1300         if (ret)
1301                 pr_err("IOMMU: can't request irq\n");
1302         return ret;
1303 }
1304
1305 int __init enable_drhd_fault_handling(void)
1306 {
1307         struct dmar_drhd_unit *drhd;
1308         struct intel_iommu *iommu;
1309
1310         /*
1311          * Enable fault control interrupt.
1312          */
1313         for_each_iommu(iommu, drhd) {
1314                 u32 fault_status;
1315                 int ret = dmar_set_interrupt(iommu);
1316
1317                 if (ret) {
1318                         pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1319                                (unsigned long long)drhd->reg_base_addr, ret);
1320                         return -1;
1321                 }
1322
1323                 /*
1324                  * Clear any previous faults.
1325                  */
1326                 dmar_fault(iommu->irq, iommu);
1327                 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1328                 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1329         }
1330
1331         return 0;
1332 }
1333
1334 /*
1335  * Re-enable Queued Invalidation interface.
1336  */
1337 int dmar_reenable_qi(struct intel_iommu *iommu)
1338 {
1339         if (!ecap_qis(iommu->ecap))
1340                 return -ENOENT;
1341
1342         if (!iommu->qi)
1343                 return -ENOENT;
1344
1345         /*
1346          * First disable queued invalidation.
1347          */
1348         dmar_disable_qi(iommu);
1349         /*
1350          * Then enable queued invalidation again. Since there is no pending
1351          * invalidation requests now, it's safe to re-enable queued
1352          * invalidation.
1353          */
1354         __dmar_enable_qi(iommu);
1355
1356         return 0;
1357 }
1358
1359 /*
1360  * Check interrupt remapping support in DMAR table description.
1361  */
1362 int __init dmar_ir_support(void)
1363 {
1364         struct acpi_table_dmar *dmar;
1365         dmar = (struct acpi_table_dmar *)dmar_tbl;
1366         if (!dmar)
1367                 return 0;
1368         return dmar->flags & 0x1;
1369 }
1370
1371 IOMMU_INIT_POST(detect_intel_iommu);