iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
[firefly-linux-kernel-4.4.55.git] drivers/iommu/dmar.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
43
44 #include "irq_remapping.h"
45
46 /* No locks are needed as the DMA remapping hardware unit
47  * list is constructed at boot time and hotplug of
48  * these units is not supported by the architecture.
49  */
50 LIST_HEAD(dmar_drhd_units);
51
52 struct acpi_table_header * __initdata dmar_tbl;
53 static acpi_size dmar_tbl_size;
54
55 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
56 {
57         /*
58          * add the INCLUDE_ALL unit at the tail, so that a scan of the list
59          * will find it at the very end.
60          */
61         if (drhd->include_all)
62                 list_add_tail(&drhd->list, &dmar_drhd_units);
63         else
64                 list_add(&drhd->list, &dmar_drhd_units);
65 }
66
67 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
68                                            struct pci_dev **dev, u16 segment)
69 {
70         struct pci_bus *bus;
71         struct pci_dev *pdev = NULL;
72         struct acpi_dmar_pci_path *path;
73         int count;
74
75         bus = pci_find_bus(segment, scope->bus);
76         path = (struct acpi_dmar_pci_path *)(scope + 1);
77         count = (scope->length - sizeof(struct acpi_dmar_device_scope))
78                 / sizeof(struct acpi_dmar_pci_path);
79
80         while (count) {
81                 if (pdev)
82                         pci_dev_put(pdev);
83                 /*
84                  * Some BIOSes list non-existent devices in the DMAR table;
85                  * just ignore them.
86                  */
87                 if (!bus) {
88                         pr_warn("Device scope bus [%d] not found\n", scope->bus);
89                         break;
90                 }
91                 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
92                 if (!pdev) {
93                         /* warning will be printed below */
94                         break;
95                 }
96                 path++;
97                 count--;
98                 bus = pdev->subordinate;
99         }
100         if (!pdev) {
101                 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
102                         segment, scope->bus, path->device, path->function);
103                 return 0;
104         }
105         if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
106                         pdev->subordinate) || (scope->entry_type ==
107                         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
108                 pr_warn("Device scope type does not match for %s\n",
109                         pci_name(pdev));
110                 pci_dev_put(pdev);
111                 return -EINVAL;
112         }
113         *dev = pdev;
114         return 0;
115 }
116
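/*
 * Walk the device scope entries between start and end twice: first to
 * count PCI endpoint/bridge entries, then to resolve each of them to a
 * pci_dev and store the references in a freshly allocated array.
 */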
117 int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
118                                 struct pci_dev ***devices, u16 segment)
119 {
120         struct acpi_dmar_device_scope *scope;
121         void *tmp = start;
122         int index;
123         int ret;
124
125         *cnt = 0;
126         while (start < end) {
127                 scope = start;
128                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
129                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
130                         (*cnt)++;
131                 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
132                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
133                         pr_warn("Unsupported device scope\n");
134                 }
135                 start += scope->length;
136         }
137         if (*cnt == 0)
138                 return 0;
139
140         *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
141         if (!*devices)
142                 return -ENOMEM;
143
144         start = tmp;
145         index = 0;
146         while (start < end) {
147                 scope = start;
148                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
149                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
150                         ret = dmar_parse_one_dev_scope(scope,
151                                 &(*devices)[index], segment);
152                         if (ret) {
153                                 dmar_free_dev_scope(devices, cnt);
154                                 return ret;
155                         }
156                         index++;
157                 }
158                 start += scope->length;
159         }
160
161         return 0;
162 }
163
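/* Drop the device references taken by dmar_parse_dev_scope() and free the array. */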
164 void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
165 {
166         if (*devices && *cnt) {
167                 while (--*cnt >= 0)
168                         pci_dev_put((*devices)[*cnt]);
169                 kfree(*devices);
170                 *devices = NULL;
171                 *cnt = 0;
172         }
173 }
174
175 /**
176  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
177  * structure which uniquely represents one DMA remapping hardware unit
178  * present in the platform
179  */
180 static int __init
181 dmar_parse_one_drhd(struct acpi_dmar_header *header)
182 {
183         struct acpi_dmar_hardware_unit *drhd;
184         struct dmar_drhd_unit *dmaru;
185         int ret = 0;
186
187         drhd = (struct acpi_dmar_hardware_unit *)header;
188         dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
189         if (!dmaru)
190                 return -ENOMEM;
191
192         dmaru->hdr = header;
193         dmaru->reg_base_addr = drhd->address;
194         dmaru->segment = drhd->segment;
195         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
196
197         ret = alloc_iommu(dmaru);
198         if (ret) {
199                 kfree(dmaru);
200                 return ret;
201         }
202         dmar_register_drhd_unit(dmaru);
203         return 0;
204 }
205
206 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
207 {
208         struct acpi_dmar_hardware_unit *drhd;
209         int ret = 0;
210
211         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
212
213         if (dmaru->include_all)
214                 return 0;
215
216         ret = dmar_parse_dev_scope((void *)(drhd + 1),
217                                 ((void *)drhd) + drhd->header.length,
218                                 &dmaru->devices_cnt, &dmaru->devices,
219                                 drhd->segment);
220         if (ret) {
221                 list_del(&dmaru->list);
222                 kfree(dmaru);
223         }
224         return ret;
225 }
226
227 #ifdef CONFIG_ACPI_NUMA
228 static int __init
229 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
230 {
231         struct acpi_dmar_rhsa *rhsa;
232         struct dmar_drhd_unit *drhd;
233
234         rhsa = (struct acpi_dmar_rhsa *)header;
235         for_each_drhd_unit(drhd) {
236                 if (drhd->reg_base_addr == rhsa->base_address) {
237                         int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
238
239                         if (!node_online(node))
240                                 node = -1;
241                         drhd->iommu->node = node;
242                         return 0;
243                 }
244         }
245         WARN_TAINT(
246                 1, TAINT_FIRMWARE_WORKAROUND,
247                 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
248                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
249                 rhsa->base_address,
250                 dmi_get_system_info(DMI_BIOS_VENDOR),
251                 dmi_get_system_info(DMI_BIOS_VERSION),
252                 dmi_get_system_info(DMI_PRODUCT_VERSION));
253
254         return 0;
255 }
256 #endif
257
258 static void __init
259 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
260 {
261         struct acpi_dmar_hardware_unit *drhd;
262         struct acpi_dmar_reserved_memory *rmrr;
263         struct acpi_dmar_atsr *atsr;
264         struct acpi_dmar_rhsa *rhsa;
265
266         switch (header->type) {
267         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
268                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
269                                     header);
270                 pr_info("DRHD base: %#016Lx flags: %#x\n",
271                         (unsigned long long)drhd->address, drhd->flags);
272                 break;
273         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
274                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
275                                     header);
276                 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
277                         (unsigned long long)rmrr->base_address,
278                         (unsigned long long)rmrr->end_address);
279                 break;
280         case ACPI_DMAR_TYPE_ATSR:
281                 atsr = container_of(header, struct acpi_dmar_atsr, header);
282                 pr_info("ATSR flags: %#x\n", atsr->flags);
283                 break;
284         case ACPI_DMAR_HARDWARE_AFFINITY:
285                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
286                 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
287                        (unsigned long long)rhsa->base_address,
288                        rhsa->proximity_domain);
289                 break;
290         }
291 }
292
293 /**
294  * dmar_table_detect - checks to see if the platform supports DMAR devices
295  */
296 static int __init dmar_table_detect(void)
297 {
298         acpi_status status = AE_OK;
299
300         /* If we can find the DMAR table, then there are DMAR devices */
301         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
302                                 (struct acpi_table_header **)&dmar_tbl,
303                                 &dmar_tbl_size);
304
305         if (ACPI_SUCCESS(status) && !dmar_tbl) {
306                 pr_warn("Unable to map DMAR\n");
307                 status = AE_NOT_FOUND;
308         }
309
310         return (ACPI_SUCCESS(status) ? 1 : 0);
311 }
312
313 /**
314  * parse_dmar_table - parses the DMA reporting table
315  */
316 static int __init
317 parse_dmar_table(void)
318 {
319         struct acpi_table_dmar *dmar;
320         struct acpi_dmar_header *entry_header;
321         int ret = 0;
322         int drhd_count = 0;
323
324         /*
325          * Do the detection again; the earlier dmar_tbl mapping may have
326          * been done with the fixed map.
327          */
328         dmar_table_detect();
329
330         /*
331          * ACPI tables may not be DMA protected by tboot, so use DMAR copy
332          * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
333          */
334         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
335
336         dmar = (struct acpi_table_dmar *)dmar_tbl;
337         if (!dmar)
338                 return -ENODEV;
339
340         if (dmar->width < PAGE_SHIFT - 1) {
341                 pr_warn("Invalid DMAR haw\n");
342                 return -EINVAL;
343         }
344
345         pr_info("Host address width %d\n", dmar->width + 1);
346
347         entry_header = (struct acpi_dmar_header *)(dmar + 1);
348         while (((unsigned long)entry_header) <
349                         (((unsigned long)dmar) + dmar_tbl->length)) {
350                 /* Avoid looping forever on bad ACPI tables */
351                 if (entry_header->length == 0) {
352                         pr_warn("Invalid 0-length structure\n");
353                         ret = -EINVAL;
354                         break;
355                 }
356
357                 dmar_table_print_dmar_entry(entry_header);
358
359                 switch (entry_header->type) {
360                 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
361                         drhd_count++;
362                         ret = dmar_parse_one_drhd(entry_header);
363                         break;
364                 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
365                         ret = dmar_parse_one_rmrr(entry_header);
366                         break;
367                 case ACPI_DMAR_TYPE_ATSR:
368                         ret = dmar_parse_one_atsr(entry_header);
369                         break;
370                 case ACPI_DMAR_HARDWARE_AFFINITY:
371 #ifdef CONFIG_ACPI_NUMA
372                         ret = dmar_parse_one_rhsa(entry_header);
373 #endif
374                         break;
375                 default:
376                         pr_warn("Unknown DMAR structure type %d\n",
377                                 entry_header->type);
378                         ret = 0; /* for forward compatibility */
379                         break;
380                 }
381                 if (ret)
382                         break;
383
384                 entry_header = ((void *)entry_header + entry_header->length);
385         }
386         if (drhd_count == 0)
387                 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
388         return ret;
389 }
390
391 static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
392                           struct pci_dev *dev)
393 {
394         int index;
395
396         while (dev) {
397                 for (index = 0; index < cnt; index++)
398                         if (dev == devices[index])
399                                 return 1;
400
401                 /* Check our parent */
402                 dev = dev->bus->self;
403         }
404
405         return 0;
406 }
407
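/*
 * Find the DRHD unit whose device scope (or INCLUDE_ALL flag) covers the
 * given PCI device. Virtual functions are matched via their physical function.
 */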
408 struct dmar_drhd_unit *
409 dmar_find_matched_drhd_unit(struct pci_dev *dev)
410 {
411         struct dmar_drhd_unit *dmaru = NULL;
412         struct acpi_dmar_hardware_unit *drhd;
413
414         dev = pci_physfn(dev);
415
416         for_each_drhd_unit(dmaru) {
417                 drhd = container_of(dmaru->hdr,
418                                     struct acpi_dmar_hardware_unit,
419                                     header);
420
421                 if (dmaru->include_all &&
422                     drhd->segment == pci_domain_nr(dev->bus))
423                         return dmaru;
424
425                 if (dmar_pci_device_match(dmaru->devices,
426                                           dmaru->devices_cnt, dev))
427                         return dmaru;
428         }
429
430         return NULL;
431 }
432
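/*
 * Resolve the device scopes of all DRHD, RMRR and ATSR entries to PCI
 * devices. Runs only once; later calls return the cached result.
 */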
433 int __init dmar_dev_scope_init(void)
434 {
435         static int dmar_dev_scope_initialized;
436         struct dmar_drhd_unit *drhd, *drhd_n;
437         int ret = -ENODEV;
438
439         if (dmar_dev_scope_initialized)
440                 return dmar_dev_scope_initialized;
441
442         if (list_empty(&dmar_drhd_units))
443                 goto fail;
444
445         list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
446                 ret = dmar_parse_dev(drhd);
447                 if (ret)
448                         goto fail;
449         }
450
451         ret = dmar_parse_rmrr_atsr_dev();
452         if (ret)
453                 goto fail;
454
455         dmar_dev_scope_initialized = 1;
456         return 0;
457
458 fail:
459         dmar_dev_scope_initialized = ret;
460         return ret;
461 }
462
463
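/* Parse the DMAR table once and report whether any remapping units were found. */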
464 int __init dmar_table_init(void)
465 {
466         static int dmar_table_initialized;
467         int ret;
468
469         if (dmar_table_initialized)
470                 return 0;
471
472         dmar_table_initialized = 1;
473
474         ret = parse_dmar_table();
475         if (ret) {
476                 if (ret != -ENODEV)
477                         pr_info("Parsing DMAR table failed\n");
478                 return ret;
479         }
480
481         if (list_empty(&dmar_drhd_units)) {
482                 pr_info("No DMAR devices found\n");
483                 return -ENODEV;
484         }
485
486         return 0;
487 }
488
489 static void warn_invalid_dmar(u64 addr, const char *message)
490 {
491         WARN_TAINT_ONCE(
492                 1, TAINT_FIRMWARE_WORKAROUND,
493                 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
494                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
495                 addr, message,
496                 dmi_get_system_info(DMI_BIOS_VENDOR),
497                 dmi_get_system_info(DMI_BIOS_VERSION),
498                 dmi_get_system_info(DMI_PRODUCT_VERSION));
499 }
500
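/*
 * Sanity check the DRHD entries: a zero register base address, or capability
 * registers that read back as all ones, indicate broken firmware. Returns 1
 * if every unit looks usable, 0 otherwise.
 */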
501 static int __init check_zero_address(void)
502 {
503         struct acpi_table_dmar *dmar;
504         struct acpi_dmar_header *entry_header;
505         struct acpi_dmar_hardware_unit *drhd;
506
507         dmar = (struct acpi_table_dmar *)dmar_tbl;
508         entry_header = (struct acpi_dmar_header *)(dmar + 1);
509
510         while (((unsigned long)entry_header) <
511                         (((unsigned long)dmar) + dmar_tbl->length)) {
512                 /* Avoid looping forever on bad ACPI tables */
513                 if (entry_header->length == 0) {
514                         pr_warn("Invalid 0-length structure\n");
515                         return 0;
516                 }
517
518                 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
519                         void __iomem *addr;
520                         u64 cap, ecap;
521
522                         drhd = (void *)entry_header;
523                         if (!drhd->address) {
524                                 warn_invalid_dmar(0, "");
525                                 goto failed;
526                         }
527
528                         addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
529                         if (!addr) {
530                                 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
531                                 goto failed;
532                         }
533                         cap = dmar_readq(addr + DMAR_CAP_REG);
534                         ecap = dmar_readq(addr + DMAR_ECAP_REG);
535                         early_iounmap(addr, VTD_PAGE_SIZE);
536                         if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
537                                 warn_invalid_dmar(drhd->address,
538                                                   " returns all ones");
539                                 goto failed;
540                         }
541                 }
542
543                 entry_header = ((void *)entry_header + entry_header->length);
544         }
545         return 1;
546
547 failed:
548         return 0;
549 }
550
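/*
 * Early detection entry point: look for the DMAR ACPI table, validate its
 * DRHD entries and, if VT-d looks usable, register intel_iommu_init() as the
 * x86 IOMMU init routine. Returns 1 on success, -ENODEV otherwise.
 */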
551 int __init detect_intel_iommu(void)
552 {
553         int ret;
554
555         ret = dmar_table_detect();
556         if (ret)
557                 ret = check_zero_address();
558         if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
559                 iommu_detected = 1;
560                 /* Make sure ACS will be enabled */
561                 pci_request_acs();
562         }
563 
564 #ifdef CONFIG_X86
565         if (ret)
566                 x86_init.iommu.iommu_init = intel_iommu_init;
567 #endif
570         early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
571         dmar_tbl = NULL;
572
573         return ret ? 1 : -ENODEV;
574 }
575
576
577 static void unmap_iommu(struct intel_iommu *iommu)
578 {
579         iounmap(iommu->reg);
580         release_mem_region(iommu->reg_phys, iommu->reg_size);
581 }
582
583 /**
584  * map_iommu: map the iommu's registers
585  * @iommu: the iommu to map
586  * @phys_addr: the physical address of the base register
587  *
588  * Memory map the iommu's registers.  Start with a single page, and
589  * possibly expand the mapping if that turns out to be insufficient.
590  */
591 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
592 {
593         int map_size, err=0;
594
595         iommu->reg_phys = phys_addr;
596         iommu->reg_size = VTD_PAGE_SIZE;
597
598         if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
599                 pr_err("IOMMU: can't reserve memory\n");
600                 err = -EBUSY;
601                 goto out;
602         }
603
604         iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
605         if (!iommu->reg) {
606                 pr_err("IOMMU: can't map the region\n");
607                 err = -ENOMEM;
608                 goto release;
609         }
610
611         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
612         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
613
614         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
615                 err = -EINVAL;
616                 warn_invalid_dmar(phys_addr, " returns all ones");
617                 goto unmap;
618         }
619
620         /* the registers might be more than one page */
621         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
622                          cap_max_fault_reg_offset(iommu->cap));
623         map_size = VTD_PAGE_ALIGN(map_size);
624         if (map_size > iommu->reg_size) {
625                 iounmap(iommu->reg);
626                 release_mem_region(iommu->reg_phys, iommu->reg_size);
627                 iommu->reg_size = map_size;
628                 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
629                                         iommu->name)) {
630                         pr_err("IOMMU: can't reserve memory\n");
631                         err = -EBUSY;
632                         goto out;
633                 }
634                 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
635                 if (!iommu->reg) {
636                         pr_err("IOMMU: can't map the region\n");
637                         err = -ENOMEM;
638                         goto release;
639                 }
640         }
641         err = 0;
642         goto out;
643
644 unmap:
645         iounmap(iommu->reg);
646 release:
647         release_mem_region(iommu->reg_phys, iommu->reg_size);
648 out:
649         return err;
650 }
651
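/*
 * Allocate and set up an intel_iommu for the given DRHD unit: map its
 * registers, read the capability registers, compute the supported address
 * widths and mirror the current hardware status into gcmd.
 */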
652 int alloc_iommu(struct dmar_drhd_unit *drhd)
653 {
654         struct intel_iommu *iommu;
655         u32 ver, sts;
656         static int iommu_allocated = 0;
657         int agaw = 0;
658         int msagaw = 0;
659         int err;
660
661         if (!drhd->reg_base_addr) {
662                 warn_invalid_dmar(0, "");
663                 return -EINVAL;
664         }
665
666         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
667         if (!iommu)
668                 return -ENOMEM;
669
670         iommu->seq_id = iommu_allocated++;
671         sprintf(iommu->name, "dmar%d", iommu->seq_id);
672
673         err = map_iommu(iommu, drhd->reg_base_addr);
674         if (err) {
675                 pr_err("IOMMU: failed to map %s\n", iommu->name);
676                 goto error;
677         }
678
679         err = -EINVAL;
680         agaw = iommu_calculate_agaw(iommu);
681         if (agaw < 0) {
682                 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
683                         iommu->seq_id);
684                 goto err_unmap;
685         }
686         msagaw = iommu_calculate_max_sagaw(iommu);
687         if (msagaw < 0) {
688                 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
689                         iommu->seq_id);
690                 goto err_unmap;
691         }
692         iommu->agaw = agaw;
693         iommu->msagaw = msagaw;
694
695         iommu->node = -1;
696
697         ver = readl(iommu->reg + DMAR_VER_REG);
698         pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
699                 iommu->seq_id,
700                 (unsigned long long)drhd->reg_base_addr,
701                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
702                 (unsigned long long)iommu->cap,
703                 (unsigned long long)iommu->ecap);
704
705         /* Reflect status in gcmd */
706         sts = readl(iommu->reg + DMAR_GSTS_REG);
707         if (sts & DMA_GSTS_IRES)
708                 iommu->gcmd |= DMA_GCMD_IRE;
709         if (sts & DMA_GSTS_TES)
710                 iommu->gcmd |= DMA_GCMD_TE;
711         if (sts & DMA_GSTS_QIES)
712                 iommu->gcmd |= DMA_GCMD_QIE;
713
714         raw_spin_lock_init(&iommu->register_lock);
715
716         drhd->iommu = iommu;
717         return 0;
718
719  err_unmap:
720         unmap_iommu(iommu);
721  error:
722         kfree(iommu);
723         return err;
724 }
725
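/* Undo alloc_iommu(): release DMA remapping state and unmap the registers. */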
726 void free_iommu(struct intel_iommu *iommu)
727 {
728         if (!iommu)
729                 return;
730
731         free_dmar_iommu(iommu);
732
733         if (iommu->reg)
734                 unmap_iommu(iommu);
735
736         kfree(iommu);
737 }
738
739 /*
740  * Reclaim all the submitted descriptors which have completed their work.
741  */
742 static inline void reclaim_free_desc(struct q_inval *qi)
743 {
744         while (qi->desc_status[qi->free_tail] == QI_DONE ||
745                qi->desc_status[qi->free_tail] == QI_ABORT) {
746                 qi->desc_status[qi->free_tail] = QI_FREE;
747                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
748                 qi->free_cnt++;
749         }
750 }
751
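/*
 * Check the fault status register for invalidation queue errors (IQE),
 * time-outs (ITE) and completion errors (ICE) affecting the descriptor at
 * @index. Returns -EAGAIN if the wait descriptor was aborted and the
 * invalidation should be resubmitted.
 */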
752 static int qi_check_fault(struct intel_iommu *iommu, int index)
753 {
754         u32 fault;
755         int head, tail;
756         struct q_inval *qi = iommu->qi;
757         int wait_index = (index + 1) % QI_LENGTH;
758
759         if (qi->desc_status[wait_index] == QI_ABORT)
760                 return -EAGAIN;
761
762         fault = readl(iommu->reg + DMAR_FSTS_REG);
763
764         /*
765          * If IQE happens, the head points to the descriptor associated
766          * with the error. No new descriptors are fetched until the IQE
767          * is cleared.
768          */
769         if (fault & DMA_FSTS_IQE) {
770                 head = readl(iommu->reg + DMAR_IQH_REG);
771                 if ((head >> DMAR_IQ_SHIFT) == index) {
772                         pr_err("VT-d detected invalid descriptor: "
773                                 "low=%llx, high=%llx\n",
774                                 (unsigned long long)qi->desc[index].low,
775                                 (unsigned long long)qi->desc[index].high);
776                         memcpy(&qi->desc[index], &qi->desc[wait_index],
777                                         sizeof(struct qi_desc));
778                         __iommu_flush_cache(iommu, &qi->desc[index],
779                                         sizeof(struct qi_desc));
780                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
781                         return -EINVAL;
782                 }
783         }
784
785         /*
786          * If ITE happens, all pending wait_desc commands are aborted.
787          * No new descriptors are fetched until the ITE is cleared.
788          */
789         if (fault & DMA_FSTS_ITE) {
790                 head = readl(iommu->reg + DMAR_IQH_REG);
791                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
792                 head |= 1;
793                 tail = readl(iommu->reg + DMAR_IQT_REG);
794                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
795
796                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
797
798                 do {
799                         if (qi->desc_status[head] == QI_IN_USE)
800                                 qi->desc_status[head] = QI_ABORT;
801                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
802                 } while (head != tail);
803
804                 if (qi->desc_status[wait_index] == QI_ABORT)
805                         return -EAGAIN;
806         }
807
808         if (fault & DMA_FSTS_ICE)
809                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
810
811         return 0;
812 }
813
814 /*
815  * Submit the queued invalidation descriptor to the remapping
816  * hardware unit and wait for its completion.
817  */
818 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
819 {
820         int rc;
821         struct q_inval *qi = iommu->qi;
822         struct qi_desc *hw, wait_desc;
823         int wait_index, index;
824         unsigned long flags;
825
826         if (!qi)
827                 return 0;
828
829         hw = qi->desc;
830
831 restart:
832         rc = 0;
833
834         raw_spin_lock_irqsave(&qi->q_lock, flags);
835         while (qi->free_cnt < 3) {
836                 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
837                 cpu_relax();
838                 raw_spin_lock_irqsave(&qi->q_lock, flags);
839         }
840
841         index = qi->free_head;
842         wait_index = (index + 1) % QI_LENGTH;
843
844         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
845
846         hw[index] = *desc;
847
848         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
849                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
850         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
851
852         hw[wait_index] = wait_desc;
853
854         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
855         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
856
857         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
858         qi->free_cnt -= 2;
859
860         /*
861          * update the HW tail register indicating the presence of
862          * new descriptors.
863          */
864         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
865
866         while (qi->desc_status[wait_index] != QI_DONE) {
867                 /*
868                  * We will leave the interrupts disabled, to prevent interrupt
869                  * context from queueing another cmd while one is already submitted
870                  * and waiting for completion on this cpu. This is to avoid
871                  * a deadlock where the interrupt context can wait indefinitely
872                  * for free slots in the queue.
873                  */
874                 rc = qi_check_fault(iommu, index);
875                 if (rc)
876                         break;
877
878                 raw_spin_unlock(&qi->q_lock);
879                 cpu_relax();
880                 raw_spin_lock(&qi->q_lock);
881         }
882
883         qi->desc_status[index] = QI_DONE;
884
885         reclaim_free_desc(qi);
886         raw_spin_unlock_irqrestore(&qi->q_lock, flags);
887
888         if (rc == -EAGAIN)
889                 goto restart;
890
891         return rc;
892 }
893
894 /*
895  * Flush the global interrupt entry cache.
896  */
897 void qi_global_iec(struct intel_iommu *iommu)
898 {
899         struct qi_desc desc;
900
901         desc.low = QI_IEC_TYPE;
902         desc.high = 0;
903
904         /* should never fail */
905         qi_submit_sync(&desc, iommu);
906 }
907
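/* Queue a context-cache invalidation for the given domain/source id. */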
908 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
909                       u64 type)
910 {
911         struct qi_desc desc;
912
913         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
914                         | QI_CC_GRAN(type) | QI_CC_TYPE;
915         desc.high = 0;
916
917         qi_submit_sync(&desc, iommu);
918 }
919
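/* Queue an IOTLB invalidation covering 2^size_order pages at addr for domain did. */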
920 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
921                     unsigned int size_order, u64 type)
922 {
923         u8 dw = 0, dr = 0;
924
925         struct qi_desc desc;
926         int ih = 0;
927
928         if (cap_write_drain(iommu->cap))
929                 dw = 1;
930
931         if (cap_read_drain(iommu->cap))
932                 dr = 1;
933
934         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
935                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
936         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
937                 | QI_IOTLB_AM(size_order);
938
939         qi_submit_sync(&desc, iommu);
940 }
941
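/* Queue a device-IOTLB (ATS) invalidation for sid; mask gives the invalidation size as a power-of-two number of pages. */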
942 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
943                         u64 addr, unsigned mask)
944 {
945         struct qi_desc desc;
946
947         if (mask) {
948                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
949                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
950                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
951         } else
952                 desc.high = QI_DEV_IOTLB_ADDR(addr);
953
954         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
955                 qdep = 0;
956
957         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
958                    QI_DIOTLB_TYPE;
959
960         qi_submit_sync(&desc, iommu);
961 }
962
963 /*
964  * Disable Queued Invalidation interface.
965  */
966 void dmar_disable_qi(struct intel_iommu *iommu)
967 {
968         unsigned long flags;
969         u32 sts;
970         cycles_t start_time = get_cycles();
971
972         if (!ecap_qis(iommu->ecap))
973                 return;
974
975         raw_spin_lock_irqsave(&iommu->register_lock, flags);
976
977         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
978         if (!(sts & DMA_GSTS_QIES))
979                 goto end;
980
981         /*
982          * Give a chance to HW to complete the pending invalidation requests.
983          */
984         while ((readl(iommu->reg + DMAR_IQT_REG) !=
985                 readl(iommu->reg + DMAR_IQH_REG)) &&
986                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
987                 cpu_relax();
988
989         iommu->gcmd &= ~DMA_GCMD_QIE;
990         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
991
992         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
993                       !(sts & DMA_GSTS_QIES), sts);
994 end:
995         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
996 }
997
998 /*
999  * Enable queued invalidation.
1000  */
1001 static void __dmar_enable_qi(struct intel_iommu *iommu)
1002 {
1003         u32 sts;
1004         unsigned long flags;
1005         struct q_inval *qi = iommu->qi;
1006
1007         qi->free_head = qi->free_tail = 0;
1008         qi->free_cnt = QI_LENGTH;
1009
1010         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1011
1012         /* write zero to the tail reg */
1013         writel(0, iommu->reg + DMAR_IQT_REG);
1014
1015         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1016
1017         iommu->gcmd |= DMA_GCMD_QIE;
1018         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1019
1020         /* Make sure hardware complete it */
1021         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1022
1023         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1024 }
1025
1026 /*
1027  * Enable Queued Invalidation interface. This is a must to support
1028  * interrupt-remapping. Also used by DMA-remapping, which replaces
1029  * register based IOTLB invalidation.
1030  */
1031 int dmar_enable_qi(struct intel_iommu *iommu)
1032 {
1033         struct q_inval *qi;
1034         struct page *desc_page;
1035
1036         if (!ecap_qis(iommu->ecap))
1037                 return -ENOENT;
1038
1039         /*
1040          * queued invalidation is already setup and enabled.
1041          */
1042         if (iommu->qi)
1043                 return 0;
1044
1045         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1046         if (!iommu->qi)
1047                 return -ENOMEM;
1048
1049         qi = iommu->qi;
1050
1051
1052         desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1053         if (!desc_page) {
1054                 kfree(qi);
1055                 iommu->qi = NULL;
1056                 return -ENOMEM;
1057         }
1058
1059         qi->desc = page_address(desc_page);
1060
1061         qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1062         if (!qi->desc_status) {
1063                 free_page((unsigned long) qi->desc);
1064                 kfree(qi);
1065                 iommu->qi = NULL;
1066                 return -ENOMEM;
1067         }
1068
1069         qi->free_head = qi->free_tail = 0;
1070         qi->free_cnt = QI_LENGTH;
1071
1072         raw_spin_lock_init(&qi->q_lock);
1073
1074         __dmar_enable_qi(iommu);
1075
1076         return 0;
1077 }
1078
1079 /* iommu interrupt handling. Most of it is MSI-like. */
1080
1081 enum faulttype {
1082         DMA_REMAP,
1083         INTR_REMAP,
1084         UNKNOWN,
1085 };
1086
1087 static const char *dma_remap_fault_reasons[] =
1088 {
1089         "Software",
1090         "Present bit in root entry is clear",
1091         "Present bit in context entry is clear",
1092         "Invalid context entry",
1093         "Access beyond MGAW",
1094         "PTE Write access is not set",
1095         "PTE Read access is not set",
1096         "Next page table ptr is invalid",
1097         "Root table address invalid",
1098         "Context table ptr is invalid",
1099         "non-zero reserved fields in RTP",
1100         "non-zero reserved fields in CTP",
1101         "non-zero reserved fields in PTE",
1102         "PCE for translation request specifies blocking",
1103 };
1104
1105 static const char *irq_remap_fault_reasons[] =
1106 {
1107         "Detected reserved fields in the decoded interrupt-remapped request",
1108         "Interrupt index exceeded the interrupt-remapping table size",
1109         "Present field in the IRTE entry is clear",
1110         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1111         "Detected reserved fields in the IRTE entry",
1112         "Blocked a compatibility format interrupt request",
1113         "Blocked an interrupt request due to source-id verification failure",
1114 };
1115
1116 #define MAX_FAULT_REASON_IDX    (ARRAY_SIZE(fault_reason_strings) - 1)
1117
1118 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1119 {
1120         if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1121                                         ARRAY_SIZE(irq_remap_fault_reasons))) {
1122                 *fault_type = INTR_REMAP;
1123                 return irq_remap_fault_reasons[fault_reason - 0x20];
1124         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1125                 *fault_type = DMA_REMAP;
1126                 return dma_remap_fault_reasons[fault_reason];
1127         } else {
1128                 *fault_type = UNKNOWN;
1129                 return "Unknown";
1130         }
1131 }
1132
1133 void dmar_msi_unmask(struct irq_data *data)
1134 {
1135         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1136         unsigned long flag;
1137
1138         /* unmask it */
1139         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1140         writel(0, iommu->reg + DMAR_FECTL_REG);
1141         /* Read a reg to force flush the post write */
1142         readl(iommu->reg + DMAR_FECTL_REG);
1143         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1144 }
1145
1146 void dmar_msi_mask(struct irq_data *data)
1147 {
1148         unsigned long flag;
1149         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1150
1151         /* mask it */
1152         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1153         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1154         /* Read a reg to force flush the post write */
1155         readl(iommu->reg + DMAR_FECTL_REG);
1156         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1157 }
1158
1159 void dmar_msi_write(int irq, struct msi_msg *msg)
1160 {
1161         struct intel_iommu *iommu = irq_get_handler_data(irq);
1162         unsigned long flag;
1163
1164         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1165         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1166         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1167         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1168         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1169 }
1170
1171 void dmar_msi_read(int irq, struct msi_msg *msg)
1172 {
1173         struct intel_iommu *iommu = irq_get_handler_data(irq);
1174         unsigned long flag;
1175
1176         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1177         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1178         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1179         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1180         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1181 }
1182
1183 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1184                 u8 fault_reason, u16 source_id, unsigned long long addr)
1185 {
1186         const char *reason;
1187         int fault_type;
1188
1189         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1190
1191         if (fault_type == INTR_REMAP)
1192                 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1193                        "fault index %llx\n"
1194                         "INTR-REMAP:[fault reason %02d] %s\n",
1195                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1196                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1197                         fault_reason, reason);
1198         else
1199                 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1200                        "fault addr %llx\n"
1201                        "DMAR:[fault reason %02d] %s\n",
1202                        (type ? "DMA Read" : "DMA Write"),
1203                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1204                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1205         return 0;
1206 }
1207
1208 #define PRIMARY_FAULT_REG_LEN (16)
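/*
 * Fault interrupt handler: walk the fault recording registers, log every
 * pending fault and clear it.
 */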
1209 irqreturn_t dmar_fault(int irq, void *dev_id)
1210 {
1211         struct intel_iommu *iommu = dev_id;
1212         int reg, fault_index;
1213         u32 fault_status;
1214         unsigned long flag;
1215
1216         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1217         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1218         if (fault_status)
1219                 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1220
1221         /* TBD: ignore advanced fault log currently */
1222         if (!(fault_status & DMA_FSTS_PPF))
1223                 goto unlock_exit;
1224
1225         fault_index = dma_fsts_fault_record_index(fault_status);
1226         reg = cap_fault_reg_offset(iommu->cap);
1227         while (1) {
1228                 u8 fault_reason;
1229                 u16 source_id;
1230                 u64 guest_addr;
1231                 int type;
1232                 u32 data;
1233
1234                 /* highest 32 bits */
1235                 data = readl(iommu->reg + reg +
1236                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1237                 if (!(data & DMA_FRCD_F))
1238                         break;
1239
1240                 fault_reason = dma_frcd_fault_reason(data);
1241                 type = dma_frcd_type(data);
1242
1243                 data = readl(iommu->reg + reg +
1244                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1245                 source_id = dma_frcd_source_id(data);
1246
1247                 guest_addr = dmar_readq(iommu->reg + reg +
1248                                 fault_index * PRIMARY_FAULT_REG_LEN);
1249                 guest_addr = dma_frcd_page_addr(guest_addr);
1250                 /* clear the fault */
1251                 writel(DMA_FRCD_F, iommu->reg + reg +
1252                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1253
1254                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1255
1256                 dmar_fault_do_one(iommu, type, fault_reason,
1257                                 source_id, guest_addr);
1258
1259                 fault_index++;
1260                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1261                         fault_index = 0;
1262                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1263         }
1264
1265         writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1266
1267 unlock_exit:
1268         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1269         return IRQ_HANDLED;
1270 }
1271
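/* Allocate and wire up the MSI-like fault reporting interrupt for this iommu. */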
1272 int dmar_set_interrupt(struct intel_iommu *iommu)
1273 {
1274         int irq, ret;
1275
1276         /*
1277          * Check if the fault interrupt is already initialized.
1278          */
1279         if (iommu->irq)
1280                 return 0;
1281
1282         irq = create_irq();
1283         if (!irq) {
1284                 pr_err("IOMMU: no free vectors\n");
1285                 return -EINVAL;
1286         }
1287
1288         irq_set_handler_data(irq, iommu);
1289         iommu->irq = irq;
1290
1291         ret = arch_setup_dmar_msi(irq);
1292         if (ret) {
1293                 irq_set_handler_data(irq, NULL);
1294                 iommu->irq = 0;
1295                 destroy_irq(irq);
1296                 return ret;
1297         }
1298
1299         ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1300         if (ret)
1301                 pr_err("IOMMU: can't request irq\n");
1302         return ret;
1303 }
1304
1305 int __init enable_drhd_fault_handling(void)
1306 {
1307         struct dmar_drhd_unit *drhd;
1308
1309         /*
1310          * Enable fault control interrupt.
1311          */
1312         for_each_drhd_unit(drhd) {
1313                 int ret;
1314                 struct intel_iommu *iommu = drhd->iommu;
1315                 u32 fault_status;
1316                 ret = dmar_set_interrupt(iommu);
1317
1318                 if (ret) {
1319                         pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1320                                (unsigned long long)drhd->reg_base_addr, ret);
1321                         return -1;
1322                 }
1323
1324                 /*
1325                  * Clear any previous faults.
1326                  */
1327                 dmar_fault(iommu->irq, iommu);
1328                 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1329                 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1330         }
1331
1332         return 0;
1333 }
1334
1335 /*
1336  * Re-enable Queued Invalidation interface.
1337  */
1338 int dmar_reenable_qi(struct intel_iommu *iommu)
1339 {
1340         if (!ecap_qis(iommu->ecap))
1341                 return -ENOENT;
1342
1343         if (!iommu->qi)
1344                 return -ENOENT;
1345
1346         /*
1347          * First disable queued invalidation.
1348          */
1349         dmar_disable_qi(iommu);
1350         /*
1351          * Then enable queued invalidation again. Since there is no pending
1352          * invalidation requests now, it's safe to re-enable queued
1353          * invalidation.
1354          */
1355         __dmar_enable_qi(iommu);
1356
1357         return 0;
1358 }
1359
1360 /*
1361  * Check interrupt remapping support in DMAR table description.
1362  */
1363 int __init dmar_ir_support(void)
1364 {
1365         struct acpi_table_dmar *dmar;
1366         dmar = (struct acpi_table_dmar *)dmar_tbl;
1367         if (!dmar)
1368                 return 0;
1369         return dmar->flags & 0x1;
1370 }
1371 IOMMU_INIT_POST(detect_intel_iommu);