iommu/vt-d: release invalidation queue when destroying IOMMU unit
drivers/iommu/dmar.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
30
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <asm/irq_remapping.h>
42 #include <asm/iommu_table.h>
43
44 #include "irq_remapping.h"
45
46 /* No locks are needed as the DMA remapping hardware unit
47  * list is constructed at boot time and hotplug of
48  * these units is not supported by the architecture.
49  */
50 LIST_HEAD(dmar_drhd_units);
51
52 struct acpi_table_header * __initdata dmar_tbl;
53 static acpi_size dmar_tbl_size;
54
55 static int alloc_iommu(struct dmar_drhd_unit *drhd);
56 static void free_iommu(struct intel_iommu *iommu);
57
58 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
59 {
60         /*
61          * add INCLUDE_ALL at the tail, so that scanning the list will find it at
62          * the very end.
63          */
64         if (drhd->include_all)
65                 list_add_tail(&drhd->list, &dmar_drhd_units);
66         else
67                 list_add(&drhd->list, &dmar_drhd_units);
68 }
69
70 static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
71                                            struct pci_dev **dev, u16 segment)
72 {
73         struct pci_bus *bus;
74         struct pci_dev *pdev = NULL;
75         struct acpi_dmar_pci_path *path;
76         int count;
77
78         bus = pci_find_bus(segment, scope->bus);
79         path = (struct acpi_dmar_pci_path *)(scope + 1);
80         count = (scope->length - sizeof(struct acpi_dmar_device_scope))
81                 / sizeof(struct acpi_dmar_pci_path);
82
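        /*
         * Walk the ACPI device-scope path one hop at a time: each entry names
         * a (device, function) under the current bus, and bridges descend to
         * their secondary bus via pdev->subordinate.
         */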
83         while (count) {
84                 if (pdev)
85                         pci_dev_put(pdev);
86                 /*
87                  * Some BIOSes list non-existent devices in the DMAR table;
88                  * just ignore them.
89                  */
90                 if (!bus) {
91                         pr_warn("Device scope bus [%d] not found\n", scope->bus);
92                         break;
93                 }
94                 pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
95                 if (!pdev) {
96                         /* warning will be printed below */
97                         break;
98                 }
99                 path++;
100                 count--;
101                 bus = pdev->subordinate;
102         }
103         if (!pdev) {
104                 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
105                         segment, scope->bus, path->device, path->function);
106                 return 0;
107         }
108         if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
109                         pdev->subordinate) || (scope->entry_type ==
110                         ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
111                 pr_warn("Device scope type does not match for %s\n",
112                         pci_name(pdev));
113                 pci_dev_put(pdev);
114                 return -EINVAL;
115         }
116         *dev = pdev;
117         return 0;
118 }
119
120 int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
121                                 struct pci_dev ***devices, u16 segment)
122 {
123         struct acpi_dmar_device_scope *scope;
124         void *tmp = start;
125         int index;
126         int ret;
127
128         *cnt = 0;
129         while (start < end) {
130                 scope = start;
131                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
132                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
133                         (*cnt)++;
134                 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
135                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
136                         pr_warn("Unsupported device scope\n");
137                 }
138                 start += scope->length;
139         }
140         if (*cnt == 0)
141                 return 0;
142
143         *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
144         if (!*devices)
145                 return -ENOMEM;
146
147         start = tmp;
148         index = 0;
149         while (start < end) {
150                 scope = start;
151                 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
152                     scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
153                         ret = dmar_parse_one_dev_scope(scope,
154                                 &(*devices)[index], segment);
155                         if (ret) {
156                                 dmar_free_dev_scope(devices, cnt);
157                                 return ret;
158                         }
159                         index++;
160                 }
161                 start += scope->length;
162         }
163
164         return 0;
165 }
166
167 void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
168 {
169         if (*devices && *cnt) {
170                 while (--*cnt >= 0)
171                         pci_dev_put((*devices)[*cnt]);
172                 kfree(*devices);
173                 *devices = NULL;
174                 *cnt = 0;
175         }
176 }
177
178 /**
179  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
180  * structure which uniquely represents one DMA remapping hardware unit
181  * present in the platform
182  */
183 static int __init
184 dmar_parse_one_drhd(struct acpi_dmar_header *header)
185 {
186         struct acpi_dmar_hardware_unit *drhd;
187         struct dmar_drhd_unit *dmaru;
188         int ret = 0;
189
190         drhd = (struct acpi_dmar_hardware_unit *)header;
191         dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
192         if (!dmaru)
193                 return -ENOMEM;
194
195         dmaru->hdr = header;
196         dmaru->reg_base_addr = drhd->address;
197         dmaru->segment = drhd->segment;
198         dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
199
200         ret = alloc_iommu(dmaru);
201         if (ret) {
202                 kfree(dmaru);
203                 return ret;
204         }
205         dmar_register_drhd_unit(dmaru);
206         return 0;
207 }
208
209 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
210 {
211         if (dmaru->devices && dmaru->devices_cnt)
212                 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
213         if (dmaru->iommu)
214                 free_iommu(dmaru->iommu);
215         kfree(dmaru);
216 }
217
218 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
219 {
220         struct acpi_dmar_hardware_unit *drhd;
221
222         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
223
224         if (dmaru->include_all)
225                 return 0;
226
227         return dmar_parse_dev_scope((void *)(drhd + 1),
228                                     ((void *)drhd) + drhd->header.length,
229                                     &dmaru->devices_cnt, &dmaru->devices,
230                                     drhd->segment);
231 }
232
233 #ifdef CONFIG_ACPI_NUMA
234 static int __init
235 dmar_parse_one_rhsa(struct acpi_dmar_header *header)
236 {
237         struct acpi_dmar_rhsa *rhsa;
238         struct dmar_drhd_unit *drhd;
239
240         rhsa = (struct acpi_dmar_rhsa *)header;
241         for_each_drhd_unit(drhd) {
242                 if (drhd->reg_base_addr == rhsa->base_address) {
243                         int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
244
245                         if (!node_online(node))
246                                 node = -1;
247                         drhd->iommu->node = node;
248                         return 0;
249                 }
250         }
251         WARN_TAINT(
252                 1, TAINT_FIRMWARE_WORKAROUND,
253                 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
254                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
255                 rhsa->base_address,
256                 dmi_get_system_info(DMI_BIOS_VENDOR),
257                 dmi_get_system_info(DMI_BIOS_VERSION),
258                 dmi_get_system_info(DMI_PRODUCT_VERSION));
259
260         return 0;
261 }
262 #endif
263
264 static void __init
265 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
266 {
267         struct acpi_dmar_hardware_unit *drhd;
268         struct acpi_dmar_reserved_memory *rmrr;
269         struct acpi_dmar_atsr *atsr;
270         struct acpi_dmar_rhsa *rhsa;
271
272         switch (header->type) {
273         case ACPI_DMAR_TYPE_HARDWARE_UNIT:
274                 drhd = container_of(header, struct acpi_dmar_hardware_unit,
275                                     header);
276                 pr_info("DRHD base: %#016Lx flags: %#x\n",
277                         (unsigned long long)drhd->address, drhd->flags);
278                 break;
279         case ACPI_DMAR_TYPE_RESERVED_MEMORY:
280                 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
281                                     header);
282                 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
283                         (unsigned long long)rmrr->base_address,
284                         (unsigned long long)rmrr->end_address);
285                 break;
286         case ACPI_DMAR_TYPE_ATSR:
287                 atsr = container_of(header, struct acpi_dmar_atsr, header);
288                 pr_info("ATSR flags: %#x\n", atsr->flags);
289                 break;
290         case ACPI_DMAR_HARDWARE_AFFINITY:
291                 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
292                 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
293                        (unsigned long long)rhsa->base_address,
294                        rhsa->proximity_domain);
295                 break;
296         }
297 }
298
299 /**
300  * dmar_table_detect - checks to see if the platform supports DMAR devices
301  */
302 static int __init dmar_table_detect(void)
303 {
304         acpi_status status = AE_OK;
305
306         /* if we can find the DMAR table, then there are DMAR devices */
307         status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
308                                 (struct acpi_table_header **)&dmar_tbl,
309                                 &dmar_tbl_size);
310
311         if (ACPI_SUCCESS(status) && !dmar_tbl) {
312                 pr_warn("Unable to map DMAR\n");
313                 status = AE_NOT_FOUND;
314         }
315
316         return (ACPI_SUCCESS(status) ? 1 : 0);
317 }
318
319 /**
320  * parse_dmar_table - parses the DMA reporting table
321  */
322 static int __init
323 parse_dmar_table(void)
324 {
325         struct acpi_table_dmar *dmar;
326         struct acpi_dmar_header *entry_header;
327         int ret = 0;
328         int drhd_count = 0;
329
330         /*
331          * Do it again; the earlier dmar_tbl mapping could have been made
332          * via the fixed map.
333          */
334         dmar_table_detect();
335
336         /*
337          * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
338          * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
339          */
340         dmar_tbl = tboot_get_dmar_table(dmar_tbl);
341
342         dmar = (struct acpi_table_dmar *)dmar_tbl;
343         if (!dmar)
344                 return -ENODEV;
345
346         if (dmar->width < PAGE_SHIFT - 1) {
347                 pr_warn("Invalid DMAR haw\n");
348                 return -EINVAL;
349         }
350
351         pr_info("Host address width %d\n", dmar->width + 1);
352
353         entry_header = (struct acpi_dmar_header *)(dmar + 1);
354         while (((unsigned long)entry_header) <
355                         (((unsigned long)dmar) + dmar_tbl->length)) {
356                 /* Avoid looping forever on bad ACPI tables */
357                 if (entry_header->length == 0) {
358                         pr_warn("Invalid 0-length structure\n");
359                         ret = -EINVAL;
360                         break;
361                 }
362
363                 dmar_table_print_dmar_entry(entry_header);
364
365                 switch (entry_header->type) {
366                 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
367                         drhd_count++;
368                         ret = dmar_parse_one_drhd(entry_header);
369                         break;
370                 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
371                         ret = dmar_parse_one_rmrr(entry_header);
372                         break;
373                 case ACPI_DMAR_TYPE_ATSR:
374                         ret = dmar_parse_one_atsr(entry_header);
375                         break;
376                 case ACPI_DMAR_HARDWARE_AFFINITY:
377 #ifdef CONFIG_ACPI_NUMA
378                         ret = dmar_parse_one_rhsa(entry_header);
379 #endif
380                         break;
381                 default:
382                         pr_warn("Unknown DMAR structure type %d\n",
383                                 entry_header->type);
384                         ret = 0; /* for forward compatibility */
385                         break;
386                 }
387                 if (ret)
388                         break;
389
390                 entry_header = ((void *)entry_header + entry_header->length);
391         }
392         if (drhd_count == 0)
393                 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
394         return ret;
395 }
396
397 static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
398                           struct pci_dev *dev)
399 {
400         int index;
401
402         while (dev) {
403                 for (index = 0; index < cnt; index++)
404                         if (dev == devices[index])
405                                 return 1;
406
407                 /* Check our parent */
408                 dev = dev->bus->self;
409         }
410
411         return 0;
412 }
413
414 struct dmar_drhd_unit *
415 dmar_find_matched_drhd_unit(struct pci_dev *dev)
416 {
417         struct dmar_drhd_unit *dmaru = NULL;
418         struct acpi_dmar_hardware_unit *drhd;
419
420         dev = pci_physfn(dev);
421
422         for_each_drhd_unit(dmaru) {
423                 drhd = container_of(dmaru->hdr,
424                                     struct acpi_dmar_hardware_unit,
425                                     header);
426
427                 if (dmaru->include_all &&
428                     drhd->segment == pci_domain_nr(dev->bus))
429                         return dmaru;
430
431                 if (dmar_pci_device_match(dmaru->devices,
432                                           dmaru->devices_cnt, dev))
433                         return dmaru;
434         }
435
436         return NULL;
437 }
438
439 int __init dmar_dev_scope_init(void)
440 {
441         static int dmar_dev_scope_initialized;
442         struct dmar_drhd_unit *drhd;
443         int ret = -ENODEV;
444
445         if (dmar_dev_scope_initialized)
446                 return dmar_dev_scope_initialized;
447
448         if (list_empty(&dmar_drhd_units))
449                 goto fail;
450
451         list_for_each_entry(drhd, &dmar_drhd_units, list) {
452                 ret = dmar_parse_dev(drhd);
453                 if (ret)
454                         goto fail;
455         }
456
457         ret = dmar_parse_rmrr_atsr_dev();
458         if (ret)
459                 goto fail;
460
461         dmar_dev_scope_initialized = 1;
462         return 0;
463
464 fail:
465         dmar_dev_scope_initialized = ret;
466         return ret;
467 }
468
469
470 int __init dmar_table_init(void)
471 {
472         static int dmar_table_initialized;
473         int ret;
474
475         if (dmar_table_initialized)
476                 return 0;
477
478         dmar_table_initialized = 1;
479
480         ret = parse_dmar_table();
481         if (ret) {
482                 if (ret != -ENODEV)
483                         pr_info("Failed to parse DMAR table\n");
484                 return ret;
485         }
486
487         if (list_empty(&dmar_drhd_units)) {
488                 pr_info("No DMAR devices found\n");
489                 return -ENODEV;
490         }
491
492         return 0;
493 }
494
495 static void warn_invalid_dmar(u64 addr, const char *message)
496 {
497         WARN_TAINT_ONCE(
498                 1, TAINT_FIRMWARE_WORKAROUND,
499                 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
500                 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
501                 addr, message,
502                 dmi_get_system_info(DMI_BIOS_VENDOR),
503                 dmi_get_system_info(DMI_BIOS_VERSION),
504                 dmi_get_system_info(DMI_PRODUCT_VERSION));
505 }
506
507 static int __init check_zero_address(void)
508 {
509         struct acpi_table_dmar *dmar;
510         struct acpi_dmar_header *entry_header;
511         struct acpi_dmar_hardware_unit *drhd;
512
513         dmar = (struct acpi_table_dmar *)dmar_tbl;
514         entry_header = (struct acpi_dmar_header *)(dmar + 1);
515
516         while (((unsigned long)entry_header) <
517                         (((unsigned long)dmar) + dmar_tbl->length)) {
518                 /* Avoid looping forever on bad ACPI tables */
519                 if (entry_header->length == 0) {
520                         pr_warn("Invalid 0-length structure\n");
521                         return 0;
522                 }
523
524                 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
525                         void __iomem *addr;
526                         u64 cap, ecap;
527
528                         drhd = (void *)entry_header;
529                         if (!drhd->address) {
530                                 warn_invalid_dmar(0, "");
531                                 goto failed;
532                         }
533
534                         addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
535                         if (!addr) {
536                                 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
537                                 goto failed;
538                         }
539                         cap = dmar_readq(addr + DMAR_CAP_REG);
540                         ecap = dmar_readq(addr + DMAR_ECAP_REG);
541                         early_iounmap(addr, VTD_PAGE_SIZE);
542                         if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
543                                 warn_invalid_dmar(drhd->address,
544                                                   " returns all ones");
545                                 goto failed;
546                         }
547                 }
548
549                 entry_header = ((void *)entry_header + entry_header->length);
550         }
551         return 1;
552
553 failed:
554         return 0;
555 }
556
557 int __init detect_intel_iommu(void)
558 {
559         int ret;
560
561         ret = dmar_table_detect();
562         if (ret)
563                 ret = check_zero_address();
564         {
565                 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
566                         iommu_detected = 1;
567                         /* Make sure ACS will be enabled */
568                         pci_request_acs();
569                 }
570
571 #ifdef CONFIG_X86
572                 if (ret)
573                         x86_init.iommu.iommu_init = intel_iommu_init;
574 #endif
575         }
576         early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
577         dmar_tbl = NULL;
578
579         return ret ? 1 : -ENODEV;
580 }
581
582
583 static void unmap_iommu(struct intel_iommu *iommu)
584 {
585         iounmap(iommu->reg);
586         release_mem_region(iommu->reg_phys, iommu->reg_size);
587 }
588
589 /**
590  * map_iommu: map the iommu's registers
591  * @iommu: the iommu to map
592  * @phys_addr: the physical address of the base register
593  *
594  * Memory map the iommu's registers.  Start w/ a single page, and
595  * possibly expand if that turns out to be insufficient.
596  */
597 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
598 {
599         int map_size, err = 0;
600
601         iommu->reg_phys = phys_addr;
602         iommu->reg_size = VTD_PAGE_SIZE;
603
604         if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
605                 pr_err("IOMMU: can't reserve memory\n");
606                 err = -EBUSY;
607                 goto out;
608         }
609
610         iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
611         if (!iommu->reg) {
612                 pr_err("IOMMU: can't map the region\n");
613                 err = -ENOMEM;
614                 goto release;
615         }
616
617         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
618         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
619
620         if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
621                 err = -EINVAL;
622                 warn_invalid_dmar(phys_addr, " returns all ones");
623                 goto unmap;
624         }
625
626         /* the registers might be more than one page */
627         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
628                          cap_max_fault_reg_offset(iommu->cap));
629         map_size = VTD_PAGE_ALIGN(map_size);
630         if (map_size > iommu->reg_size) {
631                 iounmap(iommu->reg);
632                 release_mem_region(iommu->reg_phys, iommu->reg_size);
633                 iommu->reg_size = map_size;
634                 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
635                                         iommu->name)) {
636                         pr_err("IOMMU: can't reserve memory\n");
637                         err = -EBUSY;
638                         goto out;
639                 }
640                 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
641                 if (!iommu->reg) {
642                         pr_err("IOMMU: can't map the region\n");
643                         err = -ENOMEM;
644                         goto release;
645                 }
646         }
647         err = 0;
648         goto out;
649
650 unmap:
651         iounmap(iommu->reg);
652 release:
653         release_mem_region(iommu->reg_phys, iommu->reg_size);
654 out:
655         return err;
656 }
657
658 static int alloc_iommu(struct dmar_drhd_unit *drhd)
659 {
660         struct intel_iommu *iommu;
661         u32 ver, sts;
662         static int iommu_allocated = 0;
663         int agaw = 0;
664         int msagaw = 0;
665         int err;
666
667         if (!drhd->reg_base_addr) {
668                 warn_invalid_dmar(0, "");
669                 return -EINVAL;
670         }
671
672         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
673         if (!iommu)
674                 return -ENOMEM;
675
676         iommu->seq_id = iommu_allocated++;
677         sprintf(iommu->name, "dmar%d", iommu->seq_id);
678
679         err = map_iommu(iommu, drhd->reg_base_addr);
680         if (err) {
681                 pr_err("IOMMU: failed to map %s\n", iommu->name);
682                 goto error;
683         }
684
685         err = -EINVAL;
686         agaw = iommu_calculate_agaw(iommu);
687         if (agaw < 0) {
688                 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
689                         iommu->seq_id);
690                 goto err_unmap;
691         }
692         msagaw = iommu_calculate_max_sagaw(iommu);
693         if (msagaw < 0) {
694                 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
695                         iommu->seq_id);
696                 goto err_unmap;
697         }
698         iommu->agaw = agaw;
699         iommu->msagaw = msagaw;
700
701         iommu->node = -1;
702
703         ver = readl(iommu->reg + DMAR_VER_REG);
704         pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
705                 iommu->seq_id,
706                 (unsigned long long)drhd->reg_base_addr,
707                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
708                 (unsigned long long)iommu->cap,
709                 (unsigned long long)iommu->ecap);
710
711         /* Reflect status in gcmd */
712         sts = readl(iommu->reg + DMAR_GSTS_REG);
713         if (sts & DMA_GSTS_IRES)
714                 iommu->gcmd |= DMA_GCMD_IRE;
715         if (sts & DMA_GSTS_TES)
716                 iommu->gcmd |= DMA_GCMD_TE;
717         if (sts & DMA_GSTS_QIES)
718                 iommu->gcmd |= DMA_GCMD_QIE;
719
720         raw_spin_lock_init(&iommu->register_lock);
721
722         drhd->iommu = iommu;
723         return 0;
724
725  err_unmap:
726         unmap_iommu(iommu);
727  error:
728         kfree(iommu);
729         return err;
730 }
731
732 static void free_iommu(struct intel_iommu *iommu)
733 {
734         if (iommu->irq) {
735                 free_irq(iommu->irq, iommu);
736                 irq_set_handler_data(iommu->irq, NULL);
737                 destroy_irq(iommu->irq);
738         }
739
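        /*
         * Release the queued-invalidation queue: the descriptor page, the
         * status array, and the q_inval bookkeeping itself.
         */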
740         if (iommu->qi) {
741                 free_page((unsigned long)iommu->qi->desc);
742                 kfree(iommu->qi->desc_status);
743                 kfree(iommu->qi);
744         }
745
746         if (iommu->reg)
747                 unmap_iommu(iommu);
748
749         kfree(iommu);
750 }
751
752 /*
753  * Reclaim all the submitted descriptors which have completed their work.
754  */
755 static inline void reclaim_free_desc(struct q_inval *qi)
756 {
757         while (qi->desc_status[qi->free_tail] == QI_DONE ||
758                qi->desc_status[qi->free_tail] == QI_ABORT) {
759                 qi->desc_status[qi->free_tail] = QI_FREE;
760                 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
761                 qi->free_cnt++;
762         }
763 }
764
765 static int qi_check_fault(struct intel_iommu *iommu, int index)
766 {
767         u32 fault;
768         int head, tail;
769         struct q_inval *qi = iommu->qi;
770         int wait_index = (index + 1) % QI_LENGTH;
771
772         if (qi->desc_status[wait_index] == QI_ABORT)
773                 return -EAGAIN;
774
775         fault = readl(iommu->reg + DMAR_FSTS_REG);
776
777         /*
778          * If IQE happens, the head points to the descriptor associated
779          * with the error. No new descriptors are fetched until the IQE
780          * is cleared.
781          */
782         if (fault & DMA_FSTS_IQE) {
783                 head = readl(iommu->reg + DMAR_IQH_REG);
784                 if ((head >> DMAR_IQ_SHIFT) == index) {
785                         pr_err("VT-d detected invalid descriptor: "
786                                 "low=%llx, high=%llx\n",
787                                 (unsigned long long)qi->desc[index].low,
788                                 (unsigned long long)qi->desc[index].high);
789                         memcpy(&qi->desc[index], &qi->desc[wait_index],
790                                         sizeof(struct qi_desc));
791                         __iommu_flush_cache(iommu, &qi->desc[index],
792                                         sizeof(struct qi_desc));
793                         writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
794                         return -EINVAL;
795                 }
796         }
797
798         /*
799          * If ITE happens, all pending wait_desc commands are aborted.
800          * No new descriptors are fetched until the ITE is cleared.
801          */
802         if (fault & DMA_FSTS_ITE) {
803                 head = readl(iommu->reg + DMAR_IQH_REG);
804                 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
805                 head |= 1;
806                 tail = readl(iommu->reg + DMAR_IQT_REG);
807                 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
808
809                 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
810
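                /*
                 * Walk the wait-descriptor slots (odd indices, stepping back
                 * by two) from just before the head around to the tail and
                 * mark every wait still in use as aborted, so its submitter
                 * sees -EAGAIN and resubmits.
                 */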
811                 do {
812                         if (qi->desc_status[head] == QI_IN_USE)
813                                 qi->desc_status[head] = QI_ABORT;
814                         head = (head - 2 + QI_LENGTH) % QI_LENGTH;
815                 } while (head != tail);
816
817                 if (qi->desc_status[wait_index] == QI_ABORT)
818                         return -EAGAIN;
819         }
820
821         if (fault & DMA_FSTS_ICE)
822                 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
823
824         return 0;
825 }
826
827 /*
828  * Submit the queued invalidation descriptor to the remapping
829  * hardware unit and wait for its completion.
830  */
831 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
832 {
833         int rc;
834         struct q_inval *qi = iommu->qi;
835         struct qi_desc *hw, wait_desc;
836         int wait_index, index;
837         unsigned long flags;
838
839         if (!qi)
840                 return 0;
841
842         hw = qi->desc;
843
844 restart:
845         rc = 0;
846
847         raw_spin_lock_irqsave(&qi->q_lock, flags);
848         while (qi->free_cnt < 3) {
849                 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
850                 cpu_relax();
851                 raw_spin_lock_irqsave(&qi->q_lock, flags);
852         }
853
854         index = qi->free_head;
855         wait_index = (index + 1) % QI_LENGTH;
856
857         qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
858
859         hw[index] = *desc;
860
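        /*
         * Queue a wait descriptor right after the request.  When the hardware
         * executes it, it writes QI_DONE into desc_status[wait_index], which
         * is what the polling loop below waits for.
         */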
861         wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
862                         QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
863         wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
864
865         hw[wait_index] = wait_desc;
866
867         __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
868         __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
869
870         qi->free_head = (qi->free_head + 2) % QI_LENGTH;
871         qi->free_cnt -= 2;
872
873         /*
874          * update the HW tail register indicating the presence of
875          * new descriptors.
876          */
877         writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
878
879         while (qi->desc_status[wait_index] != QI_DONE) {
880                 /*
881                  * We leave interrupts disabled to prevent the interrupt
882                  * context from queueing another cmd while a cmd is already submitted
883                  * and waiting for completion on this cpu. This is to avoid
884                  * a deadlock where the interrupt context can wait indefinitely
885                  * for free slots in the queue.
886                  */
887                 rc = qi_check_fault(iommu, index);
888                 if (rc)
889                         break;
890
891                 raw_spin_unlock(&qi->q_lock);
892                 cpu_relax();
893                 raw_spin_lock(&qi->q_lock);
894         }
895
896         qi->desc_status[index] = QI_DONE;
897
898         reclaim_free_desc(qi);
899         raw_spin_unlock_irqrestore(&qi->q_lock, flags);
900
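        /* An invalidation time-out aborted our wait descriptor; resubmit. */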
901         if (rc == -EAGAIN)
902                 goto restart;
903
904         return rc;
905 }
906
907 /*
908  * Flush the global interrupt entry cache.
909  */
910 void qi_global_iec(struct intel_iommu *iommu)
911 {
912         struct qi_desc desc;
913
914         desc.low = QI_IEC_TYPE;
915         desc.high = 0;
916
917         /* should never fail */
918         qi_submit_sync(&desc, iommu);
919 }
920
921 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
922                       u64 type)
923 {
924         struct qi_desc desc;
925
926         desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
927                         | QI_CC_GRAN(type) | QI_CC_TYPE;
928         desc.high = 0;
929
930         qi_submit_sync(&desc, iommu);
931 }
932
933 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
934                     unsigned int size_order, u64 type)
935 {
936         u8 dw = 0, dr = 0;
937
938         struct qi_desc desc;
939         int ih = 0;
940
941         if (cap_write_drain(iommu->cap))
942                 dw = 1;
943
944         if (cap_read_drain(iommu->cap))
945                 dr = 1;
946
947         desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
948                 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
949         desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
950                 | QI_IOTLB_AM(size_order);
951
952         qi_submit_sync(&desc, iommu);
953 }
954
955 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
956                         u64 addr, unsigned mask)
957 {
958         struct qi_desc desc;
959
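        /*
         * When a range is given, the size bit is set and the low-order
         * address bits encode the number of pages (2^mask) being invalidated.
         */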
960         if (mask) {
961                 BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
962                 addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
963                 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
964         } else
965                 desc.high = QI_DEV_IOTLB_ADDR(addr);
966
967         if (qdep >= QI_DEV_IOTLB_MAX_INVS)
968                 qdep = 0;
969
970         desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
971                    QI_DIOTLB_TYPE;
972
973         qi_submit_sync(&desc, iommu);
974 }
975
976 /*
977  * Disable Queued Invalidation interface.
978  */
979 void dmar_disable_qi(struct intel_iommu *iommu)
980 {
981         unsigned long flags;
982         u32 sts;
983         cycles_t start_time = get_cycles();
984
985         if (!ecap_qis(iommu->ecap))
986                 return;
987
988         raw_spin_lock_irqsave(&iommu->register_lock, flags);
989
990         sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
991         if (!(sts & DMA_GSTS_QIES))
992                 goto end;
993
994         /*
995          * Give the hardware a chance to complete the pending invalidation requests.
996          */
997         while ((readl(iommu->reg + DMAR_IQT_REG) !=
998                 readl(iommu->reg + DMAR_IQH_REG)) &&
999                 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1000                 cpu_relax();
1001
1002         iommu->gcmd &= ~DMA_GCMD_QIE;
1003         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1004
1005         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1006                       !(sts & DMA_GSTS_QIES), sts);
1007 end:
1008         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1009 }
1010
1011 /*
1012  * Enable queued invalidation.
1013  */
1014 static void __dmar_enable_qi(struct intel_iommu *iommu)
1015 {
1016         u32 sts;
1017         unsigned long flags;
1018         struct q_inval *qi = iommu->qi;
1019
1020         qi->free_head = qi->free_tail = 0;
1021         qi->free_cnt = QI_LENGTH;
1022
1023         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1024
1025         /* write zero to the tail reg */
1026         writel(0, iommu->reg + DMAR_IQT_REG);
1027
1028         dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1029
1030         iommu->gcmd |= DMA_GCMD_QIE;
1031         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1032
1033         /* Make sure hardware complete it */
1034         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1035
1036         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1037 }
1038
1039 /*
1040  * Enable Queued Invalidation interface. This is a must to support
1041  * interrupt-remapping. Also used by DMA-remapping, which replaces
1042  * register-based IOTLB invalidation.
1043  */
1044 int dmar_enable_qi(struct intel_iommu *iommu)
1045 {
1046         struct q_inval *qi;
1047         struct page *desc_page;
1048
1049         if (!ecap_qis(iommu->ecap))
1050                 return -ENOENT;
1051
1052         /*
1053          * queued invalidation is already set up and enabled.
1054          */
1055         if (iommu->qi)
1056                 return 0;
1057
1058         iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1059         if (!iommu->qi)
1060                 return -ENOMEM;
1061
1062         qi = iommu->qi;
1063
1064
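        /*
         * The invalidation queue itself is a single zeroed page holding
         * QI_LENGTH 16-byte descriptors, allocated on the IOMMU's local node.
         */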
1065         desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1066         if (!desc_page) {
1067                 kfree(qi);
1068                 iommu->qi = NULL;
1069                 return -ENOMEM;
1070         }
1071
1072         qi->desc = page_address(desc_page);
1073
1074         qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1075         if (!qi->desc_status) {
1076                 free_page((unsigned long) qi->desc);
1077                 kfree(qi);
1078                 iommu->qi = NULL;
1079                 return -ENOMEM;
1080         }
1081
1082         qi->free_head = qi->free_tail = 0;
1083         qi->free_cnt = QI_LENGTH;
1084
1085         raw_spin_lock_init(&qi->q_lock);
1086
1087         __dmar_enable_qi(iommu);
1088
1089         return 0;
1090 }
1091
1092 /* iommu interrupt handling. Most of it is MSI-like. */
1093
1094 enum faulttype {
1095         DMA_REMAP,
1096         INTR_REMAP,
1097         UNKNOWN,
1098 };
1099
1100 static const char *dma_remap_fault_reasons[] =
1101 {
1102         "Software",
1103         "Present bit in root entry is clear",
1104         "Present bit in context entry is clear",
1105         "Invalid context entry",
1106         "Access beyond MGAW",
1107         "PTE Write access is not set",
1108         "PTE Read access is not set",
1109         "Next page table ptr is invalid",
1110         "Root table address invalid",
1111         "Context table ptr is invalid",
1112         "non-zero reserved fields in RTP",
1113         "non-zero reserved fields in CTP",
1114         "non-zero reserved fields in PTE",
1115         "PCE for translation request specifies blocking",
1116 };
1117
1118 static const char *irq_remap_fault_reasons[] =
1119 {
1120         "Detected reserved fields in the decoded interrupt-remapped request",
1121         "Interrupt index exceeded the interrupt-remapping table size",
1122         "Present field in the IRTE entry is clear",
1123         "Error accessing interrupt-remapping table pointed by IRTA_REG",
1124         "Detected reserved fields in the IRTE entry",
1125         "Blocked a compatibility format interrupt request",
1126         "Blocked an interrupt request due to source-id verification failure",
1127 };
1128
1129 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1130 {
1131         if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1132                                         ARRAY_SIZE(irq_remap_fault_reasons))) {
1133                 *fault_type = INTR_REMAP;
1134                 return irq_remap_fault_reasons[fault_reason - 0x20];
1135         } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1136                 *fault_type = DMA_REMAP;
1137                 return dma_remap_fault_reasons[fault_reason];
1138         } else {
1139                 *fault_type = UNKNOWN;
1140                 return "Unknown";
1141         }
1142 }
1143
1144 void dmar_msi_unmask(struct irq_data *data)
1145 {
1146         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1147         unsigned long flag;
1148
1149         /* unmask it */
1150         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1151         writel(0, iommu->reg + DMAR_FECTL_REG);
1152         /* Read a reg to force flush the post write */
1153         readl(iommu->reg + DMAR_FECTL_REG);
1154         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1155 }
1156
1157 void dmar_msi_mask(struct irq_data *data)
1158 {
1159         unsigned long flag;
1160         struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1161
1162         /* mask it */
1163         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1164         writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
1165         /* Read a reg to force flush the post write */
1166         readl(iommu->reg + DMAR_FECTL_REG);
1167         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1168 }
1169
1170 void dmar_msi_write(int irq, struct msi_msg *msg)
1171 {
1172         struct intel_iommu *iommu = irq_get_handler_data(irq);
1173         unsigned long flag;
1174
1175         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1176         writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
1177         writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
1178         writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
1179         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1180 }
1181
1182 void dmar_msi_read(int irq, struct msi_msg *msg)
1183 {
1184         struct intel_iommu *iommu = irq_get_handler_data(irq);
1185         unsigned long flag;
1186
1187         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1188         msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
1189         msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
1190         msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
1191         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1192 }
1193
1194 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1195                 u8 fault_reason, u16 source_id, unsigned long long addr)
1196 {
1197         const char *reason;
1198         int fault_type;
1199
1200         reason = dmar_get_fault_reason(fault_reason, &fault_type);
1201
1202         if (fault_type == INTR_REMAP)
1203                 pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1204                        "fault index %llx\n"
1205                         "INTR-REMAP:[fault reason %02d] %s\n",
1206                         (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1207                         PCI_FUNC(source_id & 0xFF), addr >> 48,
1208                         fault_reason, reason);
1209         else
1210                 pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1211                        "fault addr %llx\n"
1212                        "DMAR:[fault reason %02d] %s\n",
1213                        (type ? "DMA Read" : "DMA Write"),
1214                        (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1215                        PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1216         return 0;
1217 }
1218
1219 #define PRIMARY_FAULT_REG_LEN (16)
1220 irqreturn_t dmar_fault(int irq, void *dev_id)
1221 {
1222         struct intel_iommu *iommu = dev_id;
1223         int reg, fault_index;
1224         u32 fault_status;
1225         unsigned long flag;
1226
1227         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1228         fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1229         if (fault_status)
1230                 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1231
1232         /* TBD: ignore advanced fault log currently */
1233         if (!(fault_status & DMA_FSTS_PPF))
1234                 goto unlock_exit;
1235
1236         fault_index = dma_fsts_fault_record_index(fault_status);
1237         reg = cap_fault_reg_offset(iommu->cap);
1238         while (1) {
1239                 u8 fault_reason;
1240                 u16 source_id;
1241                 u64 guest_addr;
1242                 int type;
1243                 u32 data;
1244
1245                 /* highest 32 bits */
1246                 data = readl(iommu->reg + reg +
1247                                 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1248                 if (!(data & DMA_FRCD_F))
1249                         break;
1250
1251                 fault_reason = dma_frcd_fault_reason(data);
1252                 type = dma_frcd_type(data);
1253
1254                 data = readl(iommu->reg + reg +
1255                                 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1256                 source_id = dma_frcd_source_id(data);
1257
1258                 guest_addr = dmar_readq(iommu->reg + reg +
1259                                 fault_index * PRIMARY_FAULT_REG_LEN);
1260                 guest_addr = dma_frcd_page_addr(guest_addr);
1261                 /* clear the fault */
1262                 writel(DMA_FRCD_F, iommu->reg + reg +
1263                         fault_index * PRIMARY_FAULT_REG_LEN + 12);
1264
1265                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1266
1267                 dmar_fault_do_one(iommu, type, fault_reason,
1268                                 source_id, guest_addr);
1269
1270                 fault_index++;
1271                 if (fault_index >= cap_num_fault_regs(iommu->cap))
1272                         fault_index = 0;
1273                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1274         }
1275
1276         writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1277
1278 unlock_exit:
1279         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1280         return IRQ_HANDLED;
1281 }
1282
1283 int dmar_set_interrupt(struct intel_iommu *iommu)
1284 {
1285         int irq, ret;
1286
1287         /*
1288          * Check if the fault interrupt is already initialized.
1289          */
1290         if (iommu->irq)
1291                 return 0;
1292
1293         irq = create_irq();
1294         if (!irq) {
1295                 pr_err("IOMMU: no free vectors\n");
1296                 return -EINVAL;
1297         }
1298
1299         irq_set_handler_data(irq, iommu);
1300         iommu->irq = irq;
1301
1302         ret = arch_setup_dmar_msi(irq);
1303         if (ret) {
1304                 irq_set_handler_data(irq, NULL);
1305                 iommu->irq = 0;
1306                 destroy_irq(irq);
1307                 return ret;
1308         }
1309
1310         ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1311         if (ret)
1312                 pr_err("IOMMU: can't request irq\n");
1313         return ret;
1314 }
1315
1316 int __init enable_drhd_fault_handling(void)
1317 {
1318         struct dmar_drhd_unit *drhd;
1319         struct intel_iommu *iommu;
1320
1321         /*
1322          * Enable fault control interrupt.
1323          */
1324         for_each_iommu(iommu, drhd) {
1325                 u32 fault_status;
1326                 int ret = dmar_set_interrupt(iommu);
1327
1328                 if (ret) {
1329                         pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1330                                (unsigned long long)drhd->reg_base_addr, ret);
1331                         return -1;
1332                 }
1333
1334                 /*
1335                  * Clear any previous faults.
1336                  */
1337                 dmar_fault(iommu->irq, iommu);
1338                 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1339                 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1340         }
1341
1342         return 0;
1343 }
1344
1345 /*
1346  * Re-enable Queued Invalidation interface.
1347  */
1348 int dmar_reenable_qi(struct intel_iommu *iommu)
1349 {
1350         if (!ecap_qis(iommu->ecap))
1351                 return -ENOENT;
1352
1353         if (!iommu->qi)
1354                 return -ENOENT;
1355
1356         /*
1357          * First disable queued invalidation.
1358          */
1359         dmar_disable_qi(iommu);
1360         /*
1361          * Then enable queued invalidation again. Since there are no pending
1362          * invalidation requests now, it's safe to re-enable queued
1363          * invalidation.
1364          */
1365         __dmar_enable_qi(iommu);
1366
1367         return 0;
1368 }
1369
1370 /*
1371  * Check interrupt remapping support in DMAR table description.
1372  */
1373 int __init dmar_ir_support(void)
1374 {
1375         struct acpi_table_dmar *dmar;
1376         dmar = (struct acpi_table_dmar *)dmar_tbl;
1377         if (!dmar)
1378                 return 0;
1379         return dmar->flags & 0x1;
1380 }
1381
1382 static int __init dmar_free_unused_resources(void)
1383 {
1384         struct dmar_drhd_unit *dmaru, *dmaru_n;
1385
1386         /* DMAR units are in use */
1387         if (irq_remapping_enabled || intel_iommu_enabled)
1388                 return 0;
1389
1390         list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1391                 list_del(&dmaru->list);
1392                 dmar_free_drhd(dmaru);
1393         }
1394
1395         return 0;
1396 }
1397
1398 late_initcall(dmar_free_unused_resources);
1399 IOMMU_INIT_POST(detect_intel_iommu);