2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
47 #define ROOT_SIZE VTD_PAGE_SIZE
48 #define CONTEXT_SIZE VTD_PAGE_SIZE
50 #define IS_BRIDGE_HOST_DEVICE(pdev) \
51 ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56 #define IOAPIC_RANGE_START (0xfee00000)
57 #define IOAPIC_RANGE_END (0xfeefffff)
58 #define IOVA_START_ADDR (0x1000)
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62 #define MAX_AGAW_WIDTH 64
64 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
65 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
68 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
69 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
70 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
71 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
73 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
74 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
75 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
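/*
 * Editor's note -- illustrative arithmetic, not part of the original file:
 * for the default 48-bit guest address width and VTD_PAGE_SHIFT == 12,
 * the macros above evaluate to
 *
 *   __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1                (2^36 4KiB pages)
 *   DOMAIN_MAX_ADDR(48)  == ((1ULL << 36) - 1) << 12
 *                        == 0x0000fffffffff000              (last page-aligned address)
 *   DMA_32BIT_PFN        == 0xfffff                         (4GiB boundary in 4KiB PFNs,
 *                                                             assuming PAGE_SHIFT == 12)
 */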
77 /* page table handling */
78 #define LEVEL_STRIDE (9)
79 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
82 * This bitmap is used to advertise the page sizes our hardware supports
83 * to the IOMMU core, which will then use this information to split
84 * physically contiguous memory regions it is mapping into page sizes that we support.
87 * Traditionally the IOMMU core just handed us the mappings directly,
88 * after making sure the size is an order of a 4KiB page and that the
89 * mapping has natural alignment.
91 * To retain this behavior, we currently advertise that we support
92 * all page sizes that are an order of 4KiB.
94 * If at some point we'd like to utilize the IOMMU core's new behavior,
95 * we could change this to advertise the real page sizes we support.
97 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
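/*
 * Editor's sketch (not part of the original driver): with the bitmap above,
 * bit N set means a page size of (1UL << N) is advertised, so ~0xFFFUL claims
 * every power-of-two size from 4KiB upwards.  A hypothetical helper that
 * checks whether a given power-of-two size is advertised might look like:
 */
static inline bool example_pgsize_advertised(unsigned long size)
{
	/* 'size' is assumed to be a power of two */
	return (size & INTEL_IOMMU_PGSIZES) != 0;
}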
99 static inline int agaw_to_level(int agaw)
104 static inline int agaw_to_width(int agaw)
106 return 30 + agaw * LEVEL_STRIDE;
109 static inline int width_to_agaw(int width)
111 return (width - 30) / LEVEL_STRIDE;
114 static inline unsigned int level_to_offset_bits(int level)
116 return (level - 1) * LEVEL_STRIDE;
119 static inline int pfn_level_offset(unsigned long pfn, int level)
121 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
124 static inline unsigned long level_mask(int level)
126 return -1UL << level_to_offset_bits(level);
129 static inline unsigned long level_size(int level)
131 return 1UL << level_to_offset_bits(level);
134 static inline unsigned long align_to_level(unsigned long pfn, int level)
136 return (pfn + level_size(level) - 1) & level_mask(level);
139 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141 return 1 << ((lvl - 1) * LEVEL_STRIDE);
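/*
 * Editor's note -- worked example, not part of the original file: an agaw of
 * 2 gives agaw_to_width(2) == 30 + 2*9 == 48 bits and, in the original
 * driver, agaw_to_level(2) == agaw + 2 == 4 page-table levels.  At level 1,
 * level_to_offset_bits() is 0 and pfn_level_offset() returns the low 9 bits
 * of the pfn; at level 4 it returns pfn bits 27..35.  level_size(2) == 512
 * pfns, i.e. one 2MiB superpage.
 */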
144 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
145 are never going to work. */
146 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
151 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 static inline unsigned long page_to_dma_pfn(struct page *pg)
157 return mm_to_dma_pfn(page_to_pfn(pg));
159 static inline unsigned long virt_to_dma_pfn(void *p)
161 return page_to_dma_pfn(virt_to_page(p));
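/*
 * Editor's note -- worked example, not part of the original file: on x86 with
 * PAGE_SHIFT == VTD_PAGE_SHIFT == 12 these conversions are the identity.  On
 * a hypothetical architecture with 64KiB MM pages (PAGE_SHIFT == 16),
 * mm_to_dma_pfn() multiplies by 16 and dma_to_mm_pfn() divides by 16, so MM
 * pfn 3 covers VT-d pfns 48..63.
 */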
164 /* global iommu list, set NULL for ignored DMAR units */
165 static struct intel_iommu **g_iommus;
167 static void __init check_tylersburg_isoch(void);
168 static int rwbf_quirk;
171 * set to 1 to panic the kernel if VT-d cannot be enabled successfully
172 * (used when the kernel is launched with TXT)
174 static int force_on = 0;
179 * 12-63: Context Ptr (12 - (haw-1))
186 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
187 static inline bool root_present(struct root_entry *root)
189 return (root->val & 1);
191 static inline void set_root_present(struct root_entry *root)
195 static inline void set_root_value(struct root_entry *root, unsigned long value)
197 root->val |= value & VTD_PAGE_MASK;
200 static inline struct context_entry *
201 get_context_addr_from_root(struct root_entry *root)
203 return (struct context_entry *)
204 (root_present(root)?phys_to_virt(
205 root->val & VTD_PAGE_MASK) :
212 * 1: fault processing disable
213 * 2-3: translation type
214 * 12-63: address space root
220 struct context_entry {
225 static inline bool context_present(struct context_entry *context)
227 return (context->lo & 1);
229 static inline void context_set_present(struct context_entry *context)
234 static inline void context_set_fault_enable(struct context_entry *context)
236 context->lo &= (((u64)-1) << 2) | 1;
239 static inline void context_set_translation_type(struct context_entry *context,
242 context->lo &= (((u64)-1) << 4) | 3;
243 context->lo |= (value & 3) << 2;
246 static inline void context_set_address_root(struct context_entry *context,
249 context->lo |= value & VTD_PAGE_MASK;
252 static inline void context_set_address_width(struct context_entry *context,
255 context->hi |= value & 7;
258 static inline void context_set_domain_id(struct context_entry *context,
261 context->hi |= (value & ((1 << 16) - 1)) << 8;
264 static inline void context_clear_entry(struct context_entry *context)
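/*
 * Editor's sketch (not part of the original driver): how the helpers above
 * compose a complete context entry.  This mirrors what
 * domain_context_mapping_one() does later in this file; 'pgd_phys',
 * 'domain_id' and 'agaw' are hypothetical values supplied by the caller.
 */
static void example_fill_context(struct context_entry *ctx,
				 u64 pgd_phys, u16 domain_id, int agaw)
{
	context_clear_entry(ctx);                        /* start from all zeroes        */
	context_set_domain_id(ctx, domain_id);           /* bits 8-23 of the high word   */
	context_set_address_width(ctx, agaw);            /* bits 0-2 of the high word    */
	context_set_address_root(ctx, pgd_phys);         /* bits 12-63 of the low word   */
	context_set_translation_type(ctx, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(ctx);                   /* clear the fault-disable bit  */
	context_set_present(ctx);                        /* bit 0: mark the entry valid  */
}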
277 * 12-63: Host physical address
283 static inline void dma_clear_pte(struct dma_pte *pte)
288 static inline void dma_set_pte_readable(struct dma_pte *pte)
290 pte->val |= DMA_PTE_READ;
293 static inline void dma_set_pte_writable(struct dma_pte *pte)
295 pte->val |= DMA_PTE_WRITE;
298 static inline void dma_set_pte_snp(struct dma_pte *pte)
300 pte->val |= DMA_PTE_SNP;
303 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
305 pte->val = (pte->val & ~3) | (prot & 3);
308 static inline u64 dma_pte_addr(struct dma_pte *pte)
311 return pte->val & VTD_PAGE_MASK;
313 /* Must have a full atomic 64-bit read */
314 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
318 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
320 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
323 static inline bool dma_pte_present(struct dma_pte *pte)
325 return (pte->val & 3) != 0;
328 static inline bool dma_pte_superpage(struct dma_pte *pte)
330 return (pte->val & (1 << 7));
333 static inline int first_pte_in_page(struct dma_pte *pte)
335 return !((unsigned long)pte & ~VTD_PAGE_MASK);
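/*
 * Editor's sketch (not part of the original driver): composing a present,
 * read/write leaf PTE for a physical pfn with the helpers above.  This is
 * roughly what __domain_mapping() does in bulk further down; 'pfn' is a
 * hypothetical host page frame number.
 */
static void example_fill_pte(struct dma_pte *pte, unsigned long pfn)
{
	dma_clear_pte(pte);                     /* val = 0                       */
	dma_set_pte_pfn(pte, pfn);              /* bits 12-63: host address      */
	dma_set_pte_readable(pte);              /* bit 0: DMA read allowed       */
	dma_set_pte_writable(pte);              /* bit 1: DMA write allowed      */
	/* dma_pte_present() is now true because the R/W bits are non-zero */
}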
339 * This domain is a static identity-mapping domain.
340 * 1. This domain creates a static 1:1 mapping to all usable memory.
341 * 2. It maps to each iommu if successful.
342 * 3. Each iommu maps to this domain if successful.
344 static struct dmar_domain *si_domain;
345 static int hw_pass_through = 1;
347 /* devices under the same p2p bridge are owned in one domain */
348 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
350 /* domain represents a virtual machine; more than one device
351 * across iommus may be owned by one domain, e.g. a kvm guest.
353 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
355 /* si_domain contains multiple devices */
356 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
359 int id; /* domain id */
360 int nid; /* node id */
361 unsigned long iommu_bmp; /* bitmap of iommus this domain uses */
363 struct list_head devices; /* all devices' list */
364 struct iova_domain iovad; /* iova's that belong to this domain */
366 struct dma_pte *pgd; /* virtual address */
367 int gaw; /* max guest address width */
369 /* adjusted guest address width, 0 is level 2 30-bit */
372 int flags; /* flags to find out type of domain */
374 int iommu_coherency;/* indicate coherency of iommu access */
375 int iommu_snooping; /* indicate snooping control feature*/
376 int iommu_count; /* reference count of iommu */
377 int iommu_superpage; /* Level of superpages supported:
378 0 == 4KiB (no superpages), 1 == 2MiB,
379 2 == 1GiB, 3 == 512GiB, 4 == 256TiB */
380 spinlock_t iommu_lock; /* protect iommu set in domain */
381 u64 max_addr; /* maximum mapped address */
384 /* PCI domain-device relationship */
385 struct device_domain_info {
386 struct list_head link; /* link to domain siblings */
387 struct list_head global; /* link to global list */
388 int segment; /* PCI domain */
389 u8 bus; /* PCI bus number */
390 u8 devfn; /* PCI devfn number */
391 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
392 struct intel_iommu *iommu; /* IOMMU used by this device */
393 struct dmar_domain *domain; /* pointer to domain */
396 static void flush_unmaps_timeout(unsigned long data);
398 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
400 #define HIGH_WATER_MARK 250
401 struct deferred_flush_tables {
403 struct iova *iova[HIGH_WATER_MARK];
404 struct dmar_domain *domain[HIGH_WATER_MARK];
407 static struct deferred_flush_tables *deferred_flush;
409 /* bitmap for indexing intel_iommus */
410 static int g_num_of_iommus;
412 static DEFINE_SPINLOCK(async_umap_flush_lock);
413 static LIST_HEAD(unmaps_to_do);
416 static long list_size;
418 static void domain_remove_dev_info(struct dmar_domain *domain);
420 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
421 int dmar_disabled = 0;
423 int dmar_disabled = 1;
424 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
426 int intel_iommu_enabled = 0;
427 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
429 static int dmar_map_gfx = 1;
430 static int dmar_forcedac;
431 static int intel_iommu_strict;
432 static int intel_iommu_superpage = 1;
434 int intel_iommu_gfx_mapped;
435 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
437 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
438 static DEFINE_SPINLOCK(device_domain_lock);
439 static LIST_HEAD(device_domain_list);
441 static struct iommu_ops intel_iommu_ops;
443 static int __init intel_iommu_setup(char *str)
448 if (!strncmp(str, "on", 2)) {
450 printk(KERN_INFO "Intel-IOMMU: enabled\n");
451 } else if (!strncmp(str, "off", 3)) {
453 printk(KERN_INFO "Intel-IOMMU: disabled\n");
454 } else if (!strncmp(str, "igfx_off", 8)) {
457 "Intel-IOMMU: disable GFX device mapping\n");
458 } else if (!strncmp(str, "forcedac", 8)) {
460 "Intel-IOMMU: Forcing DAC for PCI devices\n");
462 } else if (!strncmp(str, "strict", 6)) {
464 "Intel-IOMMU: disable batched IOTLB flush\n");
465 intel_iommu_strict = 1;
466 } else if (!strncmp(str, "sp_off", 6)) {
468 "Intel-IOMMU: disable supported super page\n");
469 intel_iommu_superpage = 0;
472 str += strcspn(str, ",");
478 __setup("intel_iommu=", intel_iommu_setup);
480 static struct kmem_cache *iommu_domain_cache;
481 static struct kmem_cache *iommu_devinfo_cache;
482 static struct kmem_cache *iommu_iova_cache;
484 static inline void *alloc_pgtable_page(int node)
489 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
491 vaddr = page_address(page);
495 static inline void free_pgtable_page(void *vaddr)
497 free_page((unsigned long)vaddr);
500 static inline void *alloc_domain_mem(void)
502 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
505 static void free_domain_mem(void *vaddr)
507 kmem_cache_free(iommu_domain_cache, vaddr);
510 static inline void * alloc_devinfo_mem(void)
512 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
515 static inline void free_devinfo_mem(void *vaddr)
517 kmem_cache_free(iommu_devinfo_cache, vaddr);
520 struct iova *alloc_iova_mem(void)
522 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
525 void free_iova_mem(struct iova *iova)
527 kmem_cache_free(iommu_iova_cache, iova);
531 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
536 sagaw = cap_sagaw(iommu->cap);
537 for (agaw = width_to_agaw(max_gaw);
539 if (test_bit(agaw, &sagaw))
547 * Calculate max SAGAW for each iommu.
549 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
551 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
555 * Calculate agaw for each iommu.
556 * "SAGAW" may be different across iommus; use a default agaw, and
557 * fall back to a smaller supported agaw for iommus that don't support the default.
559 int iommu_calculate_agaw(struct intel_iommu *iommu)
561 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
564 /* This function only returns a single iommu in a domain */
565 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
569 /* si_domain and vm domain should not get here. */
570 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
571 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
573 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
574 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
577 return g_iommus[iommu_id];
580 static void domain_update_iommu_coherency(struct dmar_domain *domain)
584 domain->iommu_coherency = 1;
586 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
587 if (!ecap_coherent(g_iommus[i]->ecap)) {
588 domain->iommu_coherency = 0;
594 static void domain_update_iommu_snooping(struct dmar_domain *domain)
598 domain->iommu_snooping = 1;
600 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
601 if (!ecap_sc_support(g_iommus[i]->ecap)) {
602 domain->iommu_snooping = 0;
608 static void domain_update_iommu_superpage(struct dmar_domain *domain)
610 struct dmar_drhd_unit *drhd;
611 struct intel_iommu *iommu = NULL;
614 if (!intel_iommu_superpage) {
615 domain->iommu_superpage = 0;
619 /* set iommu_superpage to the smallest common denominator */
620 for_each_active_iommu(iommu, drhd) {
621 mask &= cap_super_page_val(iommu->cap);
626 domain->iommu_superpage = fls(mask);
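/*
 * Editor's note -- worked example, not part of the original file:
 * cap_super_page_val() yields one bit per superpage level (bit 0 == 2MiB,
 * bit 1 == 1GiB).  If one iommu reports 0x3 (2MiB and 1GiB) and another
 * reports 0x1 (2MiB only), the running AND leaves mask == 0x1 and
 * fls(mask) == 1, so the domain is limited to 2MiB superpages.  If any iommu
 * reports 0, the result is 0 and only 4KiB pages are used.
 */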
629 /* Some capabilities may be different across iommus */
630 static void domain_update_iommu_cap(struct dmar_domain *domain)
632 domain_update_iommu_coherency(domain);
633 domain_update_iommu_snooping(domain);
634 domain_update_iommu_superpage(domain);
637 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
639 struct dmar_drhd_unit *drhd = NULL;
642 for_each_drhd_unit(drhd) {
645 if (segment != drhd->segment)
648 for (i = 0; i < drhd->devices_cnt; i++) {
649 if (drhd->devices[i] &&
650 drhd->devices[i]->bus->number == bus &&
651 drhd->devices[i]->devfn == devfn)
653 if (drhd->devices[i] &&
654 drhd->devices[i]->subordinate &&
655 drhd->devices[i]->subordinate->number <= bus &&
656 drhd->devices[i]->subordinate->subordinate >= bus)
660 if (drhd->include_all)
667 static void domain_flush_cache(struct dmar_domain *domain,
668 void *addr, int size)
670 if (!domain->iommu_coherency)
671 clflush_cache_range(addr, size);
674 /* Gets context entry for a given bus and devfn */
675 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
678 struct root_entry *root;
679 struct context_entry *context;
680 unsigned long phy_addr;
683 spin_lock_irqsave(&iommu->lock, flags);
684 root = &iommu->root_entry[bus];
685 context = get_context_addr_from_root(root);
687 context = (struct context_entry *)
688 alloc_pgtable_page(iommu->node);
690 spin_unlock_irqrestore(&iommu->lock, flags);
693 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
694 phy_addr = virt_to_phys((void *)context);
695 set_root_value(root, phy_addr);
696 set_root_present(root);
697 __iommu_flush_cache(iommu, root, sizeof(*root));
699 spin_unlock_irqrestore(&iommu->lock, flags);
700 return &context[devfn];
703 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
705 struct root_entry *root;
706 struct context_entry *context;
710 spin_lock_irqsave(&iommu->lock, flags);
711 root = &iommu->root_entry[bus];
712 context = get_context_addr_from_root(root);
717 ret = context_present(&context[devfn]);
719 spin_unlock_irqrestore(&iommu->lock, flags);
723 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
725 struct root_entry *root;
726 struct context_entry *context;
729 spin_lock_irqsave(&iommu->lock, flags);
730 root = &iommu->root_entry[bus];
731 context = get_context_addr_from_root(root);
733 context_clear_entry(&context[devfn]);
734 __iommu_flush_cache(iommu, &context[devfn], \
737 spin_unlock_irqrestore(&iommu->lock, flags);
740 static void free_context_table(struct intel_iommu *iommu)
742 struct root_entry *root;
745 struct context_entry *context;
747 spin_lock_irqsave(&iommu->lock, flags);
748 if (!iommu->root_entry) {
751 for (i = 0; i < ROOT_ENTRY_NR; i++) {
752 root = &iommu->root_entry[i];
753 context = get_context_addr_from_root(root);
755 free_pgtable_page(context);
757 free_pgtable_page(iommu->root_entry);
758 iommu->root_entry = NULL;
760 spin_unlock_irqrestore(&iommu->lock, flags);
763 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
764 unsigned long pfn, int target_level)
766 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
767 struct dma_pte *parent, *pte = NULL;
768 int level = agaw_to_level(domain->agaw);
771 BUG_ON(!domain->pgd);
772 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
773 parent = domain->pgd;
778 offset = pfn_level_offset(pfn, level);
779 pte = &parent[offset];
780 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
782 if (level == target_level)
785 if (!dma_pte_present(pte)) {
788 tmp_page = alloc_pgtable_page(domain->nid);
793 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
794 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
795 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
796 /* Someone else set it while we were thinking; use theirs. */
797 free_pgtable_page(tmp_page);
800 domain_flush_cache(domain, pte, sizeof(*pte));
803 parent = phys_to_virt(dma_pte_addr(pte));
811 /* return the pte for an address at a specific level */
812 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
814 int level, int *large_page)
816 struct dma_pte *parent, *pte = NULL;
817 int total = agaw_to_level(domain->agaw);
820 parent = domain->pgd;
821 while (level <= total) {
822 offset = pfn_level_offset(pfn, total);
823 pte = &parent[offset];
827 if (!dma_pte_present(pte)) {
832 if (pte->val & DMA_PTE_LARGE_PAGE) {
837 parent = phys_to_virt(dma_pte_addr(pte));
843 /* clear last level pte; a tlb flush should follow */
844 static int dma_pte_clear_range(struct dmar_domain *domain,
845 unsigned long start_pfn,
846 unsigned long last_pfn)
848 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
849 unsigned int large_page = 1;
850 struct dma_pte *first_pte, *pte;
853 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
854 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
855 BUG_ON(start_pfn > last_pfn);
857 /* we don't need lock here; nobody else touches the iova range */
860 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
862 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
867 start_pfn += lvl_to_nr_pages(large_page);
869 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
871 domain_flush_cache(domain, first_pte,
872 (void *)pte - (void *)first_pte);
874 } while (start_pfn && start_pfn <= last_pfn);
876 order = (large_page - 1) * 9;
880 /* free page table pages. last level pte should already be cleared */
881 static void dma_pte_free_pagetable(struct dmar_domain *domain,
882 unsigned long start_pfn,
883 unsigned long last_pfn)
885 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
886 struct dma_pte *first_pte, *pte;
887 int total = agaw_to_level(domain->agaw);
892 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
893 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
894 BUG_ON(start_pfn > last_pfn);
896 /* We don't need lock here; nobody else touches the iova range */
898 while (level <= total) {
899 tmp = align_to_level(start_pfn, level);
901 /* If we can't even clear one PTE at this level, we're done */
902 if (tmp + level_size(level) - 1 > last_pfn)
907 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
908 if (large_page > level)
909 level = large_page + 1;
911 tmp = align_to_level(tmp + 1, level + 1);
915 if (dma_pte_present(pte)) {
916 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
920 tmp += level_size(level);
921 } while (!first_pte_in_page(pte) &&
922 tmp + level_size(level) - 1 <= last_pfn);
924 domain_flush_cache(domain, first_pte,
925 (void *)pte - (void *)first_pte);
927 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
931 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
932 free_pgtable_page(domain->pgd);
938 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
940 struct root_entry *root;
943 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
947 __iommu_flush_cache(iommu, root, ROOT_SIZE);
949 spin_lock_irqsave(&iommu->lock, flags);
950 iommu->root_entry = root;
951 spin_unlock_irqrestore(&iommu->lock, flags);
956 static void iommu_set_root_entry(struct intel_iommu *iommu)
962 addr = iommu->root_entry;
964 raw_spin_lock_irqsave(&iommu->register_lock, flag);
965 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
967 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
969 /* Make sure hardware completes it */
970 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
971 readl, (sts & DMA_GSTS_RTPS), sts);
973 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
976 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
981 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
984 raw_spin_lock_irqsave(&iommu->register_lock, flag);
985 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
987 /* Make sure hardware completes it */
988 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
989 readl, (!(val & DMA_GSTS_WBFS)), val);
991 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
994 /* return value determines if we need a write buffer flush */
995 static void __iommu_flush_context(struct intel_iommu *iommu,
996 u16 did, u16 source_id, u8 function_mask,
1003 case DMA_CCMD_GLOBAL_INVL:
1004 val = DMA_CCMD_GLOBAL_INVL;
1006 case DMA_CCMD_DOMAIN_INVL:
1007 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1009 case DMA_CCMD_DEVICE_INVL:
1010 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1011 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1016 val |= DMA_CCMD_ICC;
1018 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1019 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1021 /* Make sure hardware completes it */
1022 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1023 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1025 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1028 /* return value determines if we need a write buffer flush */
1029 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1030 u64 addr, unsigned int size_order, u64 type)
1032 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1033 u64 val = 0, val_iva = 0;
1037 case DMA_TLB_GLOBAL_FLUSH:
1038 /* global flush doesn't need to set IVA_REG */
1039 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1041 case DMA_TLB_DSI_FLUSH:
1042 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1044 case DMA_TLB_PSI_FLUSH:
1045 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1046 /* Note: always flush non-leaf currently */
1047 val_iva = size_order | addr;
1052 /* Note: set drain read/write */
1055 * This is probably meant to be extra secure. Looks like we can
1056 * ignore it without any impact.
1058 if (cap_read_drain(iommu->cap))
1059 val |= DMA_TLB_READ_DRAIN;
1061 if (cap_write_drain(iommu->cap))
1062 val |= DMA_TLB_WRITE_DRAIN;
1064 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1065 /* Note: Only uses first TLB reg currently */
1067 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1068 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1070 /* Make sure hardware completes it */
1071 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1072 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1074 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1076 /* check IOTLB invalidation granularity */
1077 if (DMA_TLB_IAIG(val) == 0)
1078 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1079 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1080 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1081 (unsigned long long)DMA_TLB_IIRG(type),
1082 (unsigned long long)DMA_TLB_IAIG(val));
1085 static struct device_domain_info *iommu_support_dev_iotlb(
1086 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1089 unsigned long flags;
1090 struct device_domain_info *info;
1091 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1093 if (!ecap_dev_iotlb_support(iommu->ecap))
1099 spin_lock_irqsave(&device_domain_lock, flags);
1100 list_for_each_entry(info, &domain->devices, link)
1101 if (info->bus == bus && info->devfn == devfn) {
1105 spin_unlock_irqrestore(&device_domain_lock, flags);
1107 if (!found || !info->dev)
1110 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1113 if (!dmar_find_matched_atsr_unit(info->dev))
1116 info->iommu = iommu;
1121 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1126 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1129 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1131 if (!info->dev || !pci_ats_enabled(info->dev))
1134 pci_disable_ats(info->dev);
1137 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1138 u64 addr, unsigned mask)
1141 unsigned long flags;
1142 struct device_domain_info *info;
1144 spin_lock_irqsave(&device_domain_lock, flags);
1145 list_for_each_entry(info, &domain->devices, link) {
1146 if (!info->dev || !pci_ats_enabled(info->dev))
1149 sid = info->bus << 8 | info->devfn;
1150 qdep = pci_ats_queue_depth(info->dev);
1151 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1153 spin_unlock_irqrestore(&device_domain_lock, flags);
1156 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1157 unsigned long pfn, unsigned int pages, int map)
1159 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1160 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1165 * Fall back to domain-selective flush if there is no PSI support or the size is too big.
1167 * PSI requires the page size to be 2 ^ x, and the base address to be naturally
1168 * aligned to the size.
1170 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1171 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1174 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1178 * In caching mode, changes of pages from non-present to present require
1179 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1181 if (!cap_caching_mode(iommu->cap) || !map)
1182 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
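/*
 * Editor's note -- worked example, not part of the original file: for
 * pages == 1 the mask is ilog2(1) == 0 and exactly one 4KiB page is
 * invalidated.  For pages == 9 the count is rounded up to 16, giving
 * mask == 4, i.e. a 64KiB naturally-aligned region is invalidated.  If that
 * mask exceeds cap_max_amask_val() for this iommu, the code above falls back
 * to a domain-selective flush instead.
 */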
1185 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1188 unsigned long flags;
1190 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1191 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1192 pmen &= ~DMA_PMEN_EPM;
1193 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1195 /* wait for the protected region status bit to clear */
1196 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1197 readl, !(pmen & DMA_PMEN_PRS), pmen);
1199 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1202 static int iommu_enable_translation(struct intel_iommu *iommu)
1205 unsigned long flags;
1207 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1208 iommu->gcmd |= DMA_GCMD_TE;
1209 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1211 /* Make sure hardware completes it */
1212 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1213 readl, (sts & DMA_GSTS_TES), sts);
1215 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1219 static int iommu_disable_translation(struct intel_iommu *iommu)
1224 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1225 iommu->gcmd &= ~DMA_GCMD_TE;
1226 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1228 /* Make sure hardware completes it */
1229 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1230 readl, (!(sts & DMA_GSTS_TES)), sts);
1232 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1237 static int iommu_init_domains(struct intel_iommu *iommu)
1239 unsigned long ndomains;
1240 unsigned long nlongs;
1242 ndomains = cap_ndoms(iommu->cap);
1243 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
1245 nlongs = BITS_TO_LONGS(ndomains);
1247 spin_lock_init(&iommu->lock);
1249 /* TBD: there might be 64K domains,
1250 * consider other allocation schemes for future chips
1252 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1253 if (!iommu->domain_ids) {
1254 printk(KERN_ERR "Allocating domain id array failed\n");
1257 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1259 if (!iommu->domains) {
1260 printk(KERN_ERR "Allocating domain array failed\n");
1265 * if Caching mode is set, then invalid translations are tagged
1266 * with domainid 0. Hence we need to pre-allocate it.
1268 if (cap_caching_mode(iommu->cap))
1269 set_bit(0, iommu->domain_ids);
1274 static void domain_exit(struct dmar_domain *domain);
1275 static void vm_domain_exit(struct dmar_domain *domain);
1277 void free_dmar_iommu(struct intel_iommu *iommu)
1279 struct dmar_domain *domain;
1281 unsigned long flags;
1283 if ((iommu->domains) && (iommu->domain_ids)) {
1284 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1285 domain = iommu->domains[i];
1286 clear_bit(i, iommu->domain_ids);
1288 spin_lock_irqsave(&domain->iommu_lock, flags);
1289 if (--domain->iommu_count == 0) {
1290 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1291 vm_domain_exit(domain);
1293 domain_exit(domain);
1295 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1299 if (iommu->gcmd & DMA_GCMD_TE)
1300 iommu_disable_translation(iommu);
1303 irq_set_handler_data(iommu->irq, NULL);
1304 /* This will mask the irq */
1305 free_irq(iommu->irq, iommu);
1306 destroy_irq(iommu->irq);
1309 kfree(iommu->domains);
1310 kfree(iommu->domain_ids);
1312 g_iommus[iommu->seq_id] = NULL;
1314 /* if all iommus are freed, free g_iommus */
1315 for (i = 0; i < g_num_of_iommus; i++) {
1320 if (i == g_num_of_iommus)
1323 /* free context mapping */
1324 free_context_table(iommu);
1327 static struct dmar_domain *alloc_domain(void)
1329 struct dmar_domain *domain;
1331 domain = alloc_domain_mem();
1336 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1342 static int iommu_attach_domain(struct dmar_domain *domain,
1343 struct intel_iommu *iommu)
1346 unsigned long ndomains;
1347 unsigned long flags;
1349 ndomains = cap_ndoms(iommu->cap);
1351 spin_lock_irqsave(&iommu->lock, flags);
1353 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1354 if (num >= ndomains) {
1355 spin_unlock_irqrestore(&iommu->lock, flags);
1356 printk(KERN_ERR "IOMMU: no free domain ids\n");
1361 set_bit(num, iommu->domain_ids);
1362 set_bit(iommu->seq_id, &domain->iommu_bmp);
1363 iommu->domains[num] = domain;
1364 spin_unlock_irqrestore(&iommu->lock, flags);
1369 static void iommu_detach_domain(struct dmar_domain *domain,
1370 struct intel_iommu *iommu)
1372 unsigned long flags;
1376 spin_lock_irqsave(&iommu->lock, flags);
1377 ndomains = cap_ndoms(iommu->cap);
1378 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1379 if (iommu->domains[num] == domain) {
1386 clear_bit(num, iommu->domain_ids);
1387 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1388 iommu->domains[num] = NULL;
1390 spin_unlock_irqrestore(&iommu->lock, flags);
1393 static struct iova_domain reserved_iova_list;
1394 static struct lock_class_key reserved_rbtree_key;
1396 static int dmar_init_reserved_ranges(void)
1398 struct pci_dev *pdev = NULL;
1402 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1404 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1405 &reserved_rbtree_key);
1407 /* IOAPIC ranges shouldn't be accessed by DMA */
1408 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1409 IOVA_PFN(IOAPIC_RANGE_END));
1411 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1415 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1416 for_each_pci_dev(pdev) {
1419 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1420 r = &pdev->resource[i];
1421 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1423 iova = reserve_iova(&reserved_iova_list,
1427 printk(KERN_ERR "Reserve iova failed\n");
1435 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1437 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1440 static inline int guestwidth_to_adjustwidth(int gaw)
1443 int r = (gaw - 12) % 9;
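/*
 * Editor's note -- worked example, not part of the original file: the
 * adjusted width rounds the guest width up so that (width - 12) is a
 * multiple of the 9-bit level stride.  gaw == 48 gives r == 0 and stays 48;
 * gaw == 40 gives r == 1 and is rounded up to 40 + 9 - 1 == 48; gaw == 36
 * gives r == 6 and becomes 39 (a 3-level table).
 */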
1454 static int domain_init(struct dmar_domain *domain, int guest_width)
1456 struct intel_iommu *iommu;
1457 int adjust_width, agaw;
1458 unsigned long sagaw;
1460 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1461 spin_lock_init(&domain->iommu_lock);
1463 domain_reserve_special_ranges(domain);
1465 /* calculate AGAW */
1466 iommu = domain_get_iommu(domain);
1467 if (guest_width > cap_mgaw(iommu->cap))
1468 guest_width = cap_mgaw(iommu->cap);
1469 domain->gaw = guest_width;
1470 adjust_width = guestwidth_to_adjustwidth(guest_width);
1471 agaw = width_to_agaw(adjust_width);
1472 sagaw = cap_sagaw(iommu->cap);
1473 if (!test_bit(agaw, &sagaw)) {
1474 /* hardware doesn't support it, choose a bigger one */
1475 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1476 agaw = find_next_bit(&sagaw, 5, agaw);
1480 domain->agaw = agaw;
1481 INIT_LIST_HEAD(&domain->devices);
1483 if (ecap_coherent(iommu->ecap))
1484 domain->iommu_coherency = 1;
1486 domain->iommu_coherency = 0;
1488 if (ecap_sc_support(iommu->ecap))
1489 domain->iommu_snooping = 1;
1491 domain->iommu_snooping = 0;
1493 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1494 domain->iommu_count = 1;
1495 domain->nid = iommu->node;
1497 /* always allocate the top pgd */
1498 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1501 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1505 static void domain_exit(struct dmar_domain *domain)
1507 struct dmar_drhd_unit *drhd;
1508 struct intel_iommu *iommu;
1510 /* Domain 0 is reserved, so don't process it */
1514 /* Flush any lazy unmaps that may reference this domain */
1515 if (!intel_iommu_strict)
1516 flush_unmaps_timeout(0);
1518 domain_remove_dev_info(domain);
1520 put_iova_domain(&domain->iovad);
1523 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1525 /* free page tables */
1526 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1528 for_each_active_iommu(iommu, drhd)
1529 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1530 iommu_detach_domain(domain, iommu);
1532 free_domain_mem(domain);
1535 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1536 u8 bus, u8 devfn, int translation)
1538 struct context_entry *context;
1539 unsigned long flags;
1540 struct intel_iommu *iommu;
1541 struct dma_pte *pgd;
1543 unsigned long ndomains;
1546 struct device_domain_info *info = NULL;
1548 pr_debug("Set context mapping for %02x:%02x.%d\n",
1549 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1551 BUG_ON(!domain->pgd);
1552 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1553 translation != CONTEXT_TT_MULTI_LEVEL);
1555 iommu = device_to_iommu(segment, bus, devfn);
1559 context = device_to_context_entry(iommu, bus, devfn);
1562 spin_lock_irqsave(&iommu->lock, flags);
1563 if (context_present(context)) {
1564 spin_unlock_irqrestore(&iommu->lock, flags);
1571 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1572 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1575 /* find an available domain id for this device in iommu */
1576 ndomains = cap_ndoms(iommu->cap);
1577 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1578 if (iommu->domains[num] == domain) {
1586 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1587 if (num >= ndomains) {
1588 spin_unlock_irqrestore(&iommu->lock, flags);
1589 printk(KERN_ERR "IOMMU: no free domain ids\n");
1593 set_bit(num, iommu->domain_ids);
1594 iommu->domains[num] = domain;
1598 /* Skip top levels of page tables for
1599 * an iommu whose agaw is less than the default.
1600 * Unnecessary for PT mode.
1602 if (translation != CONTEXT_TT_PASS_THROUGH) {
1603 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1604 pgd = phys_to_virt(dma_pte_addr(pgd));
1605 if (!dma_pte_present(pgd)) {
1606 spin_unlock_irqrestore(&iommu->lock, flags);
1613 context_set_domain_id(context, id);
1615 if (translation != CONTEXT_TT_PASS_THROUGH) {
1616 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1617 translation = info ? CONTEXT_TT_DEV_IOTLB :
1618 CONTEXT_TT_MULTI_LEVEL;
1621 * In pass-through mode, AW must be programmed to indicate the largest
1622 * AGAW value supported by hardware, and ASR is ignored by hardware.
1624 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1625 context_set_address_width(context, iommu->msagaw);
1627 context_set_address_root(context, virt_to_phys(pgd));
1628 context_set_address_width(context, iommu->agaw);
1631 context_set_translation_type(context, translation);
1632 context_set_fault_enable(context);
1633 context_set_present(context);
1634 domain_flush_cache(domain, context, sizeof(*context));
1637 * It's a non-present to present mapping. If hardware doesn't cache
1638 * non-present entries, we only need to flush the write-buffer. If it
1639 * _does_ cache non-present entries, then it does so in the special
1640 * domain #0, which we have to flush:
1642 if (cap_caching_mode(iommu->cap)) {
1643 iommu->flush.flush_context(iommu, 0,
1644 (((u16)bus) << 8) | devfn,
1645 DMA_CCMD_MASK_NOBIT,
1646 DMA_CCMD_DEVICE_INVL);
1647 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1649 iommu_flush_write_buffer(iommu);
1651 iommu_enable_dev_iotlb(info);
1652 spin_unlock_irqrestore(&iommu->lock, flags);
1654 spin_lock_irqsave(&domain->iommu_lock, flags);
1655 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1656 domain->iommu_count++;
1657 if (domain->iommu_count == 1)
1658 domain->nid = iommu->node;
1659 domain_update_iommu_cap(domain);
1661 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1666 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1670 struct pci_dev *tmp, *parent;
1672 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1673 pdev->bus->number, pdev->devfn,
1678 /* dependent device mapping */
1679 tmp = pci_find_upstream_pcie_bridge(pdev);
1682 /* Secondary interface's bus number and devfn 0 */
1683 parent = pdev->bus->self;
1684 while (parent != tmp) {
1685 ret = domain_context_mapping_one(domain,
1686 pci_domain_nr(parent->bus),
1687 parent->bus->number,
1688 parent->devfn, translation);
1691 parent = parent->bus->self;
1693 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1694 return domain_context_mapping_one(domain,
1695 pci_domain_nr(tmp->subordinate),
1696 tmp->subordinate->number, 0,
1698 else /* this is a legacy PCI bridge */
1699 return domain_context_mapping_one(domain,
1700 pci_domain_nr(tmp->bus),
1706 static int domain_context_mapped(struct pci_dev *pdev)
1709 struct pci_dev *tmp, *parent;
1710 struct intel_iommu *iommu;
1712 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1717 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1720 /* dependent device mapping */
1721 tmp = pci_find_upstream_pcie_bridge(pdev);
1724 /* Secondary interface's bus number and devfn 0 */
1725 parent = pdev->bus->self;
1726 while (parent != tmp) {
1727 ret = device_context_mapped(iommu, parent->bus->number,
1731 parent = parent->bus->self;
1733 if (pci_is_pcie(tmp))
1734 return device_context_mapped(iommu, tmp->subordinate->number,
1737 return device_context_mapped(iommu, tmp->bus->number,
1741 /* Returns a number of VTD pages, but aligned to MM page size */
1742 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1745 host_addr &= ~PAGE_MASK;
1746 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
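/*
 * Editor's note -- worked example, not part of the original file: with 4KiB
 * MM pages, host_addr == 0x1234 and size == 0x2000 give an in-page offset of
 * 0x234, and PAGE_ALIGN(0x234 + 0x2000) >> 12 == 3, so the 8KiB buffer needs
 * three VT-d pages because it straddles page boundaries.
 */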
1749 /* Return largest possible superpage level for a given mapping */
1750 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1751 unsigned long iov_pfn,
1752 unsigned long phy_pfn,
1753 unsigned long pages)
1755 int support, level = 1;
1756 unsigned long pfnmerge;
1758 support = domain->iommu_superpage;
1760 /* To use a large page, the virtual *and* physical addresses
1761 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1762 of them will mean we have to use smaller pages. So just
1763 merge them and check both at once. */
1764 pfnmerge = iov_pfn | phy_pfn;
1766 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1767 pages >>= VTD_STRIDE_SHIFT;
1770 pfnmerge >>= VTD_STRIDE_SHIFT;
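/*
 * Editor's note -- worked example, not part of the original file: for
 * iov_pfn == 0x200, phy_pfn == 0x1400 and pages == 0x400, the merged value
 * 0x1600 has its low nine bits clear and at least 512 pages remain, so the
 * loop above promotes the mapping to level 2 (one 2MiB superpage per step),
 * provided domain->iommu_superpage allows it.
 */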
1777 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1778 struct scatterlist *sg, unsigned long phys_pfn,
1779 unsigned long nr_pages, int prot)
1781 struct dma_pte *first_pte = NULL, *pte = NULL;
1782 phys_addr_t uninitialized_var(pteval);
1783 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1784 unsigned long sg_res;
1785 unsigned int largepage_lvl = 0;
1786 unsigned long lvl_pages = 0;
1788 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1790 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1793 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1798 sg_res = nr_pages + 1;
1799 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1802 while (nr_pages > 0) {
1806 sg_res = aligned_nrpages(sg->offset, sg->length);
1807 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1808 sg->dma_length = sg->length;
1809 pteval = page_to_phys(sg_page(sg)) | prot;
1810 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1814 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1816 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1819 /* It is a large page */
1820 if (largepage_lvl > 1)
1821 pteval |= DMA_PTE_LARGE_PAGE;
1823 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1826 /* We don't need lock here, nobody else
1827 * touches the iova range
1829 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1831 static int dumps = 5;
1832 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1833 iov_pfn, tmp, (unsigned long long)pteval);
1836 debug_dma_dump_mappings(NULL);
1841 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1843 BUG_ON(nr_pages < lvl_pages);
1844 BUG_ON(sg_res < lvl_pages);
1846 nr_pages -= lvl_pages;
1847 iov_pfn += lvl_pages;
1848 phys_pfn += lvl_pages;
1849 pteval += lvl_pages * VTD_PAGE_SIZE;
1850 sg_res -= lvl_pages;
1852 /* If the next PTE would be the first in a new page, then we
1853 need to flush the cache on the entries we've just written.
1854 And then we'll need to recalculate 'pte', so clear it and
1855 let it get set again in the if (!pte) block above.
1857 If we're done (!nr_pages) we need to flush the cache too.
1859 Also if we've been setting superpages, we may need to
1860 recalculate 'pte' and switch back to smaller pages for the
1861 end of the mapping, if the trailing size is not enough to
1862 use another superpage (i.e. sg_res < lvl_pages). */
1864 if (!nr_pages || first_pte_in_page(pte) ||
1865 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1866 domain_flush_cache(domain, first_pte,
1867 (void *)pte - (void *)first_pte);
1871 if (!sg_res && nr_pages)
1877 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1878 struct scatterlist *sg, unsigned long nr_pages,
1881 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1884 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1885 unsigned long phys_pfn, unsigned long nr_pages,
1888 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1891 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1896 clear_context_table(iommu, bus, devfn);
1897 iommu->flush.flush_context(iommu, 0, 0, 0,
1898 DMA_CCMD_GLOBAL_INVL);
1899 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1902 static void domain_remove_dev_info(struct dmar_domain *domain)
1904 struct device_domain_info *info;
1905 unsigned long flags;
1906 struct intel_iommu *iommu;
1908 spin_lock_irqsave(&device_domain_lock, flags);
1909 while (!list_empty(&domain->devices)) {
1910 info = list_entry(domain->devices.next,
1911 struct device_domain_info, link);
1912 list_del(&info->link);
1913 list_del(&info->global);
1915 info->dev->dev.archdata.iommu = NULL;
1916 spin_unlock_irqrestore(&device_domain_lock, flags);
1918 iommu_disable_dev_iotlb(info);
1919 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1920 iommu_detach_dev(iommu, info->bus, info->devfn);
1921 free_devinfo_mem(info);
1923 spin_lock_irqsave(&device_domain_lock, flags);
1925 spin_unlock_irqrestore(&device_domain_lock, flags);
1930 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1932 static struct dmar_domain *
1933 find_domain(struct pci_dev *pdev)
1935 struct device_domain_info *info;
1937 /* No lock here, assumes no domain exit in normal case */
1938 info = pdev->dev.archdata.iommu;
1940 return info->domain;
1944 /* domain is initialized */
1945 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1947 struct dmar_domain *domain, *found = NULL;
1948 struct intel_iommu *iommu;
1949 struct dmar_drhd_unit *drhd;
1950 struct device_domain_info *info, *tmp;
1951 struct pci_dev *dev_tmp;
1952 unsigned long flags;
1953 int bus = 0, devfn = 0;
1957 domain = find_domain(pdev);
1961 segment = pci_domain_nr(pdev->bus);
1963 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1965 if (pci_is_pcie(dev_tmp)) {
1966 bus = dev_tmp->subordinate->number;
1969 bus = dev_tmp->bus->number;
1970 devfn = dev_tmp->devfn;
1972 spin_lock_irqsave(&device_domain_lock, flags);
1973 list_for_each_entry(info, &device_domain_list, global) {
1974 if (info->segment == segment &&
1975 info->bus == bus && info->devfn == devfn) {
1976 found = info->domain;
1980 spin_unlock_irqrestore(&device_domain_lock, flags);
1981 /* pcie-pci bridge already has a domain, use it */
1988 domain = alloc_domain();
1992 /* Allocate new domain for the device */
1993 drhd = dmar_find_matched_drhd_unit(pdev);
1995 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1999 iommu = drhd->iommu;
2001 ret = iommu_attach_domain(domain, iommu);
2003 free_domain_mem(domain);
2007 if (domain_init(domain, gaw)) {
2008 domain_exit(domain);
2012 /* register pcie-to-pci device */
2014 info = alloc_devinfo_mem();
2016 domain_exit(domain);
2019 info->segment = segment;
2021 info->devfn = devfn;
2023 info->domain = domain;
2024 /* This domain is shared by devices under p2p bridge */
2025 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2027 /* pcie-to-pci bridge already has a domain, use it */
2029 spin_lock_irqsave(&device_domain_lock, flags);
2030 list_for_each_entry(tmp, &device_domain_list, global) {
2031 if (tmp->segment == segment &&
2032 tmp->bus == bus && tmp->devfn == devfn) {
2033 found = tmp->domain;
2038 spin_unlock_irqrestore(&device_domain_lock, flags);
2039 free_devinfo_mem(info);
2040 domain_exit(domain);
2043 list_add(&info->link, &domain->devices);
2044 list_add(&info->global, &device_domain_list);
2045 spin_unlock_irqrestore(&device_domain_lock, flags);
2050 info = alloc_devinfo_mem();
2053 info->segment = segment;
2054 info->bus = pdev->bus->number;
2055 info->devfn = pdev->devfn;
2057 info->domain = domain;
2058 spin_lock_irqsave(&device_domain_lock, flags);
2059 /* somebody else may have beaten us to it */
2060 found = find_domain(pdev);
2061 if (found != NULL) {
2062 spin_unlock_irqrestore(&device_domain_lock, flags);
2063 if (found != domain) {
2064 domain_exit(domain);
2067 free_devinfo_mem(info);
2070 list_add(&info->link, &domain->devices);
2071 list_add(&info->global, &device_domain_list);
2072 pdev->dev.archdata.iommu = info;
2073 spin_unlock_irqrestore(&device_domain_lock, flags);
2076 /* recheck it here, maybe others set it */
2077 return find_domain(pdev);
2080 static int iommu_identity_mapping;
2081 #define IDENTMAP_ALL 1
2082 #define IDENTMAP_GFX 2
2083 #define IDENTMAP_AZALIA 4
2085 static int iommu_domain_identity_map(struct dmar_domain *domain,
2086 unsigned long long start,
2087 unsigned long long end)
2089 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2090 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2092 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2093 dma_to_mm_pfn(last_vpfn))) {
2094 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2098 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2099 start, end, domain->id);
2101 * RMRR range might overlap with a physical memory range; clear it first.
2104 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2106 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2107 last_vpfn - first_vpfn + 1,
2108 DMA_PTE_READ|DMA_PTE_WRITE);
2111 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2112 unsigned long long start,
2113 unsigned long long end)
2115 struct dmar_domain *domain;
2118 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2122 /* For _hardware_ passthrough, don't bother. But for software
2123 passthrough, we do it anyway -- it may indicate a memory
2124 range which is reserved in E820, and so didn't get set
2125 up to start with in si_domain */
2126 if (domain == si_domain && hw_pass_through) {
2127 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2128 pci_name(pdev), start, end);
2133 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2134 pci_name(pdev), start, end);
2137 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2138 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2139 dmi_get_system_info(DMI_BIOS_VENDOR),
2140 dmi_get_system_info(DMI_BIOS_VERSION),
2141 dmi_get_system_info(DMI_PRODUCT_VERSION));
2146 if (end >> agaw_to_width(domain->agaw)) {
2147 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2148 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2149 agaw_to_width(domain->agaw),
2150 dmi_get_system_info(DMI_BIOS_VENDOR),
2151 dmi_get_system_info(DMI_BIOS_VERSION),
2152 dmi_get_system_info(DMI_PRODUCT_VERSION));
2157 ret = iommu_domain_identity_map(domain, start, end);
2161 /* context entry init */
2162 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2169 domain_exit(domain);
2173 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2174 struct pci_dev *pdev)
2176 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2178 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2182 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2183 static inline void iommu_prepare_isa(void)
2185 struct pci_dev *pdev;
2188 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2192 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2193 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2196 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2197 "floppy might not work\n");
2201 static inline void iommu_prepare_isa(void)
2205 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2207 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2209 static int __init si_domain_work_fn(unsigned long start_pfn,
2210 unsigned long end_pfn, void *datax)
2214 *ret = iommu_domain_identity_map(si_domain,
2215 (uint64_t)start_pfn << PAGE_SHIFT,
2216 (uint64_t)end_pfn << PAGE_SHIFT);
2221 static int __init si_domain_init(int hw)
2223 struct dmar_drhd_unit *drhd;
2224 struct intel_iommu *iommu;
2227 si_domain = alloc_domain();
2231 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2233 for_each_active_iommu(iommu, drhd) {
2234 ret = iommu_attach_domain(si_domain, iommu);
2236 domain_exit(si_domain);
2241 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2242 domain_exit(si_domain);
2246 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2251 for_each_online_node(nid) {
2252 work_with_active_regions(nid, si_domain_work_fn, &ret);
2260 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2261 struct pci_dev *pdev);
2262 static int identity_mapping(struct pci_dev *pdev)
2264 struct device_domain_info *info;
2266 if (likely(!iommu_identity_mapping))
2269 info = pdev->dev.archdata.iommu;
2270 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2271 return (info->domain == si_domain);
2276 static int domain_add_dev_info(struct dmar_domain *domain,
2277 struct pci_dev *pdev,
2280 struct device_domain_info *info;
2281 unsigned long flags;
2284 info = alloc_devinfo_mem();
2288 ret = domain_context_mapping(domain, pdev, translation);
2290 free_devinfo_mem(info);
2294 info->segment = pci_domain_nr(pdev->bus);
2295 info->bus = pdev->bus->number;
2296 info->devfn = pdev->devfn;
2298 info->domain = domain;
2300 spin_lock_irqsave(&device_domain_lock, flags);
2301 list_add(&info->link, &domain->devices);
2302 list_add(&info->global, &device_domain_list);
2303 pdev->dev.archdata.iommu = info;
2304 spin_unlock_irqrestore(&device_domain_lock, flags);
2309 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2311 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2314 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2317 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2321 * We want to start off with all devices in the 1:1 domain, and
2322 * take them out later if we find they can't access all of memory.
2324 * However, we can't do this for PCI devices behind bridges,
2325 * because all PCI devices behind the same bridge will end up
2326 * with the same source-id on their transactions.
2328 * Practically speaking, we can't change things around for these
2329 * devices at run-time, because we can't be sure there'll be no
2330 * DMA transactions in flight for any of their siblings.
2332 * So PCI devices (unless they're on the root bus) as well as
2333 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2334 * the 1:1 domain, just in _case_ one of their siblings turns out
2335 * not to be able to map all of memory.
2337 if (!pci_is_pcie(pdev)) {
2338 if (!pci_is_root_bus(pdev->bus))
2340 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2342 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2346 * At boot time, we don't yet know if devices will be 64-bit capable.
2347 * Assume that they will -- if they turn out not to be, then we can
2348 * take them out of the 1:1 domain later.
2352 * If the device's dma_mask is less than the system's memory
2353 * size then this is not a candidate for identity mapping.
2355 u64 dma_mask = pdev->dma_mask;
2357 if (pdev->dev.coherent_dma_mask &&
2358 pdev->dev.coherent_dma_mask < dma_mask)
2359 dma_mask = pdev->dev.coherent_dma_mask;
2361 return dma_mask >= dma_get_required_mask(&pdev->dev);
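/*
 * Editor's note -- worked example, not part of the original file: a device
 * whose dma_mask (or coherent_dma_mask) is DMA_BIT_MASK(32) on a machine
 * with 8GiB of RAM fails the check above, because dma_get_required_mask()
 * covers addresses beyond 4GiB; such a device is not a candidate for
 * identity mapping and gets a normal remapped domain instead.
 */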
2367 static int __init iommu_prepare_static_identity_mapping(int hw)
2369 struct pci_dev *pdev = NULL;
2372 ret = si_domain_init(hw);
2376 for_each_pci_dev(pdev) {
2377 /* Skip Host/PCI Bridge devices */
2378 if (IS_BRIDGE_HOST_DEVICE(pdev))
2380 if (iommu_should_identity_map(pdev, 1)) {
2381 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2382 hw ? "hardware" : "software", pci_name(pdev));
2384 ret = domain_add_dev_info(si_domain, pdev,
2385 hw ? CONTEXT_TT_PASS_THROUGH :
2386 CONTEXT_TT_MULTI_LEVEL);
2395 static int __init init_dmars(void)
2397 struct dmar_drhd_unit *drhd;
2398 struct dmar_rmrr_unit *rmrr;
2399 struct pci_dev *pdev;
2400 struct intel_iommu *iommu;
2406 * initialize and program root entry to not present
2409 for_each_drhd_unit(drhd) {
2412 * lock not needed as this is only incremented in the single
2413 * threaded kernel __init code path; all other accesses are read-only
2418 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2421 printk(KERN_ERR "Allocating global iommu array failed\n");
2426 deferred_flush = kzalloc(g_num_of_iommus *
2427 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2428 if (!deferred_flush) {
2433 for_each_drhd_unit(drhd) {
2437 iommu = drhd->iommu;
2438 g_iommus[iommu->seq_id] = iommu;
2440 ret = iommu_init_domains(iommu);
2446 * we could share the same root & context tables
2447 * among all IOMMUs; need to split them later if required.
2449 ret = iommu_alloc_root_entry(iommu);
2451 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2454 if (!ecap_pass_through(iommu->ecap))
2455 hw_pass_through = 0;
2459 * Start from a sane IOMMU hardware state.
2461 for_each_drhd_unit(drhd) {
2465 iommu = drhd->iommu;
2468 * If queued invalidation was already initialized by us
2469 * (for example, while enabling interrupt remapping) then
2470 * things are already rolling from a sane state.
2476 * Clear any previous faults.
2478 dmar_fault(-1, iommu);
2480 * Disable queued invalidation if supported and already enabled
2481 * before OS handover.
2483 dmar_disable_qi(iommu);
2486 for_each_drhd_unit(drhd) {
2490 iommu = drhd->iommu;
2492 if (dmar_enable_qi(iommu)) {
2494 * Queued Invalidate not enabled, use Register Based
2497 iommu->flush.flush_context = __iommu_flush_context;
2498 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2499 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2502 (unsigned long long)drhd->reg_base_addr);
2504 iommu->flush.flush_context = qi_flush_context;
2505 iommu->flush.flush_iotlb = qi_flush_iotlb;
2506 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2509 (unsigned long long)drhd->reg_base_addr);
2513 if (iommu_pass_through)
2514 iommu_identity_mapping |= IDENTMAP_ALL;
2516 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2517 iommu_identity_mapping |= IDENTMAP_GFX;
2520 check_tylersburg_isoch();
2523 * If pass-through is not set or not enabled, set up context entries for
2524 * identity mappings for RMRR, graphics and ISA devices, and possibly fall
2525 * back to the static identity mapping if iommu_identity_mapping is set.
2527 if (iommu_identity_mapping) {
2528 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2530 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2536 * for each dev attached to rmrr
2538 * locate drhd for dev, alloc domain for dev
2539 * allocate free domain
2540 * allocate page table entries for rmrr
2541 * if context not allocated for bus
2542 * allocate and init context
2543 * set present in root table for this bus
2544 * init context with domain, translation etc
2548 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2549 for_each_rmrr_units(rmrr) {
2550 for (i = 0; i < rmrr->devices_cnt; i++) {
2551 pdev = rmrr->devices[i];
2553 * some BIOSes list non-existent devices in the DMAR
2558 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2561 "IOMMU: mapping reserved region failed\n");
2565 iommu_prepare_isa();
2570 * global invalidate context cache
2571 * global invalidate iotlb
2572 * enable translation
2574 for_each_drhd_unit(drhd) {
2575 if (drhd->ignored) {
2577 * we always have to disable PMRs or DMA may fail on
2581 iommu_disable_protect_mem_regions(drhd->iommu);
2584 iommu = drhd->iommu;
2586 iommu_flush_write_buffer(iommu);
2588 ret = dmar_set_interrupt(iommu);
2592 iommu_set_root_entry(iommu);
2594 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2595 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2597 ret = iommu_enable_translation(iommu);
2601 iommu_disable_protect_mem_regions(iommu);
2606 for_each_drhd_unit(drhd) {
2609 iommu = drhd->iommu;
2616 /* This takes a number of _MM_ pages, not VTD pages */
2617 static struct iova *intel_alloc_iova(struct device *dev,
2618 struct dmar_domain *domain,
2619 unsigned long nrpages, uint64_t dma_mask)
2621 struct pci_dev *pdev = to_pci_dev(dev);
2622 struct iova *iova = NULL;
2624 /* Restrict dma_mask to the width that the iommu can handle */
2625 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2627 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2629 * First try to allocate an io virtual address in
2630 * DMA_BIT_MASK(32) and if that fails then try allocating
2633 iova = alloc_iova(&domain->iovad, nrpages,
2634 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2638 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2639 if (unlikely(!iova)) {
2640 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2641 nrpages, pci_name(pdev));
2648 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2650 struct dmar_domain *domain;
2653 domain = get_domain_for_dev(pdev,
2654 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2657 "Allocating domain for %s failed", pci_name(pdev));
2661 /* make sure context mapping is ok */
2662 if (unlikely(!domain_context_mapped(pdev))) {
2663 ret = domain_context_mapping(domain, pdev,
2664 CONTEXT_TT_MULTI_LEVEL);
2667 "Domain context map for %s failed",
2676 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2678 struct device_domain_info *info;
2680 /* No lock here, assumes no domain exit in normal case */
2681 info = dev->dev.archdata.iommu;
2683 return info->domain;
2685 return __get_valid_domain_for_dev(dev);
2688 static int iommu_dummy(struct pci_dev *pdev)
2690 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2693 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2694 static int iommu_no_mapping(struct device *dev)
2696 struct pci_dev *pdev;
2699 if (unlikely(dev->bus != &pci_bus_type))
2702 pdev = to_pci_dev(dev);
2703 if (iommu_dummy(pdev))
2706 if (!iommu_identity_mapping)
2709 found = identity_mapping(pdev);
2711 if (iommu_should_identity_map(pdev, 0))
2715 * The 32-bit DMA device is removed from si_domain and falls back
2716 * to non-identity mapping.
2718 domain_remove_one_dev_info(si_domain, pdev);
2719 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2725 * If a 64-bit DMA device is detached from a VM, the device
2726 * is put back into si_domain for identity mapping.
2728 if (iommu_should_identity_map(pdev, 0)) {
2730 ret = domain_add_dev_info(si_domain, pdev,
2732 CONTEXT_TT_PASS_THROUGH :
2733 CONTEXT_TT_MULTI_LEVEL);
2735 printk(KERN_INFO "64bit %s uses identity mapping\n",
2745 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2746 size_t size, int dir, u64 dma_mask)
2748 struct pci_dev *pdev = to_pci_dev(hwdev);
2749 struct dmar_domain *domain;
2750 phys_addr_t start_paddr;
2754 struct intel_iommu *iommu;
2755 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2757 BUG_ON(dir == DMA_NONE);
2759 if (iommu_no_mapping(hwdev))
2762 domain = get_valid_domain_for_dev(pdev);
2766 iommu = domain_get_iommu(domain);
2767 size = aligned_nrpages(paddr, size);
2769 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2774 * Check if DMAR supports zero-length reads on write only
2777 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2778 !cap_zlr(iommu->cap))
2779 prot |= DMA_PTE_READ;
2780 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2781 prot |= DMA_PTE_WRITE;
2783 * The range paddr .. paddr + size might span a partial page, so map the
2784 * whole page. Note: if two parts of one page are mapped separately, we
2785 * might end up with two guest addresses mapping to the same host paddr,
2786 * but this is not a big problem.
2788 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2789 mm_to_dma_pfn(paddr_pfn), size, prot);
2793 /* it's a non-present to present mapping. Only flush if caching mode */
2794 if (cap_caching_mode(iommu->cap))
2795 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2797 iommu_flush_write_buffer(iommu);
2799 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2800 start_paddr += paddr & ~PAGE_MASK;
2805 __free_iova(&domain->iovad, iova);
2806 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2807 pci_name(pdev), size, (unsigned long long)paddr, dir);
2811 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2812 unsigned long offset, size_t size,
2813 enum dma_data_direction dir,
2814 struct dma_attrs *attrs)
2816 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2817 dir, to_pci_dev(dev)->dma_mask);
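/*
 * Illustrative, hypothetical driver-side usage (a sketch, not part of this
 * file): once intel_dma_ops is installed, a streaming mapping made through
 * the generic DMA API is routed to intel_map_page()/intel_unmap_page() above.
 */
#if 0	/* sketch only */
static int example_streaming_map(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* map 'len' bytes of 'page' so the device can read them */
	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif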
2820 static void flush_unmaps(void)
2826 /* just flush them all */
2827 for (i = 0; i < g_num_of_iommus; i++) {
2828 struct intel_iommu *iommu = g_iommus[i];
2832 if (!deferred_flush[i].next)
2835 /* In caching mode, global flushes make emulation expensive */
2836 if (!cap_caching_mode(iommu->cap))
2837 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2838 DMA_TLB_GLOBAL_FLUSH);
2839 for (j = 0; j < deferred_flush[i].next; j++) {
2841 struct iova *iova = deferred_flush[i].iova[j];
2842 struct dmar_domain *domain = deferred_flush[i].domain[j];
2844 /* On real hardware multiple invalidations are expensive */
2845 if (cap_caching_mode(iommu->cap))
2846 iommu_flush_iotlb_psi(iommu, domain->id,
2847 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2849 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2850 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2851 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2853 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2855 deferred_flush[i].next = 0;
2861 static void flush_unmaps_timeout(unsigned long data)
2863 unsigned long flags;
2865 spin_lock_irqsave(&async_umap_flush_lock, flags);
2867 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2870 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2872 unsigned long flags;
2874 struct intel_iommu *iommu;
2876 spin_lock_irqsave(&async_umap_flush_lock, flags);
2877 if (list_size == HIGH_WATER_MARK)
2880 iommu = domain_get_iommu(dom);
2881 iommu_id = iommu->seq_id;
2883 next = deferred_flush[iommu_id].next;
2884 deferred_flush[iommu_id].domain[next] = dom;
2885 deferred_flush[iommu_id].iova[next] = iova;
2886 deferred_flush[iommu_id].next++;
2889 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2893 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
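/*
 * Recap of the lazy-unmap scheme above: freed IOVAs are queued per IOMMU in
 * deferred_flush[], and flush_unmaps() drains the queue either once
 * HIGH_WATER_MARK entries have accumulated or when the ~10ms unmap_timer
 * fires, amortising the cost of the IOTLB flushes.  Booting with
 * intel_iommu=strict (intel_iommu_strict) bypasses the batching and flushes
 * synchronously in intel_unmap_page()/intel_unmap_sg().
 */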
2896 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2897 size_t size, enum dma_data_direction dir,
2898 struct dma_attrs *attrs)
2900 struct pci_dev *pdev = to_pci_dev(dev);
2901 struct dmar_domain *domain;
2902 unsigned long start_pfn, last_pfn;
2904 struct intel_iommu *iommu;
2906 if (iommu_no_mapping(dev))
2909 domain = find_domain(pdev);
2912 iommu = domain_get_iommu(domain);
2914 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2915 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2916 (unsigned long long)dev_addr))
2919 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2920 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2922 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2923 pci_name(pdev), start_pfn, last_pfn);
2925 /* clear the whole page */
2926 dma_pte_clear_range(domain, start_pfn, last_pfn);
2928 /* free page tables */
2929 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2931 if (intel_iommu_strict) {
2932 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2933 last_pfn - start_pfn + 1, 0);
2935 __free_iova(&domain->iovad, iova);
2937 add_unmap(domain, iova);
2939 * queue up the release of the unmap to save roughly 1/6th of the
2940 * CPU time used up by the IOTLB flush operation...
2945 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2946 dma_addr_t *dma_handle, gfp_t flags)
2951 size = PAGE_ALIGN(size);
2952 order = get_order(size);
2954 if (!iommu_no_mapping(hwdev))
2955 flags &= ~(GFP_DMA | GFP_DMA32);
2956 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2957 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2963 vaddr = (void *)__get_free_pages(flags, order);
2966 memset(vaddr, 0, size);
2968 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2970 hwdev->coherent_dma_mask);
2973 free_pages((unsigned long)vaddr, order);
2977 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2978 dma_addr_t dma_handle)
2982 size = PAGE_ALIGN(size);
2983 order = get_order(size);
2985 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2986 free_pages((unsigned long)vaddr, order);
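/*
 * Illustrative, hypothetical driver-side usage (a sketch, not part of this
 * file): a coherent buffer obtained through the generic DMA API ends up in
 * intel_alloc_coherent()/intel_free_coherent() above.
 */
#if 0	/* sketch only */
static void example_coherent_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return;

	/* ... both the CPU and the device may now access the buffer ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
}
#endif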
2989 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2990 int nelems, enum dma_data_direction dir,
2991 struct dma_attrs *attrs)
2993 struct pci_dev *pdev = to_pci_dev(hwdev);
2994 struct dmar_domain *domain;
2995 unsigned long start_pfn, last_pfn;
2997 struct intel_iommu *iommu;
2999 if (iommu_no_mapping(hwdev))
3002 domain = find_domain(pdev);
3005 iommu = domain_get_iommu(domain);
3007 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3008 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3009 (unsigned long long)sglist[0].dma_address))
3012 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3013 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3015 /* clear the whole page */
3016 dma_pte_clear_range(domain, start_pfn, last_pfn);
3018 /* free page tables */
3019 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3021 if (intel_iommu_strict) {
3022 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3023 last_pfn - start_pfn + 1, 0);
3025 __free_iova(&domain->iovad, iova);
3027 add_unmap(domain, iova);
3029 * queue up the release of the unmap to save roughly 1/6th of the
3030 * CPU time used up by the IOTLB flush operation...
3035 static int intel_nontranslate_map_sg(struct device *hddev,
3036 struct scatterlist *sglist, int nelems, int dir)
3039 struct scatterlist *sg;
3041 for_each_sg(sglist, sg, nelems, i) {
3042 BUG_ON(!sg_page(sg));
3043 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3044 sg->dma_length = sg->length;
3049 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3050 enum dma_data_direction dir, struct dma_attrs *attrs)
3053 struct pci_dev *pdev = to_pci_dev(hwdev);
3054 struct dmar_domain *domain;
3057 struct iova *iova = NULL;
3059 struct scatterlist *sg;
3060 unsigned long start_vpfn;
3061 struct intel_iommu *iommu;
3063 BUG_ON(dir == DMA_NONE);
3064 if (iommu_no_mapping(hwdev))
3065 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3067 domain = get_valid_domain_for_dev(pdev);
3071 iommu = domain_get_iommu(domain);
3073 for_each_sg(sglist, sg, nelems, i)
3074 size += aligned_nrpages(sg->offset, sg->length);
3076 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3079 sglist->dma_length = 0;
3084 * Check if DMAR supports zero-length reads on write only
3087 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3088 !cap_zlr(iommu->cap))
3089 prot |= DMA_PTE_READ;
3090 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3091 prot |= DMA_PTE_WRITE;
3093 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3095 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3096 if (unlikely(ret)) {
3097 /* clear the page */
3098 dma_pte_clear_range(domain, start_vpfn,
3099 start_vpfn + size - 1);
3100 /* free page tables */
3101 dma_pte_free_pagetable(domain, start_vpfn,
3102 start_vpfn + size - 1);
3104 __free_iova(&domain->iovad, iova);
3108 /* it's a non-present to present mapping. Only flush if caching mode */
3109 if (cap_caching_mode(iommu->cap))
3110 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3112 iommu_flush_write_buffer(iommu);
3117 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3122 struct dma_map_ops intel_dma_ops = {
3123 .alloc_coherent = intel_alloc_coherent,
3124 .free_coherent = intel_free_coherent,
3125 .map_sg = intel_map_sg,
3126 .unmap_sg = intel_unmap_sg,
3127 .map_page = intel_map_page,
3128 .unmap_page = intel_unmap_page,
3129 .mapping_error = intel_mapping_error,
3132 static inline int iommu_domain_cache_init(void)
3136 iommu_domain_cache = kmem_cache_create("iommu_domain",
3137 sizeof(struct dmar_domain),
3142 if (!iommu_domain_cache) {
3143 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3150 static inline int iommu_devinfo_cache_init(void)
3154 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3155 sizeof(struct device_domain_info),
3159 if (!iommu_devinfo_cache) {
3160 printk(KERN_ERR "Couldn't create devinfo cache\n");
3167 static inline int iommu_iova_cache_init(void)
3171 iommu_iova_cache = kmem_cache_create("iommu_iova",
3172 sizeof(struct iova),
3176 if (!iommu_iova_cache) {
3177 printk(KERN_ERR "Couldn't create iova cache\n");
3184 static int __init iommu_init_mempool(void)
3187 ret = iommu_iova_cache_init();
3191 ret = iommu_domain_cache_init();
3195 ret = iommu_devinfo_cache_init();
3199 kmem_cache_destroy(iommu_domain_cache);
3201 kmem_cache_destroy(iommu_iova_cache);
3206 static void __init iommu_exit_mempool(void)
3208 kmem_cache_destroy(iommu_devinfo_cache);
3209 kmem_cache_destroy(iommu_domain_cache);
3210 kmem_cache_destroy(iommu_iova_cache);
3214 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3216 struct dmar_drhd_unit *drhd;
3220 /* We know that this device on this chipset has its own IOMMU.
3221 * If we find it under a different IOMMU, then the BIOS is lying
3222 * to us. Hope that the IOMMU for this device is actually
3223 * disabled, and it needs no translation...
3225 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3227 /* "can't" happen */
3228 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3231 vtbar &= 0xffff0000;
3233 /* we know that this IOMMU should be at offset 0xa000 from vtbar */
3234 drhd = dmar_find_matched_drhd_unit(pdev);
3235 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3236 TAINT_FIRMWARE_WORKAROUND,
3237 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3238 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3240 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
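/*
 * Note: setting archdata.iommu to DUMMY_DEVICE_DOMAIN_INFO here makes
 * iommu_dummy(), and hence iommu_no_mapping(), treat the device as
 * untranslated, so the DMA paths above bypass VT-d translation for it.
 */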
3242 static void __init init_no_remapping_devices(void)
3244 struct dmar_drhd_unit *drhd;
3246 for_each_drhd_unit(drhd) {
3247 if (!drhd->include_all) {
3249 for (i = 0; i < drhd->devices_cnt; i++)
3250 if (drhd->devices[i] != NULL)
3252 /* ignore DMAR unit if no pci devices exist */
3253 if (i == drhd->devices_cnt)
3258 for_each_drhd_unit(drhd) {
3260 if (drhd->ignored || drhd->include_all)
3263 for (i = 0; i < drhd->devices_cnt; i++)
3264 if (drhd->devices[i] &&
3265 !IS_GFX_DEVICE(drhd->devices[i]))
3268 if (i < drhd->devices_cnt)
3271 /* This IOMMU has *only* gfx devices. Either bypass it or
3272 set the gfx_mapped flag, as appropriate */
3274 intel_iommu_gfx_mapped = 1;
3277 for (i = 0; i < drhd->devices_cnt; i++) {
3278 if (!drhd->devices[i])
3280 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3286 #ifdef CONFIG_SUSPEND
3287 static int init_iommu_hw(void)
3289 struct dmar_drhd_unit *drhd;
3290 struct intel_iommu *iommu = NULL;
3292 for_each_active_iommu(iommu, drhd)
3294 dmar_reenable_qi(iommu);
3296 for_each_iommu(iommu, drhd) {
3297 if (drhd->ignored) {
3299 * we always have to disable PMRs or DMA may fail on
3303 iommu_disable_protect_mem_regions(iommu);
3307 iommu_flush_write_buffer(iommu);
3309 iommu_set_root_entry(iommu);
3311 iommu->flush.flush_context(iommu, 0, 0, 0,
3312 DMA_CCMD_GLOBAL_INVL);
3313 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3314 DMA_TLB_GLOBAL_FLUSH);
3315 if (iommu_enable_translation(iommu))
3317 iommu_disable_protect_mem_regions(iommu);
3323 static void iommu_flush_all(void)
3325 struct dmar_drhd_unit *drhd;
3326 struct intel_iommu *iommu;
3328 for_each_active_iommu(iommu, drhd) {
3329 iommu->flush.flush_context(iommu, 0, 0, 0,
3330 DMA_CCMD_GLOBAL_INVL);
3331 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3332 DMA_TLB_GLOBAL_FLUSH);
3336 static int iommu_suspend(void)
3338 struct dmar_drhd_unit *drhd;
3339 struct intel_iommu *iommu = NULL;
3342 for_each_active_iommu(iommu, drhd) {
3343 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3345 if (!iommu->iommu_state)
3351 for_each_active_iommu(iommu, drhd) {
3352 iommu_disable_translation(iommu);
3354 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3356 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3357 readl(iommu->reg + DMAR_FECTL_REG);
3358 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3359 readl(iommu->reg + DMAR_FEDATA_REG);
3360 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3361 readl(iommu->reg + DMAR_FEADDR_REG);
3362 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3363 readl(iommu->reg + DMAR_FEUADDR_REG);
3365 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3370 for_each_active_iommu(iommu, drhd)
3371 kfree(iommu->iommu_state);
3376 static void iommu_resume(void)
3378 struct dmar_drhd_unit *drhd;
3379 struct intel_iommu *iommu = NULL;
3382 if (init_iommu_hw()) {
3384 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3386 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3390 for_each_active_iommu(iommu, drhd) {
3392 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3394 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3395 iommu->reg + DMAR_FECTL_REG);
3396 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3397 iommu->reg + DMAR_FEDATA_REG);
3398 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3399 iommu->reg + DMAR_FEADDR_REG);
3400 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3401 iommu->reg + DMAR_FEUADDR_REG);
3403 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3406 for_each_active_iommu(iommu, drhd)
3407 kfree(iommu->iommu_state);
3410 static struct syscore_ops iommu_syscore_ops = {
3411 .resume = iommu_resume,
3412 .suspend = iommu_suspend,
3415 static void __init init_iommu_pm_ops(void)
3417 register_syscore_ops(&iommu_syscore_ops);
3421 static inline void init_iommu_pm_ops(void) {}
3422 #endif /* CONFIG_SUSPEND */
3424 LIST_HEAD(dmar_rmrr_units);
3426 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3428 list_add(&rmrr->list, &dmar_rmrr_units);
3432 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3434 struct acpi_dmar_reserved_memory *rmrr;
3435 struct dmar_rmrr_unit *rmrru;
3437 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3441 rmrru->hdr = header;
3442 rmrr = (struct acpi_dmar_reserved_memory *)header;
3443 rmrru->base_address = rmrr->base_address;
3444 rmrru->end_address = rmrr->end_address;
3446 dmar_register_rmrr_unit(rmrru);
3451 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3453 struct acpi_dmar_reserved_memory *rmrr;
3456 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3457 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3458 ((void *)rmrr) + rmrr->header.length,
3459 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3461 if (ret || (rmrru->devices_cnt == 0)) {
3462 list_del(&rmrru->list);
3468 static LIST_HEAD(dmar_atsr_units);
3470 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3472 struct acpi_dmar_atsr *atsr;
3473 struct dmar_atsr_unit *atsru;
3475 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3476 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3481 atsru->include_all = atsr->flags & 0x1;
3483 list_add(&atsru->list, &dmar_atsr_units);
3488 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3491 struct acpi_dmar_atsr *atsr;
3493 if (atsru->include_all)
3496 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3497 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3498 (void *)atsr + atsr->header.length,
3499 &atsru->devices_cnt, &atsru->devices,
3501 if (rc || !atsru->devices_cnt) {
3502 list_del(&atsru->list);
3509 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3512 struct pci_bus *bus;
3513 struct acpi_dmar_atsr *atsr;
3514 struct dmar_atsr_unit *atsru;
3516 dev = pci_physfn(dev);
3518 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3519 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3520 if (atsr->segment == pci_domain_nr(dev->bus))
3527 for (bus = dev->bus; bus; bus = bus->parent) {
3528 struct pci_dev *bridge = bus->self;
3530 if (!bridge || !pci_is_pcie(bridge) ||
3531 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3534 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3535 for (i = 0; i < atsru->devices_cnt; i++)
3536 if (atsru->devices[i] == bridge)
3542 if (atsru->include_all)
3548 int __init dmar_parse_rmrr_atsr_dev(void)
3550 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3551 struct dmar_atsr_unit *atsr, *atsr_n;
3554 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3555 ret = rmrr_parse_dev(rmrr);
3560 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3561 ret = atsr_parse_dev(atsr);
3570 * Here we only respond to the action of a device being unbound from its driver.
3572 * A newly added device is not attached to its DMAR domain here yet; that happens
3573 * when the device is first mapped to an IOVA.
3575 static int device_notifier(struct notifier_block *nb,
3576 unsigned long action, void *data)
3578 struct device *dev = data;
3579 struct pci_dev *pdev = to_pci_dev(dev);
3580 struct dmar_domain *domain;
3582 if (iommu_no_mapping(dev))
3585 domain = find_domain(pdev);
3589 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3590 domain_remove_one_dev_info(domain, pdev);
3592 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3593 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3594 list_empty(&domain->devices))
3595 domain_exit(domain);
3601 static struct notifier_block device_nb = {
3602 .notifier_call = device_notifier,
3605 int __init intel_iommu_init(void)
3609 /* VT-d is required for a TXT/tboot launch, so enforce that */
3610 force_on = tboot_force_iommu();
3612 if (dmar_table_init()) {
3614 panic("tboot: Failed to initialize DMAR table\n");
3618 if (dmar_dev_scope_init() < 0) {
3620 panic("tboot: Failed to initialize DMAR device scope\n");
3624 if (no_iommu || dmar_disabled)
3627 if (iommu_init_mempool()) {
3629 panic("tboot: Failed to initialize iommu memory\n");
3633 if (list_empty(&dmar_rmrr_units))
3634 printk(KERN_INFO "DMAR: No RMRR found\n");
3636 if (list_empty(&dmar_atsr_units))
3637 printk(KERN_INFO "DMAR: No ATSR found\n");
3639 if (dmar_init_reserved_ranges()) {
3641 panic("tboot: Failed to reserve iommu ranges\n");
3645 init_no_remapping_devices();
3650 panic("tboot: Failed to initialize DMARs\n");
3651 printk(KERN_ERR "IOMMU: dmar init failed\n");
3652 put_iova_domain(&reserved_iova_list);
3653 iommu_exit_mempool();
3657 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3659 init_timer(&unmap_timer);
3660 #ifdef CONFIG_SWIOTLB
3663 dma_ops = &intel_dma_ops;
3665 init_iommu_pm_ops();
3667 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3669 bus_register_notifier(&pci_bus_type, &device_nb);
3671 intel_iommu_enabled = 1;
3676 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3677 struct pci_dev *pdev)
3679 struct pci_dev *tmp, *parent;
3681 if (!iommu || !pdev)
3684 /* dependent device detach */
3685 tmp = pci_find_upstream_pcie_bridge(pdev);
3686 /* Secondary interface's bus number and devfn 0 */
3688 parent = pdev->bus->self;
3689 while (parent != tmp) {
3690 iommu_detach_dev(iommu, parent->bus->number,
3692 parent = parent->bus->self;
3694 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3695 iommu_detach_dev(iommu,
3696 tmp->subordinate->number, 0);
3697 else /* this is a legacy PCI bridge */
3698 iommu_detach_dev(iommu, tmp->bus->number,
3703 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3704 struct pci_dev *pdev)
3706 struct device_domain_info *info;
3707 struct intel_iommu *iommu;
3708 unsigned long flags;
3710 struct list_head *entry, *tmp;
3712 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3717 spin_lock_irqsave(&device_domain_lock, flags);
3718 list_for_each_safe(entry, tmp, &domain->devices) {
3719 info = list_entry(entry, struct device_domain_info, link);
3720 if (info->segment == pci_domain_nr(pdev->bus) &&
3721 info->bus == pdev->bus->number &&
3722 info->devfn == pdev->devfn) {
3723 list_del(&info->link);
3724 list_del(&info->global);
3726 info->dev->dev.archdata.iommu = NULL;
3727 spin_unlock_irqrestore(&device_domain_lock, flags);
3729 iommu_disable_dev_iotlb(info);
3730 iommu_detach_dev(iommu, info->bus, info->devfn);
3731 iommu_detach_dependent_devices(iommu, pdev);
3732 free_devinfo_mem(info);
3734 spin_lock_irqsave(&device_domain_lock, flags);
3742 /* if there are no other devices under the same iommu
3743 * owned by this domain, clear this iommu from iommu_bmp and
3744 * update the iommu count and coherency
3746 if (iommu == device_to_iommu(info->segment, info->bus,
3751 spin_unlock_irqrestore(&device_domain_lock, flags);
3754 unsigned long tmp_flags;
3755 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3756 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3757 domain->iommu_count--;
3758 domain_update_iommu_cap(domain);
3759 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3761 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3762 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3763 spin_lock_irqsave(&iommu->lock, tmp_flags);
3764 clear_bit(domain->id, iommu->domain_ids);
3765 iommu->domains[domain->id] = NULL;
3766 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3771 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3773 struct device_domain_info *info;
3774 struct intel_iommu *iommu;
3775 unsigned long flags1, flags2;
3777 spin_lock_irqsave(&device_domain_lock, flags1);
3778 while (!list_empty(&domain->devices)) {
3779 info = list_entry(domain->devices.next,
3780 struct device_domain_info, link);
3781 list_del(&info->link);
3782 list_del(&info->global);
3784 info->dev->dev.archdata.iommu = NULL;
3786 spin_unlock_irqrestore(&device_domain_lock, flags1);
3788 iommu_disable_dev_iotlb(info);
3789 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3790 iommu_detach_dev(iommu, info->bus, info->devfn);
3791 iommu_detach_dependent_devices(iommu, info->dev);
3793 /* clear this iommu in iommu_bmp, update iommu count
3796 spin_lock_irqsave(&domain->iommu_lock, flags2);
3797 if (test_and_clear_bit(iommu->seq_id,
3798 &domain->iommu_bmp)) {
3799 domain->iommu_count--;
3800 domain_update_iommu_cap(domain);
3802 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3804 free_devinfo_mem(info);
3805 spin_lock_irqsave(&device_domain_lock, flags1);
3807 spin_unlock_irqrestore(&device_domain_lock, flags1);
3810 /* domain ids for virtual machines; they won't be set in context entries */
3811 static unsigned long vm_domid;
3813 static struct dmar_domain *iommu_alloc_vm_domain(void)
3815 struct dmar_domain *domain;
3817 domain = alloc_domain_mem();
3821 domain->id = vm_domid++;
3823 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3824 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3829 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3833 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3834 spin_lock_init(&domain->iommu_lock);
3836 domain_reserve_special_ranges(domain);
3838 /* calculate AGAW */
3839 domain->gaw = guest_width;
3840 adjust_width = guestwidth_to_adjustwidth(guest_width);
3841 domain->agaw = width_to_agaw(adjust_width);
3843 INIT_LIST_HEAD(&domain->devices);
3845 domain->iommu_count = 0;
3846 domain->iommu_coherency = 0;
3847 domain->iommu_snooping = 0;
3848 domain->iommu_superpage = 0;
3849 domain->max_addr = 0;
3852 /* always allocate the top pgd */
3853 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3856 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
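/*
 * Worked example of the AGAW calculation above (assuming the 4KiB VTD page
 * size and 9-bit LEVEL_STRIDE): a guest_width of 48 bits needs
 * (48 - 12) / 9 = 4 page-table levels, which guestwidth_to_adjustwidth() and
 * width_to_agaw() express as agaw = 2 in the context-entry encoding.
 */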
3860 static void iommu_free_vm_domain(struct dmar_domain *domain)
3862 unsigned long flags;
3863 struct dmar_drhd_unit *drhd;
3864 struct intel_iommu *iommu;
3866 unsigned long ndomains;
3868 for_each_drhd_unit(drhd) {
3871 iommu = drhd->iommu;
3873 ndomains = cap_ndoms(iommu->cap);
3874 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3875 if (iommu->domains[i] == domain) {
3876 spin_lock_irqsave(&iommu->lock, flags);
3877 clear_bit(i, iommu->domain_ids);
3878 iommu->domains[i] = NULL;
3879 spin_unlock_irqrestore(&iommu->lock, flags);
3886 static void vm_domain_exit(struct dmar_domain *domain)
3888 /* Domain 0 is reserved, so don't process it */
3892 vm_domain_remove_all_dev_info(domain);
3894 put_iova_domain(&domain->iovad);
3897 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3899 /* free page tables */
3900 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3902 iommu_free_vm_domain(domain);
3903 free_domain_mem(domain);
3906 static int intel_iommu_domain_init(struct iommu_domain *domain)
3908 struct dmar_domain *dmar_domain;
3910 dmar_domain = iommu_alloc_vm_domain();
3913 "intel_iommu_domain_init: dmar_domain == NULL\n");
3916 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3918 "intel_iommu_domain_init() failed\n");
3919 vm_domain_exit(dmar_domain);
3922 domain_update_iommu_cap(dmar_domain);
3923 domain->priv = dmar_domain;
3928 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3930 struct dmar_domain *dmar_domain = domain->priv;
3932 domain->priv = NULL;
3933 vm_domain_exit(dmar_domain);
3936 static int intel_iommu_attach_device(struct iommu_domain *domain,
3939 struct dmar_domain *dmar_domain = domain->priv;
3940 struct pci_dev *pdev = to_pci_dev(dev);
3941 struct intel_iommu *iommu;
3944 /* normally pdev is not mapped */
3945 if (unlikely(domain_context_mapped(pdev))) {
3946 struct dmar_domain *old_domain;
3948 old_domain = find_domain(pdev);
3950 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3951 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3952 domain_remove_one_dev_info(old_domain, pdev);
3954 domain_remove_dev_info(old_domain);
3958 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3963 /* check if this iommu agaw is sufficient for max mapped address */
3964 addr_width = agaw_to_width(iommu->agaw);
3965 if (addr_width > cap_mgaw(iommu->cap))
3966 addr_width = cap_mgaw(iommu->cap);
3968 if (dmar_domain->max_addr > (1LL << addr_width)) {
3969 printk(KERN_ERR "%s: iommu width (%d) is not "
3970 "sufficient for the mapped address (%llx)\n",
3971 __func__, addr_width, dmar_domain->max_addr);
3974 dmar_domain->gaw = addr_width;
3977 * Knock out extra levels of page tables if necessary
3979 while (iommu->agaw < dmar_domain->agaw) {
3980 struct dma_pte *pte;
3982 pte = dmar_domain->pgd;
3983 if (dma_pte_present(pte)) {
3984 dmar_domain->pgd = (struct dma_pte *)
3985 phys_to_virt(dma_pte_addr(pte));
3986 free_pgtable_page(pte);
3988 dmar_domain->agaw--;
3991 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3994 static void intel_iommu_detach_device(struct iommu_domain *domain,
3997 struct dmar_domain *dmar_domain = domain->priv;
3998 struct pci_dev *pdev = to_pci_dev(dev);
4000 domain_remove_one_dev_info(dmar_domain, pdev);
4003 static int intel_iommu_map(struct iommu_domain *domain,
4004 unsigned long iova, phys_addr_t hpa,
4005 size_t size, int iommu_prot)
4007 struct dmar_domain *dmar_domain = domain->priv;
4012 if (iommu_prot & IOMMU_READ)
4013 prot |= DMA_PTE_READ;
4014 if (iommu_prot & IOMMU_WRITE)
4015 prot |= DMA_PTE_WRITE;
4016 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4017 prot |= DMA_PTE_SNP;
4019 max_addr = iova + size;
4020 if (dmar_domain->max_addr < max_addr) {
4023 /* check if minimum agaw is sufficient for mapped address */
4024 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4025 if (end < max_addr) {
4026 printk(KERN_ERR "%s: iommu width (%d) is not "
4027 "sufficient for the mapped address (%llx)\n",
4028 __func__, dmar_domain->gaw, max_addr);
4031 dmar_domain->max_addr = max_addr;
4033 /* Round up size to the next multiple of PAGE_SIZE, if it and
4034 the low bits of hpa would take us onto the next page */
4035 size = aligned_nrpages(hpa, size);
4036 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4037 hpa >> VTD_PAGE_SHIFT, size, prot);
4041 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4042 unsigned long iova, size_t size)
4044 struct dmar_domain *dmar_domain = domain->priv;
4047 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4048 (iova + size - 1) >> VTD_PAGE_SHIFT);
4050 if (dmar_domain->max_addr == iova + size)
4051 dmar_domain->max_addr = iova;
4053 return PAGE_SIZE << order;
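/*
 * Note: dma_pte_clear_range() reports the order of the region it actually
 * cleared, so the size handed back to the IOMMU core is PAGE_SIZE << order,
 * e.g. 2MiB when a single superpage PTE backed the range.
 */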
4056 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4059 struct dmar_domain *dmar_domain = domain->priv;
4060 struct dma_pte *pte;
4063 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4065 phys = dma_pte_addr(pte);
4070 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4073 struct dmar_domain *dmar_domain = domain->priv;
4075 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4076 return dmar_domain->iommu_snooping;
4077 if (cap == IOMMU_CAP_INTR_REMAP)
4078 return intr_remapping_enabled;
4083 static struct iommu_ops intel_iommu_ops = {
4084 .domain_init = intel_iommu_domain_init,
4085 .domain_destroy = intel_iommu_domain_destroy,
4086 .attach_dev = intel_iommu_attach_device,
4087 .detach_dev = intel_iommu_detach_device,
4088 .map = intel_iommu_map,
4089 .unmap = intel_iommu_unmap,
4090 .iova_to_phys = intel_iommu_iova_to_phys,
4091 .domain_has_cap = intel_iommu_domain_has_cap,
4092 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
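/*
 * Illustrative, hypothetical consumer of the iommu_ops above (a sketch, not
 * part of this file): KVM device assignment and similar users drive these
 * callbacks through the generic IOMMU API.
 */
#if 0	/* sketch only */
static int example_iommu_api_user(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&pci_bus_type);	/* -> intel_iommu_domain_init() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);		/* -> intel_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* map one 4KiB page at IOVA 0 -> intel_iommu_map() */
	ret = iommu_map(domain, 0, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	iommu_unmap(domain, 0, PAGE_SIZE);		/* -> intel_iommu_unmap() */
out_detach:
	iommu_detach_device(domain, dev);		/* -> intel_iommu_detach_device() */
out_free:
	iommu_domain_free(domain);			/* -> intel_iommu_domain_destroy() */
	return ret;
}
#endif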
4095 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4098 * Mobile 4 Series Chipset neglects to set RWBF capability,
4101 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4104 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4105 if (dev->revision == 0x07) {
4106 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4111 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4114 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4115 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4116 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4117 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4118 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4119 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4120 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4121 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
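/*
 * Note on the GGC decode below: GGC_MEMORY_VT_ENABLED is bit 11 of the
 * register, so a value with that bit clear means the BIOS allocated no
 * VT-enabled graphics stolen memory (no shadow GTT), and the quirk then
 * disables the IOMMU for the integrated graphics device.
 */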
4123 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4127 if (pci_read_config_word(dev, GGC, &ggc))
4130 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4131 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4133 } else if (dmar_map_gfx) {
4134 /* we have to ensure the gfx device is idle before we flush */
4135 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4136 intel_iommu_strict = 1;
4139 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4140 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4141 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4142 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4144 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4145 ISOCH DMAR unit for the Azalia sound device, but not give it any
4146 TLB entries, which causes it to deadlock. Check for that. We do
4147 this in a function called from init_dmars(), instead of in a PCI
4148 quirk, because we don't want to print the obnoxious "BIOS broken"
4149 message if VT-d is actually disabled.
4151 static void __init check_tylersburg_isoch(void)
4153 struct pci_dev *pdev;
4154 uint32_t vtisochctrl;
4156 /* If there's no Azalia in the system anyway, forget it. */
4157 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4162 /* System Management Registers. Might be hidden, in which case
4163 we can't do the sanity check. But that's OK, because the
4164 known-broken BIOSes _don't_ actually hide it, so far. */
4165 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4169 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4176 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4177 if (vtisochctrl & 1)
4180 /* Drop all bits other than the number of TLB entries */
4181 vtisochctrl &= 0x1c;
4183 /* If we have the recommended number of TLB entries (16), fine. */
4184 if (vtisochctrl == 0x10)
4187 /* Zero TLB entries? You get to ride the short bus to school. */
4189 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4190 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4191 dmi_get_system_info(DMI_BIOS_VENDOR),
4192 dmi_get_system_info(DMI_BIOS_VERSION),
4193 dmi_get_system_info(DMI_PRODUCT_VERSION));
4194 iommu_identity_mapping |= IDENTMAP_AZALIA;
4198 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",