2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <asm/cacheflush.h>
45 #include <asm/iommu.h>
47 #define ROOT_SIZE VTD_PAGE_SIZE
48 #define CONTEXT_SIZE VTD_PAGE_SIZE
50 #define IS_BRIDGE_HOST_DEVICE(pdev) \
51 ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
52 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
53 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
54 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
56 #define IOAPIC_RANGE_START (0xfee00000)
57 #define IOAPIC_RANGE_END (0xfeefffff)
58 #define IOVA_START_ADDR (0x1000)
60 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
62 #define MAX_AGAW_WIDTH 64
64 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
65 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
67 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
68 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
69 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
70 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
71 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
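/*
 * Worked example (illustrative, assuming VTD_PAGE_SHIFT == 12): for
 * gaw == 48, __DOMAIN_MAX_PFN(48) == (1ULL << 36) - 1 == 0xFFFFFFFFF.
 * On a 64-bit build DOMAIN_MAX_PFN(48) keeps that value; on a 32-bit
 * build the min_t() above clamps it to ULONG_MAX so PFNs still fit in
 * an unsigned long, as the comment above intends.
 */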
73 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
74 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
75 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
77 /* page table handling */
78 #define LEVEL_STRIDE (9)
79 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
82 * This bitmap is used to advertise the page sizes our hardware supports
83 * to the IOMMU core, which will then use this information to split
84 * physically contiguous memory regions it is mapping into page sizes
87 * Traditionally the IOMMU core just handed us the mappings directly,
88 * after making sure the size is an order of a 4KiB page and that the
89 * mapping has natural alignment.
91 * To retain this behavior, we currently advertise that we support
92 * all page sizes that are an order of 4KiB.
94 * If at some point we'd like to utilize the IOMMU core's new behavior,
95 * we could change this to advertise the real page sizes we support.
97 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
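/*
 * Illustrative reading of the mask above: ~0xFFFUL has every bit from
 * bit 12 upwards set, i.e. 4KiB, 8KiB, 16KiB, ... are all advertised,
 * which is exactly "all page sizes that are an order of 4KiB".
 */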
99 static inline int agaw_to_level(int agaw)
104 static inline int agaw_to_width(int agaw)
106 return 30 + agaw * LEVEL_STRIDE;
109 static inline int width_to_agaw(int width)
111 return (width - 30) / LEVEL_STRIDE;
114 static inline unsigned int level_to_offset_bits(int level)
116 return (level - 1) * LEVEL_STRIDE;
119 static inline int pfn_level_offset(unsigned long pfn, int level)
121 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
124 static inline unsigned long level_mask(int level)
126 return -1UL << level_to_offset_bits(level);
129 static inline unsigned long level_size(int level)
131 return 1UL << level_to_offset_bits(level);
134 static inline unsigned long align_to_level(unsigned long pfn, int level)
136 return (pfn + level_size(level) - 1) & level_mask(level);
139 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
141 return 1 << ((lvl - 1) * LEVEL_STRIDE);
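/*
 * Worked example (illustrative, assuming 4KiB VT-d pages): with
 * LEVEL_STRIDE == 9, lvl_to_nr_pages(1) == 1 page (4KiB),
 * lvl_to_nr_pages(2) == 512 pages (2MiB) and lvl_to_nr_pages(3) ==
 * 262144 pages (1GiB), matching the usual x86 long-mode page sizes.
 */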
144 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
145 are never going to work. */
146 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
148 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
151 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
153 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
155 static inline unsigned long page_to_dma_pfn(struct page *pg)
157 return mm_to_dma_pfn(page_to_pfn(pg));
159 static inline unsigned long virt_to_dma_pfn(void *p)
161 return page_to_dma_pfn(virt_to_page(p));
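/*
 * Illustrative note: on x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12,
 * so mm_to_dma_pfn()/dma_to_mm_pfn() shift by 0 and the two PFN spaces
 * coincide; the conversion only matters if MM pages were ever larger
 * than VT-d pages.
 */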
164 /* global iommu list, set NULL for ignored DMAR units */
165 static struct intel_iommu **g_iommus;
167 static void __init check_tylersburg_isoch(void);
168 static int rwbf_quirk;
171 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
172 * (used when the kernel is launched w/ TXT)
174 static int force_on = 0;
179 * 12-63: Context Ptr (12 - (haw-1))
186 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
187 static inline bool root_present(struct root_entry *root)
189 return (root->val & 1);
191 static inline void set_root_present(struct root_entry *root)
195 static inline void set_root_value(struct root_entry *root, unsigned long value)
197 root->val |= value & VTD_PAGE_MASK;
200 static inline struct context_entry *
201 get_context_addr_from_root(struct root_entry *root)
203 return (struct context_entry *)
204 (root_present(root)?phys_to_virt(
205 root->val & VTD_PAGE_MASK) :
212 * 1: fault processing disable
213 * 2-3: translation type
214 * 12-63: address space root
220 struct context_entry {
225 static inline bool context_present(struct context_entry *context)
227 return (context->lo & 1);
229 static inline void context_set_present(struct context_entry *context)
234 static inline void context_set_fault_enable(struct context_entry *context)
236 context->lo &= (((u64)-1) << 2) | 1;
239 static inline void context_set_translation_type(struct context_entry *context,
242 context->lo &= (((u64)-1) << 4) | 3;
243 context->lo |= (value & 3) << 2;
246 static inline void context_set_address_root(struct context_entry *context,
249 context->lo |= value & VTD_PAGE_MASK;
252 static inline void context_set_address_width(struct context_entry *context,
255 context->hi |= value & 7;
258 static inline void context_set_domain_id(struct context_entry *context,
261 context->hi |= (value & ((1 << 16) - 1)) << 8;
264 static inline void context_clear_entry(struct context_entry *context)
277 * 12-63: Host physical address
283 static inline void dma_clear_pte(struct dma_pte *pte)
288 static inline void dma_set_pte_readable(struct dma_pte *pte)
290 pte->val |= DMA_PTE_READ;
293 static inline void dma_set_pte_writable(struct dma_pte *pte)
295 pte->val |= DMA_PTE_WRITE;
298 static inline void dma_set_pte_snp(struct dma_pte *pte)
300 pte->val |= DMA_PTE_SNP;
303 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
305 pte->val = (pte->val & ~3) | (prot & 3);
308 static inline u64 dma_pte_addr(struct dma_pte *pte)
311 return pte->val & VTD_PAGE_MASK;
313 /* Must have a full atomic 64-bit read */
314 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
318 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
320 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
323 static inline bool dma_pte_present(struct dma_pte *pte)
325 return (pte->val & 3) != 0;
328 static inline bool dma_pte_superpage(struct dma_pte *pte)
330 return (pte->val & (1 << 7));
333 static inline int first_pte_in_page(struct dma_pte *pte)
335 return !((unsigned long)pte & ~VTD_PAGE_MASK);
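/*
 * Illustrative example: a pte pointer whose address ends in 0x000 sits
 * in the first slot of its 4KiB table page, so first_pte_in_page()
 * returns true; a pointer ending in 0x008 (the second 64-bit entry)
 * returns false.
 */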
339 * This domain is a static identity mapping domain.
340 * 1. This domain creates a static 1:1 mapping to all usable memory.
341 * 2. It maps to each iommu if successful.
342 * 3. Each iommu maps to this domain if successful.
344 static struct dmar_domain *si_domain;
345 static int hw_pass_through = 1;
347 /* devices under the same p2p bridge are owned in one domain */
348 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
350 /* domain represents a virtual machine; more than one device
351 * across iommus may be owned by one domain, e.g. a kvm guest.
353 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
355 /* si_domain contains multiple devices */
356 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
359 int id; /* domain id */
360 int nid; /* node id */
361 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
363 struct list_head devices; /* all devices' list */
364 struct iova_domain iovad; /* iova's that belong to this domain */
366 struct dma_pte *pgd; /* virtual address */
367 int gaw; /* max guest address width */
369 /* adjusted guest address width, 0 is level 2 30-bit */
372 int flags; /* flags to find out type of domain */
374 int iommu_coherency;/* indicate coherency of iommu access */
375 int iommu_snooping; /* indicate snooping control feature*/
376 int iommu_count; /* reference count of iommu */
377 int iommu_superpage;/* Level of superpages supported:
378 0 == 4KiB (no superpages), 1 == 2MiB,
379 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
380 spinlock_t iommu_lock; /* protect iommu set in domain */
381 u64 max_addr; /* maximum mapped address */
384 /* PCI domain-device relationship */
385 struct device_domain_info {
386 struct list_head link; /* link to domain siblings */
387 struct list_head global; /* link to global list */
388 int segment; /* PCI domain */
389 u8 bus; /* PCI bus number */
390 u8 devfn; /* PCI devfn number */
391 struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
392 struct intel_iommu *iommu; /* IOMMU used by this device */
393 struct dmar_domain *domain; /* pointer to domain */
396 static void flush_unmaps_timeout(unsigned long data);
398 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
400 #define HIGH_WATER_MARK 250
401 struct deferred_flush_tables {
403 struct iova *iova[HIGH_WATER_MARK];
404 struct dmar_domain *domain[HIGH_WATER_MARK];
407 static struct deferred_flush_tables *deferred_flush;
409 /* bitmap for indexing intel_iommus */
410 static int g_num_of_iommus;
412 static DEFINE_SPINLOCK(async_umap_flush_lock);
413 static LIST_HEAD(unmaps_to_do);
416 static long list_size;
418 static void domain_remove_dev_info(struct dmar_domain *domain);
420 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
421 int dmar_disabled = 0;
423 int dmar_disabled = 1;
424 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
426 static int dmar_map_gfx = 1;
427 static int dmar_forcedac;
428 static int intel_iommu_strict;
429 static int intel_iommu_superpage = 1;
431 int intel_iommu_gfx_mapped;
432 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
434 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
435 static DEFINE_SPINLOCK(device_domain_lock);
436 static LIST_HEAD(device_domain_list);
438 static struct iommu_ops intel_iommu_ops;
440 static int __init intel_iommu_setup(char *str)
445 if (!strncmp(str, "on", 2)) {
447 printk(KERN_INFO "Intel-IOMMU: enabled\n");
448 } else if (!strncmp(str, "off", 3)) {
450 printk(KERN_INFO "Intel-IOMMU: disabled\n");
451 } else if (!strncmp(str, "igfx_off", 8)) {
454 "Intel-IOMMU: disable GFX device mapping\n");
455 } else if (!strncmp(str, "forcedac", 8)) {
457 "Intel-IOMMU: Forcing DAC for PCI devices\n");
459 } else if (!strncmp(str, "strict", 6)) {
461 "Intel-IOMMU: disable batched IOTLB flush\n");
462 intel_iommu_strict = 1;
463 } else if (!strncmp(str, "sp_off", 6)) {
465 "Intel-IOMMU: disable supported super page\n");
466 intel_iommu_superpage = 0;
469 str += strcspn(str, ",");
475 __setup("intel_iommu=", intel_iommu_setup);
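/*
 * Example usage (illustrative): the parser above walks comma-separated
 * options, so booting with "intel_iommu=on,strict,sp_off" enables the
 * IOMMU, disables batched IOTLB flushing and disables superpage use.
 */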
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
479 static struct kmem_cache *iommu_iova_cache;
481 static inline void *alloc_pgtable_page(int node)
486 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
488 vaddr = page_address(page);
492 static inline void free_pgtable_page(void *vaddr)
494 free_page((unsigned long)vaddr);
497 static inline void *alloc_domain_mem(void)
499 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
502 static void free_domain_mem(void *vaddr)
504 kmem_cache_free(iommu_domain_cache, vaddr);
507 static inline void * alloc_devinfo_mem(void)
509 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
512 static inline void free_devinfo_mem(void *vaddr)
514 kmem_cache_free(iommu_devinfo_cache, vaddr);
517 struct iova *alloc_iova_mem(void)
519 return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
522 void free_iova_mem(struct iova *iova)
524 kmem_cache_free(iommu_iova_cache, iova);
528 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
533 sagaw = cap_sagaw(iommu->cap);
534 for (agaw = width_to_agaw(max_gaw);
536 if (test_bit(agaw, &sagaw))
544 * Calculate max SAGAW for each iommu.
546 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
548 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
552 * calculate agaw for each iommu.
553 * "SAGAW" may be different across iommus; use a default agaw, and
554 * fall back to a smaller supported agaw for iommus that don't support the default.
556 int iommu_calculate_agaw(struct intel_iommu *iommu)
558 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
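/*
 * Worked example (illustrative): with DEFAULT_DOMAIN_ADDRESS_WIDTH == 48,
 * width_to_agaw(48) == 2; if cap_sagaw() has bit 2 set the result is
 * agaw 2, i.e. a 48-bit, 4-level page table. Otherwise the loop walks
 * down to the next smaller agaw the hardware supports.
 */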
561 /* This function only returns a single iommu in a domain */
562 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
566 /* si_domain and vm domain should not get here. */
567 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
568 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
570 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
571 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
574 return g_iommus[iommu_id];
577 static void domain_update_iommu_coherency(struct dmar_domain *domain)
581 domain->iommu_coherency = 1;
583 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
584 if (!ecap_coherent(g_iommus[i]->ecap)) {
585 domain->iommu_coherency = 0;
591 static void domain_update_iommu_snooping(struct dmar_domain *domain)
595 domain->iommu_snooping = 1;
597 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
598 if (!ecap_sc_support(g_iommus[i]->ecap)) {
599 domain->iommu_snooping = 0;
605 static void domain_update_iommu_superpage(struct dmar_domain *domain)
607 struct dmar_drhd_unit *drhd;
608 struct intel_iommu *iommu = NULL;
611 if (!intel_iommu_superpage) {
612 domain->iommu_superpage = 0;
616 /* set iommu_superpage to the smallest common denominator */
617 for_each_active_iommu(iommu, drhd) {
618 mask &= cap_super_page_val(iommu->cap);
623 domain->iommu_superpage = fls(mask);
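/*
 * Illustrative example: cap_super_page_val() bit 0 means 2MiB and bit 1
 * means 1GiB support. If every active iommu reports 0x3 the common mask
 * stays 0x3 and fls() yields 2 (1GiB superpages); if one iommu only
 * reports 0x1, the mask drops to 0x1 and the domain is limited to 2MiB.
 */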
626 /* Some capabilities may be different across iommus */
627 static void domain_update_iommu_cap(struct dmar_domain *domain)
629 domain_update_iommu_coherency(domain);
630 domain_update_iommu_snooping(domain);
631 domain_update_iommu_superpage(domain);
634 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
636 struct dmar_drhd_unit *drhd = NULL;
639 for_each_drhd_unit(drhd) {
642 if (segment != drhd->segment)
645 for (i = 0; i < drhd->devices_cnt; i++) {
646 if (drhd->devices[i] &&
647 drhd->devices[i]->bus->number == bus &&
648 drhd->devices[i]->devfn == devfn)
650 if (drhd->devices[i] &&
651 drhd->devices[i]->subordinate &&
652 drhd->devices[i]->subordinate->number <= bus &&
653 drhd->devices[i]->subordinate->subordinate >= bus)
657 if (drhd->include_all)
664 static void domain_flush_cache(struct dmar_domain *domain,
665 void *addr, int size)
667 if (!domain->iommu_coherency)
668 clflush_cache_range(addr, size);
671 /* Gets context entry for a given bus and devfn */
672 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
675 struct root_entry *root;
676 struct context_entry *context;
677 unsigned long phy_addr;
680 spin_lock_irqsave(&iommu->lock, flags);
681 root = &iommu->root_entry[bus];
682 context = get_context_addr_from_root(root);
684 context = (struct context_entry *)
685 alloc_pgtable_page(iommu->node);
687 spin_unlock_irqrestore(&iommu->lock, flags);
690 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
691 phy_addr = virt_to_phys((void *)context);
692 set_root_value(root, phy_addr);
693 set_root_present(root);
694 __iommu_flush_cache(iommu, root, sizeof(*root));
696 spin_unlock_irqrestore(&iommu->lock, flags);
697 return &context[devfn];
700 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
702 struct root_entry *root;
703 struct context_entry *context;
707 spin_lock_irqsave(&iommu->lock, flags);
708 root = &iommu->root_entry[bus];
709 context = get_context_addr_from_root(root);
714 ret = context_present(&context[devfn]);
716 spin_unlock_irqrestore(&iommu->lock, flags);
720 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
722 struct root_entry *root;
723 struct context_entry *context;
726 spin_lock_irqsave(&iommu->lock, flags);
727 root = &iommu->root_entry[bus];
728 context = get_context_addr_from_root(root);
730 context_clear_entry(&context[devfn]);
731 __iommu_flush_cache(iommu, &context[devfn], \
734 spin_unlock_irqrestore(&iommu->lock, flags);
737 static void free_context_table(struct intel_iommu *iommu)
739 struct root_entry *root;
742 struct context_entry *context;
744 spin_lock_irqsave(&iommu->lock, flags);
745 if (!iommu->root_entry) {
748 for (i = 0; i < ROOT_ENTRY_NR; i++) {
749 root = &iommu->root_entry[i];
750 context = get_context_addr_from_root(root);
752 free_pgtable_page(context);
754 free_pgtable_page(iommu->root_entry);
755 iommu->root_entry = NULL;
757 spin_unlock_irqrestore(&iommu->lock, flags);
760 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
761 unsigned long pfn, int target_level)
763 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
764 struct dma_pte *parent, *pte = NULL;
765 int level = agaw_to_level(domain->agaw);
768 BUG_ON(!domain->pgd);
769 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
770 parent = domain->pgd;
775 offset = pfn_level_offset(pfn, level);
776 pte = &parent[offset];
777 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
779 if (level == target_level)
782 if (!dma_pte_present(pte)) {
785 tmp_page = alloc_pgtable_page(domain->nid);
790 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
791 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
792 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
793 /* Someone else set it while we were thinking; use theirs. */
794 free_pgtable_page(tmp_page);
797 domain_flush_cache(domain, pte, sizeof(*pte));
800 parent = phys_to_virt(dma_pte_addr(pte));
808 /* return the address's pte at a specific level */
809 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
811 int level, int *large_page)
813 struct dma_pte *parent, *pte = NULL;
814 int total = agaw_to_level(domain->agaw);
817 parent = domain->pgd;
818 while (level <= total) {
819 offset = pfn_level_offset(pfn, total);
820 pte = &parent[offset];
824 if (!dma_pte_present(pte)) {
829 if (pte->val & DMA_PTE_LARGE_PAGE) {
834 parent = phys_to_virt(dma_pte_addr(pte));
841 /* clear last level pte; a tlb flush should follow */
841 static int dma_pte_clear_range(struct dmar_domain *domain,
842 unsigned long start_pfn,
843 unsigned long last_pfn)
845 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
846 unsigned int large_page = 1;
847 struct dma_pte *first_pte, *pte;
850 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
851 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
852 BUG_ON(start_pfn > last_pfn);
854 /* we don't need lock here; nobody else touches the iova range */
857 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
859 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
864 start_pfn += lvl_to_nr_pages(large_page);
866 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
868 domain_flush_cache(domain, first_pte,
869 (void *)pte - (void *)first_pte);
871 } while (start_pfn && start_pfn <= last_pfn);
873 order = (large_page - 1) * 9;
877 /* free page table pages. last level pte should already be cleared */
878 static void dma_pte_free_pagetable(struct dmar_domain *domain,
879 unsigned long start_pfn,
880 unsigned long last_pfn)
882 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
883 struct dma_pte *first_pte, *pte;
884 int total = agaw_to_level(domain->agaw);
889 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
890 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
891 BUG_ON(start_pfn > last_pfn);
893 /* We don't need lock here; nobody else touches the iova range */
895 while (level <= total) {
896 tmp = align_to_level(start_pfn, level);
898 /* If we can't even clear one PTE at this level, we're done */
899 if (tmp + level_size(level) - 1 > last_pfn)
904 first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
905 if (large_page > level)
906 level = large_page + 1;
908 tmp = align_to_level(tmp + 1, level + 1);
912 if (dma_pte_present(pte)) {
913 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
917 tmp += level_size(level);
918 } while (!first_pte_in_page(pte) &&
919 tmp + level_size(level) - 1 <= last_pfn);
921 domain_flush_cache(domain, first_pte,
922 (void *)pte - (void *)first_pte);
924 } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
928 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
929 free_pgtable_page(domain->pgd);
935 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
937 struct root_entry *root;
940 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
944 __iommu_flush_cache(iommu, root, ROOT_SIZE);
946 spin_lock_irqsave(&iommu->lock, flags);
947 iommu->root_entry = root;
948 spin_unlock_irqrestore(&iommu->lock, flags);
953 static void iommu_set_root_entry(struct intel_iommu *iommu)
959 addr = iommu->root_entry;
961 raw_spin_lock_irqsave(&iommu->register_lock, flag);
962 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
964 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
966 /* Make sure hardware completes it */
967 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
968 readl, (sts & DMA_GSTS_RTPS), sts);
970 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
973 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
978 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
981 raw_spin_lock_irqsave(&iommu->register_lock, flag);
982 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
984 /* Make sure hardware completes it */
985 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
986 readl, (!(val & DMA_GSTS_WBFS)), val);
988 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
991 /* return value determines whether we need a write buffer flush */
992 static void __iommu_flush_context(struct intel_iommu *iommu,
993 u16 did, u16 source_id, u8 function_mask,
1000 case DMA_CCMD_GLOBAL_INVL:
1001 val = DMA_CCMD_GLOBAL_INVL;
1003 case DMA_CCMD_DOMAIN_INVL:
1004 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1006 case DMA_CCMD_DEVICE_INVL:
1007 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1008 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1013 val |= DMA_CCMD_ICC;
1015 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1016 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1018 /* Make sure hardware completes it */
1019 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1020 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1022 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1025 /* return value determines whether we need a write buffer flush */
1026 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1027 u64 addr, unsigned int size_order, u64 type)
1029 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1030 u64 val = 0, val_iva = 0;
1034 case DMA_TLB_GLOBAL_FLUSH:
1035 /* global flush doesn't need to set IVA_REG */
1036 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1038 case DMA_TLB_DSI_FLUSH:
1039 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1041 case DMA_TLB_PSI_FLUSH:
1042 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1043 /* Note: always flush non-leaf currently */
1044 val_iva = size_order | addr;
1049 /* Note: set drain read/write */
1052 * This is probably meant to be super secure. Looks like we can
1053 * ignore it without any impact.
1055 if (cap_read_drain(iommu->cap))
1056 val |= DMA_TLB_READ_DRAIN;
1058 if (cap_write_drain(iommu->cap))
1059 val |= DMA_TLB_WRITE_DRAIN;
1061 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1062 /* Note: Only uses first TLB reg currently */
1064 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1065 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1067 /* Make sure hardware completes it */
1068 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1069 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1071 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1073 /* check IOTLB invalidation granularity */
1074 if (DMA_TLB_IAIG(val) == 0)
1075 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1076 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1077 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1078 (unsigned long long)DMA_TLB_IIRG(type),
1079 (unsigned long long)DMA_TLB_IAIG(val));
1082 static struct device_domain_info *iommu_support_dev_iotlb(
1083 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1086 unsigned long flags;
1087 struct device_domain_info *info;
1088 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1090 if (!ecap_dev_iotlb_support(iommu->ecap))
1096 spin_lock_irqsave(&device_domain_lock, flags);
1097 list_for_each_entry(info, &domain->devices, link)
1098 if (info->bus == bus && info->devfn == devfn) {
1102 spin_unlock_irqrestore(&device_domain_lock, flags);
1104 if (!found || !info->dev)
1107 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1110 if (!dmar_find_matched_atsr_unit(info->dev))
1113 info->iommu = iommu;
1118 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1123 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1126 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1128 if (!info->dev || !pci_ats_enabled(info->dev))
1131 pci_disable_ats(info->dev);
1134 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1135 u64 addr, unsigned mask)
1138 unsigned long flags;
1139 struct device_domain_info *info;
1141 spin_lock_irqsave(&device_domain_lock, flags);
1142 list_for_each_entry(info, &domain->devices, link) {
1143 if (!info->dev || !pci_ats_enabled(info->dev))
1146 sid = info->bus << 8 | info->devfn;
1147 qdep = pci_ats_queue_depth(info->dev);
1148 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1150 spin_unlock_irqrestore(&device_domain_lock, flags);
1153 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1154 unsigned long pfn, unsigned int pages, int map)
1156 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1157 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1162 * Fall back to domain-selective flush if there is no PSI support or the
1163 * size is too big.
1164 * PSI requires the page size to be 2 ^ x, and the base address to be
1165 * naturally aligned to the size.
1167 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1168 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1171 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1175 * In caching mode, changes of pages from non-present to present require
1176 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1178 if (!cap_caching_mode(iommu->cap) || !map)
1179 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
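/*
 * Worked example (illustrative): for pages == 3 the mask becomes
 * ilog2(roundup_pow_of_two(3)) == 2, i.e. a naturally aligned 4-page
 * region is invalidated; if that mask exceeded cap_max_amask_val() the
 * code above already fell back to a domain-selective flush.
 */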
1182 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1185 unsigned long flags;
1187 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1188 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1189 pmen &= ~DMA_PMEN_EPM;
1190 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1192 /* wait for the protected region status bit to clear */
1193 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1194 readl, !(pmen & DMA_PMEN_PRS), pmen);
1196 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1199 static int iommu_enable_translation(struct intel_iommu *iommu)
1202 unsigned long flags;
1204 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1205 iommu->gcmd |= DMA_GCMD_TE;
1206 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1208 /* Make sure hardware completes it */
1209 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1210 readl, (sts & DMA_GSTS_TES), sts);
1212 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1216 static int iommu_disable_translation(struct intel_iommu *iommu)
1221 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1222 iommu->gcmd &= ~DMA_GCMD_TE;
1223 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1225 /* Make sure hardware completes it */
1226 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1227 readl, (!(sts & DMA_GSTS_TES)), sts);
1229 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1234 static int iommu_init_domains(struct intel_iommu *iommu)
1236 unsigned long ndomains;
1237 unsigned long nlongs;
1239 ndomains = cap_ndoms(iommu->cap);
1240 pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
1242 nlongs = BITS_TO_LONGS(ndomains);
1244 spin_lock_init(&iommu->lock);
1246 /* TBD: there might be 64K domains,
1247 * consider a different allocation scheme for future chips
1249 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1250 if (!iommu->domain_ids) {
1251 printk(KERN_ERR "Allocating domain id array failed\n");
1254 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1256 if (!iommu->domains) {
1257 printk(KERN_ERR "Allocating domain array failed\n");
1262 * If Caching mode is set, then invalid translations are tagged
1263 * with domain id 0. Hence we need to pre-allocate it.
1265 if (cap_caching_mode(iommu->cap))
1266 set_bit(0, iommu->domain_ids);
1271 static void domain_exit(struct dmar_domain *domain);
1272 static void vm_domain_exit(struct dmar_domain *domain);
1274 void free_dmar_iommu(struct intel_iommu *iommu)
1276 struct dmar_domain *domain;
1278 unsigned long flags;
1280 if ((iommu->domains) && (iommu->domain_ids)) {
1281 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1282 domain = iommu->domains[i];
1283 clear_bit(i, iommu->domain_ids);
1285 spin_lock_irqsave(&domain->iommu_lock, flags);
1286 if (--domain->iommu_count == 0) {
1287 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1288 vm_domain_exit(domain);
1290 domain_exit(domain);
1292 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1296 if (iommu->gcmd & DMA_GCMD_TE)
1297 iommu_disable_translation(iommu);
1300 irq_set_handler_data(iommu->irq, NULL);
1301 /* This will mask the irq */
1302 free_irq(iommu->irq, iommu);
1303 destroy_irq(iommu->irq);
1306 kfree(iommu->domains);
1307 kfree(iommu->domain_ids);
1309 g_iommus[iommu->seq_id] = NULL;
1311 /* if all iommus are freed, free g_iommus */
1312 for (i = 0; i < g_num_of_iommus; i++) {
1317 if (i == g_num_of_iommus)
1320 /* free context mapping */
1321 free_context_table(iommu);
1324 static struct dmar_domain *alloc_domain(void)
1326 struct dmar_domain *domain;
1328 domain = alloc_domain_mem();
1333 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1339 static int iommu_attach_domain(struct dmar_domain *domain,
1340 struct intel_iommu *iommu)
1343 unsigned long ndomains;
1344 unsigned long flags;
1346 ndomains = cap_ndoms(iommu->cap);
1348 spin_lock_irqsave(&iommu->lock, flags);
1350 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1351 if (num >= ndomains) {
1352 spin_unlock_irqrestore(&iommu->lock, flags);
1353 printk(KERN_ERR "IOMMU: no free domain ids\n");
1358 set_bit(num, iommu->domain_ids);
1359 set_bit(iommu->seq_id, &domain->iommu_bmp);
1360 iommu->domains[num] = domain;
1361 spin_unlock_irqrestore(&iommu->lock, flags);
1366 static void iommu_detach_domain(struct dmar_domain *domain,
1367 struct intel_iommu *iommu)
1369 unsigned long flags;
1373 spin_lock_irqsave(&iommu->lock, flags);
1374 ndomains = cap_ndoms(iommu->cap);
1375 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1376 if (iommu->domains[num] == domain) {
1383 clear_bit(num, iommu->domain_ids);
1384 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1385 iommu->domains[num] = NULL;
1387 spin_unlock_irqrestore(&iommu->lock, flags);
1390 static struct iova_domain reserved_iova_list;
1391 static struct lock_class_key reserved_rbtree_key;
1393 static int dmar_init_reserved_ranges(void)
1395 struct pci_dev *pdev = NULL;
1399 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1401 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1402 &reserved_rbtree_key);
1404 /* IOAPIC ranges shouldn't be accessed by DMA */
1405 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1406 IOVA_PFN(IOAPIC_RANGE_END));
1408 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1412 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1413 for_each_pci_dev(pdev) {
1416 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1417 r = &pdev->resource[i];
1418 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1420 iova = reserve_iova(&reserved_iova_list,
1424 printk(KERN_ERR "Reserve iova failed\n");
1432 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1434 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1437 static inline int guestwidth_to_adjustwidth(int gaw)
1440 int r = (gaw - 12) % 9;
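/*
 * Illustrative example: gaw == 48 gives r == 0 (48 bits is exactly the
 * 12-bit page offset plus four 9-bit levels), while gaw == 40 gives
 * r == 1, so the (elided) remainder of this helper has to round the
 * width up to the next level boundary.
 */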
1451 static int domain_init(struct dmar_domain *domain, int guest_width)
1453 struct intel_iommu *iommu;
1454 int adjust_width, agaw;
1455 unsigned long sagaw;
1457 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1458 spin_lock_init(&domain->iommu_lock);
1460 domain_reserve_special_ranges(domain);
1462 /* calculate AGAW */
1463 iommu = domain_get_iommu(domain);
1464 if (guest_width > cap_mgaw(iommu->cap))
1465 guest_width = cap_mgaw(iommu->cap);
1466 domain->gaw = guest_width;
1467 adjust_width = guestwidth_to_adjustwidth(guest_width);
1468 agaw = width_to_agaw(adjust_width);
1469 sagaw = cap_sagaw(iommu->cap);
1470 if (!test_bit(agaw, &sagaw)) {
1471 /* hardware doesn't support it, choose a bigger one */
1472 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1473 agaw = find_next_bit(&sagaw, 5, agaw);
1477 domain->agaw = agaw;
1478 INIT_LIST_HEAD(&domain->devices);
1480 if (ecap_coherent(iommu->ecap))
1481 domain->iommu_coherency = 1;
1483 domain->iommu_coherency = 0;
1485 if (ecap_sc_support(iommu->ecap))
1486 domain->iommu_snooping = 1;
1488 domain->iommu_snooping = 0;
1490 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1491 domain->iommu_count = 1;
1492 domain->nid = iommu->node;
1494 /* always allocate the top pgd */
1495 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1498 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1502 static void domain_exit(struct dmar_domain *domain)
1504 struct dmar_drhd_unit *drhd;
1505 struct intel_iommu *iommu;
1507 /* Domain 0 is reserved, so don't process it */
1511 /* Flush any lazy unmaps that may reference this domain */
1512 if (!intel_iommu_strict)
1513 flush_unmaps_timeout(0);
1515 domain_remove_dev_info(domain);
1517 put_iova_domain(&domain->iovad);
1520 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1522 /* free page tables */
1523 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1525 for_each_active_iommu(iommu, drhd)
1526 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1527 iommu_detach_domain(domain, iommu);
1529 free_domain_mem(domain);
1532 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1533 u8 bus, u8 devfn, int translation)
1535 struct context_entry *context;
1536 unsigned long flags;
1537 struct intel_iommu *iommu;
1538 struct dma_pte *pgd;
1540 unsigned long ndomains;
1543 struct device_domain_info *info = NULL;
1545 pr_debug("Set context mapping for %02x:%02x.%d\n",
1546 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1548 BUG_ON(!domain->pgd);
1549 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1550 translation != CONTEXT_TT_MULTI_LEVEL);
1552 iommu = device_to_iommu(segment, bus, devfn);
1556 context = device_to_context_entry(iommu, bus, devfn);
1559 spin_lock_irqsave(&iommu->lock, flags);
1560 if (context_present(context)) {
1561 spin_unlock_irqrestore(&iommu->lock, flags);
1568 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1569 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1572 /* find an available domain id for this device in iommu */
1573 ndomains = cap_ndoms(iommu->cap);
1574 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1575 if (iommu->domains[num] == domain) {
1583 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1584 if (num >= ndomains) {
1585 spin_unlock_irqrestore(&iommu->lock, flags);
1586 printk(KERN_ERR "IOMMU: no free domain ids\n");
1590 set_bit(num, iommu->domain_ids);
1591 iommu->domains[num] = domain;
1595 /* Skip top levels of page tables for
1596 * iommu which has less agaw than default.
1597 * Unnecessary for PT mode.
1599 if (translation != CONTEXT_TT_PASS_THROUGH) {
1600 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1601 pgd = phys_to_virt(dma_pte_addr(pgd));
1602 if (!dma_pte_present(pgd)) {
1603 spin_unlock_irqrestore(&iommu->lock, flags);
1610 context_set_domain_id(context, id);
1612 if (translation != CONTEXT_TT_PASS_THROUGH) {
1613 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1614 translation = info ? CONTEXT_TT_DEV_IOTLB :
1615 CONTEXT_TT_MULTI_LEVEL;
1618 * In pass through mode, AW must be programmed to indicate the largest
1619 * AGAW value supported by hardware. And ASR is ignored by hardware.
1621 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1622 context_set_address_width(context, iommu->msagaw);
1624 context_set_address_root(context, virt_to_phys(pgd));
1625 context_set_address_width(context, iommu->agaw);
1628 context_set_translation_type(context, translation);
1629 context_set_fault_enable(context);
1630 context_set_present(context);
1631 domain_flush_cache(domain, context, sizeof(*context));
1634 * It's a non-present to present mapping. If hardware doesn't cache
1635 * non-present entries, we only need to flush the write-buffer. If it
1636 * _does_ cache non-present entries, then it does so in the special
1637 * domain #0, which we have to flush:
1639 if (cap_caching_mode(iommu->cap)) {
1640 iommu->flush.flush_context(iommu, 0,
1641 (((u16)bus) << 8) | devfn,
1642 DMA_CCMD_MASK_NOBIT,
1643 DMA_CCMD_DEVICE_INVL);
1644 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1646 iommu_flush_write_buffer(iommu);
1648 iommu_enable_dev_iotlb(info);
1649 spin_unlock_irqrestore(&iommu->lock, flags);
1651 spin_lock_irqsave(&domain->iommu_lock, flags);
1652 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1653 domain->iommu_count++;
1654 if (domain->iommu_count == 1)
1655 domain->nid = iommu->node;
1656 domain_update_iommu_cap(domain);
1658 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1663 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1667 struct pci_dev *tmp, *parent;
1669 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1670 pdev->bus->number, pdev->devfn,
1675 /* dependent device mapping */
1676 tmp = pci_find_upstream_pcie_bridge(pdev);
1679 /* Secondary interface's bus number and devfn 0 */
1680 parent = pdev->bus->self;
1681 while (parent != tmp) {
1682 ret = domain_context_mapping_one(domain,
1683 pci_domain_nr(parent->bus),
1684 parent->bus->number,
1685 parent->devfn, translation);
1688 parent = parent->bus->self;
1690 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1691 return domain_context_mapping_one(domain,
1692 pci_domain_nr(tmp->subordinate),
1693 tmp->subordinate->number, 0,
1695 else /* this is a legacy PCI bridge */
1696 return domain_context_mapping_one(domain,
1697 pci_domain_nr(tmp->bus),
1703 static int domain_context_mapped(struct pci_dev *pdev)
1706 struct pci_dev *tmp, *parent;
1707 struct intel_iommu *iommu;
1709 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1714 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1717 /* dependent device mapping */
1718 tmp = pci_find_upstream_pcie_bridge(pdev);
1721 /* Secondary interface's bus number and devfn 0 */
1722 parent = pdev->bus->self;
1723 while (parent != tmp) {
1724 ret = device_context_mapped(iommu, parent->bus->number,
1728 parent = parent->bus->self;
1730 if (pci_is_pcie(tmp))
1731 return device_context_mapped(iommu, tmp->subordinate->number,
1734 return device_context_mapped(iommu, tmp->bus->number,
1738 /* Returns the number of VT-d pages, but aligned to the MM page size */
1739 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1742 host_addr &= ~PAGE_MASK;
1743 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
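/*
 * Worked example (illustrative, 4KiB pages): a buffer starting at page
 * offset 0x800 with size 0x1000 gives PAGE_ALIGN(0x1800) == 0x2000,
 * i.e. aligned_nrpages() returns 2 VT-d pages even though the length
 * alone is only one page.
 */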
1746 /* Return largest possible superpage level for a given mapping */
1747 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1748 unsigned long iov_pfn,
1749 unsigned long phy_pfn,
1750 unsigned long pages)
1752 int support, level = 1;
1753 unsigned long pfnmerge;
1755 support = domain->iommu_superpage;
1757 /* To use a large page, the virtual *and* physical addresses
1758 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1759 of them will mean we have to use smaller pages. So just
1760 merge them and check both at once. */
1761 pfnmerge = iov_pfn | phy_pfn;
1763 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1764 pages >>= VTD_STRIDE_SHIFT;
1767 pfnmerge >>= VTD_STRIDE_SHIFT;
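/*
 * Illustrative example: with iov_pfn == 0x200 and phy_pfn == 0x1400
 * (both 512-page aligned) and pages >= 512, the merged pfn has its low
 * nine bits clear, so a 2MiB superpage (level 2) can be used if the
 * hardware advertises it; any misaligned low bit forces 4KiB pages.
 */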
1774 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1775 struct scatterlist *sg, unsigned long phys_pfn,
1776 unsigned long nr_pages, int prot)
1778 struct dma_pte *first_pte = NULL, *pte = NULL;
1779 phys_addr_t uninitialized_var(pteval);
1780 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1781 unsigned long sg_res;
1782 unsigned int largepage_lvl = 0;
1783 unsigned long lvl_pages = 0;
1785 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1787 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1790 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1795 sg_res = nr_pages + 1;
1796 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1799 while (nr_pages > 0) {
1803 sg_res = aligned_nrpages(sg->offset, sg->length);
1804 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1805 sg->dma_length = sg->length;
1806 pteval = page_to_phys(sg_page(sg)) | prot;
1807 phys_pfn = pteval >> VTD_PAGE_SHIFT;
1811 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1813 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1816 /* It is a large page */
1817 if (largepage_lvl > 1)
1818 pteval |= DMA_PTE_LARGE_PAGE;
1820 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1823 /* We don't need lock here, nobody else
1824 * touches the iova range
1826 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1828 static int dumps = 5;
1829 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1830 iov_pfn, tmp, (unsigned long long)pteval);
1833 debug_dma_dump_mappings(NULL);
1838 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1840 BUG_ON(nr_pages < lvl_pages);
1841 BUG_ON(sg_res < lvl_pages);
1843 nr_pages -= lvl_pages;
1844 iov_pfn += lvl_pages;
1845 phys_pfn += lvl_pages;
1846 pteval += lvl_pages * VTD_PAGE_SIZE;
1847 sg_res -= lvl_pages;
1849 /* If the next PTE would be the first in a new page, then we
1850 need to flush the cache on the entries we've just written.
1851 And then we'll need to recalculate 'pte', so clear it and
1852 let it get set again in the if (!pte) block above.
1854 If we're done (!nr_pages) we need to flush the cache too.
1856 Also if we've been setting superpages, we may need to
1857 recalculate 'pte' and switch back to smaller pages for the
1858 end of the mapping, if the trailing size is not enough to
1859 use another superpage (i.e. sg_res < lvl_pages). */
1861 if (!nr_pages || first_pte_in_page(pte) ||
1862 (largepage_lvl > 1 && sg_res < lvl_pages)) {
1863 domain_flush_cache(domain, first_pte,
1864 (void *)pte - (void *)first_pte);
1868 if (!sg_res && nr_pages)
1874 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1875 struct scatterlist *sg, unsigned long nr_pages,
1878 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1881 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1882 unsigned long phys_pfn, unsigned long nr_pages,
1885 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1888 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1893 clear_context_table(iommu, bus, devfn);
1894 iommu->flush.flush_context(iommu, 0, 0, 0,
1895 DMA_CCMD_GLOBAL_INVL);
1896 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1899 static void domain_remove_dev_info(struct dmar_domain *domain)
1901 struct device_domain_info *info;
1902 unsigned long flags;
1903 struct intel_iommu *iommu;
1905 spin_lock_irqsave(&device_domain_lock, flags);
1906 while (!list_empty(&domain->devices)) {
1907 info = list_entry(domain->devices.next,
1908 struct device_domain_info, link);
1909 list_del(&info->link);
1910 list_del(&info->global);
1912 info->dev->dev.archdata.iommu = NULL;
1913 spin_unlock_irqrestore(&device_domain_lock, flags);
1915 iommu_disable_dev_iotlb(info);
1916 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1917 iommu_detach_dev(iommu, info->bus, info->devfn);
1918 free_devinfo_mem(info);
1920 spin_lock_irqsave(&device_domain_lock, flags);
1922 spin_unlock_irqrestore(&device_domain_lock, flags);
1927 * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1929 static struct dmar_domain *
1930 find_domain(struct pci_dev *pdev)
1932 struct device_domain_info *info;
1934 /* No lock here, assumes no domain exit in normal case */
1935 info = pdev->dev.archdata.iommu;
1937 return info->domain;
1941 /* domain is initialized */
1942 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1944 struct dmar_domain *domain, *found = NULL;
1945 struct intel_iommu *iommu;
1946 struct dmar_drhd_unit *drhd;
1947 struct device_domain_info *info, *tmp;
1948 struct pci_dev *dev_tmp;
1949 unsigned long flags;
1950 int bus = 0, devfn = 0;
1954 domain = find_domain(pdev);
1958 segment = pci_domain_nr(pdev->bus);
1960 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1962 if (pci_is_pcie(dev_tmp)) {
1963 bus = dev_tmp->subordinate->number;
1966 bus = dev_tmp->bus->number;
1967 devfn = dev_tmp->devfn;
1969 spin_lock_irqsave(&device_domain_lock, flags);
1970 list_for_each_entry(info, &device_domain_list, global) {
1971 if (info->segment == segment &&
1972 info->bus == bus && info->devfn == devfn) {
1973 found = info->domain;
1977 spin_unlock_irqrestore(&device_domain_lock, flags);
1978 /* pcie-pci bridge already has a domain; use it */
1985 domain = alloc_domain();
1989 /* Allocate new domain for the device */
1990 drhd = dmar_find_matched_drhd_unit(pdev);
1992 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1996 iommu = drhd->iommu;
1998 ret = iommu_attach_domain(domain, iommu);
2000 free_domain_mem(domain);
2004 if (domain_init(domain, gaw)) {
2005 domain_exit(domain);
2009 /* register pcie-to-pci device */
2011 info = alloc_devinfo_mem();
2013 domain_exit(domain);
2016 info->segment = segment;
2018 info->devfn = devfn;
2020 info->domain = domain;
2021 /* This domain is shared by devices under p2p bridge */
2022 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2024 /* pcie-to-pci bridge already has a domain; use it */
2026 spin_lock_irqsave(&device_domain_lock, flags);
2027 list_for_each_entry(tmp, &device_domain_list, global) {
2028 if (tmp->segment == segment &&
2029 tmp->bus == bus && tmp->devfn == devfn) {
2030 found = tmp->domain;
2035 spin_unlock_irqrestore(&device_domain_lock, flags);
2036 free_devinfo_mem(info);
2037 domain_exit(domain);
2040 list_add(&info->link, &domain->devices);
2041 list_add(&info->global, &device_domain_list);
2042 spin_unlock_irqrestore(&device_domain_lock, flags);
2047 info = alloc_devinfo_mem();
2050 info->segment = segment;
2051 info->bus = pdev->bus->number;
2052 info->devfn = pdev->devfn;
2054 info->domain = domain;
2055 spin_lock_irqsave(&device_domain_lock, flags);
2056 /* somebody is fast */
2057 found = find_domain(pdev);
2058 if (found != NULL) {
2059 spin_unlock_irqrestore(&device_domain_lock, flags);
2060 if (found != domain) {
2061 domain_exit(domain);
2064 free_devinfo_mem(info);
2067 list_add(&info->link, &domain->devices);
2068 list_add(&info->global, &device_domain_list);
2069 pdev->dev.archdata.iommu = info;
2070 spin_unlock_irqrestore(&device_domain_lock, flags);
2073 /* recheck it here, maybe others set it */
2074 return find_domain(pdev);
2077 static int iommu_identity_mapping;
2078 #define IDENTMAP_ALL 1
2079 #define IDENTMAP_GFX 2
2080 #define IDENTMAP_AZALIA 4
2082 static int iommu_domain_identity_map(struct dmar_domain *domain,
2083 unsigned long long start,
2084 unsigned long long end)
2086 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2087 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2089 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2090 dma_to_mm_pfn(last_vpfn))) {
2091 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2095 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2096 start, end, domain->id);
2098 * The RMRR range might overlap with the physical memory range,
2101 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2103 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2104 last_vpfn - first_vpfn + 1,
2105 DMA_PTE_READ|DMA_PTE_WRITE);
2108 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2109 unsigned long long start,
2110 unsigned long long end)
2112 struct dmar_domain *domain;
2115 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2119 /* For _hardware_ passthrough, don't bother. But for software
2120 passthrough, we do it anyway -- it may indicate a memory
2121 range which is reserved in E820, and so didn't get set
2122 up to start with in si_domain */
2123 if (domain == si_domain && hw_pass_through) {
2124 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2125 pci_name(pdev), start, end);
2130 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2131 pci_name(pdev), start, end);
2134 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2135 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2136 dmi_get_system_info(DMI_BIOS_VENDOR),
2137 dmi_get_system_info(DMI_BIOS_VERSION),
2138 dmi_get_system_info(DMI_PRODUCT_VERSION));
2143 if (end >> agaw_to_width(domain->agaw)) {
2144 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2145 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2146 agaw_to_width(domain->agaw),
2147 dmi_get_system_info(DMI_BIOS_VENDOR),
2148 dmi_get_system_info(DMI_BIOS_VERSION),
2149 dmi_get_system_info(DMI_PRODUCT_VERSION));
2154 ret = iommu_domain_identity_map(domain, start, end);
2158 /* context entry init */
2159 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2166 domain_exit(domain);
2170 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2171 struct pci_dev *pdev)
2173 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2175 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2179 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2180 static inline void iommu_prepare_isa(void)
2182 struct pci_dev *pdev;
2185 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2189 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2190 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2193 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2194 "floppy might not work\n");
2198 static inline void iommu_prepare_isa(void)
2202 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2204 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2206 static int __init si_domain_work_fn(unsigned long start_pfn,
2207 unsigned long end_pfn, void *datax)
2211 *ret = iommu_domain_identity_map(si_domain,
2212 (uint64_t)start_pfn << PAGE_SHIFT,
2213 (uint64_t)end_pfn << PAGE_SHIFT);
2218 static int __init si_domain_init(int hw)
2220 struct dmar_drhd_unit *drhd;
2221 struct intel_iommu *iommu;
2224 si_domain = alloc_domain();
2228 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2230 for_each_active_iommu(iommu, drhd) {
2231 ret = iommu_attach_domain(si_domain, iommu);
2233 domain_exit(si_domain);
2238 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2239 domain_exit(si_domain);
2243 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2248 for_each_online_node(nid) {
2249 work_with_active_regions(nid, si_domain_work_fn, &ret);
2257 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2258 struct pci_dev *pdev);
2259 static int identity_mapping(struct pci_dev *pdev)
2261 struct device_domain_info *info;
2263 if (likely(!iommu_identity_mapping))
2266 info = pdev->dev.archdata.iommu;
2267 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2268 return (info->domain == si_domain);
2273 static int domain_add_dev_info(struct dmar_domain *domain,
2274 struct pci_dev *pdev,
2277 struct device_domain_info *info;
2278 unsigned long flags;
2281 info = alloc_devinfo_mem();
2285 ret = domain_context_mapping(domain, pdev, translation);
2287 free_devinfo_mem(info);
2291 info->segment = pci_domain_nr(pdev->bus);
2292 info->bus = pdev->bus->number;
2293 info->devfn = pdev->devfn;
2295 info->domain = domain;
2297 spin_lock_irqsave(&device_domain_lock, flags);
2298 list_add(&info->link, &domain->devices);
2299 list_add(&info->global, &device_domain_list);
2300 pdev->dev.archdata.iommu = info;
2301 spin_unlock_irqrestore(&device_domain_lock, flags);
2306 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2308 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2311 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2314 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2318 * We want to start off with all devices in the 1:1 domain, and
2319 * take them out later if we find they can't access all of memory.
2321 * However, we can't do this for PCI devices behind bridges,
2322 * because all PCI devices behind the same bridge will end up
2323 * with the same source-id on their transactions.
2325 * Practically speaking, we can't change things around for these
2326 * devices at run-time, because we can't be sure there'll be no
2327 * DMA transactions in flight for any of their siblings.
2329 * So PCI devices (unless they're on the root bus) as well as
2330 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2331 * the 1:1 domain, just in _case_ one of their siblings turns out
2332 * not to be able to map all of memory.
2334 if (!pci_is_pcie(pdev)) {
2335 if (!pci_is_root_bus(pdev->bus))
2337 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2339 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2343 * At boot time, we don't yet know if devices will be 64-bit capable.
2344 * Assume that they will -- if they turn out not to be, then we can
2345 * take them out of the 1:1 domain later.
2349 * If the device's dma_mask is less than the system's memory
2350 * size then this is not a candidate for identity mapping.
2352 u64 dma_mask = pdev->dma_mask;
2354 if (pdev->dev.coherent_dma_mask &&
2355 pdev->dev.coherent_dma_mask < dma_mask)
2356 dma_mask = pdev->dev.coherent_dma_mask;
2358 return dma_mask >= dma_get_required_mask(&pdev->dev);
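/*
 * Illustrative example: a device limited to a 32-bit dma_mask on a
 * machine with more than 4GiB of RAM fails this check (the required
 * mask is wider), so it is not left in the 1:1 identity domain.
 */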
2364 static int __init iommu_prepare_static_identity_mapping(int hw)
2366 struct pci_dev *pdev = NULL;
2369 ret = si_domain_init(hw);
2373 for_each_pci_dev(pdev) {
2374 /* Skip Host/PCI Bridge devices */
2375 if (IS_BRIDGE_HOST_DEVICE(pdev))
2377 if (iommu_should_identity_map(pdev, 1)) {
2378 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2379 hw ? "hardware" : "software", pci_name(pdev));
2381 ret = domain_add_dev_info(si_domain, pdev,
2382 hw ? CONTEXT_TT_PASS_THROUGH :
2383 CONTEXT_TT_MULTI_LEVEL);
2392 static int __init init_dmars(void)
2394 struct dmar_drhd_unit *drhd;
2395 struct dmar_rmrr_unit *rmrr;
2396 struct pci_dev *pdev;
2397 struct intel_iommu *iommu;
2403 * initialize and program root entry to not present
2406 for_each_drhd_unit(drhd) {
2409 * lock not needed as this is only incremented in the single-
2410 * threaded kernel __init code path; all other accesses are read-only
2415 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2418 printk(KERN_ERR "Allocating global iommu array failed\n");
2423 deferred_flush = kzalloc(g_num_of_iommus *
2424 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2425 if (!deferred_flush) {
2430 for_each_drhd_unit(drhd) {
2434 iommu = drhd->iommu;
2435 g_iommus[iommu->seq_id] = iommu;
2437 ret = iommu_init_domains(iommu);
2443 * we could share the same root & context tables
2444 * among all IOMMUs. Need to split them later.
2446 ret = iommu_alloc_root_entry(iommu);
2448 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2451 if (!ecap_pass_through(iommu->ecap))
2452 hw_pass_through = 0;
2456 * Start from a sane IOMMU hardware state.
2458 for_each_drhd_unit(drhd) {
2462 iommu = drhd->iommu;
2465 * If the queued invalidation is already initialized by us
2466 * (for example, while enabling interrupt-remapping) then
2467 * we already have things rolling from a sane state.
2473 * Clear any previous faults.
2475 dmar_fault(-1, iommu);
2477 * Disable queued invalidation if supported and already enabled
2478 * before OS handover.
2480 dmar_disable_qi(iommu);
2483 for_each_drhd_unit(drhd) {
2487 iommu = drhd->iommu;
2489 if (dmar_enable_qi(iommu)) {
2491 * Queued invalidation is not enabled; use register-based invalidation
2494 iommu->flush.flush_context = __iommu_flush_context;
2495 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2496 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2499 (unsigned long long)drhd->reg_base_addr);
2501 iommu->flush.flush_context = qi_flush_context;
2502 iommu->flush.flush_iotlb = qi_flush_iotlb;
2503 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2506 (unsigned long long)drhd->reg_base_addr);
2510 if (iommu_pass_through)
2511 iommu_identity_mapping |= IDENTMAP_ALL;
2513 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2514 iommu_identity_mapping |= IDENTMAP_GFX;
2517 check_tylersburg_isoch();
2520 * If pass-through is not set or not enabled, set up context entries for
2521 * identity mappings for RMRR, GFX and ISA, and possibly fall back to static
2522 * identity mapping if iommu_identity_mapping is set.
2524 if (iommu_identity_mapping) {
2525 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2527 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2533 * for each dev attached to rmrr
2535 * locate drhd for dev, alloc domain for dev
2536 * allocate free domain
2537 * allocate page table entries for rmrr
2538 * if context not allocated for bus
2539 * allocate and init context
2540 * set present in root table for this bus
2541 * init context with domain, translation etc
2545 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2546 for_each_rmrr_units(rmrr) {
2547 for (i = 0; i < rmrr->devices_cnt; i++) {
2548 pdev = rmrr->devices[i];
2550 * some BIOSes list non-existent devices in the DMAR table; just skip them
2555 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2558 "IOMMU: mapping reserved region failed\n");
2562 iommu_prepare_isa();
2567 * global invalidate context cache
2568 * global invalidate iotlb
2569 * enable translation
2571 for_each_drhd_unit(drhd) {
2572 if (drhd->ignored) {
2574 * we always have to disable PMRs or DMA may fail on this device
2578 iommu_disable_protect_mem_regions(drhd->iommu);
2581 iommu = drhd->iommu;
2583 iommu_flush_write_buffer(iommu);
2585 ret = dmar_set_interrupt(iommu);
2589 iommu_set_root_entry(iommu);
2591 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2592 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2594 ret = iommu_enable_translation(iommu);
2598 iommu_disable_protect_mem_regions(iommu);
2603 for_each_drhd_unit(drhd) {
2606 iommu = drhd->iommu;
2613 /* This takes a number of _MM_ pages, not VTD pages */
2614 static struct iova *intel_alloc_iova(struct device *dev,
2615 struct dmar_domain *domain,
2616 unsigned long nrpages, uint64_t dma_mask)
2618 struct pci_dev *pdev = to_pci_dev(dev);
2619 struct iova *iova = NULL;
2621 /* Restrict dma_mask to the width that the iommu can handle */
2622 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2624 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2626 * First try to allocate an I/O virtual address in
2627 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
2630 iova = alloc_iova(&domain->iovad, nrpages,
2631 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2635 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2636 if (unlikely(!iova)) {
2637 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2638 nrpages, pci_name(pdev));
2645 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2647 struct dmar_domain *domain;
2650 domain = get_domain_for_dev(pdev,
2651 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2654 "Allocating domain for %s failed", pci_name(pdev));
2658 /* make sure context mapping is ok */
2659 if (unlikely(!domain_context_mapped(pdev))) {
2660 ret = domain_context_mapping(domain, pdev,
2661 CONTEXT_TT_MULTI_LEVEL);
2664 "Domain context map for %s failed",
2673 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2675 struct device_domain_info *info;
2677 /* No lock here, assumes no domain exit in normal case */
2678 info = dev->dev.archdata.iommu;
2680 return info->domain;
2682 return __get_valid_domain_for_dev(dev);
2685 static int iommu_dummy(struct pci_dev *pdev)
2687 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2690 /* Check if the pdev needs to go through the non-identity map/unmap process. */
2691 static int iommu_no_mapping(struct device *dev)
2693 struct pci_dev *pdev;
2696 if (unlikely(dev->bus != &pci_bus_type))
2699 pdev = to_pci_dev(dev);
2700 if (iommu_dummy(pdev))
2703 if (!iommu_identity_mapping)
2706 found = identity_mapping(pdev);
2708 if (iommu_should_identity_map(pdev, 0))
2712 * The 32-bit DMA device is removed from si_domain and falls back
2713 * to non-identity mapping.
2715 domain_remove_one_dev_info(si_domain, pdev);
2716 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2722 * If a 64-bit DMA device has been detached from a VM, the device
2723 * is put back into si_domain for identity mapping.
2725 if (iommu_should_identity_map(pdev, 0)) {
2727 ret = domain_add_dev_info(si_domain, pdev,
2729 CONTEXT_TT_PASS_THROUGH :
2730 CONTEXT_TT_MULTI_LEVEL);
2732 printk(KERN_INFO "64bit %s uses identity mapping\n",
2742 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2743 size_t size, int dir, u64 dma_mask)
2745 struct pci_dev *pdev = to_pci_dev(hwdev);
2746 struct dmar_domain *domain;
2747 phys_addr_t start_paddr;
2751 struct intel_iommu *iommu;
2752 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2754 BUG_ON(dir == DMA_NONE);
2756 if (iommu_no_mapping(hwdev))
2759 domain = get_valid_domain_for_dev(pdev);
2763 iommu = domain_get_iommu(domain);
2764 size = aligned_nrpages(paddr, size);
2766 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2771 * Check if DMAR supports zero-length reads on write-only mappings
2774 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2775 !cap_zlr(iommu->cap))
2776 prot |= DMA_PTE_READ;
2777 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2778 prot |= DMA_PTE_WRITE;
2780 * paddr .. (paddr + size) might cover only part of a page; we should map the
2781 * whole page. Note: if two parts of one page are mapped separately, we
2782 * might have two guest addresses mapping to the same host paddr, but this
2783 * is not a big problem
2785 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2786 mm_to_dma_pfn(paddr_pfn), size, prot);
2790 /* it's a non-present to present mapping. Only flush if caching mode */
2791 if (cap_caching_mode(iommu->cap))
2792 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2794 iommu_flush_write_buffer(iommu);
2796 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2797 start_paddr += paddr & ~PAGE_MASK;
2802 __free_iova(&domain->iovad, iova);
2803 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
2804 pci_name(pdev), size, (unsigned long long)paddr, dir);
2808 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2809 unsigned long offset, size_t size,
2810 enum dma_data_direction dir,
2811 struct dma_attrs *attrs)
2813 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2814 dir, to_pci_dev(dev)->dma_mask);
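/*
 * Illustrative sketch (editorial, not built): once intel_dma_ops is installed
 * as the global dma_ops in intel_iommu_init() below, an ordinary streaming-DMA
 * call from a driver, like the hypothetical helper here, is serviced by
 * intel_map_page() above.
 */
#if 0
static dma_addr_t example_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return 0;	/* treat 0 as "mapping failed" in this sketch */

	return handle;
}
#endif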
2817 static void flush_unmaps(void)
2823 /* just flush them all */
2824 for (i = 0; i < g_num_of_iommus; i++) {
2825 struct intel_iommu *iommu = g_iommus[i];
2829 if (!deferred_flush[i].next)
2832 /* In caching mode, global flushes make emulation expensive */
2833 if (!cap_caching_mode(iommu->cap))
2834 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2835 DMA_TLB_GLOBAL_FLUSH);
2836 for (j = 0; j < deferred_flush[i].next; j++) {
2838 struct iova *iova = deferred_flush[i].iova[j];
2839 struct dmar_domain *domain = deferred_flush[i].domain[j];
2841 /* On real hardware multiple invalidations are expensive */
2842 if (cap_caching_mode(iommu->cap))
2843 iommu_flush_iotlb_psi(iommu, domain->id,
2844 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
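/*
 * For the device-IOTLB (ATS) flush below, "mask" is the order (log2)
 * of the invalidated range in pages.
 */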
2846 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2847 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2848 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2850 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2852 deferred_flush[i].next = 0;
2858 static void flush_unmaps_timeout(unsigned long data)
2860 unsigned long flags;
2862 spin_lock_irqsave(&async_umap_flush_lock, flags);
2864 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2867 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2869 unsigned long flags;
2871 struct intel_iommu *iommu;
2873 spin_lock_irqsave(&async_umap_flush_lock, flags);
2874 if (list_size == HIGH_WATER_MARK)
2877 iommu = domain_get_iommu(dom);
2878 iommu_id = iommu->seq_id;
2880 next = deferred_flush[iommu_id].next;
2881 deferred_flush[iommu_id].domain[next] = dom;
2882 deferred_flush[iommu_id].iova[next] = iova;
2883 deferred_flush[iommu_id].next++;
2886 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2890 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
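/*
 * Editorial note: add_unmap() above batches IOVA releases per IOMMU. Once
 * HIGH_WATER_MARK entries are queued the list is drained immediately;
 * otherwise the unmap_timer fires roughly 10ms later and flush_unmaps()
 * releases everything with one global IOTLB flush (or per-range flushes in
 * caching mode), amortizing the flush cost across many unmaps.
 */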
2893 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2894 size_t size, enum dma_data_direction dir,
2895 struct dma_attrs *attrs)
2897 struct pci_dev *pdev = to_pci_dev(dev);
2898 struct dmar_domain *domain;
2899 unsigned long start_pfn, last_pfn;
2901 struct intel_iommu *iommu;
2903 if (iommu_no_mapping(dev))
2906 domain = find_domain(pdev);
2909 iommu = domain_get_iommu(domain);
2911 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2912 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2913 (unsigned long long)dev_addr))
2916 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2917 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2919 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2920 pci_name(pdev), start_pfn, last_pfn);
2922 /* clear the whole page */
2923 dma_pte_clear_range(domain, start_pfn, last_pfn);
2925 /* free page tables */
2926 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2928 if (intel_iommu_strict) {
2929 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2930 last_pfn - start_pfn + 1, 0);
2932 __free_iova(&domain->iovad, iova);
2934 add_unmap(domain, iova);
2936 * queue up the release of the unmap to save the roughly 1/6th of
2937 * CPU time used up by the iotlb flush operation...
2942 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2943 dma_addr_t *dma_handle, gfp_t flags)
2948 size = PAGE_ALIGN(size);
2949 order = get_order(size);
2951 if (!iommu_no_mapping(hwdev))
2952 flags &= ~(GFP_DMA | GFP_DMA32);
2953 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2954 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2960 vaddr = (void *)__get_free_pages(flags, order);
2963 memset(vaddr, 0, size);
2965 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2967 hwdev->coherent_dma_mask);
2970 free_pages((unsigned long)vaddr, order);
2974 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2975 dma_addr_t dma_handle)
2979 size = PAGE_ALIGN(size);
2980 order = get_order(size);
2982 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2983 free_pages((unsigned long)vaddr, order);
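/*
 * Illustrative sketch (editorial, not built): a driver's dma_alloc_coherent()
 * / dma_free_coherent() pair ends up in intel_alloc_coherent() and
 * intel_free_coherent() above once intel_dma_ops is installed. The helper
 * name and size are hypothetical.
 */
#if 0
static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* 4KiB of coherent memory; DMA address returned through *dma */
	return dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);
}
#endif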
2986 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2987 int nelems, enum dma_data_direction dir,
2988 struct dma_attrs *attrs)
2990 struct pci_dev *pdev = to_pci_dev(hwdev);
2991 struct dmar_domain *domain;
2992 unsigned long start_pfn, last_pfn;
2994 struct intel_iommu *iommu;
2996 if (iommu_no_mapping(hwdev))
2999 domain = find_domain(pdev);
3002 iommu = domain_get_iommu(domain);
3004 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3005 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3006 (unsigned long long)sglist[0].dma_address))
3009 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3010 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3012 /* clear the whole page */
3013 dma_pte_clear_range(domain, start_pfn, last_pfn);
3015 /* free page tables */
3016 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3018 if (intel_iommu_strict) {
3019 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3020 last_pfn - start_pfn + 1, 0);
3022 __free_iova(&domain->iovad, iova);
3024 add_unmap(domain, iova);
3026 * queue up the release of the unmap to save the roughly 1/6th of
3027 * CPU time used up by the iotlb flush operation...
3032 static int intel_nontranslate_map_sg(struct device *hddev,
3033 struct scatterlist *sglist, int nelems, int dir)
3036 struct scatterlist *sg;
3038 for_each_sg(sglist, sg, nelems, i) {
3039 BUG_ON(!sg_page(sg));
3040 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3041 sg->dma_length = sg->length;
3046 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3047 enum dma_data_direction dir, struct dma_attrs *attrs)
3050 struct pci_dev *pdev = to_pci_dev(hwdev);
3051 struct dmar_domain *domain;
3054 struct iova *iova = NULL;
3056 struct scatterlist *sg;
3057 unsigned long start_vpfn;
3058 struct intel_iommu *iommu;
3060 BUG_ON(dir == DMA_NONE);
3061 if (iommu_no_mapping(hwdev))
3062 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3064 domain = get_valid_domain_for_dev(pdev);
3068 iommu = domain_get_iommu(domain);
3070 for_each_sg(sglist, sg, nelems, i)
3071 size += aligned_nrpages(sg->offset, sg->length);
3073 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3076 sglist->dma_length = 0;
3081 * Check if DMAR supports zero-length reads on write-only mappings
3084 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3085 !cap_zlr(iommu->cap))
3086 prot |= DMA_PTE_READ;
3087 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3088 prot |= DMA_PTE_WRITE;
3090 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3092 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3093 if (unlikely(ret)) {
3094 /* clear the page */
3095 dma_pte_clear_range(domain, start_vpfn,
3096 start_vpfn + size - 1);
3097 /* free page tables */
3098 dma_pte_free_pagetable(domain, start_vpfn,
3099 start_vpfn + size - 1);
3101 __free_iova(&domain->iovad, iova);
3105 /* it's a non-present to present mapping. Only flush if caching mode */
3106 if (cap_caching_mode(iommu->cap))
3107 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3109 iommu_flush_write_buffer(iommu);
3114 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3119 struct dma_map_ops intel_dma_ops = {
3120 .alloc_coherent = intel_alloc_coherent,
3121 .free_coherent = intel_free_coherent,
3122 .map_sg = intel_map_sg,
3123 .unmap_sg = intel_unmap_sg,
3124 .map_page = intel_map_page,
3125 .unmap_page = intel_unmap_page,
3126 .mapping_error = intel_mapping_error,
3129 static inline int iommu_domain_cache_init(void)
3133 iommu_domain_cache = kmem_cache_create("iommu_domain",
3134 sizeof(struct dmar_domain),
3139 if (!iommu_domain_cache) {
3140 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3147 static inline int iommu_devinfo_cache_init(void)
3151 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3152 sizeof(struct device_domain_info),
3156 if (!iommu_devinfo_cache) {
3157 printk(KERN_ERR "Couldn't create devinfo cache\n");
3164 static inline int iommu_iova_cache_init(void)
3168 iommu_iova_cache = kmem_cache_create("iommu_iova",
3169 sizeof(struct iova),
3173 if (!iommu_iova_cache) {
3174 printk(KERN_ERR "Couldn't create iova cache\n");
3181 static int __init iommu_init_mempool(void)
3184 ret = iommu_iova_cache_init();
3188 ret = iommu_domain_cache_init();
3192 ret = iommu_devinfo_cache_init();
3196 kmem_cache_destroy(iommu_domain_cache);
3198 kmem_cache_destroy(iommu_iova_cache);
3203 static void __init iommu_exit_mempool(void)
3205 kmem_cache_destroy(iommu_devinfo_cache);
3206 kmem_cache_destroy(iommu_domain_cache);
3207 kmem_cache_destroy(iommu_iova_cache);
3211 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3213 struct dmar_drhd_unit *drhd;
3217 /* We know that this device on this chipset has its own IOMMU.
3218 * If we find it under a different IOMMU, then the BIOS is lying
3219 * to us. Hope that the IOMMU for this device is actually
3220 * disabled, and it needs no translation...
3222 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3224 /* "can't" happen */
3225 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3228 vtbar &= 0xffff0000;
3230 /* we know that this IOMMU should be at offset 0xa000 from vtbar */
3231 drhd = dmar_find_matched_drhd_unit(pdev);
3232 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3233 TAINT_FIRMWARE_WORKAROUND,
3234 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3235 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3237 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
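/*
 * Editorial note: tagging the device with DUMMY_DEVICE_DOMAIN_INFO above
 * makes iommu_dummy()/iommu_no_mapping() treat it as untranslated, so the
 * QuickData engine keeps working even though the BIOS misreported its DRHD.
 */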
3239 static void __init init_no_remapping_devices(void)
3241 struct dmar_drhd_unit *drhd;
3243 for_each_drhd_unit(drhd) {
3244 if (!drhd->include_all) {
3246 for (i = 0; i < drhd->devices_cnt; i++)
3247 if (drhd->devices[i] != NULL)
3249 /* ignore DMAR unit if no pci devices exist */
3250 if (i == drhd->devices_cnt)
3255 for_each_drhd_unit(drhd) {
3257 if (drhd->ignored || drhd->include_all)
3260 for (i = 0; i < drhd->devices_cnt; i++)
3261 if (drhd->devices[i] &&
3262 !IS_GFX_DEVICE(drhd->devices[i]))
3265 if (i < drhd->devices_cnt)
3268 /* This IOMMU has *only* gfx devices. Either bypass it or
3269 set the gfx_mapped flag, as appropriate */
3271 intel_iommu_gfx_mapped = 1;
3274 for (i = 0; i < drhd->devices_cnt; i++) {
3275 if (!drhd->devices[i])
3277 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3283 #ifdef CONFIG_SUSPEND
3284 static int init_iommu_hw(void)
3286 struct dmar_drhd_unit *drhd;
3287 struct intel_iommu *iommu = NULL;
3289 for_each_active_iommu(iommu, drhd)
3291 dmar_reenable_qi(iommu);
3293 for_each_iommu(iommu, drhd) {
3294 if (drhd->ignored) {
3296 * we always have to disable PMRs or DMA may fail on this device
3300 iommu_disable_protect_mem_regions(iommu);
3304 iommu_flush_write_buffer(iommu);
3306 iommu_set_root_entry(iommu);
3308 iommu->flush.flush_context(iommu, 0, 0, 0,
3309 DMA_CCMD_GLOBAL_INVL);
3310 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3311 DMA_TLB_GLOBAL_FLUSH);
3312 if (iommu_enable_translation(iommu))
3314 iommu_disable_protect_mem_regions(iommu);
3320 static void iommu_flush_all(void)
3322 struct dmar_drhd_unit *drhd;
3323 struct intel_iommu *iommu;
3325 for_each_active_iommu(iommu, drhd) {
3326 iommu->flush.flush_context(iommu, 0, 0, 0,
3327 DMA_CCMD_GLOBAL_INVL);
3328 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3329 DMA_TLB_GLOBAL_FLUSH);
3333 static int iommu_suspend(void)
3335 struct dmar_drhd_unit *drhd;
3336 struct intel_iommu *iommu = NULL;
3339 for_each_active_iommu(iommu, drhd) {
3340 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3342 if (!iommu->iommu_state)
3348 for_each_active_iommu(iommu, drhd) {
3349 iommu_disable_translation(iommu);
3351 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3353 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3354 readl(iommu->reg + DMAR_FECTL_REG);
3355 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3356 readl(iommu->reg + DMAR_FEDATA_REG);
3357 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3358 readl(iommu->reg + DMAR_FEADDR_REG);
3359 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3360 readl(iommu->reg + DMAR_FEUADDR_REG);
3362 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3367 for_each_active_iommu(iommu, drhd)
3368 kfree(iommu->iommu_state);
3373 static void iommu_resume(void)
3375 struct dmar_drhd_unit *drhd;
3376 struct intel_iommu *iommu = NULL;
3379 if (init_iommu_hw()) {
3381 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3383 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3387 for_each_active_iommu(iommu, drhd) {
3389 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3391 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3392 iommu->reg + DMAR_FECTL_REG);
3393 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3394 iommu->reg + DMAR_FEDATA_REG);
3395 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3396 iommu->reg + DMAR_FEADDR_REG);
3397 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3398 iommu->reg + DMAR_FEUADDR_REG);
3400 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3403 for_each_active_iommu(iommu, drhd)
3404 kfree(iommu->iommu_state);
3407 static struct syscore_ops iommu_syscore_ops = {
3408 .resume = iommu_resume,
3409 .suspend = iommu_suspend,
3412 static void __init init_iommu_pm_ops(void)
3414 register_syscore_ops(&iommu_syscore_ops);
3418 static inline void init_iommu_pm_ops(void) {}
3419 #endif /* CONFIG_PM */
3421 LIST_HEAD(dmar_rmrr_units);
3423 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3425 list_add(&rmrr->list, &dmar_rmrr_units);
3429 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3431 struct acpi_dmar_reserved_memory *rmrr;
3432 struct dmar_rmrr_unit *rmrru;
3434 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3438 rmrru->hdr = header;
3439 rmrr = (struct acpi_dmar_reserved_memory *)header;
3440 rmrru->base_address = rmrr->base_address;
3441 rmrru->end_address = rmrr->end_address;
3443 dmar_register_rmrr_unit(rmrru);
3448 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3450 struct acpi_dmar_reserved_memory *rmrr;
3453 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3454 ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3455 ((void *)rmrr) + rmrr->header.length,
3456 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3458 if (ret || (rmrru->devices_cnt == 0)) {
3459 list_del(&rmrru->list);
3465 static LIST_HEAD(dmar_atsr_units);
3467 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3469 struct acpi_dmar_atsr *atsr;
3470 struct dmar_atsr_unit *atsru;
3472 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3473 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3478 atsru->include_all = atsr->flags & 0x1;
3480 list_add(&atsru->list, &dmar_atsr_units);
3485 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3488 struct acpi_dmar_atsr *atsr;
3490 if (atsru->include_all)
3493 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3494 rc = dmar_parse_dev_scope((void *)(atsr + 1),
3495 (void *)atsr + atsr->header.length,
3496 &atsru->devices_cnt, &atsru->devices,
3498 if (rc || !atsru->devices_cnt) {
3499 list_del(&atsru->list);
3506 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3509 struct pci_bus *bus;
3510 struct acpi_dmar_atsr *atsr;
3511 struct dmar_atsr_unit *atsru;
3513 dev = pci_physfn(dev);
3515 list_for_each_entry(atsru, &dmar_atsr_units, list) {
3516 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3517 if (atsr->segment == pci_domain_nr(dev->bus))
3524 for (bus = dev->bus; bus; bus = bus->parent) {
3525 struct pci_dev *bridge = bus->self;
3527 if (!bridge || !pci_is_pcie(bridge) ||
3528 bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
3531 if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
3532 for (i = 0; i < atsru->devices_cnt; i++)
3533 if (atsru->devices[i] == bridge)
3539 if (atsru->include_all)
3545 int __init dmar_parse_rmrr_atsr_dev(void)
3547 struct dmar_rmrr_unit *rmrr, *rmrr_n;
3548 struct dmar_atsr_unit *atsr, *atsr_n;
3551 list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3552 ret = rmrr_parse_dev(rmrr);
3557 list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3558 ret = atsr_parse_dev(atsr);
3567 * Here we only respond to the action of a device being unbound from its driver.
3569 * Added device is not attached to its DMAR domain here yet. That will happen
3570 * when mapping the device to iova.
3572 static int device_notifier(struct notifier_block *nb,
3573 unsigned long action, void *data)
3575 struct device *dev = data;
3576 struct pci_dev *pdev = to_pci_dev(dev);
3577 struct dmar_domain *domain;
3579 if (iommu_no_mapping(dev))
3582 domain = find_domain(pdev);
3586 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3587 domain_remove_one_dev_info(domain, pdev);
3589 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3590 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3591 list_empty(&domain->devices))
3592 domain_exit(domain);
3598 static struct notifier_block device_nb = {
3599 .notifier_call = device_notifier,
3602 int __init intel_iommu_init(void)
3606 /* VT-d is required for a TXT/tboot launch, so enforce that */
3607 force_on = tboot_force_iommu();
3609 if (dmar_table_init()) {
3611 panic("tboot: Failed to initialize DMAR table\n");
3615 if (dmar_dev_scope_init() < 0) {
3617 panic("tboot: Failed to initialize DMAR device scope\n");
3621 if (no_iommu || dmar_disabled)
3624 if (iommu_init_mempool()) {
3626 panic("tboot: Failed to initialize iommu memory\n");
3630 if (list_empty(&dmar_rmrr_units))
3631 printk(KERN_INFO "DMAR: No RMRR found\n");
3633 if (list_empty(&dmar_atsr_units))
3634 printk(KERN_INFO "DMAR: No ATSR found\n");
3636 if (dmar_init_reserved_ranges()) {
3638 panic("tboot: Failed to reserve iommu ranges\n");
3642 init_no_remapping_devices();
3647 panic("tboot: Failed to initialize DMARs\n");
3648 printk(KERN_ERR "IOMMU: dmar init failed\n");
3649 put_iova_domain(&reserved_iova_list);
3650 iommu_exit_mempool();
3654 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3656 init_timer(&unmap_timer);
3657 #ifdef CONFIG_SWIOTLB
3660 dma_ops = &intel_dma_ops;
3662 init_iommu_pm_ops();
3664 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3666 bus_register_notifier(&pci_bus_type, &device_nb);
3671 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3672 struct pci_dev *pdev)
3674 struct pci_dev *tmp, *parent;
3676 if (!iommu || !pdev)
3679 /* dependent device detach */
3680 tmp = pci_find_upstream_pcie_bridge(pdev);
3681 /* Secondary interface's bus number and devfn 0 */
3683 parent = pdev->bus->self;
3684 while (parent != tmp) {
3685 iommu_detach_dev(iommu, parent->bus->number,
3687 parent = parent->bus->self;
3689 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3690 iommu_detach_dev(iommu,
3691 tmp->subordinate->number, 0);
3692 else /* this is a legacy PCI bridge */
3693 iommu_detach_dev(iommu, tmp->bus->number,
3698 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3699 struct pci_dev *pdev)
3701 struct device_domain_info *info;
3702 struct intel_iommu *iommu;
3703 unsigned long flags;
3705 struct list_head *entry, *tmp;
3707 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3712 spin_lock_irqsave(&device_domain_lock, flags);
3713 list_for_each_safe(entry, tmp, &domain->devices) {
3714 info = list_entry(entry, struct device_domain_info, link);
3715 if (info->segment == pci_domain_nr(pdev->bus) &&
3716 info->bus == pdev->bus->number &&
3717 info->devfn == pdev->devfn) {
3718 list_del(&info->link);
3719 list_del(&info->global);
3721 info->dev->dev.archdata.iommu = NULL;
3722 spin_unlock_irqrestore(&device_domain_lock, flags);
3724 iommu_disable_dev_iotlb(info);
3725 iommu_detach_dev(iommu, info->bus, info->devfn);
3726 iommu_detach_dependent_devices(iommu, pdev);
3727 free_devinfo_mem(info);
3729 spin_lock_irqsave(&device_domain_lock, flags);
3737 /* if there are no other devices under the same iommu
3738 * owned by this domain, clear this iommu in iommu_bmp,
3739 * update the iommu count and coherency
3741 if (iommu == device_to_iommu(info->segment, info->bus,
3746 spin_unlock_irqrestore(&device_domain_lock, flags);
3749 unsigned long tmp_flags;
3750 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3751 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3752 domain->iommu_count--;
3753 domain_update_iommu_cap(domain);
3754 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3756 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3757 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3758 spin_lock_irqsave(&iommu->lock, tmp_flags);
3759 clear_bit(domain->id, iommu->domain_ids);
3760 iommu->domains[domain->id] = NULL;
3761 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3766 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3768 struct device_domain_info *info;
3769 struct intel_iommu *iommu;
3770 unsigned long flags1, flags2;
3772 spin_lock_irqsave(&device_domain_lock, flags1);
3773 while (!list_empty(&domain->devices)) {
3774 info = list_entry(domain->devices.next,
3775 struct device_domain_info, link);
3776 list_del(&info->link);
3777 list_del(&info->global);
3779 info->dev->dev.archdata.iommu = NULL;
3781 spin_unlock_irqrestore(&device_domain_lock, flags1);
3783 iommu_disable_dev_iotlb(info);
3784 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3785 iommu_detach_dev(iommu, info->bus, info->devfn);
3786 iommu_detach_dependent_devices(iommu, info->dev);
3788 /* clear this iommu in iommu_bmp, update iommu count and capabilities
3791 spin_lock_irqsave(&domain->iommu_lock, flags2);
3792 if (test_and_clear_bit(iommu->seq_id,
3793 &domain->iommu_bmp)) {
3794 domain->iommu_count--;
3795 domain_update_iommu_cap(domain);
3797 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3799 free_devinfo_mem(info);
3800 spin_lock_irqsave(&device_domain_lock, flags1);
3802 spin_unlock_irqrestore(&device_domain_lock, flags1);
3805 /* domain id for virtual machine, it won't be set in context */
3806 static unsigned long vm_domid;
3808 static struct dmar_domain *iommu_alloc_vm_domain(void)
3810 struct dmar_domain *domain;
3812 domain = alloc_domain_mem();
3816 domain->id = vm_domid++;
3818 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3819 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3824 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3828 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3829 spin_lock_init(&domain->iommu_lock);
3831 domain_reserve_special_ranges(domain);
3833 /* calculate AGAW */
3834 domain->gaw = guest_width;
3835 adjust_width = guestwidth_to_adjustwidth(guest_width);
3836 domain->agaw = width_to_agaw(adjust_width);
3838 INIT_LIST_HEAD(&domain->devices);
3840 domain->iommu_count = 0;
3841 domain->iommu_coherency = 0;
3842 domain->iommu_snooping = 0;
3843 domain->iommu_superpage = 0;
3844 domain->max_addr = 0;
3847 /* always allocate the top pgd */
3848 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3851 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3855 static void iommu_free_vm_domain(struct dmar_domain *domain)
3857 unsigned long flags;
3858 struct dmar_drhd_unit *drhd;
3859 struct intel_iommu *iommu;
3861 unsigned long ndomains;
3863 for_each_drhd_unit(drhd) {
3866 iommu = drhd->iommu;
3868 ndomains = cap_ndoms(iommu->cap);
3869 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3870 if (iommu->domains[i] == domain) {
3871 spin_lock_irqsave(&iommu->lock, flags);
3872 clear_bit(i, iommu->domain_ids);
3873 iommu->domains[i] = NULL;
3874 spin_unlock_irqrestore(&iommu->lock, flags);
3881 static void vm_domain_exit(struct dmar_domain *domain)
3883 /* Domain 0 is reserved, so don't process it */
3887 vm_domain_remove_all_dev_info(domain);
3889 put_iova_domain(&domain->iovad);
3892 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3894 /* free page tables */
3895 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3897 iommu_free_vm_domain(domain);
3898 free_domain_mem(domain);
3901 static int intel_iommu_domain_init(struct iommu_domain *domain)
3903 struct dmar_domain *dmar_domain;
3905 dmar_domain = iommu_alloc_vm_domain();
3908 "intel_iommu_domain_init: dmar_domain == NULL\n");
3911 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3913 "intel_iommu_domain_init() failed\n");
3914 vm_domain_exit(dmar_domain);
3917 domain_update_iommu_cap(dmar_domain);
3918 domain->priv = dmar_domain;
3923 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3925 struct dmar_domain *dmar_domain = domain->priv;
3927 domain->priv = NULL;
3928 vm_domain_exit(dmar_domain);
3931 static int intel_iommu_attach_device(struct iommu_domain *domain,
3934 struct dmar_domain *dmar_domain = domain->priv;
3935 struct pci_dev *pdev = to_pci_dev(dev);
3936 struct intel_iommu *iommu;
3939 /* normally pdev is not mapped */
3940 if (unlikely(domain_context_mapped(pdev))) {
3941 struct dmar_domain *old_domain;
3943 old_domain = find_domain(pdev);
3945 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3946 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3947 domain_remove_one_dev_info(old_domain, pdev);
3949 domain_remove_dev_info(old_domain);
3953 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3958 /* check if this iommu agaw is sufficient for max mapped address */
3959 addr_width = agaw_to_width(iommu->agaw);
3960 if (addr_width > cap_mgaw(iommu->cap))
3961 addr_width = cap_mgaw(iommu->cap);
3963 if (dmar_domain->max_addr > (1LL << addr_width)) {
3964 printk(KERN_ERR "%s: iommu width (%d) is not "
3965 "sufficient for the mapped address (%llx)\n",
3966 __func__, addr_width, dmar_domain->max_addr);
3969 dmar_domain->gaw = addr_width;
3972 * Knock out extra levels of page tables if necessary
3974 while (iommu->agaw < dmar_domain->agaw) {
3975 struct dma_pte *pte;
3977 pte = dmar_domain->pgd;
3978 if (dma_pte_present(pte)) {
3979 dmar_domain->pgd = (struct dma_pte *)
3980 phys_to_virt(dma_pte_addr(pte));
3981 free_pgtable_page(pte);
3983 dmar_domain->agaw--;
3986 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3989 static void intel_iommu_detach_device(struct iommu_domain *domain,
3992 struct dmar_domain *dmar_domain = domain->priv;
3993 struct pci_dev *pdev = to_pci_dev(dev);
3995 domain_remove_one_dev_info(dmar_domain, pdev);
3998 static int intel_iommu_map(struct iommu_domain *domain,
3999 unsigned long iova, phys_addr_t hpa,
4000 size_t size, int iommu_prot)
4002 struct dmar_domain *dmar_domain = domain->priv;
4007 if (iommu_prot & IOMMU_READ)
4008 prot |= DMA_PTE_READ;
4009 if (iommu_prot & IOMMU_WRITE)
4010 prot |= DMA_PTE_WRITE;
4011 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4012 prot |= DMA_PTE_SNP;
4014 max_addr = iova + size;
4015 if (dmar_domain->max_addr < max_addr) {
4018 /* check if minimum agaw is sufficient for mapped address */
4019 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4020 if (end < max_addr) {
4021 printk(KERN_ERR "%s: iommu width (%d) is not "
4022 "sufficient for the mapped address (%llx)\n",
4023 __func__, dmar_domain->gaw, max_addr);
4026 dmar_domain->max_addr = max_addr;
4028 /* Round up size to next multiple of PAGE_SIZE, if it and
4029 the low bits of hpa would take us onto the next page */
4030 size = aligned_nrpages(hpa, size);
4031 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4032 hpa >> VTD_PAGE_SHIFT, size, prot);
4036 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4037 unsigned long iova, size_t size)
4039 struct dmar_domain *dmar_domain = domain->priv;
4042 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4043 (iova + size - 1) >> VTD_PAGE_SHIFT);
4045 if (dmar_domain->max_addr == iova + size)
4046 dmar_domain->max_addr = iova;
4048 return PAGE_SIZE << order;
4051 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4054 struct dmar_domain *dmar_domain = domain->priv;
4055 struct dma_pte *pte;
4058 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4060 phys = dma_pte_addr(pte);
4065 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4068 struct dmar_domain *dmar_domain = domain->priv;
4070 if (cap == IOMMU_CAP_CACHE_COHERENCY)
4071 return dmar_domain->iommu_snooping;
4072 if (cap == IOMMU_CAP_INTR_REMAP)
4073 return intr_remapping_enabled;
4078 static struct iommu_ops intel_iommu_ops = {
4079 .domain_init = intel_iommu_domain_init,
4080 .domain_destroy = intel_iommu_domain_destroy,
4081 .attach_dev = intel_iommu_attach_device,
4082 .detach_dev = intel_iommu_detach_device,
4083 .map = intel_iommu_map,
4084 .unmap = intel_iommu_unmap,
4085 .iova_to_phys = intel_iommu_iova_to_phys,
4086 .domain_has_cap = intel_iommu_domain_has_cap,
4087 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
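/*
 * Illustrative sketch (editorial, not built): how the generic IOMMU API
 * reaches the intel_iommu_ops callbacks above after bus_set_iommu() registers
 * them in intel_iommu_init(). The addresses and sizes are arbitrary examples;
 * real code would also unmap, detach and free the domain when done.
 */
#if 0
static int example_map_one_page(struct device *dev)
{
	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
	int ret;

	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> intel_iommu_attach_device() */
	if (!ret)
		ret = iommu_map(dom, 0x100000, 0x200000,	/* -> intel_iommu_map() */
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);

	return ret;
}
#endif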
4090 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
4093 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it anyway.
4096 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4099 /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
4100 if (dev->revision == 0x07) {
4101 printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4106 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4109 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
4110 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
4111 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
4112 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
4113 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
4114 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
4115 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
4116 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
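/*
 * Illustrative sketch (editorial, not built): decoding a hypothetical GGC
 * value with the masks above. 0x0958 has the 0x9 pattern in bits 8-11
 * (GGC_MEMORY_SIZE_2M_VT), i.e. 2M of stolen memory with the VT bit set,
 * so the quirk below would leave graphics translation enabled.
 */
#if 0
static bool example_ggc_vt_enabled(unsigned short ggc)
{
	return (ggc & GGC_MEMORY_VT_ENABLED) != 0;	/* true for ggc == 0x0958 */
}
#endif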
4118 static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4122 if (pci_read_config_word(dev, GGC, &ggc))
4125 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4126 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4128 } else if (dmar_map_gfx) {
4129 /* we have to ensure the gfx device is idle before we flush */
4130 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4131 intel_iommu_strict = 1;
4134 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4135 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4136 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4137 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4139 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4140 ISOCH DMAR unit for the Azalia sound device, but not give it any
4141 TLB entries, which causes it to deadlock. Check for that. We do
4142 this in a function called from init_dmars(), instead of in a PCI
4143 quirk, because we don't want to print the obnoxious "BIOS broken"
4144 message if VT-d is actually disabled.
4146 static void __init check_tylersburg_isoch(void)
4148 struct pci_dev *pdev;
4149 uint32_t vtisochctrl;
4151 /* If there's no Azalia in the system anyway, forget it. */
4152 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4157 /* System Management Registers. Might be hidden, in which case
4158 we can't do the sanity check. But that's OK, because the
4159 known-broken BIOSes _don't_ actually hide it, so far. */
4160 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4164 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4171 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4172 if (vtisochctrl & 1)
4175 /* Drop all bits other than the number of TLB entries */
4176 vtisochctrl &= 0x1c;
4178 /* If we have the recommended number of TLB entries (16), fine. */
4179 if (vtisochctrl == 0x10)
4182 /* Zero TLB entries? You get to ride the short bus to school. */
4184 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4185 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4186 dmi_get_system_info(DMI_BIOS_VENDOR),
4187 dmi_get_system_info(DMI_BIOS_VERSION),
4188 dmi_get_system_info(DMI_PRODUCT_VERSION));
4189 iommu_identity_mapping |= IDENTMAP_AZALIA;
4193 printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",