2 * Copyright (c) 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/timer.h>
36 #include <linux/iova.h>
37 #include <linux/iommu.h>
38 #include <linux/intel-iommu.h>
39 #include <linux/sysdev.h>
40 #include <linux/dmi.h>
41 #include <asm/cacheflush.h>
42 #include <asm/iommu.h>
45 #define ROOT_SIZE VTD_PAGE_SIZE
46 #define CONTEXT_SIZE VTD_PAGE_SIZE
48 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
49 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
51 #define IOAPIC_RANGE_START (0xfee00000)
52 #define IOAPIC_RANGE_END (0xfeefffff)
53 #define IOVA_START_ADDR (0x1000)
55 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
57 #define MAX_AGAW_WIDTH 64
59 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
60 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
62 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
63 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
64 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
65 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
66 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
68 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
69 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
70 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
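/* IOVA_PFN() converts a bus address to an MM page frame number;
 * DMA_32BIT_PFN and DMA_64BIT_PFN are the last page frames addressable
 * through a 32-bit and a 64-bit DMA mask respectively. */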
73 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
74 are never going to work. */
75 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
77 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
80 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
82 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
84 static inline unsigned long page_to_dma_pfn(struct page *pg)
86 return mm_to_dma_pfn(page_to_pfn(pg));
88 static inline unsigned long virt_to_dma_pfn(void *p)
90 return page_to_dma_pfn(virt_to_page(p));
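/* On x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12, so these conversions
 * are identity operations; the helpers only matter on configurations where
 * MM pages are larger than the 4KiB VT-d page. */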
93 /* global iommu list, set NULL for ignored DMAR units */
94 static struct intel_iommu **g_iommus;
96 static int rwbf_quirk;
101 * 12-63: Context Ptr (12 - (haw-1))
108 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
109 static inline bool root_present(struct root_entry *root)
111 return (root->val & 1);
113 static inline void set_root_present(struct root_entry *root)
117 static inline void set_root_value(struct root_entry *root, unsigned long value)
119 root->val |= value & VTD_PAGE_MASK;
122 static inline struct context_entry *
123 get_context_addr_from_root(struct root_entry *root)
125 return (struct context_entry *)
126 (root_present(root)?phys_to_virt(
127 root->val & VTD_PAGE_MASK) :
134 * 1: fault processing disable
135 * 2-3: translation type
136 * 12-63: address space root
142 struct context_entry {
147 static inline bool context_present(struct context_entry *context)
149 return (context->lo & 1);
151 static inline void context_set_present(struct context_entry *context)
156 static inline void context_set_fault_enable(struct context_entry *context)
158 context->lo &= (((u64)-1) << 2) | 1;
161 static inline void context_set_translation_type(struct context_entry *context,
164 context->lo &= (((u64)-1) << 4) | 3;
165 context->lo |= (value & 3) << 2;
168 static inline void context_set_address_root(struct context_entry *context,
171 context->lo |= value & VTD_PAGE_MASK;
174 static inline void context_set_address_width(struct context_entry *context,
177 context->hi |= value & 7;
180 static inline void context_set_domain_id(struct context_entry *context,
183 context->hi |= (value & ((1 << 16) - 1)) << 8;
186 static inline void context_clear_entry(struct context_entry *context)
 * 12-63: Host physical address
205 static inline void dma_clear_pte(struct dma_pte *pte)
210 static inline void dma_set_pte_readable(struct dma_pte *pte)
212 pte->val |= DMA_PTE_READ;
215 static inline void dma_set_pte_writable(struct dma_pte *pte)
217 pte->val |= DMA_PTE_WRITE;
220 static inline void dma_set_pte_snp(struct dma_pte *pte)
222 pte->val |= DMA_PTE_SNP;
225 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
227 pte->val = (pte->val & ~3) | (prot & 3);
230 static inline u64 dma_pte_addr(struct dma_pte *pte)
#ifdef CONFIG_64BIT
return pte->val & VTD_PAGE_MASK;
#else
/* Must have a full atomic 64-bit read */
return __cmpxchg64(pte, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
240 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
242 pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
245 static inline bool dma_pte_present(struct dma_pte *pte)
247 return (pte->val & 3) != 0;
250 static inline int first_pte_in_page(struct dma_pte *pte)
252 return !((unsigned long)pte & ~VTD_PAGE_MASK);
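/* first_pte_in_page() is true when a PTE sits at the start of a page-table
 * page; the mapping and clearing loops below use it to batch one cache
 * flush per PTE page instead of one per PTE. */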
 * This domain is a statically identity-mapped domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
261 static struct dmar_domain *si_domain;
262 static int hw_pass_through = 1;
264 /* devices under the same p2p bridge are owned in one domain */
265 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
270 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
/* si_domain contains multiple devices */
273 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
276 int id; /* domain id */
277 unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
279 struct list_head devices; /* all devices' list */
280 struct iova_domain iovad; /* iova's that belong to this domain */
282 struct dma_pte *pgd; /* virtual address */
283 int gaw; /* max guest address width */
285 /* adjusted guest address width, 0 is level 2 30-bit */
288 int flags; /* flags to find out type of domain */
290 int iommu_coherency;/* indicate coherency of iommu access */
291 int iommu_snooping; /* indicate snooping control feature*/
292 int iommu_count; /* reference count of iommu */
293 spinlock_t iommu_lock; /* protect iommu set in domain */
294 u64 max_addr; /* maximum mapped address */
297 /* PCI domain-device relationship */
298 struct device_domain_info {
299 struct list_head link; /* link to domain siblings */
300 struct list_head global; /* link to global list */
301 int segment; /* PCI domain */
302 u8 bus; /* PCI bus number */
303 u8 devfn; /* PCI devfn number */
304 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
305 struct intel_iommu *iommu; /* IOMMU used by this device */
306 struct dmar_domain *domain; /* pointer to domain */
309 static void flush_unmaps_timeout(unsigned long data);
311 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
313 #define HIGH_WATER_MARK 250
314 struct deferred_flush_tables {
316 struct iova *iova[HIGH_WATER_MARK];
317 struct dmar_domain *domain[HIGH_WATER_MARK];
320 static struct deferred_flush_tables *deferred_flush;
322 /* bitmap for indexing intel_iommus */
323 static int g_num_of_iommus;
325 static DEFINE_SPINLOCK(async_umap_flush_lock);
326 static LIST_HEAD(unmaps_to_do);
329 static long list_size;
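/* Deferred unmap machinery: instead of flushing the IOTLB on every unmap,
 * freed IOVAs are queued per-iommu in deferred_flush[] and released from
 * flush_unmaps(), driven by unmap_timer or when HIGH_WATER_MARK entries
 * accumulate; intel_iommu_strict disables this batching. */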
331 static void domain_remove_dev_info(struct dmar_domain *domain);
333 #ifdef CONFIG_DMAR_DEFAULT_ON
334 int dmar_disabled = 0;
336 int dmar_disabled = 1;
337 #endif /*CONFIG_DMAR_DEFAULT_ON*/
339 static int __initdata dmar_map_gfx = 1;
340 static int dmar_forcedac;
341 static int intel_iommu_strict;
343 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
344 static DEFINE_SPINLOCK(device_domain_lock);
345 static LIST_HEAD(device_domain_list);
347 static struct iommu_ops intel_iommu_ops;
349 static int __init intel_iommu_setup(char *str)
354 if (!strncmp(str, "on", 2)) {
356 printk(KERN_INFO "Intel-IOMMU: enabled\n");
357 } else if (!strncmp(str, "off", 3)) {
359 printk(KERN_INFO "Intel-IOMMU: disabled\n");
360 } else if (!strncmp(str, "igfx_off", 8)) {
363 "Intel-IOMMU: disable GFX device mapping\n");
364 } else if (!strncmp(str, "forcedac", 8)) {
366 "Intel-IOMMU: Forcing DAC for PCI devices\n");
368 } else if (!strncmp(str, "strict", 6)) {
370 "Intel-IOMMU: disable batched IOTLB flush\n");
371 intel_iommu_strict = 1;
374 str += strcspn(str, ",");
380 __setup("intel_iommu=", intel_iommu_setup);
382 static struct kmem_cache *iommu_domain_cache;
383 static struct kmem_cache *iommu_devinfo_cache;
384 static struct kmem_cache *iommu_iova_cache;
386 static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
391 /* trying to avoid low memory issues */
392 flags = current->flags & PF_MEMALLOC;
393 current->flags |= PF_MEMALLOC;
394 vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
395 current->flags &= (~PF_MEMALLOC | flags);
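/* Setting PF_MEMALLOC lets this GFP_ATOMIC allocation dip into the
 * emergency reserves; the AND/OR above restores the caller's original
 * PF_MEMALLOC bit saved in 'flags'. */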
400 static inline void *alloc_pgtable_page(void)
405 /* trying to avoid low memory issues */
406 flags = current->flags & PF_MEMALLOC;
407 current->flags |= PF_MEMALLOC;
408 vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
409 current->flags &= (~PF_MEMALLOC | flags);
413 static inline void free_pgtable_page(void *vaddr)
415 free_page((unsigned long)vaddr);
418 static inline void *alloc_domain_mem(void)
420 return iommu_kmem_cache_alloc(iommu_domain_cache);
423 static void free_domain_mem(void *vaddr)
425 kmem_cache_free(iommu_domain_cache, vaddr);
428 static inline void * alloc_devinfo_mem(void)
430 return iommu_kmem_cache_alloc(iommu_devinfo_cache);
433 static inline void free_devinfo_mem(void *vaddr)
435 kmem_cache_free(iommu_devinfo_cache, vaddr);
438 struct iova *alloc_iova_mem(void)
440 return iommu_kmem_cache_alloc(iommu_iova_cache);
443 void free_iova_mem(struct iova *iova)
445 kmem_cache_free(iommu_iova_cache, iova);
449 static inline int width_to_agaw(int width);
451 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
456 sagaw = cap_sagaw(iommu->cap);
457 for (agaw = width_to_agaw(max_gaw);
459 if (test_bit(agaw, &sagaw))
467 * Calculate max SAGAW for each iommu.
469 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
471 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
475 * calculate agaw for each iommu.
 * "SAGAW" may be different across iommus; use a default agaw, and
 * fall back to a smaller supported agaw for iommus that don't support
 * the default agaw.
479 int iommu_calculate_agaw(struct intel_iommu *iommu)
481 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
/* This function only returns a single iommu in a domain */
485 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
489 /* si_domain and vm domain should not get here. */
490 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
491 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
493 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
494 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
497 return g_iommus[iommu_id];
500 static void domain_update_iommu_coherency(struct dmar_domain *domain)
504 domain->iommu_coherency = 1;
506 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
507 for (; i < g_num_of_iommus; ) {
508 if (!ecap_coherent(g_iommus[i]->ecap)) {
509 domain->iommu_coherency = 0;
512 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
516 static void domain_update_iommu_snooping(struct dmar_domain *domain)
520 domain->iommu_snooping = 1;
522 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
523 for (; i < g_num_of_iommus; ) {
524 if (!ecap_sc_support(g_iommus[i]->ecap)) {
525 domain->iommu_snooping = 0;
528 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
532 /* Some capabilities may be different across iommus */
533 static void domain_update_iommu_cap(struct dmar_domain *domain)
535 domain_update_iommu_coherency(domain);
536 domain_update_iommu_snooping(domain);
539 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
541 struct dmar_drhd_unit *drhd = NULL;
544 for_each_drhd_unit(drhd) {
547 if (segment != drhd->segment)
550 for (i = 0; i < drhd->devices_cnt; i++) {
551 if (drhd->devices[i] &&
552 drhd->devices[i]->bus->number == bus &&
553 drhd->devices[i]->devfn == devfn)
555 if (drhd->devices[i] &&
556 drhd->devices[i]->subordinate &&
557 drhd->devices[i]->subordinate->number <= bus &&
558 drhd->devices[i]->subordinate->subordinate >= bus)
562 if (drhd->include_all)
569 static void domain_flush_cache(struct dmar_domain *domain,
570 void *addr, int size)
572 if (!domain->iommu_coherency)
573 clflush_cache_range(addr, size);
576 /* Gets context entry for a given bus and devfn */
577 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
580 struct root_entry *root;
581 struct context_entry *context;
582 unsigned long phy_addr;
585 spin_lock_irqsave(&iommu->lock, flags);
586 root = &iommu->root_entry[bus];
587 context = get_context_addr_from_root(root);
589 context = (struct context_entry *)alloc_pgtable_page();
591 spin_unlock_irqrestore(&iommu->lock, flags);
594 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
595 phy_addr = virt_to_phys((void *)context);
596 set_root_value(root, phy_addr);
597 set_root_present(root);
598 __iommu_flush_cache(iommu, root, sizeof(*root));
600 spin_unlock_irqrestore(&iommu->lock, flags);
601 return &context[devfn];
604 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
606 struct root_entry *root;
607 struct context_entry *context;
611 spin_lock_irqsave(&iommu->lock, flags);
612 root = &iommu->root_entry[bus];
613 context = get_context_addr_from_root(root);
618 ret = context_present(&context[devfn]);
620 spin_unlock_irqrestore(&iommu->lock, flags);
624 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
626 struct root_entry *root;
627 struct context_entry *context;
630 spin_lock_irqsave(&iommu->lock, flags);
631 root = &iommu->root_entry[bus];
632 context = get_context_addr_from_root(root);
634 context_clear_entry(&context[devfn]);
635 __iommu_flush_cache(iommu, &context[devfn], \
638 spin_unlock_irqrestore(&iommu->lock, flags);
641 static void free_context_table(struct intel_iommu *iommu)
643 struct root_entry *root;
646 struct context_entry *context;
648 spin_lock_irqsave(&iommu->lock, flags);
649 if (!iommu->root_entry) {
652 for (i = 0; i < ROOT_ENTRY_NR; i++) {
653 root = &iommu->root_entry[i];
654 context = get_context_addr_from_root(root);
656 free_pgtable_page(context);
658 free_pgtable_page(iommu->root_entry);
659 iommu->root_entry = NULL;
661 spin_unlock_irqrestore(&iommu->lock, flags);
664 /* page table handling */
665 #define LEVEL_STRIDE (9)
666 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
668 static inline int agaw_to_level(int agaw)
673 static inline int agaw_to_width(int agaw)
675 return 30 + agaw * LEVEL_STRIDE;
679 static inline int width_to_agaw(int width)
681 return (width - 30) / LEVEL_STRIDE;
684 static inline unsigned int level_to_offset_bits(int level)
686 return (level - 1) * LEVEL_STRIDE;
689 static inline int pfn_level_offset(unsigned long pfn, int level)
691 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
694 static inline unsigned long level_mask(int level)
696 return -1UL << level_to_offset_bits(level);
699 static inline unsigned long level_size(int level)
701 return 1UL << level_to_offset_bits(level);
704 static inline unsigned long align_to_level(unsigned long pfn, int level)
706 return (pfn + level_size(level) - 1) & level_mask(level);
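/* AGAW arithmetic: agaw 0 is a 2-level, 30-bit table and each extra level
 * adds LEVEL_STRIDE (9) bits, so e.g. a 48-bit adjusted guest address width
 * gives agaw (48 - 30) / 9 = 2 and a 4-level page table. */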
709 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
712 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
713 struct dma_pte *parent, *pte = NULL;
714 int level = agaw_to_level(domain->agaw);
717 BUG_ON(!domain->pgd);
718 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
719 parent = domain->pgd;
724 offset = pfn_level_offset(pfn, level);
725 pte = &parent[offset];
729 if (!dma_pte_present(pte)) {
732 tmp_page = alloc_pgtable_page();
737 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
738 pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
739 if (cmpxchg64(&pte->val, 0ULL, pteval)) {
740 /* Someone else set it while we were thinking; use theirs. */
741 free_pgtable_page(tmp_page);
744 domain_flush_cache(domain, pte, sizeof(*pte));
747 parent = phys_to_virt(dma_pte_addr(pte));
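/* Descend one level per iteration, allocating missing page-table pages on
 * the way down; the lock-free cmpxchg64 above resolves races with a
 * concurrent walker mapping the same range. */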
754 /* return address's pte at specific level */
755 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
759 struct dma_pte *parent, *pte = NULL;
760 int total = agaw_to_level(domain->agaw);
763 parent = domain->pgd;
764 while (level <= total) {
765 offset = pfn_level_offset(pfn, total);
766 pte = &parent[offset];
770 if (!dma_pte_present(pte))
772 parent = phys_to_virt(dma_pte_addr(pte));
/* clear last level pte; a tlb flush should follow */
779 static void dma_pte_clear_range(struct dmar_domain *domain,
780 unsigned long start_pfn,
781 unsigned long last_pfn)
783 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
784 struct dma_pte *first_pte, *pte;
786 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
787 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
789 /* we don't need lock here; nobody else touches the iova range */
790 while (start_pfn <= last_pfn) {
791 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
793 start_pfn = align_to_level(start_pfn + 1, 2);
800 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
802 domain_flush_cache(domain, first_pte,
803 (void *)pte - (void *)first_pte);
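/* The inner loop clears contiguous leaf PTEs and issues a single cache
 * flush per PTE page (see first_pte_in_page()); the caller is expected to
 * follow up with an IOTLB flush. */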
807 /* free page table pages. last level pte should already be cleared */
808 static void dma_pte_free_pagetable(struct dmar_domain *domain,
809 unsigned long start_pfn,
810 unsigned long last_pfn)
812 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
813 struct dma_pte *first_pte, *pte;
814 int total = agaw_to_level(domain->agaw);
818 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
819 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
821 /* We don't need lock here; nobody else touches the iova range */
823 while (level <= total) {
824 tmp = align_to_level(start_pfn, level);
826 /* If we can't even clear one PTE at this level, we're done */
827 if (tmp + level_size(level) - 1 > last_pfn)
830 while (tmp + level_size(level) - 1 <= last_pfn) {
831 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
833 tmp = align_to_level(tmp + 1, level + 1);
837 if (dma_pte_present(pte)) {
838 free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
842 tmp += level_size(level);
843 } while (!first_pte_in_page(pte) &&
844 tmp + level_size(level) - 1 <= last_pfn);
846 domain_flush_cache(domain, first_pte,
847 (void *)pte - (void *)first_pte);
853 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
854 free_pgtable_page(domain->pgd);
860 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
862 struct root_entry *root;
865 root = (struct root_entry *)alloc_pgtable_page();
869 __iommu_flush_cache(iommu, root, ROOT_SIZE);
871 spin_lock_irqsave(&iommu->lock, flags);
872 iommu->root_entry = root;
873 spin_unlock_irqrestore(&iommu->lock, flags);
878 static void iommu_set_root_entry(struct intel_iommu *iommu)
884 addr = iommu->root_entry;
886 spin_lock_irqsave(&iommu->register_lock, flag);
887 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
889 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
892 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
893 readl, (sts & DMA_GSTS_RTPS), sts);
895 spin_unlock_irqrestore(&iommu->register_lock, flag);
898 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
903 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
906 spin_lock_irqsave(&iommu->register_lock, flag);
907 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
910 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
911 readl, (!(val & DMA_GSTS_WBFS)), val);
913 spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines whether we need a write buffer flush */
917 static void __iommu_flush_context(struct intel_iommu *iommu,
918 u16 did, u16 source_id, u8 function_mask,
925 case DMA_CCMD_GLOBAL_INVL:
926 val = DMA_CCMD_GLOBAL_INVL;
928 case DMA_CCMD_DOMAIN_INVL:
929 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
931 case DMA_CCMD_DEVICE_INVL:
932 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
933 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
940 spin_lock_irqsave(&iommu->register_lock, flag);
941 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure hardware completes it */
944 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
945 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
947 spin_unlock_irqrestore(&iommu->register_lock, flag);
/* return value determines whether we need a write buffer flush */
951 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
952 u64 addr, unsigned int size_order, u64 type)
954 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
955 u64 val = 0, val_iva = 0;
959 case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need to set IVA_REG */
961 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
963 case DMA_TLB_DSI_FLUSH:
964 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
966 case DMA_TLB_PSI_FLUSH:
967 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
968 /* Note: always flush non-leaf currently */
969 val_iva = size_order | addr;
974 /* Note: set drain read/write */
 * This is probably just to be extra safe; it looks like we can
 * ignore it without any impact.
980 if (cap_read_drain(iommu->cap))
981 val |= DMA_TLB_READ_DRAIN;
983 if (cap_write_drain(iommu->cap))
984 val |= DMA_TLB_WRITE_DRAIN;
986 spin_lock_irqsave(&iommu->register_lock, flag);
987 /* Note: Only uses first TLB reg currently */
989 dmar_writeq(iommu->reg + tlb_offset, val_iva);
990 dmar_writeq(iommu->reg + tlb_offset + 8, val);
/* Make sure hardware completes it */
993 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
994 dmar_readq, (!(val & DMA_TLB_IVT)), val);
996 spin_unlock_irqrestore(&iommu->register_lock, flag);
998 /* check IOTLB invalidation granularity */
999 if (DMA_TLB_IAIG(val) == 0)
1000 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1001 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1002 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1003 (unsigned long long)DMA_TLB_IIRG(type),
1004 (unsigned long long)DMA_TLB_IAIG(val));
1007 static struct device_domain_info *iommu_support_dev_iotlb(
1008 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1011 unsigned long flags;
1012 struct device_domain_info *info;
1013 struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1015 if (!ecap_dev_iotlb_support(iommu->ecap))
1021 spin_lock_irqsave(&device_domain_lock, flags);
1022 list_for_each_entry(info, &domain->devices, link)
1023 if (info->bus == bus && info->devfn == devfn) {
1027 spin_unlock_irqrestore(&device_domain_lock, flags);
1029 if (!found || !info->dev)
1032 if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1035 if (!dmar_find_matched_atsr_unit(info->dev))
1038 info->iommu = iommu;
1043 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1048 pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1051 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1053 if (!info->dev || !pci_ats_enabled(info->dev))
1056 pci_disable_ats(info->dev);
1059 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1060 u64 addr, unsigned mask)
1063 unsigned long flags;
1064 struct device_domain_info *info;
1066 spin_lock_irqsave(&device_domain_lock, flags);
1067 list_for_each_entry(info, &domain->devices, link) {
1068 if (!info->dev || !pci_ats_enabled(info->dev))
1071 sid = info->bus << 8 | info->devfn;
1072 qdep = pci_ats_queue_depth(info->dev);
1073 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1075 spin_unlock_irqrestore(&device_domain_lock, flags);
1078 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1079 unsigned long pfn, unsigned int pages)
1081 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1082 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
 * Fallback to domain selective flush if no PSI support or the size
 * is too big.
 * PSI requires the page size to be 2 ^ x, and the base address to be
 * naturally aligned to the size
1092 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1093 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1096 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1100 * In caching mode, domain ID 0 is reserved for non-present to present
1101 * mapping flush. Device IOTLB doesn't need to be flushed in this case.
1103 if (!cap_caching_mode(iommu->cap) || did)
1104 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
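/* Example: pages == 8 gives mask == 3, so the PSI (and the matching
 * device-IOTLB flush) covers eight naturally aligned 4KiB VT-d pages in a
 * single operation. */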
1107 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1110 unsigned long flags;
1112 spin_lock_irqsave(&iommu->register_lock, flags);
1113 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1114 pmen &= ~DMA_PMEN_EPM;
1115 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1117 /* wait for the protected region status bit to clear */
1118 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1119 readl, !(pmen & DMA_PMEN_PRS), pmen);
1121 spin_unlock_irqrestore(&iommu->register_lock, flags);
1124 static int iommu_enable_translation(struct intel_iommu *iommu)
1127 unsigned long flags;
1129 spin_lock_irqsave(&iommu->register_lock, flags);
1130 iommu->gcmd |= DMA_GCMD_TE;
1131 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1134 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1135 readl, (sts & DMA_GSTS_TES), sts);
1137 spin_unlock_irqrestore(&iommu->register_lock, flags);
1141 static int iommu_disable_translation(struct intel_iommu *iommu)
1146 spin_lock_irqsave(&iommu->register_lock, flag);
1147 iommu->gcmd &= ~DMA_GCMD_TE;
1148 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware completes it */
1151 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1152 readl, (!(sts & DMA_GSTS_TES)), sts);
1154 spin_unlock_irqrestore(&iommu->register_lock, flag);
1159 static int iommu_init_domains(struct intel_iommu *iommu)
1161 unsigned long ndomains;
1162 unsigned long nlongs;
1164 ndomains = cap_ndoms(iommu->cap);
pr_debug("Number of Domains supported <%ld>\n", ndomains);
1166 nlongs = BITS_TO_LONGS(ndomains);
1168 spin_lock_init(&iommu->lock);
1170 /* TBD: there might be 64K domains,
1171 * consider other allocation for future chip
1173 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1174 if (!iommu->domain_ids) {
1175 printk(KERN_ERR "Allocating domain id array failed\n");
1178 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1180 if (!iommu->domains) {
1181 printk(KERN_ERR "Allocating domain array failed\n");
 * If Caching mode is set, then invalid translations are tagged
 * with domain id 0. Hence we need to pre-allocate it.
1189 if (cap_caching_mode(iommu->cap))
1190 set_bit(0, iommu->domain_ids);
1195 static void domain_exit(struct dmar_domain *domain);
1196 static void vm_domain_exit(struct dmar_domain *domain);
1198 void free_dmar_iommu(struct intel_iommu *iommu)
1200 struct dmar_domain *domain;
1202 unsigned long flags;
1204 if ((iommu->domains) && (iommu->domain_ids)) {
1205 i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
1206 for (; i < cap_ndoms(iommu->cap); ) {
1207 domain = iommu->domains[i];
1208 clear_bit(i, iommu->domain_ids);
1210 spin_lock_irqsave(&domain->iommu_lock, flags);
1211 if (--domain->iommu_count == 0) {
1212 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1213 vm_domain_exit(domain);
1215 domain_exit(domain);
1217 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1219 i = find_next_bit(iommu->domain_ids,
1220 cap_ndoms(iommu->cap), i+1);
1224 if (iommu->gcmd & DMA_GCMD_TE)
1225 iommu_disable_translation(iommu);
1228 set_irq_data(iommu->irq, NULL);
1229 /* This will mask the irq */
1230 free_irq(iommu->irq, iommu);
1231 destroy_irq(iommu->irq);
1234 kfree(iommu->domains);
1235 kfree(iommu->domain_ids);
1237 g_iommus[iommu->seq_id] = NULL;
1239 /* if all iommus are freed, free g_iommus */
1240 for (i = 0; i < g_num_of_iommus; i++) {
1245 if (i == g_num_of_iommus)
1248 /* free context mapping */
1249 free_context_table(iommu);
1252 static struct dmar_domain *alloc_domain(void)
1254 struct dmar_domain *domain;
1256 domain = alloc_domain_mem();
1260 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1266 static int iommu_attach_domain(struct dmar_domain *domain,
1267 struct intel_iommu *iommu)
1270 unsigned long ndomains;
1271 unsigned long flags;
1273 ndomains = cap_ndoms(iommu->cap);
1275 spin_lock_irqsave(&iommu->lock, flags);
1277 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1278 if (num >= ndomains) {
1279 spin_unlock_irqrestore(&iommu->lock, flags);
1280 printk(KERN_ERR "IOMMU: no free domain ids\n");
1285 set_bit(num, iommu->domain_ids);
1286 set_bit(iommu->seq_id, &domain->iommu_bmp);
1287 iommu->domains[num] = domain;
1288 spin_unlock_irqrestore(&iommu->lock, flags);
1293 static void iommu_detach_domain(struct dmar_domain *domain,
1294 struct intel_iommu *iommu)
1296 unsigned long flags;
1300 spin_lock_irqsave(&iommu->lock, flags);
1301 ndomains = cap_ndoms(iommu->cap);
1302 num = find_first_bit(iommu->domain_ids, ndomains);
1303 for (; num < ndomains; ) {
1304 if (iommu->domains[num] == domain) {
1308 num = find_next_bit(iommu->domain_ids,
1309 cap_ndoms(iommu->cap), num+1);
1313 clear_bit(num, iommu->domain_ids);
1314 clear_bit(iommu->seq_id, &domain->iommu_bmp);
1315 iommu->domains[num] = NULL;
1317 spin_unlock_irqrestore(&iommu->lock, flags);
1320 static struct iova_domain reserved_iova_list;
1321 static struct lock_class_key reserved_rbtree_key;
1323 static void dmar_init_reserved_ranges(void)
1325 struct pci_dev *pdev = NULL;
1329 init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1331 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1332 &reserved_rbtree_key);
1334 /* IOAPIC ranges shouldn't be accessed by DMA */
1335 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1336 IOVA_PFN(IOAPIC_RANGE_END));
1338 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1340 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1341 for_each_pci_dev(pdev) {
1344 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1345 r = &pdev->resource[i];
1346 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1348 iova = reserve_iova(&reserved_iova_list,
1352 printk(KERN_ERR "Reserve iova failed\n");
1358 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1360 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1363 static inline int guestwidth_to_adjustwidth(int gaw)
1366 int r = (gaw - 12) % 9;
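/* r measures how far gaw is from a level boundary: 12 bits of page offset
 * plus a whole number of 9-bit levels. An adjusted width is always 12 plus
 * a multiple of the level stride (30, 39, 48, ...), so that width_to_agaw()
 * maps it to a whole number of levels. */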
1377 static int domain_init(struct dmar_domain *domain, int guest_width)
1379 struct intel_iommu *iommu;
1380 int adjust_width, agaw;
1381 unsigned long sagaw;
1383 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1384 spin_lock_init(&domain->iommu_lock);
1386 domain_reserve_special_ranges(domain);
1388 /* calculate AGAW */
1389 iommu = domain_get_iommu(domain);
1390 if (guest_width > cap_mgaw(iommu->cap))
1391 guest_width = cap_mgaw(iommu->cap);
1392 domain->gaw = guest_width;
1393 adjust_width = guestwidth_to_adjustwidth(guest_width);
1394 agaw = width_to_agaw(adjust_width);
1395 sagaw = cap_sagaw(iommu->cap);
1396 if (!test_bit(agaw, &sagaw)) {
1397 /* hardware doesn't support it, choose a bigger one */
1398 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1399 agaw = find_next_bit(&sagaw, 5, agaw);
1403 domain->agaw = agaw;
1404 INIT_LIST_HEAD(&domain->devices);
1406 if (ecap_coherent(iommu->ecap))
1407 domain->iommu_coherency = 1;
1409 domain->iommu_coherency = 0;
1411 if (ecap_sc_support(iommu->ecap))
1412 domain->iommu_snooping = 1;
1414 domain->iommu_snooping = 0;
1416 domain->iommu_count = 1;
1418 /* always allocate the top pgd */
1419 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1422 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1426 static void domain_exit(struct dmar_domain *domain)
1428 struct dmar_drhd_unit *drhd;
1429 struct intel_iommu *iommu;
/* Domain 0 is reserved, so don't process it */
1435 domain_remove_dev_info(domain);
1437 put_iova_domain(&domain->iovad);
1440 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1442 /* free page tables */
1443 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1445 for_each_active_iommu(iommu, drhd)
1446 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1447 iommu_detach_domain(domain, iommu);
1449 free_domain_mem(domain);
1452 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1453 u8 bus, u8 devfn, int translation)
1455 struct context_entry *context;
1456 unsigned long flags;
1457 struct intel_iommu *iommu;
1458 struct dma_pte *pgd;
1460 unsigned long ndomains;
1463 struct device_domain_info *info = NULL;
1465 pr_debug("Set context mapping for %02x:%02x.%d\n",
1466 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1468 BUG_ON(!domain->pgd);
1469 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1470 translation != CONTEXT_TT_MULTI_LEVEL);
1472 iommu = device_to_iommu(segment, bus, devfn);
1476 context = device_to_context_entry(iommu, bus, devfn);
1479 spin_lock_irqsave(&iommu->lock, flags);
1480 if (context_present(context)) {
1481 spin_unlock_irqrestore(&iommu->lock, flags);
1488 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1489 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1492 /* find an available domain id for this device in iommu */
1493 ndomains = cap_ndoms(iommu->cap);
1494 num = find_first_bit(iommu->domain_ids, ndomains);
1495 for (; num < ndomains; ) {
1496 if (iommu->domains[num] == domain) {
1501 num = find_next_bit(iommu->domain_ids,
1502 cap_ndoms(iommu->cap), num+1);
1506 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1507 if (num >= ndomains) {
1508 spin_unlock_irqrestore(&iommu->lock, flags);
1509 printk(KERN_ERR "IOMMU: no free domain ids\n");
1513 set_bit(num, iommu->domain_ids);
1514 iommu->domains[num] = domain;
/* Skip top levels of page tables for
 * an iommu that has a smaller agaw than the default.
 */
1521 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1522 pgd = phys_to_virt(dma_pte_addr(pgd));
1523 if (!dma_pte_present(pgd)) {
1524 spin_unlock_irqrestore(&iommu->lock, flags);
1530 context_set_domain_id(context, id);
1532 if (translation != CONTEXT_TT_PASS_THROUGH) {
1533 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1534 translation = info ? CONTEXT_TT_DEV_IOTLB :
1535 CONTEXT_TT_MULTI_LEVEL;
1538 * In pass through mode, AW must be programmed to indicate the largest
1539 * AGAW value supported by hardware. And ASR is ignored by hardware.
1541 if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1542 context_set_address_width(context, iommu->msagaw);
1544 context_set_address_root(context, virt_to_phys(pgd));
1545 context_set_address_width(context, iommu->agaw);
1548 context_set_translation_type(context, translation);
1549 context_set_fault_enable(context);
1550 context_set_present(context);
1551 domain_flush_cache(domain, context, sizeof(*context));
 * It's a non-present to present mapping. If hardware doesn't cache
 * non-present entries, we only need to flush the write buffer. If it
 * _does_ cache non-present entries, then it does so in the special
 * domain #0, which we have to flush:
1559 if (cap_caching_mode(iommu->cap)) {
1560 iommu->flush.flush_context(iommu, 0,
1561 (((u16)bus) << 8) | devfn,
1562 DMA_CCMD_MASK_NOBIT,
1563 DMA_CCMD_DEVICE_INVL);
1564 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
1566 iommu_flush_write_buffer(iommu);
1568 iommu_enable_dev_iotlb(info);
1569 spin_unlock_irqrestore(&iommu->lock, flags);
1571 spin_lock_irqsave(&domain->iommu_lock, flags);
1572 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1573 domain->iommu_count++;
1574 domain_update_iommu_cap(domain);
1576 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1581 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1585 struct pci_dev *tmp, *parent;
1587 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1588 pdev->bus->number, pdev->devfn,
1593 /* dependent device mapping */
1594 tmp = pci_find_upstream_pcie_bridge(pdev);
1597 /* Secondary interface's bus number and devfn 0 */
1598 parent = pdev->bus->self;
1599 while (parent != tmp) {
1600 ret = domain_context_mapping_one(domain,
1601 pci_domain_nr(parent->bus),
1602 parent->bus->number,
1603 parent->devfn, translation);
1606 parent = parent->bus->self;
1608 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
1609 return domain_context_mapping_one(domain,
1610 pci_domain_nr(tmp->subordinate),
1611 tmp->subordinate->number, 0,
1613 else /* this is a legacy PCI bridge */
1614 return domain_context_mapping_one(domain,
1615 pci_domain_nr(tmp->bus),
1621 static int domain_context_mapped(struct pci_dev *pdev)
1624 struct pci_dev *tmp, *parent;
1625 struct intel_iommu *iommu;
1627 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1632 ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1635 /* dependent device mapping */
1636 tmp = pci_find_upstream_pcie_bridge(pdev);
1639 /* Secondary interface's bus number and devfn 0 */
1640 parent = pdev->bus->self;
1641 while (parent != tmp) {
1642 ret = device_context_mapped(iommu, parent->bus->number,
1646 parent = parent->bus->self;
1649 return device_context_mapped(iommu, tmp->subordinate->number,
1652 return device_context_mapped(iommu, tmp->bus->number,
1656 /* Returns a number of VTD pages, but aligned to MM page size */
1657 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1660 host_addr &= ~PAGE_MASK;
1661 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
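/* Example: host_addr offset 0x800 with size 0x1000 spans two 4KiB pages,
 * so on x86 this returns PAGE_ALIGN(0x1800) >> VTD_PAGE_SHIFT == 2. */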
1664 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1665 struct scatterlist *sg, unsigned long phys_pfn,
1666 unsigned long nr_pages, int prot)
1668 struct dma_pte *first_pte = NULL, *pte = NULL;
1669 phys_addr_t uninitialized_var(pteval);
1670 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1671 unsigned long sg_res;
1673 BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1675 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1678 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1683 sg_res = nr_pages + 1;
1684 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1687 while (nr_pages--) {
1691 sg_res = aligned_nrpages(sg->offset, sg->length);
1692 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1693 sg->dma_length = sg->length;
1694 pteval = page_to_phys(sg_page(sg)) | prot;
1697 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1701 /* We don't need lock here, nobody else
1702 * touches the iova range
1704 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1706 static int dumps = 5;
1707 printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1708 iov_pfn, tmp, (unsigned long long)pteval);
1711 debug_dma_dump_mappings(NULL);
1716 if (!nr_pages || first_pte_in_page(pte)) {
1717 domain_flush_cache(domain, first_pte,
1718 (void *)pte - (void *)first_pte);
1722 pteval += VTD_PAGE_SIZE;
1730 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1731 struct scatterlist *sg, unsigned long nr_pages,
1734 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1737 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1738 unsigned long phys_pfn, unsigned long nr_pages,
1741 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
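/* domain_sg_mapping() and domain_pfn_mapping() are thin wrappers around
 * __domain_mapping(): the former walks a scatterlist, the latter maps a
 * physically contiguous pfn range; __domain_mapping() distinguishes the two
 * by whether 'sg' is NULL. */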
1744 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1749 clear_context_table(iommu, bus, devfn);
1750 iommu->flush.flush_context(iommu, 0, 0, 0,
1751 DMA_CCMD_GLOBAL_INVL);
1752 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1755 static void domain_remove_dev_info(struct dmar_domain *domain)
1757 struct device_domain_info *info;
1758 unsigned long flags;
1759 struct intel_iommu *iommu;
1761 spin_lock_irqsave(&device_domain_lock, flags);
1762 while (!list_empty(&domain->devices)) {
1763 info = list_entry(domain->devices.next,
1764 struct device_domain_info, link);
1765 list_del(&info->link);
1766 list_del(&info->global);
1768 info->dev->dev.archdata.iommu = NULL;
1769 spin_unlock_irqrestore(&device_domain_lock, flags);
1771 iommu_disable_dev_iotlb(info);
1772 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1773 iommu_detach_dev(iommu, info->bus, info->devfn);
1774 free_devinfo_mem(info);
1776 spin_lock_irqsave(&device_domain_lock, flags);
1778 spin_unlock_irqrestore(&device_domain_lock, flags);
 * Note: struct pci_dev->dev.archdata.iommu stores the device_domain_info
1785 static struct dmar_domain *
1786 find_domain(struct pci_dev *pdev)
1788 struct device_domain_info *info;
1790 /* No lock here, assumes no domain exit in normal case */
1791 info = pdev->dev.archdata.iommu;
1793 return info->domain;
1797 /* domain is initialized */
1798 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1800 struct dmar_domain *domain, *found = NULL;
1801 struct intel_iommu *iommu;
1802 struct dmar_drhd_unit *drhd;
1803 struct device_domain_info *info, *tmp;
1804 struct pci_dev *dev_tmp;
1805 unsigned long flags;
1806 int bus = 0, devfn = 0;
1810 domain = find_domain(pdev);
1814 segment = pci_domain_nr(pdev->bus);
1816 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1818 if (dev_tmp->is_pcie) {
1819 bus = dev_tmp->subordinate->number;
1822 bus = dev_tmp->bus->number;
1823 devfn = dev_tmp->devfn;
1825 spin_lock_irqsave(&device_domain_lock, flags);
1826 list_for_each_entry(info, &device_domain_list, global) {
1827 if (info->segment == segment &&
1828 info->bus == bus && info->devfn == devfn) {
1829 found = info->domain;
1833 spin_unlock_irqrestore(&device_domain_lock, flags);
/* pcie-pci bridge already has a domain, use it */
1841 domain = alloc_domain();
1845 /* Allocate new domain for the device */
1846 drhd = dmar_find_matched_drhd_unit(pdev);
1848 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
1852 iommu = drhd->iommu;
1854 ret = iommu_attach_domain(domain, iommu);
1856 domain_exit(domain);
1860 if (domain_init(domain, gaw)) {
1861 domain_exit(domain);
1865 /* register pcie-to-pci device */
1867 info = alloc_devinfo_mem();
1869 domain_exit(domain);
1872 info->segment = segment;
1874 info->devfn = devfn;
1876 info->domain = domain;
1877 /* This domain is shared by devices under p2p bridge */
1878 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
/* pcie-to-pci bridge already has a domain, use it */
1882 spin_lock_irqsave(&device_domain_lock, flags);
1883 list_for_each_entry(tmp, &device_domain_list, global) {
1884 if (tmp->segment == segment &&
1885 tmp->bus == bus && tmp->devfn == devfn) {
1886 found = tmp->domain;
1891 free_devinfo_mem(info);
1892 domain_exit(domain);
1895 list_add(&info->link, &domain->devices);
1896 list_add(&info->global, &device_domain_list);
1898 spin_unlock_irqrestore(&device_domain_lock, flags);
1902 info = alloc_devinfo_mem();
1905 info->segment = segment;
1906 info->bus = pdev->bus->number;
1907 info->devfn = pdev->devfn;
1909 info->domain = domain;
1910 spin_lock_irqsave(&device_domain_lock, flags);
1911 /* somebody is fast */
1912 found = find_domain(pdev);
1913 if (found != NULL) {
1914 spin_unlock_irqrestore(&device_domain_lock, flags);
1915 if (found != domain) {
1916 domain_exit(domain);
1919 free_devinfo_mem(info);
1922 list_add(&info->link, &domain->devices);
1923 list_add(&info->global, &device_domain_list);
1924 pdev->dev.archdata.iommu = info;
1925 spin_unlock_irqrestore(&device_domain_lock, flags);
1928 /* recheck it here, maybe others set it */
1929 return find_domain(pdev);
1932 static int iommu_identity_mapping;
1934 static int iommu_domain_identity_map(struct dmar_domain *domain,
1935 unsigned long long start,
1936 unsigned long long end)
1938 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
1939 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
1941 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1942 dma_to_mm_pfn(last_vpfn))) {
1943 printk(KERN_ERR "IOMMU: reserve iova failed\n");
1947 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1948 start, end, domain->id);
 * RMRR range might have overlap with physical memory range,
 * clear it first
1953 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1955 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1956 last_vpfn - first_vpfn + 1,
1957 DMA_PTE_READ|DMA_PTE_WRITE);
1960 static int iommu_prepare_identity_map(struct pci_dev *pdev,
1961 unsigned long long start,
1962 unsigned long long end)
1964 struct dmar_domain *domain;
1967 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
/* For _hardware_ passthrough, don't bother. But for software
   passthrough, we do it anyway -- it may indicate a memory
   range which is reserved in E820, and so didn't get set
   up to start with in si_domain */
1975 if (domain == si_domain && hw_pass_through) {
1976 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
1977 pci_name(pdev), start, end);
1982 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
1983 pci_name(pdev), start, end);
1985 if (end >> agaw_to_width(domain->agaw)) {
1986 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
1987 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
1988 agaw_to_width(domain->agaw),
1989 dmi_get_system_info(DMI_BIOS_VENDOR),
1990 dmi_get_system_info(DMI_BIOS_VERSION),
1991 dmi_get_system_info(DMI_PRODUCT_VERSION));
1996 ret = iommu_domain_identity_map(domain, start, end);
2000 /* context entry init */
2001 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2008 domain_exit(domain);
2012 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2013 struct pci_dev *pdev)
2015 if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2017 return iommu_prepare_identity_map(pdev, rmrr->base_address,
2018 rmrr->end_address + 1);
2021 #ifdef CONFIG_DMAR_FLOPPY_WA
2022 static inline void iommu_prepare_isa(void)
2024 struct pci_dev *pdev;
2027 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2031 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2032 ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
2035 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2036 "floppy might not work\n");
2040 static inline void iommu_prepare_isa(void)
#endif /* !CONFIG_DMAR_FLOPPY_WA */
2046 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2048 static int __init si_domain_work_fn(unsigned long start_pfn,
2049 unsigned long end_pfn, void *datax)
2053 *ret = iommu_domain_identity_map(si_domain,
2054 (uint64_t)start_pfn << PAGE_SHIFT,
2055 (uint64_t)end_pfn << PAGE_SHIFT);
2060 static int __init si_domain_init(int hw)
2062 struct dmar_drhd_unit *drhd;
2063 struct intel_iommu *iommu;
2066 si_domain = alloc_domain();
2070 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2072 for_each_active_iommu(iommu, drhd) {
2073 ret = iommu_attach_domain(si_domain, iommu);
2075 domain_exit(si_domain);
2080 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2081 domain_exit(si_domain);
2085 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2090 for_each_online_node(nid) {
2091 work_with_active_regions(nid, si_domain_work_fn, &ret);
2099 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2100 struct pci_dev *pdev);
2101 static int identity_mapping(struct pci_dev *pdev)
2103 struct device_domain_info *info;
2105 if (likely(!iommu_identity_mapping))
2109 list_for_each_entry(info, &si_domain->devices, link)
2110 if (info->dev == pdev)
2115 static int domain_add_dev_info(struct dmar_domain *domain,
2116 struct pci_dev *pdev,
2119 struct device_domain_info *info;
2120 unsigned long flags;
2123 info = alloc_devinfo_mem();
2127 ret = domain_context_mapping(domain, pdev, translation);
2129 free_devinfo_mem(info);
2133 info->segment = pci_domain_nr(pdev->bus);
2134 info->bus = pdev->bus->number;
2135 info->devfn = pdev->devfn;
2137 info->domain = domain;
2139 spin_lock_irqsave(&device_domain_lock, flags);
2140 list_add(&info->link, &domain->devices);
2141 list_add(&info->global, &device_domain_list);
2142 pdev->dev.archdata.iommu = info;
2143 spin_unlock_irqrestore(&device_domain_lock, flags);
2148 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2150 if (iommu_identity_mapping == 2)
2151 return IS_GFX_DEVICE(pdev);
2154 * We want to start off with all devices in the 1:1 domain, and
2155 * take them out later if we find they can't access all of memory.
2157 * However, we can't do this for PCI devices behind bridges,
2158 * because all PCI devices behind the same bridge will end up
2159 * with the same source-id on their transactions.
2161 * Practically speaking, we can't change things around for these
2162 * devices at run-time, because we can't be sure there'll be no
2163 * DMA transactions in flight for any of their siblings.
2165 * So PCI devices (unless they're on the root bus) as well as
2166 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2167 * the 1:1 domain, just in _case_ one of their siblings turns out
2168 * not to be able to map all of memory.
2170 if (!pdev->is_pcie) {
2171 if (!pci_is_root_bus(pdev->bus))
2173 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2175 } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
2179 * At boot time, we don't yet know if devices will be 64-bit capable.
2180 * Assume that they will -- if they turn out not to be, then we can
2181 * take them out of the 1:1 domain later.
2184 return pdev->dma_mask > DMA_BIT_MASK(32);
2189 static int __init iommu_prepare_static_identity_mapping(int hw)
2191 struct pci_dev *pdev = NULL;
2194 ret = si_domain_init(hw);
2198 for_each_pci_dev(pdev) {
2199 if (iommu_should_identity_map(pdev, 1)) {
2200 printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
2201 hw ? "hardware" : "software", pci_name(pdev));
2203 ret = domain_add_dev_info(si_domain, pdev,
2204 hw ? CONTEXT_TT_PASS_THROUGH :
2205 CONTEXT_TT_MULTI_LEVEL);
2214 int __init init_dmars(void)
2216 struct dmar_drhd_unit *drhd;
2217 struct dmar_rmrr_unit *rmrr;
2218 struct pci_dev *pdev;
2219 struct intel_iommu *iommu;
2225 * initialize and program root entry to not present
2228 for_each_drhd_unit(drhd) {
 * lock not needed as this is only incremented in the
 * single-threaded kernel __init code path; all other access
 * is read-only
2237 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2240 printk(KERN_ERR "Allocating global iommu array failed\n");
2245 deferred_flush = kzalloc(g_num_of_iommus *
2246 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2247 if (!deferred_flush) {
2252 for_each_drhd_unit(drhd) {
2256 iommu = drhd->iommu;
2257 g_iommus[iommu->seq_id] = iommu;
2259 ret = iommu_init_domains(iommu);
 * we could share the same root & context tables
 * among all IOMMUs. Need to split it later.
2268 ret = iommu_alloc_root_entry(iommu);
2270 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2273 if (!ecap_pass_through(iommu->ecap))
2274 hw_pass_through = 0;
 * Start from a sane iommu hardware state.
2280 for_each_drhd_unit(drhd) {
2284 iommu = drhd->iommu;
 * If queued invalidation was already initialized by us
 * (for example, while enabling interrupt-remapping) then
 * things are already rolling from a sane state.
2295 * Clear any previous faults.
2297 dmar_fault(-1, iommu);
2299 * Disable queued invalidation if supported and already enabled
2300 * before OS handover.
2302 dmar_disable_qi(iommu);
2305 for_each_drhd_unit(drhd) {
2309 iommu = drhd->iommu;
2311 if (dmar_enable_qi(iommu)) {
 * Queued Invalidation is not enabled, use Register Based
 * Invalidation
2316 iommu->flush.flush_context = __iommu_flush_context;
2317 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
       "invalidation\n",
       (unsigned long long)drhd->reg_base_addr);
2322 iommu->flush.flush_context = qi_flush_context;
2323 iommu->flush.flush_iotlb = qi_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
       "invalidation\n",
       (unsigned long long)drhd->reg_base_addr);
2330 if (iommu_pass_through)
2331 iommu_identity_mapping = 1;
2332 #ifdef CONFIG_DMAR_BROKEN_GFX_WA
2334 iommu_identity_mapping = 2;
 * If pass-through is not set or not enabled, set up context entries
 * for identity mappings for rmrr, gfx, and isa, and may fall back to
 * static identity mapping if iommu_identity_mapping is set.
2341 if (iommu_identity_mapping) {
2342 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2344 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2350 * for each dev attached to rmrr
2352 * locate drhd for dev, alloc domain for dev
2353 * allocate free domain
2354 * allocate page table entries for rmrr
2355 * if context not allocated for bus
2356 * allocate and init context
2357 * set present in root table for this bus
2358 * init context with domain, translation etc
2362 printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2363 for_each_rmrr_units(rmrr) {
2364 for (i = 0; i < rmrr->devices_cnt; i++) {
2365 pdev = rmrr->devices[i];
 * some BIOSes list non-existent devices in the DMAR table,
 * just ignore them
2372 ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2375 "IOMMU: mapping reserved region failed\n");
2379 iommu_prepare_isa();
2384 * global invalidate context cache
2385 * global invalidate iotlb
2386 * enable translation
2388 for_each_drhd_unit(drhd) {
2391 iommu = drhd->iommu;
2393 iommu_flush_write_buffer(iommu);
2395 ret = dmar_set_interrupt(iommu);
2399 iommu_set_root_entry(iommu);
2401 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2402 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2403 iommu_disable_protect_mem_regions(iommu);
2405 ret = iommu_enable_translation(iommu);
2412 for_each_drhd_unit(drhd) {
2415 iommu = drhd->iommu;
2422 /* This takes a number of _MM_ pages, not VTD pages */
2423 static struct iova *intel_alloc_iova(struct device *dev,
2424 struct dmar_domain *domain,
2425 unsigned long nrpages, uint64_t dma_mask)
2427 struct pci_dev *pdev = to_pci_dev(dev);
2428 struct iova *iova = NULL;
2430 /* Restrict dma_mask to the width that the iommu can handle */
2431 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2433 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 * First try to allocate an io virtual address in
 * DMA_BIT_MASK(32), and if that fails then try allocating
 * from a higher range
2439 iova = alloc_iova(&domain->iovad, nrpages,
2440 IOVA_PFN(DMA_BIT_MASK(32)), 1);
2444 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2445 if (unlikely(!iova)) {
2446 printk(KERN_ERR "Allocating %ld-page iova for %s failed",
2447 nrpages, pci_name(pdev));
2454 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2456 struct dmar_domain *domain;
2459 domain = get_domain_for_dev(pdev,
2460 DEFAULT_DOMAIN_ADDRESS_WIDTH);
2463 "Allocating domain for %s failed", pci_name(pdev));
2467 /* make sure context mapping is ok */
2468 if (unlikely(!domain_context_mapped(pdev))) {
2469 ret = domain_context_mapping(domain, pdev,
2470 CONTEXT_TT_MULTI_LEVEL);
2473 "Domain context map for %s failed",
2482 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2484 struct device_domain_info *info;
2486 /* No lock here, assumes no domain exit in normal case */
2487 info = dev->dev.archdata.iommu;
2489 return info->domain;
2491 return __get_valid_domain_for_dev(dev);
2494 static int iommu_dummy(struct pci_dev *pdev)
2496 return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2499 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2500 static int iommu_no_mapping(struct device *dev)
2502 struct pci_dev *pdev;
2505 if (unlikely(dev->bus != &pci_bus_type))
2508 pdev = to_pci_dev(dev);
2509 if (iommu_dummy(pdev))
2512 if (!iommu_identity_mapping)
2515 found = identity_mapping(pdev);
2517 if (iommu_should_identity_map(pdev, 0))
2521 * A 32-bit DMA device is removed from si_domain and falls back
2522 * to non-identity mapping.
2524 domain_remove_one_dev_info(si_domain, pdev);
2525 printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2531 * When a 64-bit DMA device is detached from a VM, the device
2532 * is put back into si_domain for identity mapping.
2534 if (iommu_should_identity_map(pdev, 0)) {
2536 ret = domain_add_dev_info(si_domain, pdev,
2538 CONTEXT_TT_PASS_THROUGH :
2539 CONTEXT_TT_MULTI_LEVEL);
2541 printk(KERN_INFO "64bit %s uses identity mapping\n",
2551 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2552 size_t size, int dir, u64 dma_mask)
2554 struct pci_dev *pdev = to_pci_dev(hwdev);
2555 struct dmar_domain *domain;
2556 phys_addr_t start_paddr;
2560 struct intel_iommu *iommu;
2561 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2563 BUG_ON(dir == DMA_NONE);
2565 if (iommu_no_mapping(hwdev))
2568 domain = get_valid_domain_for_dev(pdev);
2572 iommu = domain_get_iommu(domain);
2573 size = aligned_nrpages(paddr, size);
2575 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2581 * Check if DMAR supports zero-length reads on write-only mappings.
2584 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2585 !cap_zlr(iommu->cap))
2586 prot |= DMA_PTE_READ;
2587 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2588 prot |= DMA_PTE_WRITE;
2590 * The range paddr..(paddr + size) might cover a partial page, so we should map the whole
2591 * page. Note: if two parts of one page are mapped separately, we
2592 * might have two guest addresses mapping to the same host paddr, but this
2593 * is not a big problem.
2595 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2596 mm_to_dma_pfn(paddr_pfn), size, prot);
2600 /* it's a non-present to present mapping. Only flush if caching mode */
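/*
 * With caching mode (CM=1, common on virtualized/emulated hardware) the
 * IOTLB may cache not-present entries, so even a non-present to present
 * transition needs a page-selective invalidation; otherwise flushing the
 * write buffer is sufficient.
 */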
2601 if (cap_caching_mode(iommu->cap))
2602 iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
2604 iommu_flush_write_buffer(iommu);
2606 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2607 start_paddr += paddr & ~PAGE_MASK;
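/* The handle returned to the driver is the start of the allocated IOVA
 * range plus the offset of paddr within its page. */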
2612 __free_iova(&domain->iovad, iova);
2613 printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2614 pci_name(pdev), size, (unsigned long long)paddr, dir);
2618 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2619 unsigned long offset, size_t size,
2620 enum dma_data_direction dir,
2621 struct dma_attrs *attrs)
2623 return __intel_map_single(dev, page_to_phys(page) + offset, size,
2624 dir, to_pci_dev(dev)->dma_mask);
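/*
 * Deferred unmapping: rather than flushing the IOTLB on every unmap, freed
 * IOVAs are queued per IOMMU and released in batches, either once
 * HIGH_WATER_MARK entries have accumulated or when the 10ms unmap_timer
 * fires. This amortises the cost of the global IOTLB flush.
 */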
2627 static void flush_unmaps(void)
2633 /* just flush them all */
2634 for (i = 0; i < g_num_of_iommus; i++) {
2635 struct intel_iommu *iommu = g_iommus[i];
2639 if (!deferred_flush[i].next)
2642 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2643 DMA_TLB_GLOBAL_FLUSH);
2644 for (j = 0; j < deferred_flush[i].next; j++) {
2646 struct iova *iova = deferred_flush[i].iova[j];
2648 mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
2649 mask = ilog2(mask >> VTD_PAGE_SHIFT);
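/*
 * Example (assuming 4KB pages): an 8-page IOVA chunk is 32KB, so
 * mask = ilog2(32KB >> VTD_PAGE_SHIFT) = 3, i.e. the device-IOTLB
 * invalidation covers 2^3 pages.
 */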
2650 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2651 iova->pfn_lo << PAGE_SHIFT, mask);
2652 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2654 deferred_flush[i].next = 0;
2660 static void flush_unmaps_timeout(unsigned long data)
2662 unsigned long flags;
2664 spin_lock_irqsave(&async_umap_flush_lock, flags);
2666 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2669 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2671 unsigned long flags;
2673 struct intel_iommu *iommu;
2675 spin_lock_irqsave(&async_umap_flush_lock, flags);
2676 if (list_size == HIGH_WATER_MARK)
2679 iommu = domain_get_iommu(dom);
2680 iommu_id = iommu->seq_id;
2682 next = deferred_flush[iommu_id].next;
2683 deferred_flush[iommu_id].domain[next] = dom;
2684 deferred_flush[iommu_id].iova[next] = iova;
2685 deferred_flush[iommu_id].next++;
2688 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2692 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2695 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2696 size_t size, enum dma_data_direction dir,
2697 struct dma_attrs *attrs)
2699 struct pci_dev *pdev = to_pci_dev(dev);
2700 struct dmar_domain *domain;
2701 unsigned long start_pfn, last_pfn;
2703 struct intel_iommu *iommu;
2705 if (iommu_no_mapping(dev))
2708 domain = find_domain(pdev);
2711 iommu = domain_get_iommu(domain);
2713 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2714 if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2715 (unsigned long long)dev_addr))
2718 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2719 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2721 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2722 pci_name(pdev), start_pfn, last_pfn);
2724 /* clear the page table entries for the whole range */
2725 dma_pte_clear_range(domain, start_pfn, last_pfn);
2727 /* free page tables */
2728 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2730 if (intel_iommu_strict) {
2731 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2732 last_pfn - start_pfn + 1);
2734 __free_iova(&domain->iovad, iova);
2736 add_unmap(domain, iova);
2738 * queue up the release of the unmap to save the roughly 1/6 of the
2739 * CPU time used up by the iotlb flush operation...
2744 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2745 dma_addr_t *dma_handle, gfp_t flags)
2750 size = PAGE_ALIGN(size);
2751 order = get_order(size);
2752 flags &= ~(GFP_DMA | GFP_DMA32);
2754 vaddr = (void *)__get_free_pages(flags, order);
2757 memset(vaddr, 0, size);
2759 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2761 hwdev->coherent_dma_mask);
2764 free_pages((unsigned long)vaddr, order);
2768 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2769 dma_addr_t dma_handle)
2773 size = PAGE_ALIGN(size);
2774 order = get_order(size);
2776 intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
2777 free_pages((unsigned long)vaddr, order);
2780 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2781 int nelems, enum dma_data_direction dir,
2782 struct dma_attrs *attrs)
2784 struct pci_dev *pdev = to_pci_dev(hwdev);
2785 struct dmar_domain *domain;
2786 unsigned long start_pfn, last_pfn;
2788 struct intel_iommu *iommu;
2790 if (iommu_no_mapping(hwdev))
2793 domain = find_domain(pdev);
2796 iommu = domain_get_iommu(domain);
2798 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2799 if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
2800 (unsigned long long)sglist[0].dma_address))
2803 start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2804 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2806 /* clear the page table entries for the whole range */
2807 dma_pte_clear_range(domain, start_pfn, last_pfn);
2809 /* free page tables */
2810 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2812 if (intel_iommu_strict) {
2813 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2814 last_pfn - start_pfn + 1);
2816 __free_iova(&domain->iovad, iova);
2818 add_unmap(domain, iova);
2820 * queue up the release of the unmap to save the roughly 1/6 of the
2821 * CPU time used up by the iotlb flush operation...
2826 static int intel_nontranslate_map_sg(struct device *hddev,
2827 struct scatterlist *sglist, int nelems, int dir)
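/* No translation for this device: hand back raw physical addresses. */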
2830 struct scatterlist *sg;
2832 for_each_sg(sglist, sg, nelems, i) {
2833 BUG_ON(!sg_page(sg));
2834 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
2835 sg->dma_length = sg->length;
2840 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2841 enum dma_data_direction dir, struct dma_attrs *attrs)
2844 struct pci_dev *pdev = to_pci_dev(hwdev);
2845 struct dmar_domain *domain;
2848 size_t offset_pfn = 0;
2849 struct iova *iova = NULL;
2851 struct scatterlist *sg;
2852 unsigned long start_vpfn;
2853 struct intel_iommu *iommu;
2855 BUG_ON(dir == DMA_NONE);
2856 if (iommu_no_mapping(hwdev))
2857 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
2859 domain = get_valid_domain_for_dev(pdev);
2863 iommu = domain_get_iommu(domain);
2865 for_each_sg(sglist, sg, nelems, i)
2866 size += aligned_nrpages(sg->offset, sg->length);
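/* 'size' is now the total number of VT-d pages needed, so one contiguous
 * IOVA range can back the whole scatterlist. */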
2868 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2871 sglist->dma_length = 0;
2876 * Check if DMAR supports zero-length reads on write-only mappings.
2879 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2880 !cap_zlr(iommu->cap))
2881 prot |= DMA_PTE_READ;
2882 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2883 prot |= DMA_PTE_WRITE;
2885 start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
2887 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2888 if (unlikely(ret)) {
2889 /* undo any partially established page table entries */
2890 dma_pte_clear_range(domain, start_vpfn,
2891 start_vpfn + size - 1);
2892 /* free page tables */
2893 dma_pte_free_pagetable(domain, start_vpfn,
2894 start_vpfn + size - 1);
2896 __free_iova(&domain->iovad, iova);
2900 /* it's a non-present to present mapping. Only flush if caching mode */
2901 if (cap_caching_mode(iommu->cap))
2902 iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
2904 iommu_flush_write_buffer(iommu);
2909 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
2914 struct dma_map_ops intel_dma_ops = {
2915 .alloc_coherent = intel_alloc_coherent,
2916 .free_coherent = intel_free_coherent,
2917 .map_sg = intel_map_sg,
2918 .unmap_sg = intel_unmap_sg,
2919 .map_page = intel_map_page,
2920 .unmap_page = intel_unmap_page,
2921 .mapping_error = intel_mapping_error,
2924 static inline int iommu_domain_cache_init(void)
2928 iommu_domain_cache = kmem_cache_create("iommu_domain",
2929 sizeof(struct dmar_domain),
2934 if (!iommu_domain_cache) {
2935 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
2942 static inline int iommu_devinfo_cache_init(void)
2946 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
2947 sizeof(struct device_domain_info),
2951 if (!iommu_devinfo_cache) {
2952 printk(KERN_ERR "Couldn't create devinfo cache\n");
2959 static inline int iommu_iova_cache_init(void)
2963 iommu_iova_cache = kmem_cache_create("iommu_iova",
2964 sizeof(struct iova),
2968 if (!iommu_iova_cache) {
2969 printk(KERN_ERR "Couldn't create iova cache\n");
2976 static int __init iommu_init_mempool(void)
2979 ret = iommu_iova_cache_init();
2983 ret = iommu_domain_cache_init();
2987 ret = iommu_devinfo_cache_init();
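/* On failure, the caches already created are destroyed in reverse order. */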
2991 kmem_cache_destroy(iommu_domain_cache);
2993 kmem_cache_destroy(iommu_iova_cache);
2998 static void __init iommu_exit_mempool(void)
3000 kmem_cache_destroy(iommu_devinfo_cache);
3001 kmem_cache_destroy(iommu_domain_cache);
3002 kmem_cache_destroy(iommu_iova_cache);
3006 static void __init init_no_remapping_devices(void)
3008 struct dmar_drhd_unit *drhd;
3010 for_each_drhd_unit(drhd) {
3011 if (!drhd->include_all) {
3013 for (i = 0; i < drhd->devices_cnt; i++)
3014 if (drhd->devices[i] != NULL)
3016 /* ignore DMAR unit if no pci devices exist */
3017 if (i == drhd->devices_cnt)
3025 for_each_drhd_unit(drhd) {
3027 if (drhd->ignored || drhd->include_all)
3030 for (i = 0; i < drhd->devices_cnt; i++)
3031 if (drhd->devices[i] &&
3032 !IS_GFX_DEVICE(drhd->devices[i]))
3035 if (i < drhd->devices_cnt)
3038 /* bypass IOMMU if it is just for gfx devices */
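/*
 * Tagging these devices with DUMMY_DEVICE_DOMAIN_INFO makes iommu_dummy()
 * true for them, so the DMA API skips translation for DRHD units that
 * cover nothing but graphics devices.
 */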
3040 for (i = 0; i < drhd->devices_cnt; i++) {
3041 if (!drhd->devices[i])
3043 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3048 #ifdef CONFIG_SUSPEND
3049 static int init_iommu_hw(void)
3051 struct dmar_drhd_unit *drhd;
3052 struct intel_iommu *iommu = NULL;
3054 for_each_active_iommu(iommu, drhd)
3056 dmar_reenable_qi(iommu);
3058 for_each_active_iommu(iommu, drhd) {
3059 iommu_flush_write_buffer(iommu);
3061 iommu_set_root_entry(iommu);
3063 iommu->flush.flush_context(iommu, 0, 0, 0,
3064 DMA_CCMD_GLOBAL_INVL);
3065 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3066 DMA_TLB_GLOBAL_FLUSH);
3067 iommu_disable_protect_mem_regions(iommu);
3068 iommu_enable_translation(iommu);
3074 static void iommu_flush_all(void)
3076 struct dmar_drhd_unit *drhd;
3077 struct intel_iommu *iommu;
3079 for_each_active_iommu(iommu, drhd) {
3080 iommu->flush.flush_context(iommu, 0, 0, 0,
3081 DMA_CCMD_GLOBAL_INVL);
3082 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3083 DMA_TLB_GLOBAL_FLUSH);
3087 static int iommu_suspend(struct sys_device *dev, pm_message_t state)
3089 struct dmar_drhd_unit *drhd;
3090 struct intel_iommu *iommu = NULL;
3093 for_each_active_iommu(iommu, drhd) {
3094 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3096 if (!iommu->iommu_state)
3102 for_each_active_iommu(iommu, drhd) {
3103 iommu_disable_translation(iommu);
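/*
 * Only the fault-event interrupt registers need to be saved here;
 * everything else is reprogrammed by init_iommu_hw() on resume.
 */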
3105 spin_lock_irqsave(&iommu->register_lock, flag);
3107 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3108 readl(iommu->reg + DMAR_FECTL_REG);
3109 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3110 readl(iommu->reg + DMAR_FEDATA_REG);
3111 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3112 readl(iommu->reg + DMAR_FEADDR_REG);
3113 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3114 readl(iommu->reg + DMAR_FEUADDR_REG);
3116 spin_unlock_irqrestore(&iommu->register_lock, flag);
3121 for_each_active_iommu(iommu, drhd)
3122 kfree(iommu->iommu_state);
3127 static int iommu_resume(struct sys_device *dev)
3129 struct dmar_drhd_unit *drhd;
3130 struct intel_iommu *iommu = NULL;
3133 if (init_iommu_hw()) {
3134 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3138 for_each_active_iommu(iommu, drhd) {
3140 spin_lock_irqsave(&iommu->register_lock, flag);
3142 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3143 iommu->reg + DMAR_FECTL_REG);
3144 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3145 iommu->reg + DMAR_FEDATA_REG);
3146 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3147 iommu->reg + DMAR_FEADDR_REG);
3148 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3149 iommu->reg + DMAR_FEUADDR_REG);
3151 spin_unlock_irqrestore(&iommu->register_lock, flag);
3154 for_each_active_iommu(iommu, drhd)
3155 kfree(iommu->iommu_state);
3160 static struct sysdev_class iommu_sysclass = {
3162 .resume = iommu_resume,
3163 .suspend = iommu_suspend,
3166 static struct sys_device device_iommu = {
3167 .cls = &iommu_sysclass,
3170 static int __init init_iommu_sysfs(void)
3174 error = sysdev_class_register(&iommu_sysclass);
3178 error = sysdev_register(&device_iommu);
3180 sysdev_class_unregister(&iommu_sysclass);
3186 static int __init init_iommu_sysfs(void)
3190 #endif /* CONFIG_PM */
3192 int __init intel_iommu_init(void)
3196 if (dmar_table_init())
3199 if (dmar_dev_scope_init())
3203 * Check the need for DMA-remapping initialization now.
3204 * The above initialization will also be used by interrupt remapping.
3206 if (no_iommu || swiotlb || dmar_disabled)
3209 iommu_init_mempool();
3210 dmar_init_reserved_ranges();
3212 init_no_remapping_devices();
3216 printk(KERN_ERR "IOMMU: dmar init failed\n");
3217 put_iova_domain(&reserved_iova_list);
3218 iommu_exit_mempool();
3222 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3224 init_timer(&unmap_timer);
3226 dma_ops = &intel_dma_ops;
3230 register_iommu(&intel_iommu_ops);
3235 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3236 struct pci_dev *pdev)
3238 struct pci_dev *tmp, *parent;
3240 if (!iommu || !pdev)
3243 /* dependent device detach */
3244 tmp = pci_find_upstream_pcie_bridge(pdev);
3245 /* Secondary interface's bus number and devfn 0 */
3247 parent = pdev->bus->self;
3248 while (parent != tmp) {
3249 iommu_detach_dev(iommu, parent->bus->number,
3251 parent = parent->bus->self;
3253 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
3254 iommu_detach_dev(iommu,
3255 tmp->subordinate->number, 0);
3256 else /* this is a legacy PCI bridge */
3257 iommu_detach_dev(iommu, tmp->bus->number,
3262 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3263 struct pci_dev *pdev)
3265 struct device_domain_info *info;
3266 struct intel_iommu *iommu;
3267 unsigned long flags;
3269 struct list_head *entry, *tmp;
3271 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3276 spin_lock_irqsave(&device_domain_lock, flags);
3277 list_for_each_safe(entry, tmp, &domain->devices) {
3278 info = list_entry(entry, struct device_domain_info, link);
3279 /* No need to compare PCI domain; it has to be the same */
3280 if (info->bus == pdev->bus->number &&
3281 info->devfn == pdev->devfn) {
3282 list_del(&info->link);
3283 list_del(&info->global);
3285 info->dev->dev.archdata.iommu = NULL;
3286 spin_unlock_irqrestore(&device_domain_lock, flags);
3288 iommu_disable_dev_iotlb(info);
3289 iommu_detach_dev(iommu, info->bus, info->devfn);
3290 iommu_detach_dependent_devices(iommu, pdev);
3291 free_devinfo_mem(info);
3293 spin_lock_irqsave(&device_domain_lock, flags);
3301 /* if there are no other devices under the same iommu
3302 * owned by this domain, clear this iommu in iommu_bmp and
3303 * update the iommu count and coherency
3305 if (iommu == device_to_iommu(info->segment, info->bus,
3311 unsigned long tmp_flags;
3312 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3313 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3314 domain->iommu_count--;
3315 domain_update_iommu_cap(domain);
3316 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3319 spin_unlock_irqrestore(&device_domain_lock, flags);
3322 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3324 struct device_domain_info *info;
3325 struct intel_iommu *iommu;
3326 unsigned long flags1, flags2;
3328 spin_lock_irqsave(&device_domain_lock, flags1);
3329 while (!list_empty(&domain->devices)) {
3330 info = list_entry(domain->devices.next,
3331 struct device_domain_info, link);
3332 list_del(&info->link);
3333 list_del(&info->global);
3335 info->dev->dev.archdata.iommu = NULL;
3337 spin_unlock_irqrestore(&device_domain_lock, flags1);
3339 iommu_disable_dev_iotlb(info);
3340 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3341 iommu_detach_dev(iommu, info->bus, info->devfn);
3342 iommu_detach_dependent_devices(iommu, info->dev);
3344 /* clear this iommu in iommu_bmp, update iommu count and capabilities */
3347 spin_lock_irqsave(&domain->iommu_lock, flags2);
3348 if (test_and_clear_bit(iommu->seq_id,
3349 &domain->iommu_bmp)) {
3350 domain->iommu_count--;
3351 domain_update_iommu_cap(domain);
3353 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3355 free_devinfo_mem(info);
3356 spin_lock_irqsave(&device_domain_lock, flags1);
3358 spin_unlock_irqrestore(&device_domain_lock, flags1);
3361 /* domain id for a virtual machine; it won't be set in a context entry */
3362 static unsigned long vm_domid;
3364 static int vm_domain_min_agaw(struct dmar_domain *domain)
3367 int min_agaw = domain->agaw;
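/*
 * Walk every IOMMU this domain is attached to; the smallest AGAW among
 * them bounds the address width the domain can safely use.
 */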
3369 i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
3370 for (; i < g_num_of_iommus; ) {
3371 if (min_agaw > g_iommus[i]->agaw)
3372 min_agaw = g_iommus[i]->agaw;
3374 i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
3380 static struct dmar_domain *iommu_alloc_vm_domain(void)
3382 struct dmar_domain *domain;
3384 domain = alloc_domain_mem();
3388 domain->id = vm_domid++;
3389 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3390 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3395 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3399 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3400 spin_lock_init(&domain->iommu_lock);
3402 domain_reserve_special_ranges(domain);
3404 /* calculate AGAW */
3405 domain->gaw = guest_width;
3406 adjust_width = guestwidth_to_adjustwidth(guest_width);
3407 domain->agaw = width_to_agaw(adjust_width);
3409 INIT_LIST_HEAD(&domain->devices);
3411 domain->iommu_count = 0;
3412 domain->iommu_coherency = 0;
3413 domain->iommu_snooping = 0;
3414 domain->max_addr = 0;
3416 /* always allocate the top pgd */
3417 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
3420 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3424 static void iommu_free_vm_domain(struct dmar_domain *domain)
3426 unsigned long flags;
3427 struct dmar_drhd_unit *drhd;
3428 struct intel_iommu *iommu;
3430 unsigned long ndomains;
3432 for_each_drhd_unit(drhd) {
3435 iommu = drhd->iommu;
3437 ndomains = cap_ndoms(iommu->cap);
3438 i = find_first_bit(iommu->domain_ids, ndomains);
3439 for (; i < ndomains; ) {
3440 if (iommu->domains[i] == domain) {
3441 spin_lock_irqsave(&iommu->lock, flags);
3442 clear_bit(i, iommu->domain_ids);
3443 iommu->domains[i] = NULL;
3444 spin_unlock_irqrestore(&iommu->lock, flags);
3447 i = find_next_bit(iommu->domain_ids, ndomains, i+1);
3452 static void vm_domain_exit(struct dmar_domain *domain)
3454 /* Domain 0 is reserved, so don't process it */
3458 vm_domain_remove_all_dev_info(domain);
3460 put_iova_domain(&domain->iovad);
3463 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3465 /* free page tables */
3466 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3468 iommu_free_vm_domain(domain);
3469 free_domain_mem(domain);
3472 static int intel_iommu_domain_init(struct iommu_domain *domain)
3474 struct dmar_domain *dmar_domain;
3476 dmar_domain = iommu_alloc_vm_domain();
3479 "intel_iommu_domain_init: dmar_domain == NULL\n");
3482 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3484 "intel_iommu_domain_init() failed\n");
3485 vm_domain_exit(dmar_domain);
3488 domain->priv = dmar_domain;
3493 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3495 struct dmar_domain *dmar_domain = domain->priv;
3497 domain->priv = NULL;
3498 vm_domain_exit(dmar_domain);
3501 static int intel_iommu_attach_device(struct iommu_domain *domain,
3504 struct dmar_domain *dmar_domain = domain->priv;
3505 struct pci_dev *pdev = to_pci_dev(dev);
3506 struct intel_iommu *iommu;
3510 /* normally pdev is not mapped */
3511 if (unlikely(domain_context_mapped(pdev))) {
3512 struct dmar_domain *old_domain;
3514 old_domain = find_domain(pdev);
3516 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
3517 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
3518 domain_remove_one_dev_info(old_domain, pdev);
3520 domain_remove_dev_info(old_domain);
3524 iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3529 /* check if this iommu agaw is sufficient for max mapped address */
3530 addr_width = agaw_to_width(iommu->agaw);
3531 end = DOMAIN_MAX_ADDR(addr_width);
3532 end = end & VTD_PAGE_MASK;
3533 if (end < dmar_domain->max_addr) {
3534 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3535 "sufficient for the mapped address (%llx)\n",
3536 __func__, iommu->agaw, dmar_domain->max_addr);
3540 return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
3543 static void intel_iommu_detach_device(struct iommu_domain *domain,
3546 struct dmar_domain *dmar_domain = domain->priv;
3547 struct pci_dev *pdev = to_pci_dev(dev);
3549 domain_remove_one_dev_info(dmar_domain, pdev);
3552 static int intel_iommu_map_range(struct iommu_domain *domain,
3553 unsigned long iova, phys_addr_t hpa,
3554 size_t size, int iommu_prot)
3556 struct dmar_domain *dmar_domain = domain->priv;
3562 if (iommu_prot & IOMMU_READ)
3563 prot |= DMA_PTE_READ;
3564 if (iommu_prot & IOMMU_WRITE)
3565 prot |= DMA_PTE_WRITE;
3566 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3567 prot |= DMA_PTE_SNP;
3569 max_addr = iova + size;
3570 if (dmar_domain->max_addr < max_addr) {
3574 /* check if minimum agaw is sufficient for mapped address */
3575 min_agaw = vm_domain_min_agaw(dmar_domain);
3576 addr_width = agaw_to_width(min_agaw);
3577 end = DOMAIN_MAX_ADDR(addr_width);
3578 end = end & VTD_PAGE_MASK;
3579 if (end < max_addr) {
3580 printk(KERN_ERR "%s: iommu agaw (%d) is not "
3581 "sufficient for the mapped address (%llx)\n",
3582 __func__, min_agaw, max_addr);
3585 dmar_domain->max_addr = max_addr;
3587 /* Round up size to next multiple of PAGE_SIZE, if it and
3588 the low bits of hpa would take us onto the next page */
3589 size = aligned_nrpages(hpa, size);
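/*
 * e.g. (assuming 4KB pages) hpa offset 0x800 with size 0x1000 crosses a
 * page boundary, so aligned_nrpages() returns 2.
 */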
3590 ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
3591 hpa >> VTD_PAGE_SHIFT, size, prot);
3595 static void intel_iommu_unmap_range(struct iommu_domain *domain,
3596 unsigned long iova, size_t size)
3598 struct dmar_domain *dmar_domain = domain->priv;
3603 dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3604 (iova + size - 1) >> VTD_PAGE_SHIFT);
3606 if (dmar_domain->max_addr == iova + size)
3607 dmar_domain->max_addr = iova;
3610 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3613 struct dmar_domain *dmar_domain = domain->priv;
3614 struct dma_pte *pte;
3617 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
3619 phys = dma_pte_addr(pte);
3624 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3627 struct dmar_domain *dmar_domain = domain->priv;
3629 if (cap == IOMMU_CAP_CACHE_COHERENCY)
3630 return dmar_domain->iommu_snooping;
3635 static struct iommu_ops intel_iommu_ops = {
3636 .domain_init = intel_iommu_domain_init,
3637 .domain_destroy = intel_iommu_domain_destroy,
3638 .attach_dev = intel_iommu_attach_device,
3639 .detach_dev = intel_iommu_detach_device,
3640 .map = intel_iommu_map_range,
3641 .unmap = intel_iommu_unmap_range,
3642 .iova_to_phys = intel_iommu_iova_to_phys,
3643 .domain_has_cap = intel_iommu_domain_has_cap,
3646 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3649 * Mobile 4 Series Chipset neglects to set RWBF capability, but needs it.
3652 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
3656 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);