drivers/iommu/intel-iommu.c
1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  * Author: Fenghua Yu <fenghua.yu@intel.com>
22  */
23
24 #include <linux/init.h>
25 #include <linux/bitmap.h>
26 #include <linux/debugfs.h>
27 #include <linux/export.h>
28 #include <linux/slab.h>
29 #include <linux/irq.h>
30 #include <linux/interrupt.h>
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/dmar.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/mempool.h>
36 #include <linux/timer.h>
37 #include <linux/iova.h>
38 #include <linux/iommu.h>
39 #include <linux/intel-iommu.h>
40 #include <linux/syscore_ops.h>
41 #include <linux/tboot.h>
42 #include <linux/dmi.h>
43 #include <linux/pci-ats.h>
44 #include <linux/memblock.h>
45 #include <asm/irq_remapping.h>
46 #include <asm/cacheflush.h>
47 #include <asm/iommu.h>
48
49 #include "irq_remapping.h"
50 #include "pci.h"
51
52 #define ROOT_SIZE               VTD_PAGE_SIZE
53 #define CONTEXT_SIZE            VTD_PAGE_SIZE
54
55 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
57 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
58
59 #define IOAPIC_RANGE_START      (0xfee00000)
60 #define IOAPIC_RANGE_END        (0xfeefffff)
61 #define IOVA_START_ADDR         (0x1000)
62
63 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
64
65 #define MAX_AGAW_WIDTH 64
66
67 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
68 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
69
70 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
71    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
72 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
73                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
74 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
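
/*
 * For example, assuming the usual VTD_PAGE_SHIFT of 12 and the default
 * 48-bit guest address width:
 *   __DOMAIN_MAX_PFN(48) = (1ULL << 36) - 1
 *   DOMAIN_MAX_ADDR(48)  = ((1ULL << 36) - 1) << 12, i.e. 256TiB - 4KiB
 * On 32-bit kernels DOMAIN_MAX_PFN() additionally clamps the result to
 * ULONG_MAX so PFNs still fit in an unsigned long.
 */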
75
76 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
77 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
78 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
79
80 /* page table handling */
81 #define LEVEL_STRIDE            (9)
82 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
83
84 /*
85  * This bitmap is used to advertise the page sizes our hardware supports
86  * to the IOMMU core, which will then use this information to split
87  * physically contiguous memory regions it is mapping into page sizes
88  * that we support.
89  *
90  * Traditionally the IOMMU core just handed us the mappings directly,
91  * after making sure the size is an order of a 4KiB page and that the
92  * mapping has natural alignment.
93  *
94  * To retain this behavior, we currently advertise that we support
95  * all page sizes that are an order of 4KiB.
96  *
97  * If at some point we'd like to utilize the IOMMU core's new behavior,
98  * we could change this to advertise the real page sizes we support.
99  */
100 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
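
/*
 * ~0xFFFUL leaves every bit from 12 upwards set; since bit N in this
 * bitmap means "pages of size 2^N are supported", this advertises
 * 4KiB, 8KiB, 16KiB, ... i.e. every power-of-two size that is a
 * multiple of 4KiB, matching the behaviour described above.
 */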
101
102 static inline int agaw_to_level(int agaw)
103 {
104         return agaw + 2;
105 }
106
107 static inline int agaw_to_width(int agaw)
108 {
109         return 30 + agaw * LEVEL_STRIDE;
110 }
111
112 static inline int width_to_agaw(int width)
113 {
114         return (width - 30) / LEVEL_STRIDE;
115 }
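
/*
 * For example, with LEVEL_STRIDE == 9: agaw 1 corresponds to a 39-bit
 * address width and a 3-level page table, while agaw 2 corresponds to
 * the default 48-bit width and a 4-level table (agaw_to_width(2) == 48,
 * agaw_to_level(2) == 4, width_to_agaw(48) == 2).
 */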
116
117 static inline unsigned int level_to_offset_bits(int level)
118 {
119         return (level - 1) * LEVEL_STRIDE;
120 }
121
122 static inline int pfn_level_offset(unsigned long pfn, int level)
123 {
124         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
125 }
126
127 static inline unsigned long level_mask(int level)
128 {
129         return -1UL << level_to_offset_bits(level);
130 }
131
132 static inline unsigned long level_size(int level)
133 {
134         return 1UL << level_to_offset_bits(level);
135 }
136
137 static inline unsigned long align_to_level(unsigned long pfn, int level)
138 {
139         return (pfn + level_size(level) - 1) & level_mask(level);
140 }
141
142 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
143 {
144         return  1 << ((lvl - 1) * LEVEL_STRIDE);
145 }
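
/*
 * A worked example for level 2 (the 2MiB level when superpages are in
 * use): level_to_offset_bits(2) == 9, so pfn_level_offset(pfn, 2)
 * extracts bits 9-17 of the VT-d pfn; level_size(2) == 512 and
 * lvl_to_nr_pages(2) == 512, i.e. 2MiB worth of 4KiB VT-d pages.
 */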
146
147 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
148    are never going to work. */
149 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
150 {
151         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
152 }
153
154 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
155 {
156         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
157 }
158 static inline unsigned long page_to_dma_pfn(struct page *pg)
159 {
160         return mm_to_dma_pfn(page_to_pfn(pg));
161 }
162 static inline unsigned long virt_to_dma_pfn(void *p)
163 {
164         return page_to_dma_pfn(virt_to_page(p));
165 }
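
/*
 * On x86 PAGE_SHIFT and VTD_PAGE_SHIFT are both 12, so these
 * conversions are identity operations; the shifts only matter on
 * configurations whose MM page size is larger than 4KiB.
 */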
166
167 /* global iommu list, set NULL for ignored DMAR units */
168 static struct intel_iommu **g_iommus;
169
170 static void __init check_tylersburg_isoch(void);
171 static int rwbf_quirk;
172
173 /*
174  * set to 1 to panic the kernel if VT-d cannot be enabled successfully
175  * (used when the kernel is launched with TXT)
176  */
177 static int force_on = 0;
178
179 /*
180  * 0: Present
181  * 1-11: Reserved
182  * 12-63: Context Ptr (12 - (haw-1))
183  * 64-127: Reserved
184  */
185 struct root_entry {
186         u64     val;
187         u64     rsvd1;
188 };
189 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
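
/*
 * With 4KiB pages and 16-byte root entries this works out to 256 root
 * entries, one per PCI bus number; each present entry points to a 4KiB
 * context table holding one context_entry per devfn on that bus.
 */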
190 static inline bool root_present(struct root_entry *root)
191 {
192         return (root->val & 1);
193 }
194 static inline void set_root_present(struct root_entry *root)
195 {
196         root->val |= 1;
197 }
198 static inline void set_root_value(struct root_entry *root, unsigned long value)
199 {
200         root->val |= value & VTD_PAGE_MASK;
201 }
202
203 static inline struct context_entry *
204 get_context_addr_from_root(struct root_entry *root)
205 {
206         return (struct context_entry *)
207                 (root_present(root)?phys_to_virt(
208                 root->val & VTD_PAGE_MASK) :
209                 NULL);
210 }
211
212 /*
213  * low 64 bits:
214  * 0: present
215  * 1: fault processing disable
216  * 2-3: translation type
217  * 12-63: address space root
218  * high 64 bits:
219  * 0-2: address width
220  * 3-6: aval
221  * 8-23: domain id
222  */
223 struct context_entry {
224         u64 lo;
225         u64 hi;
226 };
227
228 static inline bool context_present(struct context_entry *context)
229 {
230         return (context->lo & 1);
231 }
232 static inline void context_set_present(struct context_entry *context)
233 {
234         context->lo |= 1;
235 }
236
237 static inline void context_set_fault_enable(struct context_entry *context)
238 {
239         context->lo &= (((u64)-1) << 2) | 1;
240 }
241
242 static inline void context_set_translation_type(struct context_entry *context,
243                                                 unsigned long value)
244 {
245         context->lo &= (((u64)-1) << 4) | 3;
246         context->lo |= (value & 3) << 2;
247 }
248
249 static inline void context_set_address_root(struct context_entry *context,
250                                             unsigned long value)
251 {
252         context->lo |= value & VTD_PAGE_MASK;
253 }
254
255 static inline void context_set_address_width(struct context_entry *context,
256                                              unsigned long value)
257 {
258         context->hi |= value & 7;
259 }
260
261 static inline void context_set_domain_id(struct context_entry *context,
262                                          unsigned long value)
263 {
264         context->hi |= (value & ((1 << 16) - 1)) << 8;
265 }
266
267 static inline void context_clear_entry(struct context_entry *context)
268 {
269         context->lo = 0;
270         context->hi = 0;
271 }
272
273 /*
274  * 0: readable
275  * 1: writable
276  * 2-6: reserved
277  * 7: super page
278  * 8-10: available
279  * 11: snoop behavior
280  * 12-63: Host physical address
281  */
282 struct dma_pte {
283         u64 val;
284 };
285
286 static inline void dma_clear_pte(struct dma_pte *pte)
287 {
288         pte->val = 0;
289 }
290
291 static inline void dma_set_pte_readable(struct dma_pte *pte)
292 {
293         pte->val |= DMA_PTE_READ;
294 }
295
296 static inline void dma_set_pte_writable(struct dma_pte *pte)
297 {
298         pte->val |= DMA_PTE_WRITE;
299 }
300
301 static inline void dma_set_pte_snp(struct dma_pte *pte)
302 {
303         pte->val |= DMA_PTE_SNP;
304 }
305
306 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
307 {
308         pte->val = (pte->val & ~3) | (prot & 3);
309 }
310
311 static inline u64 dma_pte_addr(struct dma_pte *pte)
312 {
313 #ifdef CONFIG_64BIT
314         return pte->val & VTD_PAGE_MASK;
315 #else
316         /* Must have a full atomic 64-bit read */
317         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
318 #endif
319 }
320
321 static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
322 {
323         pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
324 }
325
326 static inline bool dma_pte_present(struct dma_pte *pte)
327 {
328         return (pte->val & 3) != 0;
329 }
330
331 static inline bool dma_pte_superpage(struct dma_pte *pte)
332 {
333         return (pte->val & (1 << 7));
334 }
335
336 static inline int first_pte_in_page(struct dma_pte *pte)
337 {
338         return !((unsigned long)pte & ~VTD_PAGE_MASK);
339 }
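
/*
 * With 4KiB pages and 8-byte PTEs a page-table page holds 512 entries,
 * so first_pte_in_page() is true exactly when a PTE pointer has wrapped
 * to the start of a new page-table page; the clear and free loops below
 * rely on this to flush one page-table page worth of PTEs at a time.
 */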
340
341 /*
342  * This domain is a static identity mapping domain.
343  *      1. This domain creates a static 1:1 mapping to all usable memory.
344  *      2. It maps to each iommu if successful.
345  *      3. Each iommu maps to this domain if successful.
346  */
347 static struct dmar_domain *si_domain;
348 static int hw_pass_through = 1;
349
350 /* devices under the same p2p bridge are owned in one domain */
351 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
352
353 /* domain represents a virtual machine; more than one device
354  * across iommus may be owned in one domain, e.g. kvm guest.
355  */
356 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 1)
357
358 /* si_domain contains multiple devices */
359 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 2)
360
361 /* define the limit of IOMMUs supported in each domain */
362 #ifdef  CONFIG_X86
363 # define        IOMMU_UNITS_SUPPORTED   MAX_IO_APICS
364 #else
365 # define        IOMMU_UNITS_SUPPORTED   64
366 #endif
367
368 struct dmar_domain {
369         int     id;                     /* domain id */
370         int     nid;                    /* node id */
371         DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
372                                         /* bitmap of iommus this domain uses*/
373
374         struct list_head devices;       /* all devices' list */
375         struct iova_domain iovad;       /* iova's that belong to this domain */
376
377         struct dma_pte  *pgd;           /* virtual address */
378         int             gaw;            /* max guest address width */
379
380         /* adjusted guest address width, 0 is level 2 30-bit */
381         int             agaw;
382
383         int             flags;          /* flags to find out type of domain */
384
385         int             iommu_coherency;/* indicate coherency of iommu access */
386         int             iommu_snooping; /* indicate snooping control feature*/
387         int             iommu_count;    /* reference count of iommu */
388         int             iommu_superpage;/* Level of superpages supported:
389                                            0 == 4KiB (no superpages), 1 == 2MiB,
390                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
391         spinlock_t      iommu_lock;     /* protect iommu set in domain */
392         u64             max_addr;       /* maximum mapped address */
393 };
394
395 /* PCI domain-device relationship */
396 struct device_domain_info {
397         struct list_head link;  /* link to domain siblings */
398         struct list_head global; /* link to global list */
399         int segment;            /* PCI domain */
400         u8 bus;                 /* PCI bus number */
401         u8 devfn;               /* PCI devfn number */
402         struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
403         struct intel_iommu *iommu; /* IOMMU used by this device */
404         struct dmar_domain *domain; /* pointer to domain */
405 };
406
407 static void flush_unmaps_timeout(unsigned long data);
408
409 DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
410
411 #define HIGH_WATER_MARK 250
412 struct deferred_flush_tables {
413         int next;
414         struct iova *iova[HIGH_WATER_MARK];
415         struct dmar_domain *domain[HIGH_WATER_MARK];
416 };
417
418 static struct deferred_flush_tables *deferred_flush;
419
420 /* number of IOMMUs, used to size g_iommus and the per-domain iommu bitmaps */
421 static int g_num_of_iommus;
422
423 static DEFINE_SPINLOCK(async_umap_flush_lock);
424 static LIST_HEAD(unmaps_to_do);
425
426 static int timer_on;
427 static long list_size;
428
429 static void domain_remove_dev_info(struct dmar_domain *domain);
430
431 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
432 int dmar_disabled = 0;
433 #else
434 int dmar_disabled = 1;
435 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
436
437 int intel_iommu_enabled = 0;
438 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
439
440 static int dmar_map_gfx = 1;
441 static int dmar_forcedac;
442 static int intel_iommu_strict;
443 static int intel_iommu_superpage = 1;
444
445 int intel_iommu_gfx_mapped;
446 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
447
448 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
449 static DEFINE_SPINLOCK(device_domain_lock);
450 static LIST_HEAD(device_domain_list);
451
452 static struct iommu_ops intel_iommu_ops;
453
454 static int __init intel_iommu_setup(char *str)
455 {
456         if (!str)
457                 return -EINVAL;
458         while (*str) {
459                 if (!strncmp(str, "on", 2)) {
460                         dmar_disabled = 0;
461                         printk(KERN_INFO "Intel-IOMMU: enabled\n");
462                 } else if (!strncmp(str, "off", 3)) {
463                         dmar_disabled = 1;
464                         printk(KERN_INFO "Intel-IOMMU: disabled\n");
465                 } else if (!strncmp(str, "igfx_off", 8)) {
466                         dmar_map_gfx = 0;
467                         printk(KERN_INFO
468                                 "Intel-IOMMU: disable GFX device mapping\n");
469                 } else if (!strncmp(str, "forcedac", 8)) {
470                         printk(KERN_INFO
471                                 "Intel-IOMMU: Forcing DAC for PCI devices\n");
472                         dmar_forcedac = 1;
473                 } else if (!strncmp(str, "strict", 6)) {
474                         printk(KERN_INFO
475                                 "Intel-IOMMU: disable batched IOTLB flush\n");
476                         intel_iommu_strict = 1;
477                 } else if (!strncmp(str, "sp_off", 6)) {
478                         printk(KERN_INFO
479                                 "Intel-IOMMU: disable supported super page\n");
480                         intel_iommu_superpage = 0;
481                 }
482
483                 str += strcspn(str, ",");
484                 while (*str == ',')
485                         str++;
486         }
487         return 0;
488 }
489 __setup("intel_iommu=", intel_iommu_setup);
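
/*
 * The options above are comma-separated in a single kernel parameter,
 * e.g. booting with "intel_iommu=on,strict,sp_off" enables the IOMMU,
 * disables batched IOTLB flushing and turns off superpage support.
 */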
490
491 static struct kmem_cache *iommu_domain_cache;
492 static struct kmem_cache *iommu_devinfo_cache;
493 static struct kmem_cache *iommu_iova_cache;
494
495 static inline void *alloc_pgtable_page(int node)
496 {
497         struct page *page;
498         void *vaddr = NULL;
499
500         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
501         if (page)
502                 vaddr = page_address(page);
503         return vaddr;
504 }
505
506 static inline void free_pgtable_page(void *vaddr)
507 {
508         free_page((unsigned long)vaddr);
509 }
510
511 static inline void *alloc_domain_mem(void)
512 {
513         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
514 }
515
516 static void free_domain_mem(void *vaddr)
517 {
518         kmem_cache_free(iommu_domain_cache, vaddr);
519 }
520
521 static inline void *alloc_devinfo_mem(void)
522 {
523         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
524 }
525
526 static inline void free_devinfo_mem(void *vaddr)
527 {
528         kmem_cache_free(iommu_devinfo_cache, vaddr);
529 }
530
531 struct iova *alloc_iova_mem(void)
532 {
533         return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
534 }
535
536 void free_iova_mem(struct iova *iova)
537 {
538         kmem_cache_free(iommu_iova_cache, iova);
539 }
540
541
542 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
543 {
544         unsigned long sagaw;
545         int agaw = -1;
546
547         sagaw = cap_sagaw(iommu->cap);
548         for (agaw = width_to_agaw(max_gaw);
549              agaw >= 0; agaw--) {
550                 if (test_bit(agaw, &sagaw))
551                         break;
552         }
553
554         return agaw;
555 }
556
557 /*
558  * Calculate max SAGAW for each iommu.
559  */
560 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
561 {
562         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
563 }
564
565 /*
566  * calculate agaw for each iommu.
567  * "SAGAW" may be different across iommus, use a default agaw, and
568  * get a supported less agaw for iommus that don't support the default agaw.
569  */
570 int iommu_calculate_agaw(struct intel_iommu *iommu)
571 {
572         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
573 }
574
575 /* This function only returns a single iommu in a domain */
576 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
577 {
578         int iommu_id;
579
580         /* si_domain and vm domain should not get here. */
581         BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
582         BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
583
584         iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
585         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
586                 return NULL;
587
588         return g_iommus[iommu_id];
589 }
590
591 static void domain_update_iommu_coherency(struct dmar_domain *domain)
592 {
593         int i;
594
595         i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
596
597         domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
598
599         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
600                 if (!ecap_coherent(g_iommus[i]->ecap)) {
601                         domain->iommu_coherency = 0;
602                         break;
603                 }
604         }
605 }
606
607 static void domain_update_iommu_snooping(struct dmar_domain *domain)
608 {
609         int i;
610
611         domain->iommu_snooping = 1;
612
613         for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
614                 if (!ecap_sc_support(g_iommus[i]->ecap)) {
615                         domain->iommu_snooping = 0;
616                         break;
617                 }
618         }
619 }
620
621 static void domain_update_iommu_superpage(struct dmar_domain *domain)
622 {
623         struct dmar_drhd_unit *drhd;
624         struct intel_iommu *iommu = NULL;
625         int mask = 0xf;
626
627         if (!intel_iommu_superpage) {
628                 domain->iommu_superpage = 0;
629                 return;
630         }
631
632         /* set iommu_superpage to the smallest common denominator */
633         for_each_active_iommu(iommu, drhd) {
634                 mask &= cap_super_page_val(iommu->cap);
635                 if (!mask) {
636                         break;
637                 }
638         }
639         domain->iommu_superpage = fls(mask);
640 }
641
642 /* Some capabilities may be different across iommus */
643 static void domain_update_iommu_cap(struct dmar_domain *domain)
644 {
645         domain_update_iommu_coherency(domain);
646         domain_update_iommu_snooping(domain);
647         domain_update_iommu_superpage(domain);
648 }
649
650 static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
651 {
652         struct dmar_drhd_unit *drhd = NULL;
653         int i;
654
655         for_each_drhd_unit(drhd) {
656                 if (drhd->ignored)
657                         continue;
658                 if (segment != drhd->segment)
659                         continue;
660
661                 for (i = 0; i < drhd->devices_cnt; i++) {
662                         if (drhd->devices[i] &&
663                             drhd->devices[i]->bus->number == bus &&
664                             drhd->devices[i]->devfn == devfn)
665                                 return drhd->iommu;
666                         if (drhd->devices[i] &&
667                             drhd->devices[i]->subordinate &&
668                             drhd->devices[i]->subordinate->number <= bus &&
669                             drhd->devices[i]->subordinate->busn_res.end >= bus)
670                                 return drhd->iommu;
671                 }
672
673                 if (drhd->include_all)
674                         return drhd->iommu;
675         }
676
677         return NULL;
678 }
679
680 static void domain_flush_cache(struct dmar_domain *domain,
681                                void *addr, int size)
682 {
683         if (!domain->iommu_coherency)
684                 clflush_cache_range(addr, size);
685 }
686
687 /* Gets context entry for a given bus and devfn */
688 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
689                 u8 bus, u8 devfn)
690 {
691         struct root_entry *root;
692         struct context_entry *context;
693         unsigned long phy_addr;
694         unsigned long flags;
695
696         spin_lock_irqsave(&iommu->lock, flags);
697         root = &iommu->root_entry[bus];
698         context = get_context_addr_from_root(root);
699         if (!context) {
700                 context = (struct context_entry *)
701                                 alloc_pgtable_page(iommu->node);
702                 if (!context) {
703                         spin_unlock_irqrestore(&iommu->lock, flags);
704                         return NULL;
705                 }
706                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
707                 phy_addr = virt_to_phys((void *)context);
708                 set_root_value(root, phy_addr);
709                 set_root_present(root);
710                 __iommu_flush_cache(iommu, root, sizeof(*root));
711         }
712         spin_unlock_irqrestore(&iommu->lock, flags);
713         return &context[devfn];
714 }
715
716 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
717 {
718         struct root_entry *root;
719         struct context_entry *context;
720         int ret;
721         unsigned long flags;
722
723         spin_lock_irqsave(&iommu->lock, flags);
724         root = &iommu->root_entry[bus];
725         context = get_context_addr_from_root(root);
726         if (!context) {
727                 ret = 0;
728                 goto out;
729         }
730         ret = context_present(&context[devfn]);
731 out:
732         spin_unlock_irqrestore(&iommu->lock, flags);
733         return ret;
734 }
735
736 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
737 {
738         struct root_entry *root;
739         struct context_entry *context;
740         unsigned long flags;
741
742         spin_lock_irqsave(&iommu->lock, flags);
743         root = &iommu->root_entry[bus];
744         context = get_context_addr_from_root(root);
745         if (context) {
746                 context_clear_entry(&context[devfn]);
747                 __iommu_flush_cache(iommu, &context[devfn],
748                         sizeof(*context));
749         }
750         spin_unlock_irqrestore(&iommu->lock, flags);
751 }
752
753 static void free_context_table(struct intel_iommu *iommu)
754 {
755         struct root_entry *root;
756         int i;
757         unsigned long flags;
758         struct context_entry *context;
759
760         spin_lock_irqsave(&iommu->lock, flags);
761         if (!iommu->root_entry) {
762                 goto out;
763         }
764         for (i = 0; i < ROOT_ENTRY_NR; i++) {
765                 root = &iommu->root_entry[i];
766                 context = get_context_addr_from_root(root);
767                 if (context)
768                         free_pgtable_page(context);
769         }
770         free_pgtable_page(iommu->root_entry);
771         iommu->root_entry = NULL;
772 out:
773         spin_unlock_irqrestore(&iommu->lock, flags);
774 }
775
776 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
777                                       unsigned long pfn, int target_level)
778 {
779         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
780         struct dma_pte *parent, *pte = NULL;
781         int level = agaw_to_level(domain->agaw);
782         int offset;
783
784         BUG_ON(!domain->pgd);
785
786         if (addr_width < BITS_PER_LONG && pfn >> addr_width)
787                 /* Address beyond IOMMU's addressing capabilities. */
788                 return NULL;
789
790         parent = domain->pgd;
791
792         while (level > 0) {
793                 void *tmp_page;
794
795                 offset = pfn_level_offset(pfn, level);
796                 pte = &parent[offset];
797                 if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
798                         break;
799                 if (level == target_level)
800                         break;
801
802                 if (!dma_pte_present(pte)) {
803                         uint64_t pteval;
804
805                         tmp_page = alloc_pgtable_page(domain->nid);
806
807                         if (!tmp_page)
808                                 return NULL;
809
810                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
811                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
812                         if (cmpxchg64(&pte->val, 0ULL, pteval)) {
813                                 /* Someone else set it while we were thinking; use theirs. */
814                                 free_pgtable_page(tmp_page);
815                         } else {
816                                 dma_pte_addr(pte);
817                                 domain_flush_cache(domain, pte, sizeof(*pte));
818                         }
819                 }
820                 parent = phys_to_virt(dma_pte_addr(pte));
821                 level--;
822         }
823
824         return pte;
825 }
826
827
828 /* return address's pte at specific level */
829 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
830                                          unsigned long pfn,
831                                          int level, int *large_page)
832 {
833         struct dma_pte *parent, *pte = NULL;
834         int total = agaw_to_level(domain->agaw);
835         int offset;
836
837         parent = domain->pgd;
838         while (level <= total) {
839                 offset = pfn_level_offset(pfn, total);
840                 pte = &parent[offset];
841                 if (level == total)
842                         return pte;
843
844                 if (!dma_pte_present(pte)) {
845                         *large_page = total;
846                         break;
847                 }
848
849                 if (pte->val & DMA_PTE_LARGE_PAGE) {
850                         *large_page = total;
851                         return pte;
852                 }
853
854                 parent = phys_to_virt(dma_pte_addr(pte));
855                 total--;
856         }
857         return NULL;
858 }
859
860 /* clear last level pte; a tlb flush should follow */
861 static int dma_pte_clear_range(struct dmar_domain *domain,
862                                 unsigned long start_pfn,
863                                 unsigned long last_pfn)
864 {
865         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
866         unsigned int large_page = 1;
867         struct dma_pte *first_pte, *pte;
868         int order;
869
870         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
871         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
872         BUG_ON(start_pfn > last_pfn);
873
874         /* we don't need lock here; nobody else touches the iova range */
875         do {
876                 large_page = 1;
877                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
878                 if (!pte) {
879                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
880                         continue;
881                 }
882                 do {
883                         dma_clear_pte(pte);
884                         start_pfn += lvl_to_nr_pages(large_page);
885                         pte++;
886                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
887
888                 domain_flush_cache(domain, first_pte,
889                                    (void *)pte - (void *)first_pte);
890
891         } while (start_pfn && start_pfn <= last_pfn);
892
893         order = (large_page - 1) * 9;
894         return order;
895 }
896
897 static void dma_pte_free_level(struct dmar_domain *domain, int level,
898                                struct dma_pte *pte, unsigned long pfn,
899                                unsigned long start_pfn, unsigned long last_pfn)
900 {
901         pfn = max(start_pfn, pfn);
902         pte = &pte[pfn_level_offset(pfn, level)];
903
904         do {
905                 unsigned long level_pfn;
906                 struct dma_pte *level_pte;
907
908                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
909                         goto next;
910
911                 level_pfn = pfn & level_mask(level - 1);
912                 level_pte = phys_to_virt(dma_pte_addr(pte));
913
914                 if (level > 2)
915                         dma_pte_free_level(domain, level - 1, level_pte,
916                                            level_pfn, start_pfn, last_pfn);
917
918                 /* If range covers entire pagetable, free it */
919                 if (!(start_pfn > level_pfn ||
920                       last_pfn < level_pfn + level_size(level) - 1)) {
921                         dma_clear_pte(pte);
922                         domain_flush_cache(domain, pte, sizeof(*pte));
923                         free_pgtable_page(level_pte);
924                 }
925 next:
926                 pfn += level_size(level);
927         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
928 }
929
930 /* free page table pages. last level pte should already be cleared */
931 static void dma_pte_free_pagetable(struct dmar_domain *domain,
932                                    unsigned long start_pfn,
933                                    unsigned long last_pfn)
934 {
935         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
936
937         BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
938         BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
939         BUG_ON(start_pfn > last_pfn);
940
941         /* We don't need lock here; nobody else touches the iova range */
942         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
943                            domain->pgd, 0, start_pfn, last_pfn);
944
945         /* free pgd */
946         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
947                 free_pgtable_page(domain->pgd);
948                 domain->pgd = NULL;
949         }
950 }
951
952 /* iommu handling */
953 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
954 {
955         struct root_entry *root;
956         unsigned long flags;
957
958         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
959         if (!root)
960                 return -ENOMEM;
961
962         __iommu_flush_cache(iommu, root, ROOT_SIZE);
963
964         spin_lock_irqsave(&iommu->lock, flags);
965         iommu->root_entry = root;
966         spin_unlock_irqrestore(&iommu->lock, flags);
967
968         return 0;
969 }
970
971 static void iommu_set_root_entry(struct intel_iommu *iommu)
972 {
973         void *addr;
974         u32 sts;
975         unsigned long flag;
976
977         addr = iommu->root_entry;
978
979         raw_spin_lock_irqsave(&iommu->register_lock, flag);
980         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
981
982         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
983
984         /* Make sure hardware completes it */
985         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
986                       readl, (sts & DMA_GSTS_RTPS), sts);
987
988         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
989 }
990
991 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
992 {
993         u32 val;
994         unsigned long flag;
995
996         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
997                 return;
998
999         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1000         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1001
1002         /* Make sure hardware completes it */
1003         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1004                       readl, (!(val & DMA_GSTS_WBFS)), val);
1005
1006         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1007 }
1008
1009 /* return value determines if we need a write buffer flush */
1010 static void __iommu_flush_context(struct intel_iommu *iommu,
1011                                   u16 did, u16 source_id, u8 function_mask,
1012                                   u64 type)
1013 {
1014         u64 val = 0;
1015         unsigned long flag;
1016
1017         switch (type) {
1018         case DMA_CCMD_GLOBAL_INVL:
1019                 val = DMA_CCMD_GLOBAL_INVL;
1020                 break;
1021         case DMA_CCMD_DOMAIN_INVL:
1022                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1023                 break;
1024         case DMA_CCMD_DEVICE_INVL:
1025                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1026                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1027                 break;
1028         default:
1029                 BUG();
1030         }
1031         val |= DMA_CCMD_ICC;
1032
1033         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1034         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1035
1036         /* Make sure hardware completes it */
1037         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1038                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1039
1040         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1041 }
1042
1043 /* return value determines if we need a write buffer flush */
1044 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1045                                 u64 addr, unsigned int size_order, u64 type)
1046 {
1047         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1048         u64 val = 0, val_iva = 0;
1049         unsigned long flag;
1050
1051         switch (type) {
1052         case DMA_TLB_GLOBAL_FLUSH:
1053                 /* global flush doesn't need to set IVA_REG */
1054                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1055                 break;
1056         case DMA_TLB_DSI_FLUSH:
1057                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1058                 break;
1059         case DMA_TLB_PSI_FLUSH:
1060                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1061                 /* Note: always flush non-leaf currently */
1062                 val_iva = size_order | addr;
1063                 break;
1064         default:
1065                 BUG();
1066         }
1067         /* Note: set drain read/write */
1068 #if 0
1069         /*
1070          * This is probably just to be extra safe. Looks like we can
1071          * ignore it without any impact.
1072          */
1073         if (cap_read_drain(iommu->cap))
1074                 val |= DMA_TLB_READ_DRAIN;
1075 #endif
1076         if (cap_write_drain(iommu->cap))
1077                 val |= DMA_TLB_WRITE_DRAIN;
1078
1079         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1080         /* Note: Only uses first TLB reg currently */
1081         if (val_iva)
1082                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1083         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1084
1085         /* Make sure hardware completes it */
1086         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1087                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1088
1089         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1090
1091         /* check IOTLB invalidation granularity */
1092         if (DMA_TLB_IAIG(val) == 0)
1093                 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
1094         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1095                 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
1096                         (unsigned long long)DMA_TLB_IIRG(type),
1097                         (unsigned long long)DMA_TLB_IAIG(val));
1098 }
1099
1100 static struct device_domain_info *iommu_support_dev_iotlb(
1101         struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
1102 {
1103         int found = 0;
1104         unsigned long flags;
1105         struct device_domain_info *info;
1106         struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
1107
1108         if (!ecap_dev_iotlb_support(iommu->ecap))
1109                 return NULL;
1110
1111         if (!iommu->qi)
1112                 return NULL;
1113
1114         spin_lock_irqsave(&device_domain_lock, flags);
1115         list_for_each_entry(info, &domain->devices, link)
1116                 if (info->bus == bus && info->devfn == devfn) {
1117                         found = 1;
1118                         break;
1119                 }
1120         spin_unlock_irqrestore(&device_domain_lock, flags);
1121
1122         if (!found || !info->dev)
1123                 return NULL;
1124
1125         if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
1126                 return NULL;
1127
1128         if (!dmar_find_matched_atsr_unit(info->dev))
1129                 return NULL;
1130
1131         info->iommu = iommu;
1132
1133         return info;
1134 }
1135
1136 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1137 {
1138         if (!info)
1139                 return;
1140
1141         pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
1142 }
1143
1144 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1145 {
1146         if (!info->dev || !pci_ats_enabled(info->dev))
1147                 return;
1148
1149         pci_disable_ats(info->dev);
1150 }
1151
1152 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1153                                   u64 addr, unsigned mask)
1154 {
1155         u16 sid, qdep;
1156         unsigned long flags;
1157         struct device_domain_info *info;
1158
1159         spin_lock_irqsave(&device_domain_lock, flags);
1160         list_for_each_entry(info, &domain->devices, link) {
1161                 if (!info->dev || !pci_ats_enabled(info->dev))
1162                         continue;
1163
1164                 sid = info->bus << 8 | info->devfn;
1165                 qdep = pci_ats_queue_depth(info->dev);
1166                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1167         }
1168         spin_unlock_irqrestore(&device_domain_lock, flags);
1169 }
1170
1171 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
1172                                   unsigned long pfn, unsigned int pages, int map)
1173 {
1174         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1175         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1176
1177         BUG_ON(pages == 0);
1178
1179         /*
1180          * Fall back to domain-selective flush if there is no PSI support or
1181          * the size is too big.
1182          * PSI requires the page size to be 2^x and the base address to be
1183          * naturally aligned to that size.
1184          */
1185         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1186                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1187                                                 DMA_TLB_DSI_FLUSH);
1188         else
1189                 iommu->flush.flush_iotlb(iommu, did, addr, mask,
1190                                                 DMA_TLB_PSI_FLUSH);
1191
1192         /*
1193          * In caching mode, changes of pages from non-present to present require
1194          * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1195          */
1196         if (!cap_caching_mode(iommu->cap) || !map)
1197                 iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
1198 }
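
/*
 * For example, a request covering 9 pages uses mask == 4 (a 16-page
 * invalidation), since __roundup_pow_of_two(9) == 16; if that mask
 * exceeds cap_max_amask_val() the code above falls back to a
 * domain-selective flush instead.
 */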
1199
1200 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1201 {
1202         u32 pmen;
1203         unsigned long flags;
1204
1205         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1206         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1207         pmen &= ~DMA_PMEN_EPM;
1208         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1209
1210         /* wait for the protected region status bit to clear */
1211         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1212                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1213
1214         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1215 }
1216
1217 static int iommu_enable_translation(struct intel_iommu *iommu)
1218 {
1219         u32 sts;
1220         unsigned long flags;
1221
1222         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1223         iommu->gcmd |= DMA_GCMD_TE;
1224         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1225
1226         /* Make sure hardware completes it */
1227         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1228                       readl, (sts & DMA_GSTS_TES), sts);
1229
1230         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1231         return 0;
1232 }
1233
1234 static int iommu_disable_translation(struct intel_iommu *iommu)
1235 {
1236         u32 sts;
1237         unsigned long flag;
1238
1239         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1240         iommu->gcmd &= ~DMA_GCMD_TE;
1241         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1242
1243         /* Make sure hardware completes it */
1244         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1245                       readl, (!(sts & DMA_GSTS_TES)), sts);
1246
1247         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1248         return 0;
1249 }
1250
1251
1252 static int iommu_init_domains(struct intel_iommu *iommu)
1253 {
1254         unsigned long ndomains;
1255         unsigned long nlongs;
1256
1257         ndomains = cap_ndoms(iommu->cap);
1258         pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
1259                         ndomains);
1260         nlongs = BITS_TO_LONGS(ndomains);
1261
1262         spin_lock_init(&iommu->lock);
1263
1264         /* TBD: there might be 64K domains,
1265          * consider other allocation for future chips
1266          */
1267         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1268         if (!iommu->domain_ids) {
1269                 printk(KERN_ERR "Allocating domain id array failed\n");
1270                 return -ENOMEM;
1271         }
1272         iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
1273                         GFP_KERNEL);
1274         if (!iommu->domains) {
1275                 printk(KERN_ERR "Allocating domain array failed\n");
1276                 return -ENOMEM;
1277         }
1278
1279         /*
1280          * If caching mode is set, then invalid translations are tagged
1281          * with domain id 0. Hence we need to pre-allocate it.
1282          */
1283         if (cap_caching_mode(iommu->cap))
1284                 set_bit(0, iommu->domain_ids);
1285         return 0;
1286 }
1287
1288
1289 static void domain_exit(struct dmar_domain *domain);
1290 static void vm_domain_exit(struct dmar_domain *domain);
1291
1292 void free_dmar_iommu(struct intel_iommu *iommu)
1293 {
1294         struct dmar_domain *domain;
1295         int i;
1296         unsigned long flags;
1297
1298         if ((iommu->domains) && (iommu->domain_ids)) {
1299                 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
1300                         domain = iommu->domains[i];
1301                         clear_bit(i, iommu->domain_ids);
1302
1303                         spin_lock_irqsave(&domain->iommu_lock, flags);
1304                         if (--domain->iommu_count == 0) {
1305                                 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1306                                         vm_domain_exit(domain);
1307                                 else
1308                                         domain_exit(domain);
1309                         }
1310                         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1311                 }
1312         }
1313
1314         if (iommu->gcmd & DMA_GCMD_TE)
1315                 iommu_disable_translation(iommu);
1316
1317         if (iommu->irq) {
1318                 irq_set_handler_data(iommu->irq, NULL);
1319                 /* This will mask the irq */
1320                 free_irq(iommu->irq, iommu);
1321                 destroy_irq(iommu->irq);
1322         }
1323
1324         kfree(iommu->domains);
1325         kfree(iommu->domain_ids);
1326
1327         g_iommus[iommu->seq_id] = NULL;
1328
1329         /* if all iommus are freed, free g_iommus */
1330         for (i = 0; i < g_num_of_iommus; i++) {
1331                 if (g_iommus[i])
1332                         break;
1333         }
1334
1335         if (i == g_num_of_iommus)
1336                 kfree(g_iommus);
1337
1338         /* free context mapping */
1339         free_context_table(iommu);
1340 }
1341
1342 static struct dmar_domain *alloc_domain(void)
1343 {
1344         struct dmar_domain *domain;
1345
1346         domain = alloc_domain_mem();
1347         if (!domain)
1348                 return NULL;
1349
1350         domain->nid = -1;
1351         memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
1352         domain->flags = 0;
1353
1354         return domain;
1355 }
1356
1357 static int iommu_attach_domain(struct dmar_domain *domain,
1358                                struct intel_iommu *iommu)
1359 {
1360         int num;
1361         unsigned long ndomains;
1362         unsigned long flags;
1363
1364         ndomains = cap_ndoms(iommu->cap);
1365
1366         spin_lock_irqsave(&iommu->lock, flags);
1367
1368         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1369         if (num >= ndomains) {
1370                 spin_unlock_irqrestore(&iommu->lock, flags);
1371                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1372                 return -ENOMEM;
1373         }
1374
1375         domain->id = num;
1376         set_bit(num, iommu->domain_ids);
1377         set_bit(iommu->seq_id, domain->iommu_bmp);
1378         iommu->domains[num] = domain;
1379         spin_unlock_irqrestore(&iommu->lock, flags);
1380
1381         return 0;
1382 }
1383
1384 static void iommu_detach_domain(struct dmar_domain *domain,
1385                                 struct intel_iommu *iommu)
1386 {
1387         unsigned long flags;
1388         int num, ndomains;
1389         int found = 0;
1390
1391         spin_lock_irqsave(&iommu->lock, flags);
1392         ndomains = cap_ndoms(iommu->cap);
1393         for_each_set_bit(num, iommu->domain_ids, ndomains) {
1394                 if (iommu->domains[num] == domain) {
1395                         found = 1;
1396                         break;
1397                 }
1398         }
1399
1400         if (found) {
1401                 clear_bit(num, iommu->domain_ids);
1402                 clear_bit(iommu->seq_id, domain->iommu_bmp);
1403                 iommu->domains[num] = NULL;
1404         }
1405         spin_unlock_irqrestore(&iommu->lock, flags);
1406 }
1407
1408 static struct iova_domain reserved_iova_list;
1409 static struct lock_class_key reserved_rbtree_key;
1410
1411 static int dmar_init_reserved_ranges(void)
1412 {
1413         struct pci_dev *pdev = NULL;
1414         struct iova *iova;
1415         int i;
1416
1417         init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1418
1419         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1420                 &reserved_rbtree_key);
1421
1422         /* IOAPIC ranges shouldn't be accessed by DMA */
1423         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1424                 IOVA_PFN(IOAPIC_RANGE_END));
1425         if (!iova) {
1426                 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1427                 return -ENODEV;
1428         }
1429
1430         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1431         for_each_pci_dev(pdev) {
1432                 struct resource *r;
1433
1434                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1435                         r = &pdev->resource[i];
1436                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1437                                 continue;
1438                         iova = reserve_iova(&reserved_iova_list,
1439                                             IOVA_PFN(r->start),
1440                                             IOVA_PFN(r->end));
1441                         if (!iova) {
1442                                 printk(KERN_ERR "Reserve iova failed\n");
1443                                 return -ENODEV;
1444                         }
1445                 }
1446         }
1447         return 0;
1448 }
1449
1450 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1451 {
1452         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1453 }
1454
1455 static inline int guestwidth_to_adjustwidth(int gaw)
1456 {
1457         int agaw;
1458         int r = (gaw - 12) % 9;
1459
1460         if (r == 0)
1461                 agaw = gaw;
1462         else
1463                 agaw = gaw + 9 - r;
1464         if (agaw > 64)
1465                 agaw = 64;
1466         return agaw;
1467 }
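
/*
 * For example, a 48-bit or 39-bit guest width maps to itself (both are
 * 12 plus a multiple of 9), while a 36-bit width is rounded up:
 * (36 - 12) % 9 == 6, so agaw becomes 36 + 9 - 6 == 39.
 */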
1468
1469 static int domain_init(struct dmar_domain *domain, int guest_width)
1470 {
1471         struct intel_iommu *iommu;
1472         int adjust_width, agaw;
1473         unsigned long sagaw;
1474
1475         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1476         spin_lock_init(&domain->iommu_lock);
1477
1478         domain_reserve_special_ranges(domain);
1479
1480         /* calculate AGAW */
1481         iommu = domain_get_iommu(domain);
1482         if (guest_width > cap_mgaw(iommu->cap))
1483                 guest_width = cap_mgaw(iommu->cap);
1484         domain->gaw = guest_width;
1485         adjust_width = guestwidth_to_adjustwidth(guest_width);
1486         agaw = width_to_agaw(adjust_width);
1487         sagaw = cap_sagaw(iommu->cap);
1488         if (!test_bit(agaw, &sagaw)) {
1489                 /* hardware doesn't support it, choose a bigger one */
1490                 pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
1491                 agaw = find_next_bit(&sagaw, 5, agaw);
1492                 if (agaw >= 5)
1493                         return -ENODEV;
1494         }
1495         domain->agaw = agaw;
1496         INIT_LIST_HEAD(&domain->devices);
1497
1498         if (ecap_coherent(iommu->ecap))
1499                 domain->iommu_coherency = 1;
1500         else
1501                 domain->iommu_coherency = 0;
1502
1503         if (ecap_sc_support(iommu->ecap))
1504                 domain->iommu_snooping = 1;
1505         else
1506                 domain->iommu_snooping = 0;
1507
1508         domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1509         domain->iommu_count = 1;
1510         domain->nid = iommu->node;
1511
1512         /* always allocate the top pgd */
1513         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1514         if (!domain->pgd)
1515                 return -ENOMEM;
1516         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1517         return 0;
1518 }
1519
1520 static void domain_exit(struct dmar_domain *domain)
1521 {
1522         struct dmar_drhd_unit *drhd;
1523         struct intel_iommu *iommu;
1524
1525         /* Domain 0 is reserved, so don't process it */
1526         if (!domain)
1527                 return;
1528
1529         /* Flush any lazy unmaps that may reference this domain */
1530         if (!intel_iommu_strict)
1531                 flush_unmaps_timeout(0);
1532
1533         domain_remove_dev_info(domain);
1534         /* destroy iovas */
1535         put_iova_domain(&domain->iovad);
1536
1537         /* clear ptes */
1538         dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1539
1540         /* free page tables */
1541         dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1542
1543         for_each_active_iommu(iommu, drhd)
1544                 if (test_bit(iommu->seq_id, domain->iommu_bmp))
1545                         iommu_detach_domain(domain, iommu);
1546
1547         free_domain_mem(domain);
1548 }
1549
1550 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1551                                  u8 bus, u8 devfn, int translation)
1552 {
1553         struct context_entry *context;
1554         unsigned long flags;
1555         struct intel_iommu *iommu;
1556         struct dma_pte *pgd;
1557         unsigned long num;
1558         unsigned long ndomains;
1559         int id;
1560         int agaw;
1561         struct device_domain_info *info = NULL;
1562
1563         pr_debug("Set context mapping for %02x:%02x.%d\n",
1564                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1565
1566         BUG_ON(!domain->pgd);
1567         BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
1568                translation != CONTEXT_TT_MULTI_LEVEL);
1569
1570         iommu = device_to_iommu(segment, bus, devfn);
1571         if (!iommu)
1572                 return -ENODEV;
1573
1574         context = device_to_context_entry(iommu, bus, devfn);
1575         if (!context)
1576                 return -ENOMEM;
1577         spin_lock_irqsave(&iommu->lock, flags);
1578         if (context_present(context)) {
1579                 spin_unlock_irqrestore(&iommu->lock, flags);
1580                 return 0;
1581         }
1582
1583         id = domain->id;
1584         pgd = domain->pgd;
1585
1586         if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1587             domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1588                 int found = 0;
1589
1590                 /* find an available domain id for this device in iommu */
1591                 ndomains = cap_ndoms(iommu->cap);
1592                 for_each_set_bit(num, iommu->domain_ids, ndomains) {
1593                         if (iommu->domains[num] == domain) {
1594                                 id = num;
1595                                 found = 1;
1596                                 break;
1597                         }
1598                 }
1599
1600                 if (found == 0) {
1601                         num = find_first_zero_bit(iommu->domain_ids, ndomains);
1602                         if (num >= ndomains) {
1603                                 spin_unlock_irqrestore(&iommu->lock, flags);
1604                                 printk(KERN_ERR "IOMMU: no free domain ids\n");
1605                                 return -EFAULT;
1606                         }
1607
1608                         set_bit(num, iommu->domain_ids);
1609                         iommu->domains[num] = domain;
1610                         id = num;
1611                 }
1612
1613                 /* Skip the top levels of the page tables for an
1614                  * iommu which has a smaller agaw than the default.
1615                  * Unnecessary for PT mode.
1616                  */
1617                 if (translation != CONTEXT_TT_PASS_THROUGH) {
1618                         for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1619                                 pgd = phys_to_virt(dma_pte_addr(pgd));
1620                                 if (!dma_pte_present(pgd)) {
1621                                         spin_unlock_irqrestore(&iommu->lock, flags);
1622                                         return -ENOMEM;
1623                                 }
1624                         }
1625                 }
1626         }
1627
1628         context_set_domain_id(context, id);
1629
1630         if (translation != CONTEXT_TT_PASS_THROUGH) {
1631                 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1632                 translation = info ? CONTEXT_TT_DEV_IOTLB :
1633                                      CONTEXT_TT_MULTI_LEVEL;
1634         }
1635         /*
1636          * In pass through mode, AW must be programmed to indicate the largest
1637          * AGAW value supported by hardware. And ASR is ignored by hardware.
1638          */
1639         if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
1640                 context_set_address_width(context, iommu->msagaw);
1641         else {
1642                 context_set_address_root(context, virt_to_phys(pgd));
1643                 context_set_address_width(context, iommu->agaw);
1644         }
1645
1646         context_set_translation_type(context, translation);
1647         context_set_fault_enable(context);
1648         context_set_present(context);
1649         domain_flush_cache(domain, context, sizeof(*context));
1650
1651         /*
1652          * It's a non-present to present mapping. If hardware doesn't cache
1653          * non-present entries, we only need to flush the write-buffer. If it
1654          * _does_ cache non-present entries, then it does so in the special
1655          * domain #0, which we have to flush:
1656          */
1657         if (cap_caching_mode(iommu->cap)) {
1658                 iommu->flush.flush_context(iommu, 0,
1659                                            (((u16)bus) << 8) | devfn,
1660                                            DMA_CCMD_MASK_NOBIT,
1661                                            DMA_CCMD_DEVICE_INVL);
1662                 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1663         } else {
1664                 iommu_flush_write_buffer(iommu);
1665         }
1666         iommu_enable_dev_iotlb(info);
1667         spin_unlock_irqrestore(&iommu->lock, flags);
1668
1669         spin_lock_irqsave(&domain->iommu_lock, flags);
1670         if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
1671                 domain->iommu_count++;
1672                 if (domain->iommu_count == 1)
1673                         domain->nid = iommu->node;
1674                 domain_update_iommu_cap(domain);
1675         }
1676         spin_unlock_irqrestore(&domain->iommu_lock, flags);
1677         return 0;
1678 }
1679
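     /*
      * Map @pdev into @domain's context and, if the device sits behind a
      * PCIe-to-PCI bridge, map every bridge on the path up to (and including)
      * that bridge as well.
      */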
1680 static int
1681 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1682                         int translation)
1683 {
1684         int ret;
1685         struct pci_dev *tmp, *parent;
1686
1687         ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1688                                          pdev->bus->number, pdev->devfn,
1689                                          translation);
1690         if (ret)
1691                 return ret;
1692
1693         /* dependent device mapping */
1694         tmp = pci_find_upstream_pcie_bridge(pdev);
1695         if (!tmp)
1696                 return 0;
1697         /* Secondary interface's bus number and devfn 0 */
1698         parent = pdev->bus->self;
1699         while (parent != tmp) {
1700                 ret = domain_context_mapping_one(domain,
1701                                                  pci_domain_nr(parent->bus),
1702                                                  parent->bus->number,
1703                                                  parent->devfn, translation);
1704                 if (ret)
1705                         return ret;
1706                 parent = parent->bus->self;
1707         }
1708         if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
1709                 return domain_context_mapping_one(domain,
1710                                         pci_domain_nr(tmp->subordinate),
1711                                         tmp->subordinate->number, 0,
1712                                         translation);
1713         else /* this is a legacy PCI bridge */
1714                 return domain_context_mapping_one(domain,
1715                                                   pci_domain_nr(tmp->bus),
1716                                                   tmp->bus->number,
1717                                                   tmp->devfn,
1718                                                   translation);
1719 }
1720
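     /*
      * Check whether @pdev (and every bridge on the path to its upstream
      * PCIe-to-PCI bridge, if any) already has a context-table entry.
      */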
1721 static int domain_context_mapped(struct pci_dev *pdev)
1722 {
1723         int ret;
1724         struct pci_dev *tmp, *parent;
1725         struct intel_iommu *iommu;
1726
1727         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
1728                                 pdev->devfn);
1729         if (!iommu)
1730                 return -ENODEV;
1731
1732         ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn);
1733         if (!ret)
1734                 return ret;
1735         /* dependent device mapping */
1736         tmp = pci_find_upstream_pcie_bridge(pdev);
1737         if (!tmp)
1738                 return ret;
1739         /* Secondary interface's bus number and devfn 0 */
1740         parent = pdev->bus->self;
1741         while (parent != tmp) {
1742                 ret = device_context_mapped(iommu, parent->bus->number,
1743                                             parent->devfn);
1744                 if (!ret)
1745                         return ret;
1746                 parent = parent->bus->self;
1747         }
1748         if (pci_is_pcie(tmp))
1749                 return device_context_mapped(iommu, tmp->subordinate->number,
1750                                              0);
1751         else
1752                 return device_context_mapped(iommu, tmp->bus->number,
1753                                              tmp->devfn);
1754 }
1755
1756 /* Returns a number of VTD pages, but aligned to MM page size */
1757 static inline unsigned long aligned_nrpages(unsigned long host_addr,
1758                                             size_t size)
1759 {
1760         host_addr &= ~PAGE_MASK;
1761         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1762 }
1763
1764 /* Return largest possible superpage level for a given mapping */
1765 static inline int hardware_largepage_caps(struct dmar_domain *domain,
1766                                           unsigned long iov_pfn,
1767                                           unsigned long phy_pfn,
1768                                           unsigned long pages)
1769 {
1770         int support, level = 1;
1771         unsigned long pfnmerge;
1772
1773         support = domain->iommu_superpage;
1774
1775         /* To use a large page, the virtual *and* physical addresses
1776            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
1777            of them will mean we have to use smaller pages. So just
1778            merge them and check both at once. */
1779         pfnmerge = iov_pfn | phy_pfn;
1780
1781         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
1782                 pages >>= VTD_STRIDE_SHIFT;
1783                 if (!pages)
1784                         break;
1785                 pfnmerge >>= VTD_STRIDE_SHIFT;
1786                 level++;
1787                 support--;
1788         }
1789         return level;
1790 }
1791
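     /*
      * Map @nr_pages starting at @iov_pfn, taking the physical pages either
      * from @sg (when non-NULL) or from the contiguous range starting at
      * @phys_pfn.  Superpage PTEs are used whenever alignment and the
      * hardware's superpage support allow it.
      */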
1792 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1793                             struct scatterlist *sg, unsigned long phys_pfn,
1794                             unsigned long nr_pages, int prot)
1795 {
1796         struct dma_pte *first_pte = NULL, *pte = NULL;
1797         phys_addr_t uninitialized_var(pteval);
1798         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1799         unsigned long sg_res;
1800         unsigned int largepage_lvl = 0;
1801         unsigned long lvl_pages = 0;
1802
1803         BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
1804
1805         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1806                 return -EINVAL;
1807
1808         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1809
1810         if (sg)
1811                 sg_res = 0;
1812         else {
1813                 sg_res = nr_pages + 1;
1814                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
1815         }
1816
1817         while (nr_pages > 0) {
1818                 uint64_t tmp;
1819
1820                 if (!sg_res) {
1821                         sg_res = aligned_nrpages(sg->offset, sg->length);
1822                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1823                         sg->dma_length = sg->length;
1824                         pteval = page_to_phys(sg_page(sg)) | prot;
1825                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
1826                 }
1827
1828                 if (!pte) {
1829                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
1830
1831                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
1832                         if (!pte)
1833                                 return -ENOMEM;
1834                         /* It is a large page */
1835                         if (largepage_lvl > 1) {
1836                                 pteval |= DMA_PTE_LARGE_PAGE;
1837                                 /* Ensure that old small page tables are removed to make room
1838                                    for superpage, if they exist. */
1839                                 dma_pte_clear_range(domain, iov_pfn,
1840                                                     iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1841                                 dma_pte_free_pagetable(domain, iov_pfn,
1842                                                        iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
1843                         } else {
1844                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
1845                         }
1846
1847                 }
1848                 /* We don't need a lock here; nobody else
1849                  * touches the iova range.
1850                  */
1851                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
1852                 if (tmp) {
1853                         static int dumps = 5;
1854                         printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
1855                                iov_pfn, tmp, (unsigned long long)pteval);
1856                         if (dumps) {
1857                                 dumps--;
1858                                 debug_dma_dump_mappings(NULL);
1859                         }
1860                         WARN_ON(1);
1861                 }
1862
1863                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
1864
1865                 BUG_ON(nr_pages < lvl_pages);
1866                 BUG_ON(sg_res < lvl_pages);
1867
1868                 nr_pages -= lvl_pages;
1869                 iov_pfn += lvl_pages;
1870                 phys_pfn += lvl_pages;
1871                 pteval += lvl_pages * VTD_PAGE_SIZE;
1872                 sg_res -= lvl_pages;
1873
1874                 /* If the next PTE would be the first in a new page, then we
1875                    need to flush the cache on the entries we've just written.
1876                    And then we'll need to recalculate 'pte', so clear it and
1877                    let it get set again in the if (!pte) block above.
1878
1879                    If we're done (!nr_pages) we need to flush the cache too.
1880
1881                    Also if we've been setting superpages, we may need to
1882                    recalculate 'pte' and switch back to smaller pages for the
1883                    end of the mapping, if the trailing size is not enough to
1884                    use another superpage (i.e. sg_res < lvl_pages). */
1885                 pte++;
1886                 if (!nr_pages || first_pte_in_page(pte) ||
1887                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
1888                         domain_flush_cache(domain, first_pte,
1889                                            (void *)pte - (void *)first_pte);
1890                         pte = NULL;
1891                 }
1892
1893                 if (!sg_res && nr_pages)
1894                         sg = sg_next(sg);
1895         }
1896         return 0;
1897 }
1898
1899 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1900                                     struct scatterlist *sg, unsigned long nr_pages,
1901                                     int prot)
1902 {
1903         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1904 }
1905
1906 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1907                                      unsigned long phys_pfn, unsigned long nr_pages,
1908                                      int prot)
1909 {
1910         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1911 }
1912
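     /*
      * Clear the context-table entry for (bus, devfn) and globally invalidate
      * the context and IOTLB caches on @iommu.
      */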
1913 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1914 {
1915         if (!iommu)
1916                 return;
1917
1918         clear_context_table(iommu, bus, devfn);
1919         iommu->flush.flush_context(iommu, 0, 0, 0,
1920                                            DMA_CCMD_GLOBAL_INVL);
1921         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1922 }
1923
1924 static inline void unlink_domain_info(struct device_domain_info *info)
1925 {
1926         assert_spin_locked(&device_domain_lock);
1927         list_del(&info->link);
1928         list_del(&info->global);
1929         if (info->dev)
1930                 info->dev->dev.archdata.iommu = NULL;
1931 }
1932
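     /*
      * Detach every device currently attached to @domain: unlink its
      * device_domain_info, disable its device IOTLB and tear down its
      * context-table entry.
      */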
1933 static void domain_remove_dev_info(struct dmar_domain *domain)
1934 {
1935         struct device_domain_info *info;
1936         unsigned long flags;
1937         struct intel_iommu *iommu;
1938
1939         spin_lock_irqsave(&device_domain_lock, flags);
1940         while (!list_empty(&domain->devices)) {
1941                 info = list_entry(domain->devices.next,
1942                         struct device_domain_info, link);
1943                 unlink_domain_info(info);
1944                 spin_unlock_irqrestore(&device_domain_lock, flags);
1945
1946                 iommu_disable_dev_iotlb(info);
1947                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
1948                 iommu_detach_dev(iommu, info->bus, info->devfn);
1949                 free_devinfo_mem(info);
1950
1951                 spin_lock_irqsave(&device_domain_lock, flags);
1952         }
1953         spin_unlock_irqrestore(&device_domain_lock, flags);
1954 }
1955
1956 /*
1957  * find_domain
1958  * Note: we use struct pci_dev->dev.archdata.iommu to store the info
1959  */
1960 static struct dmar_domain *
1961 find_domain(struct pci_dev *pdev)
1962 {
1963         struct device_domain_info *info;
1964
1965         /* No lock here, assumes no domain exit in normal case */
1966         info = pdev->dev.archdata.iommu;
1967         if (info)
1968                 return info->domain;
1969         return NULL;
1970 }
1971
1972 /* domain is initialized */
1973 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1974 {
1975         struct dmar_domain *domain, *found = NULL;
1976         struct intel_iommu *iommu;
1977         struct dmar_drhd_unit *drhd;
1978         struct device_domain_info *info, *tmp;
1979         struct pci_dev *dev_tmp;
1980         unsigned long flags;
1981         int bus = 0, devfn = 0;
1982         int segment;
1983         int ret;
1984
1985         domain = find_domain(pdev);
1986         if (domain)
1987                 return domain;
1988
1989         segment = pci_domain_nr(pdev->bus);
1990
1991         dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1992         if (dev_tmp) {
1993                 if (pci_is_pcie(dev_tmp)) {
1994                         bus = dev_tmp->subordinate->number;
1995                         devfn = 0;
1996                 } else {
1997                         bus = dev_tmp->bus->number;
1998                         devfn = dev_tmp->devfn;
1999                 }
2000                 spin_lock_irqsave(&device_domain_lock, flags);
2001                 list_for_each_entry(info, &device_domain_list, global) {
2002                         if (info->segment == segment &&
2003                             info->bus == bus && info->devfn == devfn) {
2004                                 found = info->domain;
2005                                 break;
2006                         }
2007                 }
2008                 spin_unlock_irqrestore(&device_domain_lock, flags);
2009                 /* pcie-pci bridge already has a domain, use it */
2010                 if (found) {
2011                         domain = found;
2012                         goto found_domain;
2013                 }
2014         }
2015
2016         domain = alloc_domain();
2017         if (!domain)
2018                 goto error;
2019
2020         /* Allocate new domain for the device */
2021         drhd = dmar_find_matched_drhd_unit(pdev);
2022         if (!drhd) {
2023                 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2024                         pci_name(pdev));
2025                 free_domain_mem(domain);
2026                 return NULL;
2027         }
2028         iommu = drhd->iommu;
2029
2030         ret = iommu_attach_domain(domain, iommu);
2031         if (ret) {
2032                 free_domain_mem(domain);
2033                 goto error;
2034         }
2035
2036         if (domain_init(domain, gaw)) {
2037                 domain_exit(domain);
2038                 goto error;
2039         }
2040
2041         /* register pcie-to-pci device */
2042         if (dev_tmp) {
2043                 info = alloc_devinfo_mem();
2044                 if (!info) {
2045                         domain_exit(domain);
2046                         goto error;
2047                 }
2048                 info->segment = segment;
2049                 info->bus = bus;
2050                 info->devfn = devfn;
2051                 info->dev = NULL;
2052                 info->domain = domain;
2053                 /* This domain is shared by devices under p2p bridge */
2054                 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
2055
2056                 /* pcie-to-pci bridge already has a domain, use it */
2057                 found = NULL;
2058                 spin_lock_irqsave(&device_domain_lock, flags);
2059                 list_for_each_entry(tmp, &device_domain_list, global) {
2060                         if (tmp->segment == segment &&
2061                             tmp->bus == bus && tmp->devfn == devfn) {
2062                                 found = tmp->domain;
2063                                 break;
2064                         }
2065                 }
2066                 if (found) {
2067                         spin_unlock_irqrestore(&device_domain_lock, flags);
2068                         free_devinfo_mem(info);
2069                         domain_exit(domain);
2070                         domain = found;
2071                 } else {
2072                         list_add(&info->link, &domain->devices);
2073                         list_add(&info->global, &device_domain_list);
2074                         spin_unlock_irqrestore(&device_domain_lock, flags);
2075                 }
2076         }
2077
2078 found_domain:
2079         info = alloc_devinfo_mem();
2080         if (!info)
2081                 goto error;
2082         info->segment = segment;
2083         info->bus = pdev->bus->number;
2084         info->devfn = pdev->devfn;
2085         info->dev = pdev;
2086         info->domain = domain;
2087         spin_lock_irqsave(&device_domain_lock, flags);
2088         /* somebody is fast */
2089         found = find_domain(pdev);
2090         if (found != NULL) {
2091                 spin_unlock_irqrestore(&device_domain_lock, flags);
2092                 if (found != domain) {
2093                         domain_exit(domain);
2094                         domain = found;
2095                 }
2096                 free_devinfo_mem(info);
2097                 return domain;
2098         }
2099         list_add(&info->link, &domain->devices);
2100         list_add(&info->global, &device_domain_list);
2101         pdev->dev.archdata.iommu = info;
2102         spin_unlock_irqrestore(&device_domain_lock, flags);
2103         return domain;
2104 error:
2105         /* recheck it here, maybe others set it */
2106         return find_domain(pdev);
2107 }
2108
2109 static int iommu_identity_mapping;
2110 #define IDENTMAP_ALL            1
2111 #define IDENTMAP_GFX            2
2112 #define IDENTMAP_AZALIA         4
2113
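     /*
      * Reserve the IOVA range [start, end] in @domain and install a 1:1
      * (physical == virtual) mapping for it.
      */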
2114 static int iommu_domain_identity_map(struct dmar_domain *domain,
2115                                      unsigned long long start,
2116                                      unsigned long long end)
2117 {
2118         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2119         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2120
2121         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2122                           dma_to_mm_pfn(last_vpfn))) {
2123                 printk(KERN_ERR "IOMMU: reserve iova failed\n");
2124                 return -ENOMEM;
2125         }
2126
2127         pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
2128                  start, end, domain->id);
2129         /*
2130          * RMRR range might have overlap with physical memory range,
2131          * clear it first
2132          */
2133         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2134
2135         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2136                                   last_vpfn - first_vpfn + 1,
2137                                   DMA_PTE_READ|DMA_PTE_WRITE);
2138 }
2139
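     /*
      * Identity-map [start, end] (an RMRR or ISA region) for @pdev and set up
      * its context entry, allocating a domain for the device if needed.
      */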
2140 static int iommu_prepare_identity_map(struct pci_dev *pdev,
2141                                       unsigned long long start,
2142                                       unsigned long long end)
2143 {
2144         struct dmar_domain *domain;
2145         int ret;
2146
2147         domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2148         if (!domain)
2149                 return -ENOMEM;
2150
2151         /* For _hardware_ passthrough, don't bother. But for software
2152            passthrough, we do it anyway -- it may indicate a memory
2153            range which is reserved in E820 and so didn't get set
2154            up to start with in si_domain */
2155         if (domain == si_domain && hw_pass_through) {
2156                 printk(KERN_INFO "Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2157                        pci_name(pdev), start, end);
2158                 return 0;
2159         }
2160
2161         printk(KERN_INFO
2162                "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2163                pci_name(pdev), start, end);
2164
2165         if (end < start) {
2166                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2167                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2168                         dmi_get_system_info(DMI_BIOS_VENDOR),
2169                         dmi_get_system_info(DMI_BIOS_VERSION),
2170                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2171                 ret = -EIO;
2172                 goto error;
2173         }
2174
2175         if (end >> agaw_to_width(domain->agaw)) {
2176                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2177                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2178                      agaw_to_width(domain->agaw),
2179                      dmi_get_system_info(DMI_BIOS_VENDOR),
2180                      dmi_get_system_info(DMI_BIOS_VERSION),
2181                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2182                 ret = -EIO;
2183                 goto error;
2184         }
2185
2186         ret = iommu_domain_identity_map(domain, start, end);
2187         if (ret)
2188                 goto error;
2189
2190         /* context entry init */
2191         ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
2192         if (ret)
2193                 goto error;
2194
2195         return 0;
2196
2197  error:
2198         domain_exit(domain);
2199         return ret;
2200 }
2201
2202 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2203         struct pci_dev *pdev)
2204 {
2205         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2206                 return 0;
2207         return iommu_prepare_identity_map(pdev, rmrr->base_address,
2208                 rmrr->end_address);
2209 }
2210
2211 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2212 static inline void iommu_prepare_isa(void)
2213 {
2214         struct pci_dev *pdev;
2215         int ret;
2216
2217         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2218         if (!pdev)
2219                 return;
2220
2221         printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
2222         ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
2223
2224         if (ret)
2225                 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
2226                        "floppy might not work\n");
2227
2228 }
2229 #else
2230 static inline void iommu_prepare_isa(void)
2231 {
2232         return;
2233 }
2234 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
2235
2236 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2237
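     /*
      * Create the static identity (si) domain, attach it to every active
      * IOMMU and, unless hardware pass-through is in use, identity-map all
      * usable system memory into it.
      */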
2238 static int __init si_domain_init(int hw)
2239 {
2240         struct dmar_drhd_unit *drhd;
2241         struct intel_iommu *iommu;
2242         int nid, ret = 0;
2243
2244         si_domain = alloc_domain();
2245         if (!si_domain)
2246                 return -EFAULT;
2247
2248         pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2249
2250         for_each_active_iommu(iommu, drhd) {
2251                 ret = iommu_attach_domain(si_domain, iommu);
2252                 if (ret) {
2253                         domain_exit(si_domain);
2254                         return -EFAULT;
2255                 }
2256         }
2257
2258         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2259                 domain_exit(si_domain);
2260                 return -EFAULT;
2261         }
2262
2263         si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
2264
2265         if (hw)
2266                 return 0;
2267
2268         for_each_online_node(nid) {
2269                 unsigned long start_pfn, end_pfn;
2270                 int i;
2271
2272                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2273                         ret = iommu_domain_identity_map(si_domain,
2274                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2275                         if (ret)
2276                                 return ret;
2277                 }
2278         }
2279
2280         return 0;
2281 }
2282
2283 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2284                                           struct pci_dev *pdev);
2285 static int identity_mapping(struct pci_dev *pdev)
2286 {
2287         struct device_domain_info *info;
2288
2289         if (likely(!iommu_identity_mapping))
2290                 return 0;
2291
2292         info = pdev->dev.archdata.iommu;
2293         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2294                 return (info->domain == si_domain);
2295
2296         return 0;
2297 }
2298
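     /*
      * Allocate a device_domain_info for @pdev, attach it to @domain and set
      * up the context mapping; undo the attachment if the mapping fails.
      */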
2299 static int domain_add_dev_info(struct dmar_domain *domain,
2300                                struct pci_dev *pdev,
2301                                int translation)
2302 {
2303         struct device_domain_info *info;
2304         unsigned long flags;
2305         int ret;
2306
2307         info = alloc_devinfo_mem();
2308         if (!info)
2309                 return -ENOMEM;
2310
2311         info->segment = pci_domain_nr(pdev->bus);
2312         info->bus = pdev->bus->number;
2313         info->devfn = pdev->devfn;
2314         info->dev = pdev;
2315         info->domain = domain;
2316
2317         spin_lock_irqsave(&device_domain_lock, flags);
2318         list_add(&info->link, &domain->devices);
2319         list_add(&info->global, &device_domain_list);
2320         pdev->dev.archdata.iommu = info;
2321         spin_unlock_irqrestore(&device_domain_lock, flags);
2322
2323         ret = domain_context_mapping(domain, pdev, translation);
2324         if (ret) {
2325                 spin_lock_irqsave(&device_domain_lock, flags);
2326                 unlink_domain_info(info);
2327                 spin_unlock_irqrestore(&device_domain_lock, flags);
2328                 free_devinfo_mem(info);
2329                 return ret;
2330         }
2331
2332         return 0;
2333 }
2334
2335 static bool device_has_rmrr(struct pci_dev *dev)
2336 {
2337         struct dmar_rmrr_unit *rmrr;
2338         int i;
2339
2340         for_each_rmrr_units(rmrr) {
2341                 for (i = 0; i < rmrr->devices_cnt; i++) {
2342                         /*
2343                          * Return TRUE if this RMRR contains the device that
2344                          * is passed in.
2345                          */
2346                         if (rmrr->devices[i] == dev)
2347                                 return true;
2348                 }
2349         }
2350         return false;
2351 }
2352
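     /*
      * Decide whether @pdev should live in the static identity domain.
      * @startup distinguishes boot-time policy (assume the device is 64-bit
      * capable) from run-time checks against its actual DMA mask.
      */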
2353 static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2354 {
2355
2356         /*
2357          * We want to prevent any device associated with an RMRR from
2358          * getting placed into the SI Domain. This is done because
2359          * problems exist when devices are moved in and out of domains
2360          * and their respective RMRR info is lost. We exempt USB devices
2361          * from this process due to their usage of RMRRs that are known
2362          * to not be needed after BIOS hand-off to OS.
2363          */
2364         if (device_has_rmrr(pdev) &&
2365             (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
2366                 return 0;
2367
2368         if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2369                 return 1;
2370
2371         if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2372                 return 1;
2373
2374         if (!(iommu_identity_mapping & IDENTMAP_ALL))
2375                 return 0;
2376
2377         /*
2378          * We want to start off with all devices in the 1:1 domain, and
2379          * take them out later if we find they can't access all of memory.
2380          *
2381          * However, we can't do this for PCI devices behind bridges,
2382          * because all PCI devices behind the same bridge will end up
2383          * with the same source-id on their transactions.
2384          *
2385          * Practically speaking, we can't change things around for these
2386          * devices at run-time, because we can't be sure there'll be no
2387          * DMA transactions in flight for any of their siblings.
2388          * 
2389          * So PCI devices (unless they're on the root bus) as well as
2390          * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2391          * the 1:1 domain, just in _case_ one of their siblings turns out
2392          * not to be able to map all of memory.
2393          */
2394         if (!pci_is_pcie(pdev)) {
2395                 if (!pci_is_root_bus(pdev->bus))
2396                         return 0;
2397                 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2398                         return 0;
2399         } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2400                 return 0;
2401
2402         /* 
2403          * At boot time, we don't yet know if devices will be 64-bit capable.
2404          * Assume that they will -- if they turn out not to be, then we can 
2405          * take them out of the 1:1 domain later.
2406          */
2407         if (!startup) {
2408                 /*
2409                  * If the device's dma_mask is less than the system's memory
2410                  * size then this is not a candidate for identity mapping.
2411                  */
2412                 u64 dma_mask = pdev->dma_mask;
2413
2414                 if (pdev->dev.coherent_dma_mask &&
2415                     pdev->dev.coherent_dma_mask < dma_mask)
2416                         dma_mask = pdev->dev.coherent_dma_mask;
2417
2418                 return dma_mask >= dma_get_required_mask(&pdev->dev);
2419         }
2420
2421         return 1;
2422 }
2423
2424 static int __init iommu_prepare_static_identity_mapping(int hw)
2425 {
2426         struct pci_dev *pdev = NULL;
2427         int ret;
2428
2429         ret = si_domain_init(hw);
2430         if (ret)
2431                 return -EFAULT;
2432
2433         for_each_pci_dev(pdev) {
2434                 if (iommu_should_identity_map(pdev, 1)) {
2435                         ret = domain_add_dev_info(si_domain, pdev,
2436                                              hw ? CONTEXT_TT_PASS_THROUGH :
2437                                                   CONTEXT_TT_MULTI_LEVEL);
2438                         if (ret) {
2439                                 /* device not associated with an iommu */
2440                                 if (ret == -ENODEV)
2441                                         continue;
2442                                 return ret;
2443                         }
2444                         pr_info("IOMMU: %s identity mapping for device %s\n",
2445                                 hw ? "hardware" : "software", pci_name(pdev));
2446                 }
2447         }
2448
2449         return 0;
2450 }
2451
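     /*
      * One-time boot initialisation of all DMAR units: allocate the global
      * iommu array and per-iommu root/domain tables, pick an invalidation
      * method, set up identity and RMRR/ISA mappings, then enable translation.
      */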
2452 static int __init init_dmars(void)
2453 {
2454         struct dmar_drhd_unit *drhd;
2455         struct dmar_rmrr_unit *rmrr;
2456         struct pci_dev *pdev;
2457         struct intel_iommu *iommu;
2458         int i, ret;
2459
2460         /*
2461          * for each drhd
2462          *    allocate root
2463          *    initialize and program root entry to not present
2464          * endfor
2465          */
2466         for_each_drhd_unit(drhd) {
2467                 /*
2468                  * lock not needed as this is only incremented in the single-
2469                  * threaded kernel __init code path; all other accesses are
2470                  * read-only
2471                  */
2472                 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
2473                         g_num_of_iommus++;
2474                         continue;
2475                 }
2476                 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2477                           IOMMU_UNITS_SUPPORTED);
2478         }
2479
2480         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2481                         GFP_KERNEL);
2482         if (!g_iommus) {
2483                 printk(KERN_ERR "Allocating global iommu array failed\n");
2484                 ret = -ENOMEM;
2485                 goto error;
2486         }
2487
2488         deferred_flush = kzalloc(g_num_of_iommus *
2489                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
2490         if (!deferred_flush) {
2491                 ret = -ENOMEM;
2492                 goto error;
2493         }
2494
2495         for_each_drhd_unit(drhd) {
2496                 if (drhd->ignored)
2497                         continue;
2498
2499                 iommu = drhd->iommu;
2500                 g_iommus[iommu->seq_id] = iommu;
2501
2502                 ret = iommu_init_domains(iommu);
2503                 if (ret)
2504                         goto error;
2505
2506                 /*
2507                  * TBD:
2508                  * we could share the same root & context tables
2509                  * among all IOMMUs. Need to split this later.
2510                  */
2511                 ret = iommu_alloc_root_entry(iommu);
2512                 if (ret) {
2513                         printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2514                         goto error;
2515                 }
2516                 if (!ecap_pass_through(iommu->ecap))
2517                         hw_pass_through = 0;
2518         }
2519
2520         /*
2521          * Start from a sane iommu hardware state.
2522          */
2523         for_each_drhd_unit(drhd) {
2524                 if (drhd->ignored)
2525                         continue;
2526
2527                 iommu = drhd->iommu;
2528
2529                 /*
2530                  * If the queued invalidation is already initialized by us
2531                  * (for example, while enabling interrupt-remapping) then
2532                  * things are already rolling from a sane state.
2533                  */
2534                 if (iommu->qi)
2535                         continue;
2536
2537                 /*
2538                  * Clear any previous faults.
2539                  */
2540                 dmar_fault(-1, iommu);
2541                 /*
2542                  * Disable queued invalidation if supported and already enabled
2543                  * before OS handover.
2544                  */
2545                 dmar_disable_qi(iommu);
2546         }
2547
2548         for_each_drhd_unit(drhd) {
2549                 if (drhd->ignored)
2550                         continue;
2551
2552                 iommu = drhd->iommu;
2553
2554                 if (dmar_enable_qi(iommu)) {
2555                         /*
2556                          * Queued invalidation is not enabled; use register-
2557                          * based invalidation
2558                          */
2559                         iommu->flush.flush_context = __iommu_flush_context;
2560                         iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2561                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2562                                "invalidation\n",
2563                                 iommu->seq_id,
2564                                (unsigned long long)drhd->reg_base_addr);
2565                 } else {
2566                         iommu->flush.flush_context = qi_flush_context;
2567                         iommu->flush.flush_iotlb = qi_flush_iotlb;
2568                         printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2569                                "invalidation\n",
2570                                 iommu->seq_id,
2571                                (unsigned long long)drhd->reg_base_addr);
2572                 }
2573         }
2574
2575         if (iommu_pass_through)
2576                 iommu_identity_mapping |= IDENTMAP_ALL;
2577
2578 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
2579         iommu_identity_mapping |= IDENTMAP_GFX;
2580 #endif
2581
2582         check_tylersburg_isoch();
2583
2584         /*
2585          * If pass through is not set or not enabled, set up context entries
2586          * for identity mappings for rmrr, gfx, and isa, and possibly fall
2587          * back to static identity mapping if iommu_identity_mapping is set.
2588          */
2589         if (iommu_identity_mapping) {
2590                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
2591                 if (ret) {
2592                         printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
2593                         goto error;
2594                 }
2595         }
2596         /*
2597          * For each rmrr
2598          *   for each dev attached to rmrr
2599          *   do
2600          *     locate drhd for dev, alloc domain for dev
2601          *     allocate free domain
2602          *     allocate page table entries for rmrr
2603          *     if context not allocated for bus
2604          *           allocate and init context
2605          *           set present in root table for this bus
2606          *     init context with domain, translation etc
2607          *    endfor
2608          * endfor
2609          */
2610         printk(KERN_INFO "IOMMU: Setting RMRR:\n");
2611         for_each_rmrr_units(rmrr) {
2612                 for (i = 0; i < rmrr->devices_cnt; i++) {
2613                         pdev = rmrr->devices[i];
2614                         /*
2615                          * some BIOSes list non-existent devices in the
2616                          * DMAR table.
2617                          */
2618                         if (!pdev)
2619                                 continue;
2620                         ret = iommu_prepare_rmrr_dev(rmrr, pdev);
2621                         if (ret)
2622                                 printk(KERN_ERR
2623                                        "IOMMU: mapping reserved region failed\n");
2624                 }
2625         }
2626
2627         iommu_prepare_isa();
2628
2629         /*
2630          * for each drhd
2631          *   enable fault log
2632          *   global invalidate context cache
2633          *   global invalidate iotlb
2634          *   enable translation
2635          */
2636         for_each_drhd_unit(drhd) {
2637                 if (drhd->ignored) {
2638                         /*
2639                          * we always have to disable PMRs or DMA may fail on
2640                          * this device
2641                          */
2642                         if (force_on)
2643                                 iommu_disable_protect_mem_regions(drhd->iommu);
2644                         continue;
2645                 }
2646                 iommu = drhd->iommu;
2647
2648                 iommu_flush_write_buffer(iommu);
2649
2650                 ret = dmar_set_interrupt(iommu);
2651                 if (ret)
2652                         goto error;
2653
2654                 iommu_set_root_entry(iommu);
2655
2656                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2657                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2658
2659                 ret = iommu_enable_translation(iommu);
2660                 if (ret)
2661                         goto error;
2662
2663                 iommu_disable_protect_mem_regions(iommu);
2664         }
2665
2666         return 0;
2667 error:
2668         for_each_drhd_unit(drhd) {
2669                 if (drhd->ignored)
2670                         continue;
2671                 iommu = drhd->iommu;
2672                 free_iommu(iommu);
2673         }
2674         kfree(g_iommus);
2675         return ret;
2676 }
2677
2678 /* This takes a number of _MM_ pages, not VTD pages */
2679 static struct iova *intel_alloc_iova(struct device *dev,
2680                                      struct dmar_domain *domain,
2681                                      unsigned long nrpages, uint64_t dma_mask)
2682 {
2683         struct pci_dev *pdev = to_pci_dev(dev);
2684         struct iova *iova = NULL;
2685
2686         /* Restrict dma_mask to the width that the iommu can handle */
2687         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2688
2689         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
2690                 /*
2691                  * First try to allocate an io virtual address in
2692                  * DMA_BIT_MASK(32) and if that fails then try allocating
2693                  * from the higher range
2694                  */
2695                 iova = alloc_iova(&domain->iovad, nrpages,
2696                                   IOVA_PFN(DMA_BIT_MASK(32)), 1);
2697                 if (iova)
2698                         return iova;
2699         }
2700         iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
2701         if (unlikely(!iova)) {
2702                 printk(KERN_ERR "Allocating %ld-page iova for %s failed\n",
2703                        nrpages, pci_name(pdev));
2704                 return NULL;
2705         }
2706
2707         return iova;
2708 }
2709
2710 static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
2711 {
2712         struct dmar_domain *domain;
2713         int ret;
2714
2715         domain = get_domain_for_dev(pdev,
2716                         DEFAULT_DOMAIN_ADDRESS_WIDTH);
2717         if (!domain) {
2718                 printk(KERN_ERR
2719                         "Allocating domain for %s failed\n", pci_name(pdev));
2720                 return NULL;
2721         }
2722
2723         /* make sure context mapping is ok */
2724         if (unlikely(!domain_context_mapped(pdev))) {
2725                 ret = domain_context_mapping(domain, pdev,
2726                                              CONTEXT_TT_MULTI_LEVEL);
2727                 if (ret) {
2728                         printk(KERN_ERR
2729                                 "Domain context map for %s failed\n",
2730                                 pci_name(pdev));
2731                         return NULL;
2732                 }
2733         }
2734
2735         return domain;
2736 }
2737
2738 static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
2739 {
2740         struct device_domain_info *info;
2741
2742         /* No lock here, assumes no domain exit in normal case */
2743         info = dev->dev.archdata.iommu;
2744         if (likely(info))
2745                 return info->domain;
2746
2747         return __get_valid_domain_for_dev(dev);
2748 }
2749
2750 static int iommu_dummy(struct pci_dev *pdev)
2751 {
2752         return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
2753 }
2754
2755 /* Check if the pdev needs to go through the non-identity map and unmap process. */
2756 static int iommu_no_mapping(struct device *dev)
2757 {
2758         struct pci_dev *pdev;
2759         int found;
2760
2761         if (unlikely(dev->bus != &pci_bus_type))
2762                 return 1;
2763
2764         pdev = to_pci_dev(dev);
2765         if (iommu_dummy(pdev))
2766                 return 1;
2767
2768         if (!iommu_identity_mapping)
2769                 return 0;
2770
2771         found = identity_mapping(pdev);
2772         if (found) {
2773                 if (iommu_should_identity_map(pdev, 0))
2774                         return 1;
2775                 else {
2776                         /*
2777                          * The 32 bit DMA device is removed from si_domain and
2778                          * falls back to non-identity mapping.
2779                          */
2780                         domain_remove_one_dev_info(si_domain, pdev);
2781                         printk(KERN_INFO "32bit %s uses non-identity mapping\n",
2782                                pci_name(pdev));
2783                         return 0;
2784                 }
2785         } else {
2786                 /*
2787                  * In case of a 64 bit DMA device detached from a vm, the device
2788                  * is put into si_domain for identity mapping.
2789                  */
2790                 if (iommu_should_identity_map(pdev, 0)) {
2791                         int ret;
2792                         ret = domain_add_dev_info(si_domain, pdev,
2793                                                   hw_pass_through ?
2794                                                   CONTEXT_TT_PASS_THROUGH :
2795                                                   CONTEXT_TT_MULTI_LEVEL);
2796                         if (!ret) {
2797                                 printk(KERN_INFO "64bit %s uses identity mapping\n",
2798                                        pci_name(pdev));
2799                                 return 1;
2800                         }
2801                 }
2802         }
2803
2804         return 0;
2805 }
2806
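     /*
      * Map @size bytes at @paddr for @hwdev and return the bus (DMA) address,
      * or 0 on failure.  Devices that bypass the IOMMU get @paddr back
      * unchanged.
      */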
2807 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
2808                                      size_t size, int dir, u64 dma_mask)
2809 {
2810         struct pci_dev *pdev = to_pci_dev(hwdev);
2811         struct dmar_domain *domain;
2812         phys_addr_t start_paddr;
2813         struct iova *iova;
2814         int prot = 0;
2815         int ret;
2816         struct intel_iommu *iommu;
2817         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
2818
2819         BUG_ON(dir == DMA_NONE);
2820
2821         if (iommu_no_mapping(hwdev))
2822                 return paddr;
2823
2824         domain = get_valid_domain_for_dev(pdev);
2825         if (!domain)
2826                 return 0;
2827
2828         iommu = domain_get_iommu(domain);
2829         size = aligned_nrpages(paddr, size);
2830
2831         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
2832         if (!iova)
2833                 goto error;
2834
2835         /*
2836          * Check if DMAR supports zero-length reads on write-only
2837          * mappings.
2838          */
2839         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
2840                         !cap_zlr(iommu->cap))
2841                 prot |= DMA_PTE_READ;
2842         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2843                 prot |= DMA_PTE_WRITE;
2844         /*
2845          * paddr to (paddr + size) might be a partial page; we should map the
2846          * whole page.  Note: if two parts of one page are mapped separately,
2847          * we might have two guest_addrs mapping to the same host paddr, but
2848          * this is not a big problem
2849          */
2850         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2851                                  mm_to_dma_pfn(paddr_pfn), size, prot);
2852         if (ret)
2853                 goto error;
2854
2855         /* it's a non-present to present mapping. Only flush if caching mode */
2856         if (cap_caching_mode(iommu->cap))
2857                 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2858         else
2859                 iommu_flush_write_buffer(iommu);
2860
2861         start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
2862         start_paddr += paddr & ~PAGE_MASK;
2863         return start_paddr;
2864
2865 error:
2866         if (iova)
2867                 __free_iova(&domain->iovad, iova);
2868         printk(KERN_ERR "Device %s request: %zx@%llx dir %d --- failed\n",
2869                 pci_name(pdev), size, (unsigned long long)paddr, dir);
2870         return 0;
2871 }
2872
2873 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
2874                                  unsigned long offset, size_t size,
2875                                  enum dma_data_direction dir,
2876                                  struct dma_attrs *attrs)
2877 {
2878         return __intel_map_single(dev, page_to_phys(page) + offset, size,
2879                                   dir, to_pci_dev(dev)->dma_mask);
2880 }
2881
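     /*
      * Flush the IOTLB for every deferred unmap and release the corresponding
      * IOVAs.  Called with async_umap_flush_lock held.
      */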
2882 static void flush_unmaps(void)
2883 {
2884         int i, j;
2885
2886         timer_on = 0;
2887
2888         /* just flush them all */
2889         for (i = 0; i < g_num_of_iommus; i++) {
2890                 struct intel_iommu *iommu = g_iommus[i];
2891                 if (!iommu)
2892                         continue;
2893
2894                 if (!deferred_flush[i].next)
2895                         continue;
2896
2897                 /* In caching mode, global flushes make emulation expensive */
2898                 if (!cap_caching_mode(iommu->cap))
2899                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
2900                                          DMA_TLB_GLOBAL_FLUSH);
2901                 for (j = 0; j < deferred_flush[i].next; j++) {
2902                         unsigned long mask;
2903                         struct iova *iova = deferred_flush[i].iova[j];
2904                         struct dmar_domain *domain = deferred_flush[i].domain[j];
2905
2906                         /* On real hardware multiple invalidations are expensive */
2907                         if (cap_caching_mode(iommu->cap))
2908                                 iommu_flush_iotlb_psi(iommu, domain->id,
2909                                 iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
2910                         else {
2911                                 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
2912                                 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2913                                                 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
2914                         }
2915                         __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2916                 }
2917                 deferred_flush[i].next = 0;
2918         }
2919
2920         list_size = 0;
2921 }
2922
2923 static void flush_unmaps_timeout(unsigned long data)
2924 {
2925         unsigned long flags;
2926
2927         spin_lock_irqsave(&async_umap_flush_lock, flags);
2928         flush_unmaps();
2929         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2930 }
2931
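     /*
      * Queue a lazily-freed IOVA on its IOMMU's deferred-flush list and arm
      * the flush timer; flush immediately if the high-water mark is reached.
      */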
2932 static void add_unmap(struct dmar_domain *dom, struct iova *iova)
2933 {
2934         unsigned long flags;
2935         int next, iommu_id;
2936         struct intel_iommu *iommu;
2937
2938         spin_lock_irqsave(&async_umap_flush_lock, flags);
2939         if (list_size == HIGH_WATER_MARK)
2940                 flush_unmaps();
2941
2942         iommu = domain_get_iommu(dom);
2943         iommu_id = iommu->seq_id;
2944
2945         next = deferred_flush[iommu_id].next;
2946         deferred_flush[iommu_id].domain[next] = dom;
2947         deferred_flush[iommu_id].iova[next] = iova;
2948         deferred_flush[iommu_id].next++;
2949
2950         if (!timer_on) {
2951                 mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
2952                 timer_on = 1;
2953         }
2954         list_size++;
2955         spin_unlock_irqrestore(&async_umap_flush_lock, flags);
2956 }
2957
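     /*
      * Tear down the mapping for @dev_addr: clear the PTEs, free the page
      * tables and either flush the IOTLB immediately (strict mode) or defer
      * the flush and IOVA release via add_unmap().
      */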
2958 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
2959                              size_t size, enum dma_data_direction dir,
2960                              struct dma_attrs *attrs)
2961 {
2962         struct pci_dev *pdev = to_pci_dev(dev);
2963         struct dmar_domain *domain;
2964         unsigned long start_pfn, last_pfn;
2965         struct iova *iova;
2966         struct intel_iommu *iommu;
2967
2968         if (iommu_no_mapping(dev))
2969                 return;
2970
2971         domain = find_domain(pdev);
2972         BUG_ON(!domain);
2973
2974         iommu = domain_get_iommu(domain);
2975
2976         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2977         if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
2978                       (unsigned long long)dev_addr))
2979                 return;
2980
2981         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
2982         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
2983
2984         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
2985                  pci_name(pdev), start_pfn, last_pfn);
2986
2987         /*  clear the whole page */
2988         dma_pte_clear_range(domain, start_pfn, last_pfn);
2989
2990         /* free page tables */
2991         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2992
2993         if (intel_iommu_strict) {
2994                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2995                                       last_pfn - start_pfn + 1, 0);
2996                 /* free iova */
2997                 __free_iova(&domain->iovad, iova);
2998         } else {
2999                 add_unmap(domain, iova);
3000                 /*
3001                  * Queue up the release of the unmap to avoid the roughly 1/6th
3002                  * of CPU time otherwise spent on a synchronous iotlb flush.
3003                  */
3004         }
3005 }
3006
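/*
 * DMA API .alloc: allocate zeroed pages and map them through
 * __intel_map_single() against the device's coherent DMA mask.  For devices
 * translated by the IOMMU the GFP_DMA/GFP_DMA32 zone modifiers are dropped;
 * identity-mapped devices get a zone picked from the coherent mask when it
 * is narrower than the required mask.
 */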
3007 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
3008                                   dma_addr_t *dma_handle, gfp_t flags,
3009                                   struct dma_attrs *attrs)
3010 {
3011         void *vaddr;
3012         int order;
3013
3014         size = PAGE_ALIGN(size);
3015         order = get_order(size);
3016
3017         if (!iommu_no_mapping(hwdev))
3018                 flags &= ~(GFP_DMA | GFP_DMA32);
3019         else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
3020                 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
3021                         flags |= GFP_DMA;
3022                 else
3023                         flags |= GFP_DMA32;
3024         }
3025
3026         vaddr = (void *)__get_free_pages(flags, order);
3027         if (!vaddr)
3028                 return NULL;
3029         memset(vaddr, 0, size);
3030
3031         *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
3032                                          DMA_BIDIRECTIONAL,
3033                                          hwdev->coherent_dma_mask);
3034         if (*dma_handle)
3035                 return vaddr;
3036         free_pages((unsigned long)vaddr, order);
3037         return NULL;
3038 }
3039
3040 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
3041                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
3042 {
3043         int order;
3044
3045         size = PAGE_ALIGN(size);
3046         order = get_order(size);
3047
3048         intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
3049         free_pages((unsigned long)vaddr, order);
3050 }
3051
3052 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
3053                            int nelems, enum dma_data_direction dir,
3054                            struct dma_attrs *attrs)
3055 {
3056         struct pci_dev *pdev = to_pci_dev(hwdev);
3057         struct dmar_domain *domain;
3058         unsigned long start_pfn, last_pfn;
3059         struct iova *iova;
3060         struct intel_iommu *iommu;
3061
3062         if (iommu_no_mapping(hwdev))
3063                 return;
3064
3065         domain = find_domain(pdev);
3066         BUG_ON(!domain);
3067
3068         iommu = domain_get_iommu(domain);
3069
3070         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
3071         if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
3072                       (unsigned long long)sglist[0].dma_address))
3073                 return;
3074
3075         start_pfn = mm_to_dma_pfn(iova->pfn_lo);
3076         last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
3077
3078         /*  clear the whole page */
3079         dma_pte_clear_range(domain, start_pfn, last_pfn);
3080
3081         /* free page tables */
3082         dma_pte_free_pagetable(domain, start_pfn, last_pfn);
3083
3084         if (intel_iommu_strict) {
3085                 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
3086                                       last_pfn - start_pfn + 1, 0);
3087                 /* free iova */
3088                 __free_iova(&domain->iovad, iova);
3089         } else {
3090                 add_unmap(domain, iova);
3091                 /*
3092                  * Queue up the release of the unmap to avoid the roughly 1/6th
3093                  * of CPU time otherwise spent on a synchronous iotlb flush.
3094                  */
3095         }
3096 }
3097
3098 static int intel_nontranslate_map_sg(struct device *hwdev,
3099         struct scatterlist *sglist, int nelems, int dir)
3100 {
3101         int i;
3102         struct scatterlist *sg;
3103
3104         for_each_sg(sglist, sg, nelems, i) {
3105                 BUG_ON(!sg_page(sg));
3106                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3107                 sg->dma_length = sg->length;
3108         }
3109         return nelems;
3110 }
3111
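/*
 * DMA API .map_sg: allocate a single IOVA range covering the whole
 * scatterlist, map it with domain_sg_mapping(), then flush (page-selective
 * invalidation in caching mode, write-buffer flush otherwise).  Devices
 * without translation fall back to intel_nontranslate_map_sg().
 */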
3112 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
3113                         enum dma_data_direction dir, struct dma_attrs *attrs)
3114 {
3115         int i;
3116         struct pci_dev *pdev = to_pci_dev(hwdev);
3117         struct dmar_domain *domain;
3118         size_t size = 0;
3119         int prot = 0;
3120         struct iova *iova = NULL;
3121         int ret;
3122         struct scatterlist *sg;
3123         unsigned long start_vpfn;
3124         struct intel_iommu *iommu;
3125
3126         BUG_ON(dir == DMA_NONE);
3127         if (iommu_no_mapping(hwdev))
3128                 return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
3129
3130         domain = get_valid_domain_for_dev(pdev);
3131         if (!domain)
3132                 return 0;
3133
3134         iommu = domain_get_iommu(domain);
3135
3136         for_each_sg(sglist, sg, nelems, i)
3137                 size += aligned_nrpages(sg->offset, sg->length);
3138
3139         iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
3140                                 pdev->dma_mask);
3141         if (!iova) {
3142                 sglist->dma_length = 0;
3143                 return 0;
3144         }
3145
3146         /*
3147          * Check if DMAR supports zero-length reads on write-only
3148          * mappings.
3149          */
3150         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3151                         !cap_zlr(iommu->cap))
3152                 prot |= DMA_PTE_READ;
3153         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3154                 prot |= DMA_PTE_WRITE;
3155
3156         start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
3157
3158         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3159         if (unlikely(ret)) {
3160                 /*  clear the page */
3161                 dma_pte_clear_range(domain, start_vpfn,
3162                                     start_vpfn + size - 1);
3163                 /* free page tables */
3164                 dma_pte_free_pagetable(domain, start_vpfn,
3165                                        start_vpfn + size - 1);
3166                 /* free iova */
3167                 __free_iova(&domain->iovad, iova);
3168                 return 0;
3169         }
3170
3171         /* it's a non-present to present mapping. Only flush if caching mode */
3172         if (cap_caching_mode(iommu->cap))
3173                 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3174         else
3175                 iommu_flush_write_buffer(iommu);
3176
3177         return nelems;
3178 }
3179
3180 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3181 {
3182         return !dma_addr;
3183 }
3184
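/*
 * Installed as the global dma_ops by intel_iommu_init(), so generic DMA API
 * calls such as dma_map_page(), dma_map_sg() and dma_alloc_coherent() are
 * dispatched to the intel_* handlers in this file.
 */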
3185 struct dma_map_ops intel_dma_ops = {
3186         .alloc = intel_alloc_coherent,
3187         .free = intel_free_coherent,
3188         .map_sg = intel_map_sg,
3189         .unmap_sg = intel_unmap_sg,
3190         .map_page = intel_map_page,
3191         .unmap_page = intel_unmap_page,
3192         .mapping_error = intel_mapping_error,
3193 };
3194
3195 static inline int iommu_domain_cache_init(void)
3196 {
3197         int ret = 0;
3198
3199         iommu_domain_cache = kmem_cache_create("iommu_domain",
3200                                          sizeof(struct dmar_domain),
3201                                          0,
3202                                          SLAB_HWCACHE_ALIGN,
3204                                          NULL);
3205         if (!iommu_domain_cache) {
3206                 printk(KERN_ERR "Couldn't create iommu_domain cache\n");
3207                 ret = -ENOMEM;
3208         }
3209
3210         return ret;
3211 }
3212
3213 static inline int iommu_devinfo_cache_init(void)
3214 {
3215         int ret = 0;
3216
3217         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3218                                          sizeof(struct device_domain_info),
3219                                          0,
3220                                          SLAB_HWCACHE_ALIGN,
3221                                          NULL);
3222         if (!iommu_devinfo_cache) {
3223                 printk(KERN_ERR "Couldn't create devinfo cache\n");
3224                 ret = -ENOMEM;
3225         }
3226
3227         return ret;
3228 }
3229
3230 static inline int iommu_iova_cache_init(void)
3231 {
3232         int ret = 0;
3233
3234         iommu_iova_cache = kmem_cache_create("iommu_iova",
3235                                          sizeof(struct iova),
3236                                          0,
3237                                          SLAB_HWCACHE_ALIGN,
3238                                          NULL);
3239         if (!iommu_iova_cache) {
3240                 printk(KERN_ERR "Couldn't create iova cache\n");
3241                 ret = -ENOMEM;
3242         }
3243
3244         return ret;
3245 }
3246
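/*
 * Create the iova, domain and devinfo slab caches, tearing down whatever was
 * already created if a later allocation fails.
 */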
3247 static int __init iommu_init_mempool(void)
3248 {
3249         int ret;
3250         ret = iommu_iova_cache_init();
3251         if (ret)
3252                 return ret;
3253
3254         ret = iommu_domain_cache_init();
3255         if (ret)
3256                 goto domain_error;
3257
3258         ret = iommu_devinfo_cache_init();
3259         if (!ret)
3260                 return ret;
3261
3262         kmem_cache_destroy(iommu_domain_cache);
3263 domain_error:
3264         kmem_cache_destroy(iommu_iova_cache);
3265
3266         return -ENOMEM;
3267 }
3268
3269 static void __init iommu_exit_mempool(void)
3270 {
3271         kmem_cache_destroy(iommu_devinfo_cache);
3272         kmem_cache_destroy(iommu_domain_cache);
3273         kmem_cache_destroy(iommu_iova_cache);
3274
3275 }
3276
3277 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
3278 {
3279         struct dmar_drhd_unit *drhd;
3280         u32 vtbar;
3281         int rc;
3282
3283         /* We know that this device on this chipset has its own IOMMU.
3284          * If we find it under a different IOMMU, then the BIOS is lying
3285          * to us. Hope that the IOMMU for this device is actually
3286          * disabled, and it needs no translation...
3287          */
3288         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
3289         if (rc) {
3290                 /* "can't" happen */
3291                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
3292                 return;
3293         }
3294         vtbar &= 0xffff0000;
3295
3296         /* we know that this iommu should be at offset 0xa000 from vtbar */
3297         drhd = dmar_find_matched_drhd_unit(pdev);
3298         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
3299                             TAINT_FIRMWARE_WORKAROUND,
3300                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
3301                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3302 }
3303 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
3304
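/*
 * Mark DRHD units that cover no PCI devices as ignored.  Units that cover
 * only graphics devices are either recorded via intel_iommu_gfx_mapped or,
 * when dmar_map_gfx is clear, bypassed entirely by marking their devices
 * with DUMMY_DEVICE_DOMAIN_INFO.
 */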
3305 static void __init init_no_remapping_devices(void)
3306 {
3307         struct dmar_drhd_unit *drhd;
3308
3309         for_each_drhd_unit(drhd) {
3310                 if (!drhd->include_all) {
3311                         int i;
3312                         for (i = 0; i < drhd->devices_cnt; i++)
3313                                 if (drhd->devices[i] != NULL)
3314                                         break;
3315                         /* ignore DMAR unit if no pci devices exist */
3316                         if (i == drhd->devices_cnt)
3317                                 drhd->ignored = 1;
3318                 }
3319         }
3320
3321         for_each_drhd_unit(drhd) {
3322                 int i;
3323                 if (drhd->ignored || drhd->include_all)
3324                         continue;
3325
3326                 for (i = 0; i < drhd->devices_cnt; i++)
3327                         if (drhd->devices[i] &&
3328                             !IS_GFX_DEVICE(drhd->devices[i]))
3329                                 break;
3330
3331                 if (i < drhd->devices_cnt)
3332                         continue;
3333
3334                 /* This IOMMU has *only* gfx devices. Either bypass it or
3335                    set the gfx_mapped flag, as appropriate */
3336                 if (dmar_map_gfx) {
3337                         intel_iommu_gfx_mapped = 1;
3338                 } else {
3339                         drhd->ignored = 1;
3340                         for (i = 0; i < drhd->devices_cnt; i++) {
3341                                 if (!drhd->devices[i])
3342                                         continue;
3343                                 drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
3344                         }
3345                 }
3346         }
3347 }
3348
3349 #ifdef CONFIG_SUSPEND
3350 static int init_iommu_hw(void)
3351 {
3352         struct dmar_drhd_unit *drhd;
3353         struct intel_iommu *iommu = NULL;
3354
3355         for_each_active_iommu(iommu, drhd)
3356                 if (iommu->qi)
3357                         dmar_reenable_qi(iommu);
3358
3359         for_each_iommu(iommu, drhd) {
3360                 if (drhd->ignored) {
3361                         /*
3362                          * we always have to disable PMRs or DMA may fail on
3363                          * this device
3364                          */
3365                         if (force_on)
3366                                 iommu_disable_protect_mem_regions(iommu);
3367                         continue;
3368                 }
3369
3370                 iommu_flush_write_buffer(iommu);
3371
3372                 iommu_set_root_entry(iommu);
3373
3374                 iommu->flush.flush_context(iommu, 0, 0, 0,
3375                                            DMA_CCMD_GLOBAL_INVL);
3376                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3377                                          DMA_TLB_GLOBAL_FLUSH);
3378                 if (iommu_enable_translation(iommu))
3379                         return 1;
3380                 iommu_disable_protect_mem_regions(iommu);
3381         }
3382
3383         return 0;
3384 }
3385
3386 static void iommu_flush_all(void)
3387 {
3388         struct dmar_drhd_unit *drhd;
3389         struct intel_iommu *iommu;
3390
3391         for_each_active_iommu(iommu, drhd) {
3392                 iommu->flush.flush_context(iommu, 0, 0, 0,
3393                                            DMA_CCMD_GLOBAL_INVL);
3394                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3395                                          DMA_TLB_GLOBAL_FLUSH);
3396         }
3397 }
3398
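/*
 * Flush all caches, disable translation and save the fault-event registers
 * of every active IOMMU so iommu_resume() can restore them after S3/S4.
 */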
3399 static int iommu_suspend(void)
3400 {
3401         struct dmar_drhd_unit *drhd;
3402         struct intel_iommu *iommu = NULL;
3403         unsigned long flag;
3404
3405         for_each_active_iommu(iommu, drhd) {
3406                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
3407                                                  GFP_ATOMIC);
3408                 if (!iommu->iommu_state)
3409                         goto nomem;
3410         }
3411
3412         iommu_flush_all();
3413
3414         for_each_active_iommu(iommu, drhd) {
3415                 iommu_disable_translation(iommu);
3416
3417                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3418
3419                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3420                         readl(iommu->reg + DMAR_FECTL_REG);
3421                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3422                         readl(iommu->reg + DMAR_FEDATA_REG);
3423                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3424                         readl(iommu->reg + DMAR_FEADDR_REG);
3425                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3426                         readl(iommu->reg + DMAR_FEUADDR_REG);
3427
3428                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3429         }
3430         return 0;
3431
3432 nomem:
3433         for_each_active_iommu(iommu, drhd)
3434                 kfree(iommu->iommu_state);
3435
3436         return -ENOMEM;
3437 }
3438
3439 static void iommu_resume(void)
3440 {
3441         struct dmar_drhd_unit *drhd;
3442         struct intel_iommu *iommu = NULL;
3443         unsigned long flag;
3444
3445         if (init_iommu_hw()) {
3446                 if (force_on)
3447                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3448                 else
3449                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3450                 return;
3451         }
3452
3453         for_each_active_iommu(iommu, drhd) {
3454
3455                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3456
3457                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3458                         iommu->reg + DMAR_FECTL_REG);
3459                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3460                         iommu->reg + DMAR_FEDATA_REG);
3461                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3462                         iommu->reg + DMAR_FEADDR_REG);
3463                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3464                         iommu->reg + DMAR_FEUADDR_REG);
3465
3466                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3467         }
3468
3469         for_each_active_iommu(iommu, drhd)
3470                 kfree(iommu->iommu_state);
3471 }
3472
3473 static struct syscore_ops iommu_syscore_ops = {
3474         .resume         = iommu_resume,
3475         .suspend        = iommu_suspend,
3476 };
3477
3478 static void __init init_iommu_pm_ops(void)
3479 {
3480         register_syscore_ops(&iommu_syscore_ops);
3481 }
3482
3483 #else
3484 static inline void init_iommu_pm_ops(void) {}
3485 #endif /* CONFIG_SUSPEND */
3486
3487 LIST_HEAD(dmar_rmrr_units);
3488
3489 static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
3490 {
3491         list_add(&rmrr->list, &dmar_rmrr_units);
3492 }
3493
3494
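/*
 * Record one RMRR (Reserved Memory Region Reporting) structure from the DMAR
 * table; its device scope is parsed later by rmrr_parse_dev().
 */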
3495 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3496 {
3497         struct acpi_dmar_reserved_memory *rmrr;
3498         struct dmar_rmrr_unit *rmrru;
3499
3500         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3501         if (!rmrru)
3502                 return -ENOMEM;
3503
3504         rmrru->hdr = header;
3505         rmrr = (struct acpi_dmar_reserved_memory *)header;
3506         rmrru->base_address = rmrr->base_address;
3507         rmrru->end_address = rmrr->end_address;
3508
3509         dmar_register_rmrr_unit(rmrru);
3510         return 0;
3511 }
3512
3513 static int __init
3514 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
3515 {
3516         struct acpi_dmar_reserved_memory *rmrr;
3517         int ret;
3518
3519         rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
3520         ret = dmar_parse_dev_scope((void *)(rmrr + 1),
3521                 ((void *)rmrr) + rmrr->header.length,
3522                 &rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
3523
3524         if (ret || (rmrru->devices_cnt == 0)) {
3525                 list_del(&rmrru->list);
3526                 kfree(rmrru);
3527         }
3528         return ret;
3529 }
3530
3531 static LIST_HEAD(dmar_atsr_units);
3532
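/*
 * Record one ATSR (root port ATS capability reporting) structure from the
 * DMAR table; its device scope is parsed later by atsr_parse_dev().
 */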
3533 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
3534 {
3535         struct acpi_dmar_atsr *atsr;
3536         struct dmar_atsr_unit *atsru;
3537
3538         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3539         atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
3540         if (!atsru)
3541                 return -ENOMEM;
3542
3543         atsru->hdr = hdr;
3544         atsru->include_all = atsr->flags & 0x1;
3545
3546         list_add(&atsru->list, &dmar_atsr_units);
3547
3548         return 0;
3549 }
3550
3551 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
3552 {
3553         int rc;
3554         struct acpi_dmar_atsr *atsr;
3555
3556         if (atsru->include_all)
3557                 return 0;
3558
3559         atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3560         rc = dmar_parse_dev_scope((void *)(atsr + 1),
3561                                 (void *)atsr + atsr->header.length,
3562                                 &atsru->devices_cnt, &atsru->devices,
3563                                 atsr->segment);
3564         if (rc || !atsru->devices_cnt) {
3565                 list_del(&atsru->list);
3566                 kfree(atsru);
3567         }
3568
3569         return rc;
3570 }
3571
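/*
 * Return 1 if the PCIe root port above @dev is listed in a matching ATSR, or
 * if the matching ATSR is marked include-all; 0 otherwise.
 */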
3572 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3573 {
3574         int i;
3575         struct pci_bus *bus;
3576         struct acpi_dmar_atsr *atsr;
3577         struct dmar_atsr_unit *atsru;
3578
3579         dev = pci_physfn(dev);
3580
3581         list_for_each_entry(atsru, &dmar_atsr_units, list) {
3582                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3583                 if (atsr->segment == pci_domain_nr(dev->bus))
3584                         goto found;
3585         }
3586
3587         return 0;
3588
3589 found:
3590         for (bus = dev->bus; bus; bus = bus->parent) {
3591                 struct pci_dev *bridge = bus->self;
3592
3593                 if (!bridge || !pci_is_pcie(bridge) ||
3594                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3595                         return 0;
3596
3597                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
3598                         for (i = 0; i < atsru->devices_cnt; i++)
3599                                 if (atsru->devices[i] == bridge)
3600                                         return 1;
3601                         break;
3602                 }
3603         }
3604
3605         if (atsru->include_all)
3606                 return 1;
3607
3608         return 0;
3609 }
3610
3611 int __init dmar_parse_rmrr_atsr_dev(void)
3612 {
3613         struct dmar_rmrr_unit *rmrr, *rmrr_n;
3614         struct dmar_atsr_unit *atsr, *atsr_n;
3615         int ret = 0;
3616
3617         list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
3618                 ret = rmrr_parse_dev(rmrr);
3619                 if (ret)
3620                         return ret;
3621         }
3622
3623         list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
3624                 ret = atsr_parse_dev(atsr);
3625                 if (ret)
3626                         return ret;
3627         }
3628
3629         return ret;
3630 }
3631
3632 /*
3633  * Here we only respond to a device being unbound from its driver.
3634  *
3635  * A newly added device is not attached to its DMAR domain here yet; that
3636  * happens when the device is first mapped to an iova.
3637  */
3638 static int device_notifier(struct notifier_block *nb,
3639                                   unsigned long action, void *data)
3640 {
3641         struct device *dev = data;
3642         struct pci_dev *pdev = to_pci_dev(dev);
3643         struct dmar_domain *domain;
3644
3645         if (iommu_no_mapping(dev))
3646                 return 0;
3647
3648         domain = find_domain(pdev);
3649         if (!domain)
3650                 return 0;
3651
3652         if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3653                 domain_remove_one_dev_info(domain, pdev);
3654
3655                 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3656                     !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3657                     list_empty(&domain->devices))
3658                         domain_exit(domain);
3659         }
3660
3661         return 0;
3662 }
3663
3664 static struct notifier_block device_nb = {
3665         .notifier_call = device_notifier,
3666 };
3667
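/*
 * Main initialization entry point: parse the DMAR table and device scopes,
 * bring up the remapping hardware via init_dmars(), install intel_dma_ops as
 * the DMA API backend and register the IOMMU ops and bus notifier.
 */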
3668 int __init intel_iommu_init(void)
3669 {
3670         int ret = 0;
3671         struct dmar_drhd_unit *drhd;
3672
3673         /* VT-d is required for a TXT/tboot launch, so enforce that */
3674         force_on = tboot_force_iommu();
3675
3676         if (dmar_table_init()) {
3677                 if (force_on)
3678                         panic("tboot: Failed to initialize DMAR table\n");
3679                 return  -ENODEV;
3680         }
3681
3682         /*
3683          * Disable translation if already enabled prior to OS handover.
3684          */
3685         for_each_drhd_unit(drhd) {
3686                 struct intel_iommu *iommu;
3687
3688                 if (drhd->ignored)
3689                         continue;
3690
3691                 iommu = drhd->iommu;
3692                 if (iommu->gcmd & DMA_GCMD_TE)
3693                         iommu_disable_translation(iommu);
3694         }
3695
3696         if (dmar_dev_scope_init() < 0) {
3697                 if (force_on)
3698                         panic("tboot: Failed to initialize DMAR device scope\n");
3699                 return  -ENODEV;
3700         }
3701
3702         if (no_iommu || dmar_disabled)
3703                 return -ENODEV;
3704
3705         if (iommu_init_mempool()) {
3706                 if (force_on)
3707                         panic("tboot: Failed to initialize iommu memory\n");
3708                 return  -ENODEV;
3709         }
3710
3711         if (list_empty(&dmar_rmrr_units))
3712                 printk(KERN_INFO "DMAR: No RMRR found\n");
3713
3714         if (list_empty(&dmar_atsr_units))
3715                 printk(KERN_INFO "DMAR: No ATSR found\n");
3716
3717         if (dmar_init_reserved_ranges()) {
3718                 if (force_on)
3719                         panic("tboot: Failed to reserve iommu ranges\n");
3720                 return  -ENODEV;
3721         }
3722
3723         init_no_remapping_devices();
3724
3725         ret = init_dmars();
3726         if (ret) {
3727                 if (force_on)
3728                         panic("tboot: Failed to initialize DMARs\n");
3729                 printk(KERN_ERR "IOMMU: dmar init failed\n");
3730                 put_iova_domain(&reserved_iova_list);
3731                 iommu_exit_mempool();
3732                 return ret;
3733         }
3734         printk(KERN_INFO
3735                "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3736
3737         init_timer(&unmap_timer);
3738 #ifdef CONFIG_SWIOTLB
3739         swiotlb = 0;
3740 #endif
3741         dma_ops = &intel_dma_ops;
3742
3743         init_iommu_pm_ops();
3744
3745         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
3746
3747         bus_register_notifier(&pci_bus_type, &device_nb);
3748
3749         intel_iommu_enabled = 1;
3750
3751         return 0;
3752 }
3753
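/*
 * If @pdev sits behind a PCIe-to-PCI(-X) bridge, also tear down the context
 * entries that were set up for the bridges between the device and the PCIe
 * root.
 */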
3754 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3755                                            struct pci_dev *pdev)
3756 {
3757         struct pci_dev *tmp, *parent;
3758
3759         if (!iommu || !pdev)
3760                 return;
3761
3762         /* dependent device detach */
3763         tmp = pci_find_upstream_pcie_bridge(pdev);
3764         /* Secondary interface's bus number and devfn 0 */
3765         if (tmp) {
3766                 parent = pdev->bus->self;
3767                 while (parent != tmp) {
3768                         iommu_detach_dev(iommu, parent->bus->number,
3769                                          parent->devfn);
3770                         parent = parent->bus->self;
3771                 }
3772                 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
3773                         iommu_detach_dev(iommu,
3774                                 tmp->subordinate->number, 0);
3775                 else /* this is a legacy PCI bridge */
3776                         iommu_detach_dev(iommu, tmp->bus->number,
3777                                          tmp->devfn);
3778         }
3779 }
3780
3781 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3782                                           struct pci_dev *pdev)
3783 {
3784         struct device_domain_info *info;
3785         struct intel_iommu *iommu;
3786         unsigned long flags;
3787         int found = 0;
3788         struct list_head *entry, *tmp;
3789
3790         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
3791                                 pdev->devfn);
3792         if (!iommu)
3793                 return;
3794
3795         spin_lock_irqsave(&device_domain_lock, flags);
3796         list_for_each_safe(entry, tmp, &domain->devices) {
3797                 info = list_entry(entry, struct device_domain_info, link);
3798                 if (info->segment == pci_domain_nr(pdev->bus) &&
3799                     info->bus == pdev->bus->number &&
3800                     info->devfn == pdev->devfn) {
3801                         unlink_domain_info(info);
3802                         spin_unlock_irqrestore(&device_domain_lock, flags);
3803
3804                         iommu_disable_dev_iotlb(info);
3805                         iommu_detach_dev(iommu, info->bus, info->devfn);
3806                         iommu_detach_dependent_devices(iommu, pdev);
3807                         free_devinfo_mem(info);
3808
3809                         spin_lock_irqsave(&device_domain_lock, flags);
3810
3811                         if (found)
3812                                 break;
3813                         else
3814                                 continue;
3815                 }
3816
3817                 /* if there are no other devices under the same iommu
3818                  * owned by this domain, clear this iommu in iommu_bmp,
3819                  * update iommu count and coherency
3820                  */
3821                 if (iommu == device_to_iommu(info->segment, info->bus,
3822                                             info->devfn))
3823                         found = 1;
3824         }
3825
3826         spin_unlock_irqrestore(&device_domain_lock, flags);
3827
3828         if (found == 0) {
3829                 unsigned long tmp_flags;
3830                 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3831                 clear_bit(iommu->seq_id, domain->iommu_bmp);
3832                 domain->iommu_count--;
3833                 domain_update_iommu_cap(domain);
3834                 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3835
3836                 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3837                     !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
3838                         spin_lock_irqsave(&iommu->lock, tmp_flags);
3839                         clear_bit(domain->id, iommu->domain_ids);
3840                         iommu->domains[domain->id] = NULL;
3841                         spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3842                 }
3843         }
3844 }
3845
3846 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3847 {
3848         struct device_domain_info *info;
3849         struct intel_iommu *iommu;
3850         unsigned long flags1, flags2;
3851
3852         spin_lock_irqsave(&device_domain_lock, flags1);
3853         while (!list_empty(&domain->devices)) {
3854                 info = list_entry(domain->devices.next,
3855                         struct device_domain_info, link);
3856                 unlink_domain_info(info);
3857                 spin_unlock_irqrestore(&device_domain_lock, flags1);
3858
3859                 iommu_disable_dev_iotlb(info);
3860                 iommu = device_to_iommu(info->segment, info->bus, info->devfn);
3861                 iommu_detach_dev(iommu, info->bus, info->devfn);
3862                 iommu_detach_dependent_devices(iommu, info->dev);
3863
3864                 /* clear this iommu in iommu_bmp, update iommu count
3865                  * and capabilities
3866                  */
3867                 spin_lock_irqsave(&domain->iommu_lock, flags2);
3868                 if (test_and_clear_bit(iommu->seq_id,
3869                                        domain->iommu_bmp)) {
3870                         domain->iommu_count--;
3871                         domain_update_iommu_cap(domain);
3872                 }
3873                 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3874
3875                 free_devinfo_mem(info);
3876                 spin_lock_irqsave(&device_domain_lock, flags1);
3877         }
3878         spin_unlock_irqrestore(&device_domain_lock, flags1);
3879 }
3880
3881 /* domain id for a virtual machine; it is never written into a context entry */
3882 static unsigned long vm_domid;
3883
3884 static struct dmar_domain *iommu_alloc_vm_domain(void)
3885 {
3886         struct dmar_domain *domain;
3887
3888         domain = alloc_domain_mem();
3889         if (!domain)
3890                 return NULL;
3891
3892         domain->id = vm_domid++;
3893         domain->nid = -1;
3894         memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
3895         domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3896
3897         return domain;
3898 }
3899
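/*
 * Minimal set-up for domains created through the IOMMU API: reserve the
 * special IOVA ranges, derive the AGAW from @guest_width and allocate the
 * top-level page directory.
 */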
3900 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3901 {
3902         int adjust_width;
3903
3904         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3905         spin_lock_init(&domain->iommu_lock);
3906
3907         domain_reserve_special_ranges(domain);
3908
3909         /* calculate AGAW */
3910         domain->gaw = guest_width;
3911         adjust_width = guestwidth_to_adjustwidth(guest_width);
3912         domain->agaw = width_to_agaw(adjust_width);
3913
3914         INIT_LIST_HEAD(&domain->devices);
3915
3916         domain->iommu_count = 0;
3917         domain->iommu_coherency = 0;
3918         domain->iommu_snooping = 0;
3919         domain->iommu_superpage = 0;
3920         domain->max_addr = 0;
3921         domain->nid = -1;
3922
3923         /* always allocate the top pgd */
3924         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3925         if (!domain->pgd)
3926                 return -ENOMEM;
3927         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3928         return 0;
3929 }
3930
3931 static void iommu_free_vm_domain(struct dmar_domain *domain)
3932 {
3933         unsigned long flags;
3934         struct dmar_drhd_unit *drhd;
3935         struct intel_iommu *iommu;
3936         unsigned long i;
3937         unsigned long ndomains;
3938
3939         for_each_drhd_unit(drhd) {
3940                 if (drhd->ignored)
3941                         continue;
3942                 iommu = drhd->iommu;
3943
3944                 ndomains = cap_ndoms(iommu->cap);
3945                 for_each_set_bit(i, iommu->domain_ids, ndomains) {
3946                         if (iommu->domains[i] == domain) {
3947                                 spin_lock_irqsave(&iommu->lock, flags);
3948                                 clear_bit(i, iommu->domain_ids);
3949                                 iommu->domains[i] = NULL;
3950                                 spin_unlock_irqrestore(&iommu->lock, flags);
3951                                 break;
3952                         }
3953                 }
3954         }
3955 }
3956
3957 static void vm_domain_exit(struct dmar_domain *domain)
3958 {
3959         /* Domain 0 is reserved, so don't process it */
3960         if (!domain)
3961                 return;
3962
3963         vm_domain_remove_all_dev_info(domain);
3964         /* destroy iovas */
3965         put_iova_domain(&domain->iovad);
3966
3967         /* clear ptes */
3968         dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3969
3970         /* free page tables */
3971         dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3972
3973         iommu_free_vm_domain(domain);
3974         free_domain_mem(domain);
3975 }
3976
3977 static int intel_iommu_domain_init(struct iommu_domain *domain)
3978 {
3979         struct dmar_domain *dmar_domain;
3980
3981         dmar_domain = iommu_alloc_vm_domain();
3982         if (!dmar_domain) {
3983                 printk(KERN_ERR
3984                         "intel_iommu_domain_init: dmar_domain == NULL\n");
3985                 return -ENOMEM;
3986         }
3987         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
3988                 printk(KERN_ERR
3989                         "intel_iommu_domain_init() failed\n");
3990                 vm_domain_exit(dmar_domain);
3991                 return -ENOMEM;
3992         }
3993         domain_update_iommu_cap(dmar_domain);
3994         domain->priv = dmar_domain;
3995
3996         domain->geometry.aperture_start = 0;
3997         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
3998         domain->geometry.force_aperture = true;
3999
4000         return 0;
4001 }
4002
4003 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
4004 {
4005         struct dmar_domain *dmar_domain = domain->priv;
4006
4007         domain->priv = NULL;
4008         vm_domain_exit(dmar_domain);
4009 }
4010
4011 static int intel_iommu_attach_device(struct iommu_domain *domain,
4012                                      struct device *dev)
4013 {
4014         struct dmar_domain *dmar_domain = domain->priv;
4015         struct pci_dev *pdev = to_pci_dev(dev);
4016         struct intel_iommu *iommu;
4017         int addr_width;
4018
4019         /* normally pdev is not mapped */
4020         if (unlikely(domain_context_mapped(pdev))) {
4021                 struct dmar_domain *old_domain;
4022
4023                 old_domain = find_domain(pdev);
4024                 if (old_domain) {
4025                         if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
4026                             dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
4027                                 domain_remove_one_dev_info(old_domain, pdev);
4028                         else
4029                                 domain_remove_dev_info(old_domain);
4030                 }
4031         }
4032
4033         iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
4034                                 pdev->devfn);
4035         if (!iommu)
4036                 return -ENODEV;
4037
4038         /* check if this iommu agaw is sufficient for max mapped address */
4039         addr_width = agaw_to_width(iommu->agaw);
4040         if (addr_width > cap_mgaw(iommu->cap))
4041                 addr_width = cap_mgaw(iommu->cap);
4042
4043         if (dmar_domain->max_addr > (1LL << addr_width)) {
4044                 printk(KERN_ERR "%s: iommu width (%d) is not "
4045                        "sufficient for the mapped address (%llx)\n",
4046                        __func__, addr_width, dmar_domain->max_addr);
4047                 return -EFAULT;
4048         }
4049         dmar_domain->gaw = addr_width;
4050
4051         /*
4052          * Knock out extra levels of page tables if necessary
4053          */
4054         while (iommu->agaw < dmar_domain->agaw) {
4055                 struct dma_pte *pte;
4056
4057                 pte = dmar_domain->pgd;
4058                 if (dma_pte_present(pte)) {
4059                         dmar_domain->pgd = (struct dma_pte *)
4060                                 phys_to_virt(dma_pte_addr(pte));
4061                         free_pgtable_page(pte);
4062                 }
4063                 dmar_domain->agaw--;
4064         }
4065
4066         return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
4067 }
4068
4069 static void intel_iommu_detach_device(struct iommu_domain *domain,
4070                                       struct device *dev)
4071 {
4072         struct dmar_domain *dmar_domain = domain->priv;
4073         struct pci_dev *pdev = to_pci_dev(dev);
4074
4075         domain_remove_one_dev_info(dmar_domain, pdev);
4076 }
4077
4078 static int intel_iommu_map(struct iommu_domain *domain,
4079                            unsigned long iova, phys_addr_t hpa,
4080                            size_t size, int iommu_prot)
4081 {
4082         struct dmar_domain *dmar_domain = domain->priv;
4083         u64 max_addr;
4084         int prot = 0;
4085         int ret;
4086
4087         if (iommu_prot & IOMMU_READ)
4088                 prot |= DMA_PTE_READ;
4089         if (iommu_prot & IOMMU_WRITE)
4090                 prot |= DMA_PTE_WRITE;
4091         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
4092                 prot |= DMA_PTE_SNP;
4093
4094         max_addr = iova + size;
4095         if (dmar_domain->max_addr < max_addr) {
4096                 u64 end;
4097
4098                 /* check if minimum agaw is sufficient for mapped address */
4099                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
4100                 if (end < max_addr) {
4101                         printk(KERN_ERR "%s: iommu width (%d) is not "
4102                                "sufficient for the mapped address (%llx)\n",
4103                                __func__, dmar_domain->gaw, max_addr);
4104                         return -EFAULT;
4105                 }
4106                 dmar_domain->max_addr = max_addr;
4107         }
4108         /* Round up size to next multiple of PAGE_SIZE, if it and
4109            the low bits of hpa would take us onto the next page */
4110         size = aligned_nrpages(hpa, size);
4111         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
4112                                  hpa >> VTD_PAGE_SHIFT, size, prot);
4113         return ret;
4114 }
4115
4116 static size_t intel_iommu_unmap(struct iommu_domain *domain,
4117                              unsigned long iova, size_t size)
4118 {
4119         struct dmar_domain *dmar_domain = domain->priv;
4120         int order;
4121
4122         order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
4123                             (iova + size - 1) >> VTD_PAGE_SHIFT);
4124
4125         if (dmar_domain->max_addr == iova + size)
4126                 dmar_domain->max_addr = iova;
4127
4128         return PAGE_SIZE << order;
4129 }
4130
4131 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
4132                                             dma_addr_t iova)
4133 {
4134         struct dmar_domain *dmar_domain = domain->priv;
4135         struct dma_pte *pte;
4136         u64 phys = 0;
4137
4138         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
4139         if (pte)
4140                 phys = dma_pte_addr(pte);
4141
4142         return phys;
4143 }
4144
4145 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4146                                       unsigned long cap)
4147 {
4148         struct dmar_domain *dmar_domain = domain->priv;
4149
4150         if (cap == IOMMU_CAP_CACHE_COHERENCY)
4151                 return dmar_domain->iommu_snooping;
4152         if (cap == IOMMU_CAP_INTR_REMAP)
4153                 return irq_remapping_enabled;
4154
4155         return 0;
4156 }
4157
4158 #define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4159
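/*
 * IOMMU API .add_device: find the device that actually issues the DMA seen
 * by the IOMMU (accounting for PCIe-to-PCI bridges, DMA source quirks and
 * missing ACS isolation) and place @dev into that device's iommu_group.
 */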
4160 static int intel_iommu_add_device(struct device *dev)
4161 {
4162         struct pci_dev *pdev = to_pci_dev(dev);
4163         struct pci_dev *bridge, *dma_pdev = NULL;
4164         struct iommu_group *group;
4165         int ret;
4166
4167         if (!device_to_iommu(pci_domain_nr(pdev->bus),
4168                              pdev->bus->number, pdev->devfn))
4169                 return -ENODEV;
4170
4171         bridge = pci_find_upstream_pcie_bridge(pdev);
4172         if (bridge) {
4173                 if (pci_is_pcie(bridge))
4174                         dma_pdev = pci_get_domain_bus_and_slot(
4175                                                 pci_domain_nr(pdev->bus),
4176                                                 bridge->subordinate->number, 0);
4177                 if (!dma_pdev)
4178                         dma_pdev = pci_dev_get(bridge);
4179         } else
4180                 dma_pdev = pci_dev_get(pdev);
4181
4182         /* Account for quirked devices */
4183         swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4184
4185         /*
4186          * If it's a multifunction device that does not support our
4187          * required ACS flags, add to the same group as function 0.
4188          */
4189         if (dma_pdev->multifunction &&
4190             !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
4191                 swap_pci_ref(&dma_pdev,
4192                              pci_get_slot(dma_pdev->bus,
4193                                           PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
4194                                           0)));
4195
4196         /*
4197          * Devices on the root bus go through the iommu.  If that's not us,
4198          * find the next upstream device and test ACS up to the root bus.
4199          * Finding the next device may require skipping virtual buses.
4200          */
4201         while (!pci_is_root_bus(dma_pdev->bus)) {
4202                 struct pci_bus *bus = dma_pdev->bus;
4203
4204                 while (!bus->self) {
4205                         if (!pci_is_root_bus(bus))
4206                                 bus = bus->parent;
4207                         else
4208                                 goto root_bus;
4209                 }
4210
4211                 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4212                         break;
4213
4214                 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4215         }
4216
4217 root_bus:
4218         group = iommu_group_get(&dma_pdev->dev);
4219         pci_dev_put(dma_pdev);
4220         if (!group) {
4221                 group = iommu_group_alloc();
4222                 if (IS_ERR(group))
4223                         return PTR_ERR(group);
4224         }
4225
4226         ret = iommu_group_add_device(group, dev);
4227
4228         iommu_group_put(group);
4229         return ret;
4230 }
4231
4232 static void intel_iommu_remove_device(struct device *dev)
4233 {
4234         iommu_group_remove_device(dev);
4235 }
4236
4237 static struct iommu_ops intel_iommu_ops = {
4238         .domain_init    = intel_iommu_domain_init,
4239         .domain_destroy = intel_iommu_domain_destroy,
4240         .attach_dev     = intel_iommu_attach_device,
4241         .detach_dev     = intel_iommu_detach_device,
4242         .map            = intel_iommu_map,
4243         .unmap          = intel_iommu_unmap,
4244         .iova_to_phys   = intel_iommu_iova_to_phys,
4245         .domain_has_cap = intel_iommu_domain_has_cap,
4246         .add_device     = intel_iommu_add_device,
4247         .remove_device  = intel_iommu_remove_device,
4248         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
4249 };
4250
4251 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
4252 {
4253         /* G4x/GM45 integrated gfx dmar support is totally busted. */
4254         printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
4255         dmar_map_gfx = 0;
4256 }
4257
4258 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
4259 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
4260 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
4261 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
4262 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
4263 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
4264 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
4265
4266 static void quirk_iommu_rwbf(struct pci_dev *dev)
4267 {
4268         /*
4269          * Mobile 4 Series Chipset neglects to set RWBF capability,
4270          * but needs it. Same seems to hold for the desktop versions.
4271          */
4272         printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4273         rwbf_quirk = 1;
4274 }
4275
4276 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4277 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4278 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4279 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4280 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4281 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4282 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4283
4284 #define GGC 0x52
4285 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
4286 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
4287 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
4288 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
4289 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
4290 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
4291 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
4292 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
4293
4294 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
4295 {
4296         unsigned short ggc;
4297
4298         if (pci_read_config_word(dev, GGC, &ggc))
4299                 return;
4300
4301         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
4302                 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
4303                 dmar_map_gfx = 0;
4304         } else if (dmar_map_gfx) {
4305                 /* we have to ensure the gfx device is idle before we flush */
4306                 printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
4307                 intel_iommu_strict = 1;
4308         }
4309 }
4310 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
4311 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
4312 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
4313 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
4314
4315 /* On Tylersburg chipsets, some BIOSes have been known to enable the
4316    ISOCH DMAR unit for the Azalia sound device, but not give it any
4317    TLB entries, which causes it to deadlock. Check for that.  We do
4318    this in a function called from init_dmars(), instead of in a PCI
4319    quirk, because we don't want to print the obnoxious "BIOS broken"
4320    message if VT-d is actually disabled.
4321 */
4322 static void __init check_tylersburg_isoch(void)
4323 {
4324         struct pci_dev *pdev;
4325         uint32_t vtisochctrl;
4326
4327         /* If there's no Azalia in the system anyway, forget it. */
4328         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
4329         if (!pdev)
4330                 return;
4331         pci_dev_put(pdev);
4332
4333         /* System Management Registers. Might be hidden, in which case
4334            we can't do the sanity check. But that's OK, because the
4335            known-broken BIOSes _don't_ actually hide it, so far. */
4336         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
4337         if (!pdev)
4338                 return;
4339
4340         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
4341                 pci_dev_put(pdev);
4342                 return;
4343         }
4344
4345         pci_dev_put(pdev);
4346
4347         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
4348         if (vtisochctrl & 1)
4349                 return;
4350
4351         /* Drop all bits other than the number of TLB entries */
4352         vtisochctrl &= 0x1c;
4353
4354         /* If we have the recommended number of TLB entries (16), fine. */
4355         if (vtisochctrl == 0x10)
4356                 return;
4357
4358         /* Zero TLB entries? You get to ride the short bus to school. */
4359         if (!vtisochctrl) {
4360                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
4361                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
4362                      dmi_get_system_info(DMI_BIOS_VENDOR),
4363                      dmi_get_system_info(DMI_BIOS_VERSION),
4364                      dmi_get_system_info(DMI_PRODUCT_VERSION));
4365                 iommu_identity_mapping |= IDENTMAP_AZALIA;
4366                 return;
4367         }
4368         
4369         printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
4370                vtisochctrl);
4371 }