/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32bit addresses on systems
 * with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

unsigned long iommu_bus_base;		/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

u32 *iommu_gatt_base;			/* Remapping table */

/* If this is disabled the IOMMU will use an optimized flushing strategy
   of only flushing when a mapping is reused. With it true the GART is
   flushed for every mapping. Problem is that doing the lazy flush seems
   to trigger bugs with some popular PCI cards, in particular 3ware (but
   it has also been seen with Qlogic at least). */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
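
/*
 * Worked example (illustrative, not part of the original source): a GART
 * PTE packs a 40bit physical address into 32 bits. For phys 0x1234567000:
 *   (0x1234567000 & 0xfffff000)      = 0x34567000  (low bits, page aligned)
 *   ((0x1234567000 >> 32) << 4)      = 0x120       (high bits moved down)
 *   GPTE_ENCODE: 0x34567000|0x120|3  = 0x34567123
 * GPTE_DECODE reverses it: (0x34567123 & 0xfffff000) = 0x34567000 and
 * (((u64)0x34567123 & 0xff0) << 28)  = 0x1200000000, summing to 0x1234567000.
 */
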
#define to_pages(addr,size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
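
/*
 * Example (illustrative, assuming 4K pages): a 0x1200 byte buffer starting
 * at offset 0xf00 into its first page needs
 * round_up(0xf00 + 0x1200, 0x1000) >> 12 = 3 pages, one more than the
 * size alone would suggest.
 */
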
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
				       iommu_pages, size);
	if (offset == -1) {
		/* Wrapped around: search again from the start and flush. */
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
					       iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);
#define CLEAR_LEAK(x) if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL;

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
int iommu_leak_pages = 20;

void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);
	/* Very crude. dump some from the end of the table too */
	printk("Dumping %d pages from end of IOMMU:\n", iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk("%lu: ", iommu_pages - i);
		printk_address((unsigned long)iommu_leak_tab[iommu_pages - i]);
		printk("%c", (i + 1) % 2 == 0 ? '\n' : ' ');
	}
	printk("\n");
}
#else
#define SET_LEAK(x)
#define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;
	return mmu;
}

static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
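
/*
 * Example (illustrative, hypothetical numbers): mapping size 0x2000 at
 * phys 0x123456800 touches to_pages(0x800, 0x2000) = 3 GART pages. With
 * iommu_bus_base 0xfc000000 and a free slot at GART page 10, the returned
 * bus address is 0xfc000000 + 10*4096 + 0x800 = 0xfc00a800.
 */
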
static dma_addr_t gart_map_simple(struct device *dev, char *buf,
				  size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();
	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);
	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;
	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = page_to_phys(s->page) + s->offset;

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();
	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);
	return 0;
}

static inline int dma_map_cont(struct scatterlist *start, int nelems,
			       struct scatterlist *sout,
			       unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	int i;
	int out;
	int start;
	unsigned long pages = 0;
	int need = 0, nextneed;
	struct scatterlist *s, *ps, *start_sg, *sgmap;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = page_to_phys(s->page) + s->offset;

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/* Can only merge when the last chunk ends on a page
			   boundary and the new one doesn't have an offset. */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, nents, dir);
	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);
	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024)
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB. Consider increasing the AGP aperture in BIOS\n",
		       iommu_size>>20);

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32;
	u64 aper_base;
	unsigned aper_order;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
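
/*
 * Example (illustrative): an aperture order field of 2 means an aperture
 * of (32MB << 2) = 128MB, and an aperture base field of 0x40 places it
 * at 0x40 << 25 = 2GB physical.
 */
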
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	struct pci_dev *dev;
	void *gatt;
	unsigned aper_base, new_aper_base;
	unsigned aper_size, gatt_size, new_aper_size;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size>>20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
				  PAGE_KERNEL_NOCACHE))
		panic("Could not set GART PTEs to uncacheable pages");
	global_flush_tlb();

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk("PCI-DMA: aperture base @ %x size %u KB\n", aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		/* disable GART translation */
		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long aper_size;
	unsigned long iommu_start;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
					"but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk("PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size>>20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * later.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
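
/*
 * Example (illustrative) boot usage of the options parsed above, assuming
 * the usual comma-separated iommu= syntax handled in pci-dma.c:
 *   iommu=fullflush		flush the GART on every mapping
 *   iommu=memaper=2		fallback aperture of 32MB << 2 = 128MB
 *   iommu=leak			(CONFIG_IOMMU_LEAK) trace unfreed mappings
 */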