/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32bit addresses on systems
 * with more than 4GB.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is true, the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID	1
#define GPTE_COHERENT	2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
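
/*
 * Worked example (illustrative, not from the original source): a GART PTE
 * packs a 40-bit physical page address into 32 bits. For the page at
 * phys 0x1234567000:
 *
 *	GPTE_ENCODE(0x1234567000)
 *		= 0x34567000		(bits 12-31 kept in place)
 *		| (0x12 << 4)		(bits 32-39 stored in bits 4-11)
 *		| GPTE_VALID | GPTE_COHERENT
 *		= 0x34567123
 *
 *	GPTE_DECODE(0x34567123)
 *		= 0x34567000 | (0x120 << 28) = 0x1234567000
 */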
#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
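
/*
 * Usage sketch (illustrative, mirrors dma_map_area() below): callers
 * allocate GART pages, fill the GATT, and flush once at the end:
 *
 *	unsigned long page = alloc_iommu(dev, 2, 0);
 *	if (page != -1) {
 *		iommu_gatt_base[page]     = GPTE_ENCODE(phys);
 *		iommu_gatt_base[page + 1] = GPTE_ENCODE(phys + PAGE_SIZE);
 *		flush_gart();
 *	}
 *
 * The allocator is next-fit: searching starts at next_bit, and a wrap
 * back to 0 sets need_flush so stale GART-TLB entries are flushed
 * before any page is reused.
 */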
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}
/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
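
/*
 * Worked example (illustrative values): an 8KB buffer at phys 0x12345780
 * spans three GART pages, since iommu_num_pages() accounts for the
 * in-page offset 0x780. If alloc_iommu() hands out GART page 16, entries
 * 16..18 are filled and the returned bus address is:
 *
 *	iommu_bus_base + 16*PAGE_SIZE + 0x780
 *
 * Note phys_mem's low bits survive the loop above because it only
 * advances in PAGE_SIZE steps, so (phys_mem & ~PAGE_MASK) is still the
 * original in-page offset.
 */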
/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}
/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
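
/*
 * Note (editorial): when need == 0 the single entry is passed through
 * untranslated (the device can already reach the physical address), so
 * no GART pages are consumed. Only merged or force-mapped runs go
 * through __dma_map_cont().
 */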
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}
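
/*
 * Merge example (illustrative): with iommu_merge enabled, an entry of
 * <offset 0, length 2*PAGE_SIZE> followed by <offset 0, length 0x400>
 * becomes one output segment, because the first ends on a page boundary
 * and the second has no offset. If instead the first entry ended
 * mid-page, or the second had a non-zero offset, the loop above would
 * close the segment via dma_map_cont() and start a new one.
 */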
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}
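
/*
 * Example (illustrative): for a 16KB allocation, get_order(16384) == 2,
 * so align_mask == 3 and alloc_iommu() returns a GART page index that is
 * a multiple of 4. The resulting bus address is therefore naturally
 * aligned to the allocation size, as callers of dma_alloc_coherent()
 * may assume.
 */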
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}
static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
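
/*
 * Example (illustrative numbers): with a 1GB aperture at 0x80000000 and
 * AGP still in use (!no_agp), iommu_size starts at 512MB. Then
 * a = 0x80000000 + 0x20000000 = 0xa0000000 is already aligned to
 * PMD_PAGE_SIZE (2MB), so nothing is shaved off; gart_iommu_init() will
 * carve that 512MB out of the top of the aperture.
 */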
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
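
/*
 * Decoding example (illustrative): the northbridge stores the aperture
 * base in 32MB units (hence the << 25) and the size as a 3-bit order.
 * A decoded order of 2 with a base field of 0x40 yields:
 *
 *	aper_size = (32 * 1024 * 1024) << 2 = 128MB
 *	aper_base = 0x40UL << 25            = 0x80000000 (2GB)
 *
 * Apertures that would end above 4GB are rejected by the check above.
 */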
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	k8_flush_garts();
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(struct sys_device *dev)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}
static int gart_resume(struct sys_device *dev)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges(dev);

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name		= "gart",
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.cls		= &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}
static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};
static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shutdown it if there is AGP installed */
	if (!no_agp)
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (num_k8_northbridges == 0)
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}

IOMMU_INIT_POST(gart_iommu_hole_init);
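
/*
 * Usage sketch (illustrative, not from the original source): these
 * options arrive via the iommu= kernel command line parameter, e.g.
 *
 *	iommu=force,fullflush
 *	iommu=memaper=2
 *
 * iommu_setup() in pci-dma.c handles the generic options and passes each
 * token to gart_parse_options() for the GART-specific ones above.
 */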