/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* for show_stack */
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>

#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
				      __be64 *startp, __be64 *endp)
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */

	/* If this is non-zero, change the format.  We shift the
	 * address and or in the magic from the device tree. */
	start |= tbl->it_busno;

	end |= inc - 1; /* round up end to be different than start */

	mb(); /* Make sure TCEs in memory are written */
	while (start <= end) {
		out_be64(invalidate, start);
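
/*
 * Build TCE entries directly in the TCE table.  This path is used when
 * the table lives in kernel memory (non-LPAR); each entry maps one TCE
 * page with read (and, unless DMA_TO_DEVICE, write) permission, and a
 * software invalidate is issued afterwards if the table requires it.
 */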
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     struct dma_attrs *attrs)
	proto_tce = TCE_PCI_READ; // Read allowed
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	/* can't move this out since we might cross MEMBLOCK boundary */
	rpn = __pa(uaddr) >> TCE_SHIFT;
	*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

	uaddr += TCE_PAGE_SIZE;

	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);

static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
	tces = tcep = ((__be64 *)tbl->it_base) + index;

	if (tbl->it_type & TCE_PCI_SWINV_FREE)
		tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);

static void tce_free_pSeriesLP(struct iommu_table*, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
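
/*
 * LPAR variant: the TCE table is owned by the hypervisor, so each entry
 * is installed with a separate H_PUT_TCE hcall (plpar_tce_put).  On
 * H_NOT_ENOUGH_RESOURCES the entries created so far are backed out.
 */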
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       struct dma_attrs *attrs)
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
	rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		tce_free_pSeriesLP(tbl, tcenum_start,
				   (npages_start - (npages + 1)));

	if (rc && printk_ratelimit()) {
		printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		printk("\ttce val = 0x%llx\n", tce);
		show_stack(current, (unsigned long *)__get_SP());

static DEFINE_PER_CPU(__be64 *, tce_page);
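
/*
 * Batched LPAR variant: stage up to one page worth of TCEs in the
 * per-cpu tce_page and hand them to the hypervisor with
 * plpar_tce_put_indirect.  Falls back to tce_build_pSeriesLP when the
 * staging page cannot be allocated.
 */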
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
	long tcenum_start = tcenum, npages_start = npages;

		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
		__get_cpu_var(tce_page) = tcep;

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
				(npages_start - (npages + limit)));

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());

static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
		rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			show_stack(current, (unsigned long *)__get_SP());

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		show_stack(current, (unsigned long *)__get_SP());
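
/* Read back a single TCE from the hypervisor-owned table via plpar_tce_get. */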
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		show_stack(current, (unsigned long *)__get_SP());

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32 liobn;		/* tce table number */
	__be64 dma_base;	/* address hi,lo */
	__be32 tce_shift;	/* ilog2(tce_page_size) */
	__be32 window_shift;	/* ilog2(tce_window_size) */

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;

/* Dynamic DMA Window support */
struct ddw_query_response {
	__be32 windows_available;
	__be32 largest_available_block;
	__be32 migration_capable;

struct ddw_create_response {

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
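
/*
 * Clear all TCEs covering [start_pfn, start_pfn + num_pfn) in a dynamic
 * window, in chunks of up to 512 entries per plpar_tce_stuff call.  The
 * pfn range is first aligned to the window's TCE page size.
 */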
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
		next += limit * tce_size;
	} while (num_tce > 0 && !rc);
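
/*
 * Map a pfn range linearly (offset by the window's dma_base) into a
 * dynamic window, staging entries in the per-cpu tce_page and pushing
 * them with plpar_tce_put_indirect, one page worth of TCEs at a time.
 */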
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __get_cpu_var(tce_page);

		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		__get_cpu_var(tce_page) = tcep;

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);

		rc = plpar_tce_put_indirect(liobn,
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
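
/*
 * Fill in an iommu_table for a non-LPAR PHB from the linux,tce-base and
 * linux,tce-size properties, carving the per-slot window out of the
 * PHB's 2GB DMA space.
 */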
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
	struct device_node *node;
	const unsigned long *basep, *sw_inval;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
		       "missing tce entries !\n", dn->full_name);

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;

	sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
		/*
		 * This property contains information on how to
		 * invalidate the TCE entry.  The first property is
		 * the base MMIO address used to invalidate entries.
		 * The second property tells us the format of the TCE
		 * invalidate (whether it needs to be shifted) and
		 * some magic routing info to add to our invalidate
		 * command.
		 */
		tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
		tbl->it_busno = sw_inval[1]; /* overload this with magic */
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;

/**
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      const __be32 *dma_window)
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;
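
/*
 * Bus setup for non-LPAR pSeries: size a DMA window for each direct
 * child of the PHB (reserving space for ISA/IDE when present) and
 * create the PHB's iommu table.
 */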
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);

	/* This is not a root bus, any setup will be done for the
	 * device-side of the bridge in pci_dma_dev_setup_pSeries().
	 */

	/* Check if the ISA bus on the system is under this PHB. */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	/* No ISA/IDE - just set window size and return */
	pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

	while (pci->phb->dma_window_size * children > 0x80000000ul)
		pci->phb->dma_window_size >>= 1;
	pr_debug("No ISA/IDE, window size is 0x%llx\n",
		 pci->phb->dma_window_size);
	pci->phb->dma_window_base_cur = 0;

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
	iommu_table_setparms(pci->phb, dn, tbl);
	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(tbl, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);

static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");

	pr_debug(" parent is %s, iommu_table: 0x%p\n",
		 pdn->full_name, ppci->iommu_table);

	if (!ppci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->iommu_table);

static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms(phb, dn, tbl);
		PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
		iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)

	if (dn && PCI_DN(dn))
		set_iommu_table_base_and_group(&dev->dev,
					       PCI_DN(dn)->iommu_table);
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

early_param("disable_ddw", disable_ddw_setup);
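
/*
 * Tear down a 64-bit direct DMA window: clear its TCEs, call the
 * ibm,remove-pe-dma-window RTAS token from ibm,ddw-applicable, and
 * optionally drop the DIRECT64_PROPNAME property from the node.
 */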
static void remove_ddw(struct device_node *np, bool remove_prop)
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	const u32 *ddw_avail;

	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);

	if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp))

	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
			1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
		pr_warning("%s failed to clear tces in window.\n",
		pr_debug("%s successfully cleared tces in window.\n",

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
		pr_warning("%s: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);
		pr_debug("%s: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np->full_name, ret, ddw_avail[2], liobn);

	ret = of_remove_property(np, win64);
		pr_warning("%s: failed to remove direct window property: %d\n",
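
/*
 * If a direct window has already been configured for this device node,
 * return its DMA base so other functions of the same device can reuse it.
 */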
static u64 find_existing_ddw(struct device_node *pdn)
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
	spin_unlock(&direct_window_list_lock);

static int find_existing_ddw_windows(void)
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			remove_ddw(pdn, true);

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);

machine_arch_initcall(pseries, find_existing_ddw_windows);
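
/*
 * Ask firmware (ibm,query-pe-dma-windows) how many additional windows
 * the PE supports and the largest contiguous block available, using the
 * PE config address and PHB BUID taken from the eeh_dev.
 */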
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
	struct eeh_dev *edev;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
	struct eeh_dev *edev;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	edev = pci_dev_to_eeh_dev(dev);
	cfg_addr = edev->config_addr;
	if (edev->pe_config_addr)
		cfg_addr = edev->pe_config_addr;
	buid = edev->phb->buid;

		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr,
				BUID_HI(buid), BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;

static LIST_HEAD(failed_ddw_pdn_list);

/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
	struct ddw_query_response query;
	struct ddw_create_response create;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	const u32 *uninitialized_var(ddw_avail);
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (!strcmp(fpdn->pdn->full_name, pdn->full_name))

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len);
	if (!ddw_avail || len < 3 * sizeof(u32))

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");

	if (be32_to_cpu(query.page_size) & 4) {
		page_shift = 24; /* 16MB */
	} else if (be32_to_cpu(query.page_size) & 2) {
		page_shift = 16; /* 64kB */
	} else if (be32_to_cpu(query.page_size) & 1) {
		page_shift = 12; /* 4kB */
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",

	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,

	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
			"couldn't allocate property for 64bit dma window\n");
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
			"couldn't allocate property name and value\n");

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);

	ddwprop->liobn = create.liobn;
	ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2));
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
		create.liobn, dn->full_name);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
		dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
			 dn->full_name, ret);
		goto out_free_window;

	ret = of_add_property(pdn, win64);
		dev_err(&dev->dev, "unable to add dma window property for %s: %d",
			pdn->full_name, ret);
		goto out_free_window;

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = of_read_number(&create.addr_hi, 2);

	remove_ddw(pdn, true);

	kfree(win64->value);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	list_add(&fpdn->list, &failed_ddw_pdn_list);

	mutex_unlock(&direct_window_init_mutex);

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %s\n", dn->full_name);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%s\n",
		       pci_name(dev), of_node_full_name(dn));

	pr_debug(" parent is %s\n", pdn->full_name);

	if (!pci->iommu_table) {
		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
		pr_debug(" created table: %p\n", pci->iommu_table);
		pr_debug(" found DMA window, table: %p\n", pci->iommu_table);

	set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
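
/*
 * dma_set_mask hook: when a 64-bit mask is requested and DDW is not
 * disabled, try to enable a direct window and switch the device to
 * dma_direct_ops; otherwise fall back to 32-bit DMA through the iommu.
 */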
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;

	if (!dev_is_pci(dev))

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %s\n", dn->full_name);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
		     pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);

		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);

	/* fall back on iommu ops, restore table pointer with ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
		pci_dma_dev_setup_pSeriesLP(pdev);

	if (!dma_supported(dev, dma_mask))

	*dev->dma_mask = dma_mask;

static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
			if (of_get_property(dn, "ibm,dma-window", NULL))
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);

	return dma_iommu_ops.get_required_mask(dev);

#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries	NULL
#define pci_dma_dev_setup_pSeries	NULL
#define pci_dma_bus_setup_pSeriesLP	NULL
#define pci_dma_dev_setup_pSeriesLP	NULL
#define dma_set_mask_pSeriesLP		NULL
#define dma_get_required_mask_pSeriesLP	NULL
#endif /* !CONFIG_PCI */
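
/*
 * Memory hotplug notifier: memory going online must be added to every
 * existing direct window (and cleared again if the online is cancelled)
 * so the linear mapping keeps covering all of RAM.
 */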
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
	struct direct_window *window;
	struct memory_notify *arg = data;

	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		spin_unlock(&direct_window_list_lock);
	case MEM_CANCEL_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		spin_unlock(&direct_window_list_lock);

	if (ret && action != MEM_CANCEL_ONLINE)

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,

static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
	int err = NOTIFY_OK;
	struct device_node *np = node;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->iommu_table)
			iommu_free_table(pci->iommu_table, np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
		spin_unlock(&direct_window_list_lock);

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,

/* These are called very early. */
void iommu_init_early_pSeries(void)
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
			ppc_md.tce_build = tce_buildmulti_pSeriesLP;
			ppc_md.tce_free = tce_freemulti_pSeriesLP;
			ppc_md.tce_build = tce_build_pSeriesLP;
			ppc_md.tce_free = tce_free_pSeriesLP;
		ppc_md.tce_get = tce_get_pSeriesLP;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
		ppc_md.tce_build = tce_build_pSeries;
		ppc_md.tce_free = tce_free_pSeries;
		ppc_md.tce_get = tce_get_pseries;
		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
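
/*
 * Boot with "multitce=off" to disable the MULTITCE firmware feature and
 * fall back to one H_PUT_TCE hcall per TCE entry instead of the batched
 * plpar_tce_put_indirect/plpar_tce_stuff paths.
 */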
static int __init disable_multitce(char *str)
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		ppc_md.tce_build = tce_build_pSeriesLP;
		ppc_md.tce_free = tce_free_pSeriesLP;
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;

__setup("multitce=", disable_multitce);