/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/iommu.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
 * definitions for the ACPI scanning code
 */
#define IVRS_HEADER_LENGTH 48

#define ACPI_IVHD_TYPE                  0x10
#define ACPI_IVMD_TYPE_ALL              0x20
#define ACPI_IVMD_TYPE                  0x21
#define ACPI_IVMD_TYPE_RANGE            0x22

#define IVHD_DEV_ALL                    0x01
#define IVHD_DEV_SELECT                 0x02
#define IVHD_DEV_SELECT_RANGE_START     0x03
#define IVHD_DEV_RANGE_END              0x04
#define IVHD_DEV_ALIAS                  0x42
#define IVHD_DEV_ALIAS_RANGE            0x43
#define IVHD_DEV_EXT_SELECT             0x46
#define IVHD_DEV_EXT_SELECT_RANGE       0x47
#define IVHD_DEV_SPECIAL                0x48

#define IVHD_SPECIAL_IOAPIC             1
#define IVHD_SPECIAL_HPET               2

#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
#define IVHD_FLAG_PASSPW_EN_MASK        0x02
#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
#define IVHD_FLAG_ISOC_EN_MASK          0x08

#define IVMD_FLAG_EXCL_RANGE            0x08
#define IVMD_FLAG_UNITY_MAP             0x01

#define ACPI_DEVFLAG_INITPASS           0x01
#define ACPI_DEVFLAG_EXTINT             0x02
#define ACPI_DEVFLAG_NMI                0x04
#define ACPI_DEVFLAG_SYSMGT1            0x10
#define ACPI_DEVFLAG_SYSMGT2            0x20
#define ACPI_DEVFLAG_LINT0              0x40
#define ACPI_DEVFLAG_LINT1              0x80
#define ACPI_DEVFLAG_ATSDIS             0x10000000
/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entries.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 efr;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasid __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;
static bool amd_iommu_pc_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;
/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * This table is used to find the irq remapping table for a given device id
 * quickly.
 */
struct irq_remap_table **irq_lookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */
enum iommu_init_state {
	IOMMU_START_STATE,
	IOMMU_IVRS_DETECTED,
	IOMMU_ACPI_FINISHED,
	IOMMU_ENABLED,
	IOMMU_PCI_INIT,
	IOMMU_INTERRUPTS_EN,
	IOMMU_DMA_OPS,
	IOMMU_INITIALIZED,
	IOMMU_NOT_FOUND,
	IOMMU_INIT_ERROR,
};
/* Early ioapic and hpet maps from kernel command line */
#define EARLY_MAP_SIZE		4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
static bool __initdata cmdline_maps;

static enum iommu_init_state init_state = IOMMU_START_STATE;

static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
		get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}
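/*
 * Example: with amd_iommu_last_bdf == 0xffff and the 32-byte device table
 * entries this yields a 2MB device table - get_order() rounds the table
 * up to a power-of-two number of pages, so the table sizes computed here
 * are always page aligned.
 */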
/* Access to l1 and l2 indexed register spaces */

static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}
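/*
 * Note: both indexed register spaces above are reached through an
 * address/data pair in the IOMMU's PCI config space (0xf8/0xfc for l1,
 * 0xf0/0xf4 for l2); bit 31 resp. bit 8 of the address register acts as
 * the write-enable bit and is cleared again after the write.
 */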
/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/
/*
 * This function sets the exclusion range in the IOMMU. DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}
/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}
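/*
 * Note: the low bits of the device table base address register encode the
 * table size in units of 4K pages, minus one - hence the
 * '(dev_table_size >> 12) - 1' above.
 */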
/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
{
	if (!request_mem_region(address, end, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n",
			address, end);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return (u8 __iomem *)ioremap_nocache(address, end);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/
/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	return 0x04 << (*ivhd >> 6);
}
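/*
 * Example: the entry length is encoded in the top two bits of the entry
 * type - 4-byte entries like IVHD_DEV_SELECT (0x02) have both bits clear,
 * while 8-byte entries like IVHD_DEV_ALIAS (0x42 >> 6 == 1) yield
 * 0x04 << 1 == 8.
 */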
/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_ALL:
			/* Use maximum BDF value for DEV_ALL */
			update_last_devid(0xffff);
			break;
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}
/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0)
		/* ACPI table corrupt */
		return -ENODEV;

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/
/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static int __init alloc_command_buffer(struct amd_iommu *iommu)
{
	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(CMD_BUFFER_SIZE));

	return iommu->cmd_buf ? 0 : -ENOMEM;
}
/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}
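/*
 * Note: with head == tail the hardware considers the command ring empty,
 * so resetting both pointers to zero discards whatever the IOMMU stopped
 * on before command fetching is re-enabled.
 */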
/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log its events to */
static int __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(EVT_BUFFER_SIZE));

	return iommu->evt_buf ? 0 : -ENOMEM;
}

static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
/* allocates the memory where the IOMMU will log peripheral page requests to */
static int __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						  get_order(PPR_LOG_SIZE));

	return iommu->ppr_log ? 0 : -ENOMEM;
}

static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
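/*
 * Note: a device table entry is 256 bits wide and stored as an array of
 * 64-bit words, so the bit index selects word (bit >> 6) and bit
 * (bit & 0x3f) within it - e.g. DEV_ENTRY_IW (0x3e) lands in data[0],
 * bit 62.
 */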
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}
/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}
static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		*devid = entry->devid;

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= *devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}
static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 &early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 &early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}
/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\tflags: %02x\n", e->flags);

			for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >> 8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			ret = add_special_device(type, handle, &devid, false);
			if (ret)
				return ret;

			/*
			 * add_special_device might update the devid in case a
			 * command-line override is present. So call
			 * set_dev_entry_from_acpi after add_special_device.
			 */
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);

			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}
/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B miscellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}
/*
 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
 * Workaround:
 *     BIOS should enable ATS write permission check by setting
 *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
 */
static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x30) ||
	    (boot_cpu_data.x86_model > 0x3f))
		return;

	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
	value = iommu_read_l2(iommu, 0x47);

	if (value & BIT(0))
		return;

	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
	iommu_write_l2(iommu, 0x47, value | BIT(0));

	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));
}
/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid     = h->devid;
	iommu->cap_ptr   = h->cap_ptr;
	iommu->pci_seg   = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}
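	/*
	 * Note: bits 16:13 and 22:17 of the IVHD EFR image are the
	 * performance counter bank and counter counts; only when both are
	 * non-zero does the mapping below extend to the counter register
	 * aperture (MMIO_REG_END_OFFSET instead of MMIO_CNTR_CONF_OFFSET).
	 */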
	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	ret = amd_iommu_create_irq_domain(iommu);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	/* Prevent binding other PCI device drivers to IOMMU devices */
	iommu->dev->match_driver = false;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid = (1 << (pasmax + 1)) - 1;

		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}
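	/*
	 * Example: a FEATURE_PASID field value of 15 above yields
	 * max_pasid = (1 << 16) - 1 = 0xffff, i.e. 16-bit PASIDs.
	 */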
	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
		return -ENOMEM;

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
							PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				      &iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				      &iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);
	amd_iommu_ats_write_check_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}
static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}
static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	init_device_table_dma();

	for_each_iommu(iommu)
		iommu_flush_all_caches(iommu);

	ret = amd_iommu_init_api();

	print_iommu_info();

	return ret;
}
/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * device.
 *
 ****************************************************************************/
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}
/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/
static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = NULL;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPE\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}
/* iterates over all memory definitions we find in the ACPI table */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivmd_header *m;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		m = (struct ivmd_header *)p;
		if (m->flags & IVMD_FLAG_EXCL_RANGE)
			init_exclusion_range(m);
		else if (m->flags & IVMD_FLAG_UNITY_MAP)
			init_unity_map_range(m);

		p += m->length;
	}

	return 0;
}
/*
 * Init the device table to not allow DMA access for devices and
 * suppress all page faults
 */
static void init_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
		set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
	}
}

static void __init uninit_device_table_dma(void)
{
	u32 devid;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
		amd_iommu_dev_table[devid].data[0] = 0ULL;
		amd_iommu_dev_table[devid].data[1] = 0ULL;
	}
}

static void init_device_table(void)
{
	u32 devid;

	if (!amd_iommu_irq_remap)
		return;

	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
		iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

	iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
		iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

	iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
		iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
		iommu_feature_disable(iommu, CONTROL_ISOC_EN);

	/*
	 * make IOMMU memory accesses cache coherent
	 */
	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);

	/* Set IOTLB invalidation timeout to 1s */
	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
	int i, j;
	u32 ioc_feature_control;
	struct pci_dev *pdev = iommu->root_pdev;

	/* RD890 BIOSes may not have completely reconfigured the iommu */
	if (!is_rd890_iommu(iommu->dev) || !pdev)
		return;

	/*
	 * First, we need to ensure that the iommu is enabled. This is
	 * controlled by a register in the northbridge
	 */

	/* Select Northbridge indirect register 0x75 and enable writing */
	pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
	pci_read_config_dword(pdev, 0x64, &ioc_feature_control);

	/* Enable the iommu */
	if (!(ioc_feature_control & 0x1))
		pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);

	/* Restore the iommu BAR */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo);
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
			       iommu->stored_addr_hi);

	/* Restore the l1 indirect regs for each of the 6 l1s */
	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);

	/* Restore the l2 indirect regs */
	for (i = 0; i < 0x83; i++)
		iommu_write_l2(iommu, i, iommu->stored_l2[i]);

	/* Lock PCI setup registers */
	pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
			       iommu->stored_addr_lo | 1);
}
/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
 */
static void early_enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_disable(iommu);
		iommu_init_flags(iommu);
		iommu_set_device_table(iommu);
		iommu_enable_command_buffer(iommu);
		iommu_enable_event_buffer(iommu);
		iommu_set_exclusion_range(iommu);
		iommu_enable(iommu);
		iommu_flush_all_caches(iommu);
	}
}

static void enable_iommus_v2(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

static void enable_iommus(void)
{
	early_enable_iommus();

	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}
/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};
static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	kmem_cache_destroy(amd_iommu_irq_cache);
	amd_iommu_irq_cache = NULL;

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
	 */
	gart_iommu_init();

#endif
}
/* SB IOAPIC is always on this device in AMD systems */
#define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))

static bool __init check_ioapic_information(void)
{
	const char *fw_bug = FW_BUG;
	bool ret, has_sb_ioapic;
	int idx;

	has_sb_ioapic = false;
	ret           = false;

	/*
	 * If we have map overrides on the kernel command line the
	 * messages in this function might not describe firmware bugs
	 * anymore - so be careful
	 */
	if (cmdline_maps)
		fw_bug = "";

	for (idx = 0; idx < nr_ioapics; idx++) {
		int devid, id = mpc_ioapic_id(idx);

		devid = get_ioapic_devid(id);
		if (devid < 0) {
			pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n",
				fw_bug, id);
			ret = false;
		} else if (devid == IOAPIC_SB_DEVID) {
			has_sb_ioapic = true;
			ret           = true;
		}
	}

	if (!has_sb_ioapic) {
		/*
		 * We expect the SB IOAPIC to be listed in the IVRS
		 * table. The system timer is connected to the SB IOAPIC
		 * and if we don't have it in the list the system will
		 * panic at boot time.  This situation usually happens
		 * when the BIOS is buggy and provides us the wrong
		 * device id for the IOAPIC in the system.
		 */
		pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug);
	}

	if (!ret)
		pr_err("AMD-Vi: Disabling interrupt remapping\n");

	return ret;
}
static void __init free_dma_resources(void)
{
	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
		   get_order(MAX_DOMAIN_ID/8));

	free_unity_maps();
}
/*
 * This is the hardware init function for AMD IOMMU in the system.
 * This function is called either from amd_iommu_init or from the interrupt
 * remapping setup code.
 *
 * This function basically parses the ACPI table for AMD IOMMU (IVRS)
 * three times:
 *
 *	1 pass) Find the highest PCI device id the driver has to handle.
 *		Upon this information the size of the data structures is
 *		determined that needs to be allocated.
 *
 *	2 pass) Initialize the data structures just allocated with the
 *		information in the ACPI table about available AMD IOMMUs
 *		in the system. It also maps the PCI devices in the
 *		system to specific IOMMUs
 *
 *	3 pass) After the basic data structures are allocated and
 *		initialized we update them with information about memory
 *		remapping requirements parsed out of the ACPI table in
 *		this last pass.
 *
 * After everything is set up the IOMMUs are enabled and the necessary
 * hotplug and suspend notifiers are registered.
 */
static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;
	int i, ret = 0;

	if (!amd_iommu_detected)
		return -ENODEV;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * First parse ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Upon this information the shared data
	 * structures for the IOMMUs in the system will be allocated
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs */
	ret = -ENOMEM;
	amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * let all alias entries point to themselves
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * never allocate domain 0 because it's used as the non-allocated and
	 * error value placeholder
	 */
	amd_iommu_pd_alloc_bitmap[0] = 1;

	spin_lock_init(&amd_iommu_pd_lock);

	/*
	 * now the data structures are allocated and basically initialized
	 * start the real acpi table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
				MAX_IRQS_PER_TABLE * sizeof(u32),
				IRQ_TABLE_ALIGNMENT,
				0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		if (!irq_lookup_table)
			goto out;
	}

	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* init the device table */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
	ivrs_base = NULL;

	return ret;
}
static int amd_iommu_enable_interrupts(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_msi(iommu);
		if (ret)
			goto out;
	}

out:
	return ret;
}
static bool detect_ivrs(void)
{
	struct acpi_table_header *ivrs_base;
	acpi_size ivrs_size;
	acpi_status status;

	status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
	if (status == AE_NOT_FOUND)
		return false;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("AMD-Vi: IVRS table error: %s\n", err);
		return false;
	}

	early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);

	/* Make sure ACS will be enabled during PCI probe */
	pci_request_acs();

	return true;
}
/****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
 *
 ****************************************************************************/
static int __init state_next(void)
{
	int ret = 0;

	switch (init_state) {
	case IOMMU_START_STATE:
		if (!detect_ivrs()) {
			init_state	= IOMMU_NOT_FOUND;
			ret		= -ENODEV;
		} else {
			init_state	= IOMMU_IVRS_DETECTED;
		}
		break;
	case IOMMU_IVRS_DETECTED:
		ret = early_amd_iommu_init();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
		break;
	case IOMMU_ACPI_FINISHED:
		early_enable_iommus();
		register_syscore_ops(&amd_iommu_syscore_ops);
		x86_platform.iommu_shutdown = disable_iommus;
		init_state = IOMMU_ENABLED;
		break;
	case IOMMU_ENABLED:
		ret = amd_iommu_init_pci();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
		enable_iommus_v2();
		break;
	case IOMMU_PCI_INIT:
		ret = amd_iommu_enable_interrupts();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
		break;
	case IOMMU_INTERRUPTS_EN:
		ret = amd_iommu_init_dma_ops();
		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
		break;
	case IOMMU_DMA_OPS:
		init_state = IOMMU_INITIALIZED;
		break;
	case IOMMU_INITIALIZED:
		/* Nothing to do */
		break;
	case IOMMU_NOT_FOUND:
	case IOMMU_INIT_ERROR:
		/* Error states => do nothing */
		ret = -EINVAL;
		break;
	default:
		/* Unknown state */
		BUG();
	}

	return ret;
}
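/*
 * Note: the normal forward progression of the state machine above is
 * START -> IVRS_DETECTED -> ACPI_FINISHED -> ENABLED -> PCI_INIT ->
 * INTERRUPTS_EN -> DMA_OPS -> INITIALIZED; IOMMU_NOT_FOUND and
 * IOMMU_INIT_ERROR are terminal error states that iommu_go_to_state()
 * below stops on.
 */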
static int __init iommu_go_to_state(enum iommu_init_state state)
{
	int ret = 0;

	while (init_state != state) {
		ret = state_next();
		if (init_state == IOMMU_NOT_FOUND ||
		    init_state == IOMMU_INIT_ERROR)
			break;
	}

	return ret;
}
#ifdef CONFIG_IRQ_REMAP
int __init amd_iommu_prepare(void)
{
	int ret;

	amd_iommu_irq_remap = true;

	ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
	if (ret)
		return ret;
	return amd_iommu_irq_remap ? 0 : -ENODEV;
}

int __init amd_iommu_enable(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_ENABLED);
	if (ret)
		return ret;

	irq_remapping_enabled = 1;

	return 0;
}

void amd_iommu_disable(void)
{
	amd_iommu_suspend();
}

int amd_iommu_reenable(int mode)
{
	amd_iommu_resume();

	return 0;
}

int __init amd_iommu_enable_faulting(void)
{
	/* We enable MSI later when PCI is initialized */
	return 0;
}
#endif
/*
 * This is the core init function for AMD IOMMU hardware in the system.
 * This function is called from the generic x86 DMA layer initialization
 * code.
 */
static int __init amd_iommu_init(void)
{
	int ret;

	ret = iommu_go_to_state(IOMMU_INITIALIZED);
	if (ret) {
		free_dma_resources();
		if (!irq_remapping_enabled) {
			disable_iommus();
			free_on_init_error();
		} else {
			struct amd_iommu *iommu;

			uninit_device_table_dma();
			for_each_iommu(iommu)
				iommu_flush_all_caches(iommu);
		}
	}

	return ret;
}
/****************************************************************************
 *
 * Early detect code. This code runs at IOMMU detection time in the DMA
 * layer. It just looks if there is an IVRS ACPI table to detect AMD
 * IOMMUs
 *
 ****************************************************************************/
int __init amd_iommu_detect(void)
{
	int ret;

	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
		return -ENODEV;

	if (amd_iommu_disabled)
		return -ENODEV;

	ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
	if (ret)
		return ret;

	amd_iommu_detected = true;
	iommu_detected = 1;
	x86_init.iommu.iommu_init = amd_iommu_init;

	return 1;
}
/****************************************************************************
 *
 * Parsing functions for the AMD IOMMU specific kernel command line
 * options.
 *
 ****************************************************************************/
static int __init parse_amd_iommu_dump(char *str)
{
	amd_iommu_dump = true;

	return 1;
}

static int __init parse_amd_iommu_options(char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0)
			amd_iommu_unmap_flush = true;
		if (strncmp(str, "off", 3) == 0)
			amd_iommu_disabled = true;
		if (strncmp(str, "force_isolation", 15) == 0)
			amd_iommu_force_isolation = true;
	}

	return 1;
}
static int __init parse_ivrs_ioapic(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str);
		return 1;
	}

	if (early_ioapic_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps			= true;
	i				= early_ioapic_map_size++;
	early_ioapic_map[i].id		= id;
	early_ioapic_map[i].devid	= devid;
	early_ioapic_map[i].cmd_line	= true;

	return 1;
}
static int __init parse_ivrs_hpet(char *str)
{
	unsigned int bus, dev, fn;
	int ret, id, i;
	u16 devid;

	ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);

	if (ret != 4) {
		pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str);
		return 1;
	}

	if (early_hpet_map_size == EARLY_MAP_SIZE) {
		pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n",
			str);
		return 1;
	}

	devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);

	cmdline_maps		= true;
	i			= early_hpet_map_size++;
	early_hpet_map[i].id	= id;
	early_hpet_map[i].devid	= devid;
	early_hpet_map[i].cmd_line = true;

	return 1;
}
__setup("amd_iommu_dump",	parse_amd_iommu_dump);
__setup("amd_iommu=",		parse_amd_iommu_options);
__setup("ivrs_ioapic",		parse_ivrs_ioapic);
__setup("ivrs_hpet",		parse_ivrs_hpet);
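/*
 * Example usage of the options registered above (kernel command line):
 *
 *   amd_iommu=fullflush        flush IO/TLB on every unmap
 *   amd_iommu=off              disable the AMD IOMMU completely
 *   amd_iommu_dump             dump the content of the ACPI IVRS table
 *   ivrs_ioapic[9]=00:14.0     override the devid of IOAPIC id 9
 *   ivrs_hpet[0]=00:14.0       override the devid of HPET number 0
 */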
IOMMU_INIT_FINISH(amd_iommu_detect,
		  gart_iommu_hole_init,
		  NULL,
		  NULL);
bool amd_iommu_v2_supported(void)
{
	return amd_iommu_v2_present;
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
/****************************************************************************
 *
 * IOMMU EFR Performance Counter support functionality. This code allows
 * access to the IOMMU PC functionality.
 *
 ****************************************************************************/
u8 amd_iommu_pc_get_max_banks(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_banks;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);

bool amd_iommu_pc_supported(void)
{
	return amd_iommu_pc_present;
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(u16 devid)
{
	struct amd_iommu *iommu;
	u8 ret = 0;

	/* locate the iommu governing the devid */
	iommu = amd_iommu_rlookup_table[devid];
	if (iommu)
		ret = iommu->max_counters;

	return ret;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
				 u64 *value, bool is_write)
{
	struct amd_iommu *iommu;
	u32 offset;
	u32 max_offset_lim;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_present)
		return -ENODEV;

	/* Locate the iommu associated with the device ID */
	iommu = amd_iommu_rlookup_table[devid];

	/* Check for valid iommu and pc register indexing */
	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
		return -ENODEV;

	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);

	/* Limit the offset to the hw defined mmio region aperture */
	max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
				(iommu->max_counters << 8) | 0x28);
	if ((offset < MMIO_CNTR_REG_OFFSET) ||
	    (offset > max_offset_lim))
		return -EINVAL;

	if (is_write) {
		writel((u32)*value, iommu->mmio_base + offset);
		writel((*value >> 32), iommu->mmio_base + offset + 4);
	} else {
		*value = readl(iommu->mmio_base + offset + 4);
		*value <<= 32;
		*value |= readl(iommu->mmio_base + offset);
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
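/*
 * Note: the register layout decoded above places bank b, counter c,
 * function f at MMIO offset ((0x40 | b) << 12) | (c << 8) | f, so the
 * counter registers start at 0x40000 (MMIO_CNTR_REG_OFFSET) and each
 * bank spans one 4K page. The 64-bit value is transferred as two 32-bit
 * halves, high word at offset + 4.
 */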