#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu irq_2_iommu;
	struct irte irte_entry;
	union {
		struct msi_msg msi_entry;
	};
};

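/*
 * IR_X2APIC_MODE() yields the EIME bit (bit 11) of the Interrupt Remapping
 * Table Address register when x2apic mode is requested.  IRTE_DEST() puts
 * the destination APIC ID in bits 15:8 of the IRTE dest_id field in xapic
 * mode; in EIM (x2apic) mode the full 32-bit APIC ID is used unshifted.
 */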
#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *	->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_domain_ops intel_ir_domain_ops;

static int __init parse_ioapics_under_ir(void);

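/*
 * Allocate a power-of-two sized, naturally aligned block of IRTEs from
 * @iommu's remapping table and record the base index and mask in
 * @irq_iommu.  The table bitmap is protected by irq_2_ir_lock.
 */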
static int alloc_irte(struct intel_iommu *iommu, int irq,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

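/*
 * Queue an Interrupt Entry Cache invalidation for the IRTE block at
 * @index (@mask selects the block size) and wait for it to complete.
 */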
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

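/*
 * Update the IRTE referenced by @irq_iommu in place and flush the
 * interrupt entry cache so that hardware observes the new entry.
 */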
static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update the iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

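/*
 * Zero all IRTEs of the block owned by @irq_iommu, release the region in
 * the allocation bitmap and flush the interrupt entry cache.  Only the
 * owner of sub_handle 0 clears the block; sub-handles are no-ops.
 */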
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

/*
 * Set the SVT, SQ and SID fields of the IRTE to verify the source ids
 * of interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i = 0;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias. The only case
	 * where it will return an alias on a different bus than the
	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
	 * the subordinate bus. In this case we can only verify the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}

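/*
 * Program the remapping table address into @iommu, issue a global
 * invalidation of the interrupt entry cache, then enable remapping and
 * block compatibility-format (non-remapped) interrupts.
 */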
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

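/*
 * Allocate the per-IOMMU remapping table and its allocation bitmap, and
 * create the hierarchical irqdomains (base and MSI) that hand out
 * entries from that table.
 */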
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
						    0, INTR_REMAP_TABLE_ENTRIES,
						    NULL, &intel_ir_domain_ops,
						    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
	return 0;

out_free_bitmap:
	kfree(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);
	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			irq_domain_remove(iommu->ir_msi_domain);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			irq_domain_remove(iommu->ir_domain);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);	/* GSTS is a 32-bit register */
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update.\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir() != 1) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Do the allocations early */
	for_each_iommu(iommu, drhd)
		if (intel_setup_irq_remapping(iommu))
			goto error;

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
						~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

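/*
 * Enable remapping on all IOMMUs: pick xapic vs x2apic (EIM) mode, tear
 * down any state left over from the BIOS or a previous kernel, enable
 * queued invalidation everywhere, then program and enable each unit.
 */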
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;
	int eim = 0;

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
	}

	for_each_iommu(iommu, drhd) {
		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_iommu(iommu, drhd)
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
				drhd->reg_base_addr, iommu->ecap);
			eim = 0;
		}
	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_iommu(iommu, drhd) {
		int ret = dmar_enable_qi(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id = scope->enumeration_id;
	ir_hpet[free].bus = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Find the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd)
		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = true;
		}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}

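/*
 * Initialize an IRTE for remapped delivery: present, edge trigger (see
 * the comment below), with destination and vector from the APIC config.
 */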
static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

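/*
 * Pick the remapping irqdomain serving an allocation request: the IOMMU
 * covering the IOAPIC, the HPET block, or the PCI device's DRHD unit.
 */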
static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu = NULL;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		iommu = map_ioapic_to_ir(info->ioapic_id);
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		iommu = map_hpet_to_ir(info->hpet_id);
		break;
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		break;
	default:
		break;
	}

	return iommu ? iommu->ir_domain : NULL;
}

static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		if (iommu)
			return iommu->ir_msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.get_ir_irq_domain	= intel_get_ir_irq_domain,
	.get_irq_domain		= intel_get_irq_domain,
};

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin
 * number). The real vector that is used to interrupt the cpu comes from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same
 * mechanism is used to migrate MSI irq's in the presence of
 * interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * then flush the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.irq_ack		= ir_ack_apic_edge,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

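/*
 * Fill in the IRTE and, depending on the source, the IO-APIC RTE or the
 * MSI message, so that the device's interrupt requests hit the IRTE
 * allocated at @index (offset by @sub_handle for multi-MSI).
 */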
static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct IR_IO_APIC_route_entry *entry;
	struct irte *irte = &data->irte_entry;
	struct msi_msg *msg = &data->msi_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->ioapic_id);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			    info->ioapic_id, irte->present, irte->fpd,
			    irte->dst_mode, irte->redir_hint,
			    irte->trigger_mode, irte->dlvry_mode,
			    irte->avail, irte->vector, irte->dest_id,
			    irte->sid, irte->sq, irte->svt);

		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->index2	= (index >> 15) & 0x1;
		entry->zero	= 0;
		entry->format	= 1;
		entry->index	= (index & 0x7fff);
		/*
		 * The IO-APIC RTE will be configured with the virtual vector.
		 * The irq handler will do the explicit EOI to the io-apic.
		 */
		entry->vector	= info->ioapic_pin;
		entry->mask	= 0;			/* enable IRQ */
		entry->trigger	= info->ioapic_trigger;
		entry->polarity	= info->ioapic_polarity;
		if (info->ioapic_trigger)
			entry->mask = 1; /* Mask level triggered irqs. */
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
			set_hpet_sid(irte, info->hpet_id);
		else
			set_msi_sid(irte, info->msi_dev);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(index) |
				  MSI_ADDR_IR_INDEX2(index);
		break;

	default:
		BUG_ON(1);
		break;
	}
}

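/*
 * Release the IRTE block and the per-interrupt chip data for @nr_irqs
 * interrupts starting at @virq.
 */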
static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

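/*
 * Allocate CPU vectors from the parent domain, then one block of IRTEs
 * for the whole request; every interrupt gets its own intel_ir_data
 * carrying the shared IRTE index and a distinct sub_handle.
 */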
static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

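		/*
		 * The hwirq encodes the IRTE index in the upper bits and
		 * the sub_handle in the low 16 bits.
		 */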
		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void intel_irq_remapping_activate(struct irq_domain *domain,
					 struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;

	modify_irte(&data->irq_2_iommu, &data->irte_entry);
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static struct irq_domain_ops intel_ir_domain_ops = {
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */

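/*
 * Bring a hot-added remapping unit into service: check EIM capability
 * against the current APIC mode, parse its IOAPIC/HPET scope, allocate
 * the remapping table, then enable queued invalidation and remapping.
 */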
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("DRHD %Lx: failed to allocate resource\n",
		       iommu->reg_phys);
		ir_remove_ioapic_hpet_scope(iommu);
		return ret;
	}

	if (!iommu->qi) {
		/* Clear previous faults. */
		dmar_fault(-1, iommu);
		iommu_disable_irq_remapping(iommu);
		dmar_disable_qi(iommu);
	}

	/* Enable queued invalidation */
	ret = dmar_enable_qi(iommu);
	if (!ret) {
		iommu_set_irq_remapping(iommu, eim);
	} else {
		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
		       iommu->reg_phys, iommu->ecap, ret);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}