#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};
#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((x2apic_mode) ? dest : dest << 8)
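
/*
 * IR_X2APIC_MODE sets the extended-interrupt-mode bit (bit 11, EIME) of
 * the IRTA register when x2apic is in use.  IRTE_DEST accounts for the
 * destination-id layout: in xapic mode the 8-bit APIC ID lives in bits
 * 15:8 of the IRTE dest_id field, while x2apic mode uses the full
 * 32-bit APIC ID as-is.  For example (illustrative values): APIC ID
 * 0x12 is encoded as 0x1200 in xapic mode but stays 0x12 in x2apic
 * mode.
 */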
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}
int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * Start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
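
/*
 * A worked example of the sizing above (illustrative): a request for
 * count = 3 is rounded up to 4 entries, giving mask = ilog2(4) = 2.
 * The search then steps in strides of count, so multi-entry blocks stay
 * naturally aligned, and the recorded irte_mask lets a later flush
 * invalidate all 2^mask entries in one shot.
 */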
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}
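
/*
 * The descriptor above queues a selective interrupt-entry-cache (IEC)
 * invalidation: index selects the first IRTE and the index-mask widens
 * the flush to 2^mask contiguous entries (so mask = 0 flushes exactly
 * one entry).  This mirrors how alloc_irte() records irte_mask.
 */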
static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */
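
/*
 * Illustrative example of the qualifiers above: with SQ_13_IGNORE_3 and
 * SID 0x1230, any requester-id in 0x1230-0x1237 passes verification,
 * since the three least significant bits are masked off.  SQ_ALL_16
 * accepts only an exact match of the full 16-bit requester-id.
 */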
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;

	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}
static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
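
/*
 * Behind a PCIe-to-PCI/PCI-X bridge the original requester-id is not
 * reliably preserved (the bridge may forward the interrupt with a
 * requester-id of its own), so the exact device cannot be verified;
 * SVT_VERIFY_BUS instead checks that the request's bus number falls in
 * the range bridge->bus->number .. dev->bus->number.  A legacy PCI
 * bridge takes ownership of the transaction, so its own SID is the one
 * to verify.
 */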
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of the interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}
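
/*
 * Sizing note (assuming the usual definitions elsewhere in the tree):
 * an INTR_REMAP_PAGE_ORDER of 8 yields 256 contiguous 4K pages, i.e.
 * 1MB, which holds INTR_REMAP_TABLE_ENTRIES (65536) IRTEs of 16 bytes
 * each.  The same size is advertised to hardware through
 * INTR_REMAP_TABLE_REG_SIZE in iommu_set_irq_remapping().
 */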
/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}
static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	int eim = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		WARN(!eim, KERN_WARNING
			"Your BIOS is broken and requested that x2apic be disabled.\n"
			"This will leave your machine vulnerable to irq-injection attacks.\n"
			"Use 'intremap=no_x2apic_optout' to override the BIOS request.\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If queued invalidation is already initialized,
		 * we shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * Check for Interrupt-remapping support.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}
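
/*
 * Summary of the bring-up sequence above: tear down any remapping/QI
 * state left over from the firmware handover, verify IR (and EIM for
 * x2apic) support on every DRHD, enable queued invalidation (IEC
 * invalidations are issued through the invalidation queue, so QI must
 * come first), then program the IRTA pointer and set the IRE bit per
 * iommu via intel_setup_irq_remapping().
 */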
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}
/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}
int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);
static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}
static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * Handle the error condition gracefully here!
	 */
	return -1;
}
static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2 = (index >> 15) & 0x1;
	entry->zero = 0;
	entry->format = 1;
	entry->index = (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with the virtual vector.
	 * The irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector = attr->ioapic_pin;
	entry->mask = 0;			/* enable IRQ */
	entry->trigger = attr->trigger;
	entry->polarity = attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
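
/*
 * Note on the handle split above (illustrative): the remap-format RTE
 * only has 15 bits for the handle plus a separate index2 bit, so e.g.
 * index 0x8003 is stored as index2 = 1, index = 0x0003, and hardware
 * reassembles the two into the full 16-bit IRTE handle 0x8003.
 */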
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE, plus a flush of
 * the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information) by using a virtual vector (the io-apic pin
 * number). The real vector that is used for interrupting the cpu comes
 * from the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same
 * mechanism is used to migrate MSI irq's in the presence of
 * interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}
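
/*
 * Layout of the remappable MSI message composed above (illustrative,
 * per the msidef.h macros): handle bits 14:0 land in address bits 19:5
 * (MSI_ADDR_IR_INDEX1), handle bit 15 in address bit 2
 * (MSI_ADDR_IR_INDEX2), and with MSI_ADDR_IR_SHV set the data register
 * carries the subhandle rather than a vector; the real vector and
 * destination come from the IRTE.
 */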
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;
	int ret = -ENOENT;

	iommu = map_dev_to_ir(pdev);
	if (iommu) {
		/*
		 * Setup the mapping between the irq and the IRTE
		 * base index, with the sub_handle pointing to the
		 * appropriate interrupt remap table entry.
		 */
		set_irte_irq(irq, iommu, index, sub_handle);
		ret = 0;
	}

	return ret;
}
static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};