/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/ptrace.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 */
struct irq_info
{
        enum xen_irq_type type; /* type */
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                } pirq;
        } u;
};

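/*
 * Added note: 'type' selects which union member is meaningful. After
 * mk_virq_info(), for example, only u.virq may be read; the pirq
 * fields are only valid for IRQT_PIRQ entries.
 */
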
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)

static struct irq_info *irq_info;
static int *pirq_to_irq;
static int nr_pirqs;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
        unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
        .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
        return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
        return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
        return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
                        .cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
        return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
                        .cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
        return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
                        .cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
                                    unsigned short gsi, unsigned short vector)
{
        return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
                        .cpu = 0,
                        .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
        return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

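/*
 * Added note, as an illustration of the triple mask above: for word
 * idx == 0, an event channel n is "active" only when all three hold:
 *
 *      sh->evtchn_pending[0]   bit n set       (Xen marked it pending)
 *      cpu_evtchn_mask(cpu)[0] bit n set       (bound to this cpu)
 *      sh->evtchn_mask[0]      bit n clear     (not masked)
 */
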
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
        cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));

        __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
        __set_bit(chn, cpu_evtchn_mask(cpu));

        irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;
        struct irq_desc *desc;

        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
                cpumask_copy(desc->affinity, cpumask_of(0));
        }

        memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

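/*
 * Illustrative sketch (not from this file): a front-end typically pairs
 * this with the ring macros, e.g.
 *
 *      RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
 *      if (notify)
 *              notify_remote_via_irq(info->irq);
 *
 * where 'info' stands for hypothetical per-device state.
 */
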
static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static int get_nr_hw_irqs(void)
{
        int ret = 1;

#ifdef CONFIG_X86_IO_APIC
        ret = get_nr_irqs_gsi();
#endif

        return ret;
}

/* callers of this function should make sure that PHYSDEVOP_get_nr_pirqs
 * succeeded, otherwise nr_pirqs won't hold the right value */
static int find_unbound_pirq(void)
{
        int i;

        for (i = nr_pirqs-1; i >= 0; i--) {
                if (pirq_to_irq[i] < 0)
                        return i;
        }
        return -1;
}

static int find_unbound_irq(void)
{
        struct irq_data *data;
        int irq, res;
        int start = get_nr_hw_irqs();

        if (start == nr_irqs)
                goto no_irqs;

        /* nr_irqs is a magic value. Must not use it.*/
        for (irq = nr_irqs-1; irq > start; irq--) {
                data = irq_get_irq_data(irq);
                /* only 0->15 have init'd desc; handle irq > 16 */
                if (!data)
                        break;
                if (data->chip == &no_irq_chip)
                        break;
                if (data->chip != &xen_dynamic_chip)
                        continue;
                if (irq_info[irq].type == IRQT_UNBOUND)
                        goto found;
        }

        if (irq == start)
                goto no_irqs;

        res = irq_alloc_desc_at(irq, 0);

        if (WARN_ON(res != irq))
                return -1;

found:
        return irq;

no_irqs:
        panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
        /* identity map all the hardware irqs */
        return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

        if (unlikely(pirq_needs_eoi(irq))) {
                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq);

        return 0;
}

static void shutdown_pirq(unsigned int irq)
{
        struct evtchn_close close;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
        startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static void end_pirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct irq_desc *desc = irq_to_desc(irq);

        if (WARN_ON(!desc))
                return;

        if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
            (IRQ_DISABLED|IRQ_PENDING)) {
                shutdown_pirq(irq);
        } else if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
                pirq_unmask_notify(irq);
        }
}

static int find_irq_by_gsi(unsigned gsi)
{
        int irq;

        for (irq = 0; irq < nr_irqs; irq++) {
                struct irq_info *info = info_for_irq(irq);

                if (info == NULL || info->type != IRQT_PIRQ)
                        continue;

                if (gsi_from_irq(irq) == gsi)
                        return irq;
        }

        return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
        return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument; in that case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq actually starts
 * up. Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
        int irq = 0;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);

        if ((pirq > nr_pirqs) || (gsi > nr_irqs)) {
                printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
                        pirq > nr_pirqs ? "nr_pirqs" : "",
                        gsi > nr_irqs ? "nr_irqs" : "");
                goto out;
        }

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
                       irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        /* If we are a PV guest, we don't have GSIs (no ACPI passed).
         * Therefore we use !xen_initial_domain() to take this path. */
        if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
                                xen_pv_domain())) {
                irq = gsi;
                irq_alloc_desc_at(irq, 0);
        } else
                irq = find_unbound_irq();

        set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
                                      handle_level_irq, name);

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                spin_unlock(&irq_mapping_update_lock);
                return -ENOSPC;
        }

        irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
        irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
        pirq_to_irq[pirq] = irq;

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        int rc = -ENOENT;

        spin_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        irq_info[irq] = mk_unbound_info();
        irq_free_desc(irq);
        rc = 0;

out:
        spin_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_vector_from_irq(unsigned irq)
{
        return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
        return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_edge_irq, "event");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                irq = find_unbound_irq();

                set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);

                per_cpu(virq_to_irq, cpu)[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        if (irq_info[irq].type != IRQT_UNBOUND) {
                irq_info[irq] = mk_unbound_info();

                irq_free_desc(irq);
        }

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_evtchn_to_irq(evtchn);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

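/*
 * Usage sketch (illustrative; names are hypothetical): a front-end that
 * read an event-channel port from xenstore might bind it like
 *
 *      irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *                                      "mydev", mydev);
 *      if (irq < 0)
 *              return irq;
 *
 * and later tear it down with unbind_from_irqhandler(irq, mydev).
 */
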
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        unsigned int irq;
        int retval;

        irq = bind_virq_to_irq(virq, cpu);
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

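/*
 * Usage sketch (illustrative): the per-cpu timer virq is bound roughly as
 *
 *      irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, timer_handler,
 *                                    IRQF_PERCPU|IRQF_TIMER, "timer", NULL);
 *
 * with 'timer_handler' standing in for the real handler.
 */
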
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];

        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

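/*
 * Added note: this is how the smp code pokes another vcpu, e.g.
 * xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR) for a reschedule
 * request (assuming XEN_RESCHEDULE_VECTOR from enum ipi_vector).
 */
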
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long flags;
        int i;
        static DEFINE_SPINLOCK(debug_lock);

        spin_lock_irqsave(&debug_lock, flags);

        printk("vcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                struct vcpu_info *v = per_cpu(xen_vcpu, i);
                printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
                        (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
                        v->evtchn_upcall_pending,
                        v->evtchn_pending_sel);
        }

        printk("pending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i],
                        i % 8 == 0 ? "\n   " : " ");
        printk("\nmasks:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_mask[i],
                        i % 8 == 0 ? "\n   " : " ");

        printk("\nunmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                        i % 8 == 0 ? "\n   " : " ");

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i]);
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
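/*
 * Worked example (added note): with 64-bit words, an upcall for port 131
 * sets bit 2 of evtchn_pending_sel (131 / 64 == 2) and bit 3 of
 * evtchn_pending[2] (131 % 64 == 3); the loop below recovers word 2 via
 * __ffs() on the selector, then port 2 * 64 + 3 == 131 via __ffs() on
 * the word's active bits.
 */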
static void __xen_evtchn_do_upcall(void)
{
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__get_cpu_var(xed_nesting_count)++)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
                while (pending_words != 0) {
                        unsigned long pending_bits;
                        int word_idx = __ffs(pending_words);
                        pending_words &= ~(1UL << word_idx);

                        while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
                                int bit_idx = __ffs(pending_bits);
                                int port = (word_idx * BITS_PER_LONG) + bit_idx;
                                int irq = evtchn_to_irq[port];
                                struct irq_desc *desc;

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }
                        }
                }

                BUG_ON(!irqs_disabled());

                count = __get_cpu_var(xed_nesting_count);
                __get_cpu_var(xed_nesting_count) = 0;
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_evtchn_info(evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        /* events delivered via platform PCI interrupts are always
         * routed to vcpu 0 */
        if (!VALID_EVTCHN(evtchn) ||
            (xen_hvm_domain() && !xen_have_vector_callback))
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

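/*
 * Added note on the pattern above: the port is masked with a
 * test-and-set before the pending bit is raised, so the event cannot
 * fire mid-update; it is then unmasked only if it was not already
 * masked (!masked), so a mask held by some other path is preserved and
 * unmask_evtchn() performs the "lost edge" re-delivery check.
 */
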
static void enable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);

        move_native_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
        int evtchn = evtchn_from_irq(irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

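/*
 * Usage sketch (illustrative): a pv spinlock slow path can pair these
 * primitives as
 *
 *      xen_clear_irq_pending(irq);
 *      if (lock_still_held(lock))              (hypothetical predicate)
 *              xen_poll_irq(irq);
 *
 * blocking in the hypervisor until the holder re-raises the event.
 */
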
void xen_irq_resume(void)
{
        unsigned int cpu, irq, evtchn;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < nr_irqs; irq++)
                irq_info[irq].evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",

        .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,

        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name           = "xen-pirq",

        .startup        = startup_pirq,
        .shutdown       = shutdown_pirq,

        .enable         = enable_pirq,
        .unmask         = enable_pirq,

        .disable        = disable_pirq,
        .mask           = disable_pirq,

        .ack            = ack_pirq,
        .end            = end_pirq,

        .set_affinity   = set_affinity_irq,

        .retrigger      = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name           = "xen-percpu",

        .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,

        .ack            = ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

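/*
 * Added note: 'via' encodes the delivery method as well as its detail;
 * the HVM_CALLBACK_VECTOR() macro used below, for instance, packs a
 * vector number together with the vector-callback method bits.
 */
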
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;

        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i, rc;
        struct physdev_nr_pirqs op_nr_pirqs;

        cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
                                    GFP_KERNEL);
        irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs);
        if (rc < 0) {
                nr_pirqs = nr_irqs;
                if (rc != -ENOSYS)
                        printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc);
        } else {
                if (xen_pv_domain() && !xen_initial_domain())
                        nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
                else
                        nr_pirqs = op_nr_pirqs.nr_pirqs;
        }

        pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
        for (i = 0; i < nr_pirqs; i++)
                pirq_to_irq[i] = -1;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
        }
}