/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending
 * - VGIC pending interrupts are stored on the vgic.irq_pending vgic
 *   bitmap (this bitmap is updated by both user land ioctls and guest
 *   mmio ops, and other in-kernel peripherals such as the
 *   arch. timers) and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_pending & dist->irq_enable
 *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_queued is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_queued. This allows the
 *   interrupt line to be sampled again.
 * - Note that level-triggered interrupts can also be set to pending from
 *   writes to GICD_ISPENDRn and lowering the external input line does not
 *   cause the interrupt to become inactive in such a situation.
 *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
 *   inactive as long as the external input line is held high.
 */
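
/*
 * Illustrative sketch (added for exposition, not part of the driver):
 * the life cycle of a level-triggered interrupt described above,
 * reduced to the distributor bitmaps involved. The function name is
 * hypothetical; the real logic lives in vgic_update_irq_pending().
 */
#if 0	/* example only, never compiled */
static void level_irq_example(struct kvm_vcpu *vcpu, int irq, bool line)
{
	if (line) {
		/* wire goes high: latch both level and pending state */
		vgic_dist_irq_set_level(vcpu, irq);
		vgic_dist_irq_set_pending(vcpu, irq);
	} else {
		vgic_dist_irq_clear_level(vcpu, irq);
		/* stays pending if GICD_ISPENDRn set it (soft pending) */
		if (!vgic_dist_irq_soft_pend(vcpu, irq))
			vgic_dist_irq_clear_pending(vcpu, irq);
	}
	/* while queued on a vcpu, the line is not sampled again */
	if (vgic_can_sample_irq(vcpu, irq))
		vgic_update_state(vcpu->kvm);
}
#endif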
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
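
/*
 * Illustrative sketch (added for exposition, not in the original file):
 * an access mode is one read flavour OR'd with one write flavour; the
 * two masks decode the halves independently, which is how
 * vgic_reg_access() below dispatches on them.
 */
#if 0	/* example only */
static void access_mode_example(void)
{
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;

	/* the two halves decode back independently */
	WARN_ON(ACCESS_READ_MASK(mode) != ACCESS_READ_VALUE);
	WARN_ON(ACCESS_WRITE_MASK(mode) != ACCESS_WRITE_SETBIT);
}
#endif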
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
/*
 * struct vgic_bitmap contains unions that provide two views of
 * the same data. In one case it is an array of registers of
 * u32's, and in the other case it is a bitmap of unsigned
 * longs.
 *
 * This does not work on 64-bit BE systems, because the bitmap access
 * will store two consecutive 32-bit words with the higher-addressed
 * register's bits at the lower index and the lower-addressed register's
 * bits at the higher index.
 *
 * Therefore, swizzle the register index when accessing the 32-bit word
 * registers to access the right register's value.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif
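
/*
 * Worked example (illustrative only): on a 64-bit BE kernel, the bitmap
 * long covering IRQs 0-63 stores the register for IRQs 32-63 at the
 * *lower* 32-bit address. XOR-ing the word index with 1 swaps each
 * even/odd pair, so register 0 is fetched from index 1 and vice versa.
 */
#if 0	/* example only */
static u32 *swizzle_example(u32 *regs, u32 word_index)
{
	/* word_index 0 <-> 1, 2 <-> 3, ... when REG_OFFSET_SWIZZLE == 1 */
	return regs + (word_index ^ REG_OFFSET_SWIZZLE);
}
#endif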
static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
	else
		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}
#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
}
static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
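
/*
 * Usage sketch (illustrative only, hypothetical handler): a handler
 * just picks the backing word and the access mode; sub-word guest
 * accesses are widened/narrowed by vgic_reg_access() itself via
 * word_offset and mask.
 */
#if 0	/* example only */
static bool handle_mmio_example(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	u32 reg = 0;	/* virtual backing word for the register */

	/* reads return the word, writes replace the accessed bytes */
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return mmio->is_write;	/* true if distributor state changed */
}
#endif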
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}
static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg;
	u32 level_mask;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
		*reg &= level_mask;

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *level_active;
	u32 *reg;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu->vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending,
					  vcpu->vcpu_id, offset);
		*reg |= *level_active;

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu->vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
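
/*
 * Worked example (added for exposition): each 32-bit GICD_ITARGETSRn
 * covers GICD_IRQS_PER_ITARGETSR == 4 interrupts, one byte per IRQ,
 * each byte a CPU mask. With SPI 32 targeting vcpu1 and SPIs 33-35
 * targeting vcpu0, vgic_get_target_reg(kvm, 32) would yield:
 *
 *	byte 0 (IRQ 32): 0x02	byte 1 (IRQ 33): 0x01
 *	byte 2 (IRQ 34): 0x01	byte 3 (IRQ 35): 0x01
 *
 * i.e. the register value 0x01010102.
 */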
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
	 * abcd...mnop which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
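
/*
 * Worked example (added for exposition): with val = 0b0000000000000101,
 * i.e. IRQs 0 and 2 edge-triggered, vgic_cfg_expand() places each bit
 * at position 2*i + 1, giving 0b00100010 (0x22): only the upper bit of
 * each 2-bit config field is set. vgic_cfg_compress() inverts this
 * exactly, so vgic_cfg_compress(vgic_cfg_expand(x)) == x for any 16-bit x.
 */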
/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}
static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the vcpu whose LRs hold the pending IRQs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface. It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu:	pointer to the vcpu performing the access
 * @run:	pointer to the kvm_run structure
 * @mmio:	pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len != 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set_pending(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}
/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_queued(vcpu, vlr.irq))
				vgic_irq_clear_queued(vcpu, vlr.irq);
		}
	}
}
/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}
static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}
/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

			vgic_irq_clear_queued(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/*
			 * If the IRQ was EOIed it was also ACKed and we
			 * therefore assume we can clear the soft pending
			 * state (should it have been set) for this interrupt.
			 *
			 * Note: if the IRQ soft pending state was set after
			 * the IRQ was acked, it actually shouldn't be
			 * cleared, but we have no way of knowing that unless
			 * we start trapping ACKs when the soft-pending state
			 * is set.
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_dist_irq_clear_pending(vcpu, vlr.irq);
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}
static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}
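
/*
 * Summary of the rule above (added for exposition), with state taken
 * from the pending bit (edge) or the latched level bit (level):
 *
 *	edge,  level=1, state=0 -> inject (rising edge)
 *	edge,  level=1, state=1 -> ignore (already pending)
 *	level, level != state   -> inject (the line changed)
 *	level, level == state   -> ignore (no change on the wire)
 */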
static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				    unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
				vgic_dist_irq_clear_pending(vcpu, irq_num);
		} else
			vgic_dist_irq_clear_pending(vcpu, irq_num);
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (vgic_update_irq_pending(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}
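
/*
 * Usage sketch (illustrative, not from this file): an in-kernel device
 * model raising and lowering a level-sensitive SPI. IRQ numbers below
 * VGIC_NR_PRIVATE_IRQS are PPIs/SGIs and use cpuid; for SPIs the cpuid
 * argument is ignored and the target comes from irq_spi_cpu. The IRQ
 * number 40 and the function name are made up for the example.
 */
#if 0	/* example only */
static void device_level_irq_example(struct kvm *kvm, bool asserted)
{
	/* SPI 40: the cpuid argument (0 here) is irrelevant for SPIs */
	kvm_vgic_inject_irq(kvm, 0, 40, asserted);
}
#endif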
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
	{}
};
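
/*
 * Illustrative sketch (added for exposition, hypothetical helper): user
 * space reaches these handlers through KVM_{GET,SET}_DEVICE_ATTR with
 * the vcpu id and register offset packed into attr->attr, which
 * vgic_attr_regs_access() below unpacks with the same masks.
 */
#if 0	/* example only */
static u64 cpu_reg_attr_example(int cpuid, u32 offset)
{
	return (((u64)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) &
		KVM_DEV_ARM_VGIC_CPUID_MASK) |
	       (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK);
}
#endif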
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no other VCPU is running by checking the vcpu->cpu
	 * field. If no other VCPUs are running we can safely access the VGIC
	 * state, because even if another VCPU is run after this point, that
	 * VCPU will not touch the vgic state, because it will block on
	 * getting the vgic->lock in kvm_vgic_sync_hwstate().
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through the API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}

	return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

static struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};
int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
			  const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
				       KVM_DEV_TYPE_ARM_VGIC_V2);

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}