/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970		63

/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER	18
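
/*
 * Allocate a hashed page table (HPT) for a guest.  As the code below shows,
 * the strategy is: if the caller asked for a non-default size, try the
 * kernel page allocator first; otherwise (or on failure) take a buffer from
 * the preallocated linear pool; finally, fall back to successively smaller
 * page-allocator allocations down to PPC_MIN_HPT_ORDER.  The reverse-map
 * (revmap) array and the SDR1 value for the guest are also set up here.
 */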
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
        struct revmap_entry *rev;
        struct kvmppc_linear_info *li;
        long order = kvm_hpt_order;

        if (order < PPC_MIN_HPT_ORDER)
                order = PPC_MIN_HPT_ORDER;

        /*
         * If the user wants a different size from default,
         * try first to allocate it from the kernel page allocator.
         */
        if (order != kvm_hpt_order) {
                hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
                                       __GFP_NOWARN, order - PAGE_SHIFT);

        /* Next try to allocate from the preallocated pool */
                hpt = (ulong)li->base_virt;
                kvm->arch.hpt_li = li;
                order = kvm_hpt_order;

        /* Lastly try successively smaller sizes from the page allocator */
        while (!hpt && order > PPC_MIN_HPT_ORDER) {
                hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
                                       __GFP_NOWARN, order - PAGE_SHIFT);

        kvm->arch.hpt_virt = hpt;
        kvm->arch.hpt_order = order;
        /* HPTEs are 2**4 bytes long */
        kvm->arch.hpt_npte = 1ul << (order - 4);
        /* 128 (2**7) bytes in each HPTEG */
        kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

        /* Allocate reverse map array */
        rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
                pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
        kvm->arch.revmap = rev;

        kvm->arch.sdr1 = __pa(hpt) | (order - 18);

        pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
                hpt, order, kvm->arch.lpid);

        *htab_orderp = order;

        if (kvm->arch.hpt_li)
                kvm_release_hpt(kvm->arch.hpt_li);
        free_pages(hpt, order - PAGE_SHIFT);
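
/*
 * Allocate a new HPT for the guest, or clear and reuse the existing one.
 * If an HPT is already present this zeroes it (making every HPTE invalid)
 * and forces each vcpu to flush its TLB on the next guest entry; it bails
 * out if any vcpu is currently running.  *htab_orderp returns the order of
 * the HPT actually in use.
 */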
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
        mutex_lock(&kvm->lock);
        if (kvm->arch.rma_setup_done) {
                kvm->arch.rma_setup_done = 0;
                /* order rma_setup_done vs. vcpus_running */
                if (atomic_read(&kvm->arch.vcpus_running)) {
                        kvm->arch.rma_setup_done = 1;
        if (kvm->arch.hpt_virt) {
                order = kvm->arch.hpt_order;
                /* Set the entire HPT to 0, i.e. invalid HPTEs */
                memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
                /*
                 * Set the whole last_vcpu array to an invalid vcpu number.
                 * This ensures that each vcpu will flush its TLB on next entry.
                 */
                memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
                *htab_orderp = order;
                err = kvmppc_alloc_hpt(kvm, htab_orderp);
                order = *htab_orderp;
        mutex_unlock(&kvm->lock);
void kvmppc_free_hpt(struct kvm *kvm)
        kvmppc_free_lpid(kvm->arch.lpid);
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_li)
                kvm_release_hpt(kvm->arch.hpt_li);
        free_pages(kvm->arch.hpt_virt,
                   kvm->arch.hpt_order - PAGE_SHIFT);
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
        return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
        return (pgsize == 0x10000) ? 0x1000 : 0;
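
/*
 * Set up the virtual real mode area (VRMA) for a guest: create one bolted
 * HPTE per naturally-aligned chunk of size 1 << porder covering the start
 * of guest memory, so the guest can run with translation off before it has
 * set up its own mappings.  At most one HPTE per HPTEG is used.
 */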
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                     unsigned long porder)
        unsigned long npages;
        unsigned long hp_v, hp_r;
        unsigned long addr, hash;
        unsigned long hp0, hp1;
        struct kvm *kvm = vcpu->kvm;

        psize = 1ul << porder;
        npages = memslot->npages >> (porder - PAGE_SHIFT);

        /* VRMA can't be > 1TB */
        if (npages > 1ul << (40 - porder))
                npages = 1ul << (40 - porder);
        /* Can't use more than 1 HPTE per HPTEG */
        if (npages > kvm->arch.hpt_mask + 1)
                npages = kvm->arch.hpt_mask + 1;

        hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
        hp1 = hpte1_pgsize_encoding(psize) |
                HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

        for (i = 0; i < npages; ++i) {
                /* can't use hpt_hash since va > 64 bits */
                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
                /*
                 * We assume that the hash table is empty and no
                 * vcpus are using it at this stage.  Since we create
                 * at most one HPTE per HPTEG, we just assume entry 7
                 * is available and use it.
                 */
                hash = (hash << 3) + 7;
                hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
                ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
                if (ret != H_SUCCESS) {
                        pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
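
/*
 * One-time MMU setup for HV KVM: require hypervisor mode, then initialise
 * the LPID allocator and claim both the LPID already in use by the host and
 * the LPID reserved for partition switching, so neither is ever handed out
 * to a guest.
 */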
int kvmppc_mmu_hv_init(void)
        unsigned long host_lpid, rsvd_lpid;

        if (!cpu_has_feature(CPU_FTR_HVMODE))

        /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
                rsvd_lpid = LPID_RSVD;
                host_lpid = 0;                  /* PPC970 */
                rsvd_lpid = MAX_LPID_970;

        kvmppc_init_lpid(rsvd_lpid + 1);

        kvmppc_claim_lpid(host_lpid);
        /* rsvd_lpid is reserved for use in partition switching */
        kvmppc_claim_lpid(rsvd_lpid);
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
        kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
                                  struct kvm_memory_slot *memslot,
        struct page *page, *hpage, *pages[1];
        unsigned long s, pgsize;
        unsigned long *physp;
        unsigned int is_io, got, pgorder;
        struct vm_area_struct *vma;
        unsigned long pfn, i, npages;

        physp = memslot->arch.slot_phys;
        if (physp[gfn - memslot->base_gfn])

        start = gfn_to_hva_memslot(memslot, gfn);

        /* Instantiate and get the page we want access to */
        np = get_user_pages_fast(start, 1, 1, pages);
                /* Look up the vma for the page */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, start);
                if (!vma || vma->vm_start > start ||
                    start + psize > vma->vm_end ||
                    !(vma->vm_flags & VM_PFNMAP))
                is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                /* check alignment of pfn vs. requested page size */
                if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
                up_read(&current->mm->mmap_sem);
                got = KVMPPC_GOT_PAGE;

                /* See if this is a large page */
                if (PageHuge(page)) {
                        hpage = compound_head(page);
                        s <<= compound_order(hpage);
                        /* Get the whole large page if slot alignment is ok */
                        if (s > psize && slot_is_aligned(memslot, s) &&
                            !(memslot->userspace_addr & (s - 1))) {
                pfn = page_to_pfn(page);

        npages = pgsize >> PAGE_SHIFT;
        pgorder = __ilog2(npages);
        physp += (gfn - memslot->base_gfn) & ~(npages - 1);
        spin_lock(&kvm->arch.slot_phys_lock);
        for (i = 0; i < npages; ++i) {
                physp[i] = ((pfn + i) << PAGE_SHIFT) +
                        got + is_io + pgorder;
        spin_unlock(&kvm->arch.slot_phys_lock);

        up_read(&current->mm->mmap_sem);
/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel)
        struct kvm *kvm = vcpu->kvm;
        unsigned long psize, gpa, gfn;
        struct kvm_memory_slot *memslot;

        if (kvm->arch.using_mmu_notifiers)

        psize = hpte_page_size(pteh, ptel);

        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(kvm, gfn);
        if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (!slot_is_aligned(memslot, psize))
                if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)

        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        vcpu->arch.pgdir = current->mm->pgd;
        ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
                pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
                ret = H_RESOURCE;       /* or something */
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
                if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
                if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
                        return &vcpu->arch.slb[i];

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
        unsigned long ra_mask;

        ra_mask = hpte_page_size(v, r) - 1;
        return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
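
/*
 * Translate a guest effective address to a guest real address by searching
 * the guest's hashed page table, mimicking what the hardware MMU would do.
 * Fills in *gpte with the translation and the permissions derived from the
 * HPTE's PP bits, storage key and no-execute/guarded bits.
 */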
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data)
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_slb *slbe;
        unsigned long pp, key;
        unsigned long *hptep;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

                slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
                /* real mode access */
                slb_v = vcpu->kvm->arch.vrma_slb_v;

        /* Find the HPTE in the hash table */
        index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                         HPTE_V_VALID | HPTE_V_ABSENT);
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hptep[0] & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;

        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");

        gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

        /* Get PP bits and key for permission check */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;

        /* Calculate permissions */
        gpte->may_read = hpte_read_permission(pp, key);
        gpte->may_write = hpte_write_permission(pp, key);
        gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

        /* Storage key permission check for POWER7 */
        if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
                int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);

        /* Get the guest physical address */
        gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
        if ((instr & 0xfc000000) == 0x7c000000)
                mask = 0x100;           /* major opcode 31 */
        return (instr & mask) != 0;
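
/*
 * Handle a guest access to an address with no backing memslot: fetch the
 * faulting instruction if needed, check that its direction matches the
 * original fault, record the guest physical and effective addresses, and
 * hand the access to the generic MMIO emulation code.
 */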
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned long gpa, gva_t ea, int is_store)
        unsigned long srr0 = kvmppc_get_pc(vcpu);

        /*
         * We try to load the last instruction.  We don't let
         * emulate_instruction do it as it doesn't check what
         * If we fail, we just return to the guest and try executing it again.
         */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
                ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
                if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
                vcpu->arch.last_inst = last_inst;

        /*
         * WARNING: We do not know for sure whether the instruction we just
         * read from memory is the same that caused the fault in the first
         * place.  If the instruction we read is neither a load nor a store,
         * then it can't access memory, so we don't need to worry about
         * enforcing access permissions.  So, assuming it is a load or
         * store, we just check that its direction (load or store) is
         * consistent with the original fault, since that's what we
         * checked the access permissions against.  If there is a mismatch
         * we just return and retry the instruction.
         */
        if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)

        /*
         * Emulated accesses are emulated by looking at the hash for
         * translation once, then performing the access later.  The
         * translation could be invalidated in the meantime, at which
         * point performing the subsequent memory access on the old
         * physical address could possibly be a security hole for the
         * guest (but not the host).
         *
         * This is less of an issue for MMIO stores since they aren't
         * globally visible.  It could be an issue for MMIO loads to
         * a certain extent but we'll ignore it for now.
         */

        vcpu->arch.paddr_accessed = gpa;
        vcpu->arch.vaddr_accessed = ea;
        return kvmppc_emulate_mmio(run, vcpu);
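
/*
 * Handle a host-side page fault for a guest HPT translation: revalidate the
 * HPTE found by the real-mode handler, resolve the backing host page (or
 * punt to MMIO emulation if there is no memslot), and update the HPTE to
 * point at the host physical page with appropriate permissions.
 */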
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned long ea, unsigned long dsisr)
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hptep, hpte[3], r;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gpa, gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        struct revmap_entry *rev;
        struct page *page, *pages[1];
        long index, ret, npages;
        unsigned int writing, write_ok;
        struct vm_area_struct *vma;
        unsigned long rcbits;

        /*
         * Real-mode code has already searched the HPT and found the
         * entry we're interested in.  Lock the entry and check that
         * it hasn't changed.  If it has, just return and re-execute the
         * instruction.
         */
        if (ea != vcpu->arch.pgfault_addr)
        index = vcpu->arch.pgfault_index;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        rev = &kvm->arch.revmap[index];
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
        hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
        hpte[2] = r = rev->guest_rpte;
        asm volatile("lwsync" : : : "memory");

        if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
            hpte[1] != vcpu->arch.pgfault_hpte[1])

        /* Translate the logical address and get the page */
        psize = hpte_page_size(hpte[0], r);
        gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
        gfn = gpa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        if (!kvm->arch.using_mmu_notifiers)
                return -EFAULT;         /* should never get here */

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;

        pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */

        hva = gfn_to_hva_memslot(memslot, gfn);
        npages = get_user_pages_fast(hva, 1, writing, pages);

                /* Check if it's an I/O mapping */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, hva);
                if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        pfn = vma->vm_pgoff +
                                ((hva - vma->vm_start) >> PAGE_SHIFT);
                        is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                        write_ok = vma->vm_flags & VM_WRITE;
                up_read(&current->mm->mmap_sem);

                if (PageHuge(page)) {
                        page = compound_head(page);
                        pte_size <<= compound_order(page);
                /* if the guest wants write access, see if that is OK */
                if (!writing && hpte_is_writable(r)) {
                        /*
                         * We need to protect against page table destruction
                         * while looking up and updating the pte.
                         */
                        rcu_read_lock_sched();
                        ptep = find_linux_pte_or_hugepte(current->mm->pgd,
                        if (ptep && pte_present(*ptep)) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
                        rcu_read_unlock_sched();
                pfn = page_to_pfn(page);

        if (psize > pte_size)

        /* Check WIMG vs. the actual page we're accessing */
        if (!hpte_cache_flags_ok(r, is_io)) {
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;

        /* Set the HPTE to point to pfn */
        r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);

        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
        if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
            rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */

        hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

        rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

        /* Check if we might have been invalidated; let the guest retry if so */
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {

        /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
        r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

        if (hptep[0] & HPTE_V_VALID) {
                /* HPTE was previously valid, so we need to invalidate it */
                hptep[0] |= HPTE_V_ABSENT;
                kvmppc_invalidate_hpte(kvm, hptep, index);
                /* don't lose previous R and C bits */
                r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
                kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);

        asm volatile("ptesync" : : : "memory");

        if (page && hpte_is_writable(r))
        /*
         * We drop pages[0] here, not page, because page might
         * have been set to the head page of a compound, but
         * we have to drop the reference on the correct tail
         * page to match the get inside gup()
         */
        hptep[0] &= ~HPTE_V_HVLOCK;
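
/*
 * MMU notifier helpers: walk every memslot that intersects the host virtual
 * address range [start, end) and invoke the given handler on the rmap chain
 * of each guest frame in that range.  kvm_handle_hva() is the single-page
 * convenience wrapper.
 */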
static int kvm_handle_hva_range(struct kvm *kvm,
                                int (*handler)(struct kvm *kvm,
                                               unsigned long *rmapp,
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gfn_t gfn_offset = gfn - memslot->base_gfn;

                        ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
        return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
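
/*
 * Remove all HPTEs on the rmap chain for this guest frame: unchain each
 * entry, invalidate it if it is still valid, and harvest its referenced and
 * changed bits into the rmap entry and the guest view of the HPTE.
 */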
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long h, i, j;
        unsigned long *hptep;
        unsigned long ptel, psize, rcbits;

                if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {

                /*
                 * To avoid an ABBA deadlock with the HPTE lock bit,
                 * we can't spin on the HPTE lock while holding the
                 * rmap chain lock.
                 */
                i = *rmapp & KVMPPC_RMAP_INDEX;
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        while (hptep[0] & HPTE_V_HVLOCK)

                /* chain is now empty */
                        *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                /* remove i from chain */
                        rev[i].forw = rev[i].back = i;
                        *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;

                /* Now check and modify the HPTE */
                ptel = rev[i].guest_rpte;
                psize = hpte_page_size(hptep[0], ptel);
                if ((hptep[0] & HPTE_V_VALID) &&
                    hpte_rpn(ptel, psize) == gfn) {
                        if (kvm->arch.using_mmu_notifiers)
                                hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        /* Harvest R and C */
                        rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
                        rev[i].guest_rpte = ptel | rcbits;
                hptep[0] &= ~HPTE_V_HVLOCK;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
        if (kvm->arch.using_mmu_notifiers)
                kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
        if (kvm->arch.using_mmu_notifiers)
                kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
        unsigned long *rmapp;

        rmapp = memslot->arch.rmap;
        gfn = memslot->base_gfn;
        for (n = memslot->npages; n; --n) {
                /*
                 * Testing the present bit without locking is OK because
                 * the memslot has been marked invalid already, and hence
                 * no new HPTEs referencing this page can be created,
                 * thus the present bit can't go from 0 to 1.
                 */
                if (*rmapp & KVMPPC_RMAP_PRESENT)
                        kvm_unmap_rmapp(kvm, rmapp, gfn);
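
/*
 * "Age" a guest frame for the MMU notifier: report whether any HPTE mapping
 * it has the referenced (R) bit set, clearing the hardware R bits but
 * preserving them in the guest's view of the HPTE.
 */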
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;

        if (*rmapp & KVMPPC_RMAP_REFERENCED) {
                *rmapp &= ~KVMPPC_RMAP_REFERENCED;
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));

                /* If this HPTE isn't referenced, ignore it */
                if (!(hptep[1] & HPTE_R_R))

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        while (hptep[0] & HPTE_V_HVLOCK)

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
                        kvmppc_clear_ref_hpte(kvm, hptep, i);
                        rev[i].guest_rpte |= HPTE_R_R;

                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
        if (!kvm->arch.using_mmu_notifiers)
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;

        if (*rmapp & KVMPPC_RMAP_REFERENCED)

        if (*rmapp & KVMPPC_RMAP_REFERENCED)

        if (*rmapp & KVMPPC_RMAP_PRESENT) {
                i = head = *rmapp & KVMPPC_RMAP_INDEX;
                        hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
                        if (hp[1] & HPTE_R_R)
                } while ((i = j) != head);

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
        if (!kvm->arch.using_mmu_notifiers)
        return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        if (!kvm->arch.using_mmu_notifiers)
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
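
/*
 * Test and clear the changed (C) bit for a guest frame: return 1 if any HPTE
 * mapping it has C set (i.e. the page has been written).  Clearing C requires
 * temporarily making the HPTE absent and invalidating it; the bit is kept in
 * the guest's view of the HPTE.  Used below to build the dirty log.
 */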
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;

        if (*rmapp & KVMPPC_RMAP_CHANGED) {
                *rmapp &= ~KVMPPC_RMAP_CHANGED;
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));

                if (!(hptep[1] & HPTE_R_C))

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        while (hptep[0] & HPTE_V_HVLOCK)

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
                        /* need to make it temporarily absent to clear C */
                        hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        hptep[1] &= ~HPTE_R_C;
                        hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
                        rev[i].guest_rpte |= HPTE_R_C;
                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);
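
/*
 * Build the dirty bitmap for a memslot by running kvm_test_clear_dirty()
 * over every guest frame in the slot.
 */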
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
        unsigned long *rmapp;

        rmapp = memslot->arch.rmap;
        for (i = 0; i < memslot->npages; ++i) {
                if (kvm_test_clear_dirty(kvm, rmapp) && map)
                        __set_bit_le(i, map);
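
/*
 * Pin the guest page containing gpa and return a kernel-virtual pointer to
 * it, along with the number of usable bytes from that offset in *nb_ret.
 * Intended for host code (for example the VPA handling in book3s_hv.c) that
 * needs a long-lived mapping of guest memory; kvmppc_unpin_guest_page()
 * drops the reference.
 */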
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
                            unsigned long *nb_ret)
        struct kvm_memory_slot *memslot;
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct page *page, *pages[1];
        unsigned long hva, psize, offset;
        unsigned long *physp;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        memslot = gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
        if (!kvm->arch.using_mmu_notifiers) {
                physp = memslot->arch.slot_phys;
                physp += gfn - memslot->base_gfn;
                        if (kvmppc_get_guest_page(kvm, gfn, memslot,
                page = pfn_to_page(pa >> PAGE_SHIFT);
                hva = gfn_to_hva_memslot(memslot, gfn);
                npages = get_user_pages_fast(hva, 1, 1, pages);
        srcu_read_unlock(&kvm->srcu, srcu_idx);

        if (PageHuge(page)) {
                page = compound_head(page);
                psize <<= compound_order(page);
        offset = gpa & (psize - 1);
        *nb_ret = psize - offset;
        return page_address(page) + offset;

        srcu_read_unlock(&kvm->srcu, srcu_idx);
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
        struct page *page = virt_to_page(va);
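
/*
 * Per-vcpu MMU setup: size the guest SLB for the host CPU type and install
 * the HV translation and MSR-reset callbacks into the vcpu's MMU ops.
 */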
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        if (cpu_has_feature(CPU_FTR_ARCH_206))
                vcpu->arch.slb_nr = 32;         /* POWER7 */
                vcpu->arch.slb_nr = 64;

        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;