/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
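
/*
 * Allocate the guest's hashed page table (HPT) and the reverse-map
 * array used to remember which guest page each HPTE refers to, then
 * assign the guest an LPID and compute the SDR1 value that points
 * the hardware at the new HPT.
 */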
long kvmppc_alloc_hpt(struct kvm *kvm)
{
        unsigned long hpt;
        long lpid;
        struct revmap_entry *rev;
        struct kvmppc_linear_info *li;

        /* Allocate guest's hashed page table */
        li = kvm_alloc_hpt();
        if (li) {
                /* using preallocated memory */
                hpt = (ulong)li->base_virt;
                kvm->arch.hpt_li = li;
        } else {
                /* using dynamic memory */
                hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
                                       __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
        }

        if (!hpt) {
                pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
                return -ENOMEM;
        }
        kvm->arch.hpt_virt = hpt;

        /* Allocate reverse map array */
        rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
        if (!rev) {
                pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
                goto out_freehpt;
        }
        kvm->arch.revmap = rev;

        lpid = kvmppc_alloc_lpid();
        if (lpid < 0)
                goto out_freeboth;

        kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
        kvm->arch.lpid = lpid;

        pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
        return 0;

 out_freeboth:
        vfree(rev);
 out_freehpt:
        free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
        return -ENOMEM;
}
void kvmppc_free_hpt(struct kvm *kvm)
{
        kvmppc_free_lpid(kvm->arch.lpid);
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_li)
                kvm_release_hpt(kvm->arch.hpt_li);
        else
                free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize == 0x10000) ? 0x1000 : 0;
}
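
/*
 * Set up bolted HPT entries covering the VRMA (virtual real mode area),
 * the region the guest sees as real memory when address translation is
 * off.  One HPTE is created per naturally aligned chunk of 2^porder
 * bytes, using entry 7 of each HPTEG.
 */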
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                     unsigned long porder)
{
        unsigned long i;
        unsigned long npages;
        unsigned long hp_v, hp_r;
        unsigned long addr, hash;
        unsigned long psize;
        unsigned long hp0, hp1;
        long ret;

        psize = 1ul << porder;
        npages = memslot->npages >> (porder - PAGE_SHIFT);

        /* VRMA can't be > 1TB */
        if (npages > 1ul << (40 - porder))
                npages = 1ul << (40 - porder);
        /* Can't use more than 1 HPTE per HPTEG */
        if (npages > HPT_NPTEG)
                npages = HPT_NPTEG;

        hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
        hp1 = hpte1_pgsize_encoding(psize) |
                HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

        for (i = 0; i < npages; ++i) {
                addr = i << porder;
                /* can't use hpt_hash since va > 64 bits */
                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
                /*
                 * We assume that the hash table is empty and no
                 * vcpus are using it at this stage. Since we create
                 * at most one HPTE per HPTEG, we just assume entry 7
                 * is available and use it.
                 */
                hash = (hash << 3) + 7;
                hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
                hp_r = hp1 | addr;
                ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
                if (ret != H_SUCCESS) {
                        pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
                               addr, ret);
                        break;
                }
        }
}
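
/*
 * One-time MMU setup for HV KVM: check that we are running in
 * hypervisor mode, work out which LPID the host is using and which
 * LPID value is reserved, and initialize the LPID allocator
 * accordingly.
 */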
int kvmppc_mmu_hv_init(void)
{
        unsigned long host_lpid, rsvd_lpid;

        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return -EINVAL;

        /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
                rsvd_lpid = LPID_RSVD;
        } else {
                host_lpid = 0;                  /* PPC970 */
                rsvd_lpid = MAX_LPID_970;
        }

        kvmppc_init_lpid(rsvd_lpid + 1);

        kvmppc_claim_lpid(host_lpid);
        /* rsvd_lpid is reserved for use in partition switching */
        kvmppc_claim_lpid(rsvd_lpid);

        return 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
        kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
                                  struct kvm_memory_slot *memslot,
                                  unsigned long psize)
{
        unsigned long start;
        long np;
        struct page *page, *hpage, *pages[1];
        unsigned long s, pgsize;
        unsigned long *physp;
        unsigned int is_io, got, pgorder;
        struct vm_area_struct *vma;
        unsigned long pfn, i, npages;

        physp = kvm->arch.slot_phys[memslot->id];
        if (physp[gfn - memslot->base_gfn])
                return 0;       /* already have a reference to this page */

        is_io = 0;
        got = 0;
        page = NULL;
        pgsize = psize;
        start = gfn_to_hva_memslot(memslot, gfn);

        /* Instantiate and get the page we want access to */
        np = get_user_pages_fast(start, 1, 1, pages);
        if (np != 1) {
                /* Look up the vma for the page */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, start);
                if (!vma || vma->vm_start > start ||
                    start + psize > vma->vm_end ||
                    !(vma->vm_flags & VM_PFNMAP))
                        goto up_err;
                is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                /* check alignment of pfn vs. requested page size */
                if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
                        goto up_err;
                up_read(&current->mm->mmap_sem);
        } else {
                page = pages[0];
                got = KVMPPC_GOT_PAGE;

                /* See if this is a large page */
                s = PAGE_SIZE;
                if (PageHuge(page)) {
                        hpage = compound_head(page);
                        s <<= compound_order(hpage);
                        /* Get the whole large page if slot alignment is ok */
                        if (s > psize && slot_is_aligned(memslot, s) &&
                            !(memslot->userspace_addr & (s - 1))) {
                                start &= ~(s - 1);
                                pgsize = s;
                                get_page(hpage);
                                put_page(page);
                                page = hpage;
                        }
                }
                pfn = page_to_pfn(page);
        }

        npages = pgsize >> PAGE_SHIFT;
        pgorder = __ilog2(npages);
        physp += (gfn - memslot->base_gfn) & ~(npages - 1);
        spin_lock(&kvm->arch.slot_phys_lock);
        for (i = 0; i < npages; ++i) {
                physp[i] = ((pfn + i) << PAGE_SHIFT) +
                        got + is_io + pgorder;
        }
        spin_unlock(&kvm->arch.slot_phys_lock);
        return 0;

 up_err:
        up_read(&current->mm->mmap_sem);
        return -EINVAL;
}
/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                             long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long psize, gpa, gfn;
        struct kvm_memory_slot *memslot;
        long ret;

        if (kvm->arch.using_mmu_notifiers)
                goto do_insert;

        psize = hpte_page_size(pteh, ptel);

        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(kvm, gfn);
        if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (!slot_is_aligned(memslot, psize))
                        return H_PARAMETER;
                if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
                        return H_PARAMETER;
        }

 do_insert:
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        vcpu->arch.pgdir = current->mm->pgd;
        ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
                pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
                ret = H_RESOURCE;       /* or something */
        }
        return ret;
}
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
                                                         gva_t eaddr)
{
        int i;
        u64 mask;

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
                        continue;

                if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
                        mask = ESID_MASK_1T;
                else
                        mask = ESID_MASK;

                if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
                        return &vcpu->arch.slb[i];
        }
        return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
                        unsigned long ea)
{
        unsigned long ra_mask;

        ra_mask = hpte_page_size(v, r) - 1;
        return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}
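
/*
 * Translate a guest effective address to a guest real address on
 * behalf of the rest of KVM (e.g. for instruction emulation), using
 * the guest's SLB and the hashed page table, and work out the access
 * permissions that apply.
 */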
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_slb *slbe;
        unsigned long slb_v;
        unsigned long pp, key;
        unsigned long v, gr;
        unsigned long *hptep;
        long index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

        if (virtmode) {
                slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
                if (!slbe)
                        return -EINVAL;
                slb_v = slbe->origv;
        } else {
                /* real mode access */
                slb_v = vcpu->kvm->arch.vrma_slb_v;
        }

        /* Find the HPTE in the hash table */
        index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                         HPTE_V_VALID | HPTE_V_ABSENT);
        if (index < 0)
                return -ENOENT;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hptep[0] & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;

        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
        hptep[0] = v;

        gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

        /* Get PP bits and key for permission check */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        key &= slb_v;

        /* Calculate permissions */
        gpte->may_read = hpte_read_permission(pp, key);
        gpte->may_write = hpte_write_permission(pp, key);
        gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

        /* Storage key permission check for POWER7 */
        if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
                int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (amrfield & 1)
                        gpte->may_read = 0;
                if (amrfield & 2)
                        gpte->may_write = 0;
        }

        /* Get the guest physical address */
        gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
        return 0;
}
/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors. (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.) If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
        unsigned int mask;

        mask = 0x10000000;
        if ((instr & 0xfc000000) == 0x7c000000)
                mask = 0x100;           /* major opcode 31 */
        return (instr & mask) != 0;
}
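
/*
 * Handle a guest load or store to an address for which we have no
 * memslot, i.e. an emulated MMIO region: fetch and decode the faulting
 * instruction, then hand the access to the usual KVM MMIO emulation
 * path so that userspace can complete it.
 */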
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned long gpa, gva_t ea, int is_store)
{
        int ret;
        u32 last_inst;
        unsigned long srr0 = kvmppc_get_pc(vcpu);

        /* We try to load the last instruction. We don't let
         * emulate_instruction do it as it doesn't check what
         * kvmppc_ld returns.
         * If we fail, we just return to the guest and try executing it again.
         */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
                ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
                if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
                        return RESUME_GUEST;
                vcpu->arch.last_inst = last_inst;
        }

        /*
         * WARNING: We do not know for sure whether the instruction we just
         * read from memory is the same that caused the fault in the first
         * place. If the instruction we read is neither a load nor a store,
         * then it can't access memory, so we don't need to worry about
         * enforcing access permissions. So, assuming it is a load or
         * store, we just check that its direction (load or store) is
         * consistent with the original fault, since that's what we
         * checked the access permissions against. If there is a mismatch
         * we just return and retry the instruction.
         */
        if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
                return RESUME_GUEST;

        /*
         * Emulated accesses are emulated by looking at the hash for
         * translation once, then performing the access later. The
         * translation could be invalidated in the meantime, at which
         * point performing the subsequent memory access on the old
         * physical address could possibly be a security hole for the
         * guest (but not the host).
         *
         * This is less of an issue for MMIO stores since they aren't
         * globally visible. It could be an issue for MMIO loads to
         * a certain extent but we'll ignore it for now.
         */
        vcpu->arch.paddr_accessed = gpa;
        vcpu->arch.vaddr_accessed = ea;
        return kvmppc_emulate_mmio(run, vcpu);
}
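
/*
 * Handle a guest page fault that the real-mode HPT-miss handler could
 * not resolve on its own.  Revalidate the HPTE the guest faulted on,
 * find (and pin) the backing host page, and update the HPTE to point
 * at it; accesses to addresses without a memslot are forwarded to the
 * MMIO emulation path instead.
 */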
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hptep, hpte[3], r;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        struct revmap_entry *rev;
        struct page *page, *pages[1];
        long index, ret, npages;
        unsigned int writing, write_ok;
        struct vm_area_struct *vma;
        unsigned long rcbits;

        /*
         * Real-mode code has already searched the HPT and found the
         * entry we're interested in. Lock the entry and check that
         * it hasn't changed. If it has, just return and re-execute the
         * instruction.
         */
        if (ea != vcpu->arch.pgfault_addr)
                return RESUME_GUEST;
        index = vcpu->arch.pgfault_index;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        rev = &kvm->arch.revmap[index];

        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
        hpte[1] = hptep[1];
        hpte[2] = r = rev->guest_rpte;
        asm volatile("lwsync" : : : "memory");

        if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
            hpte[1] != vcpu->arch.pgfault_hpte[1])
                return RESUME_GUEST;

        /* Translate the logical address and get the page */
        psize = hpte_page_size(hpte[0], r);
        gfn = hpte_rpn(r, psize);
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        }

        if (!kvm->arch.using_mmu_notifiers)
                return -EFAULT;         /* should never get here */

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;

        pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */
        write_ok = writing;
        hva = gfn_to_hva_memslot(memslot, gfn);
        npages = get_user_pages_fast(hva, 1, writing, pages);

        /* Check if it's an I/O mapping */
        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, hva);
        if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
            (vma->vm_flags & VM_PFNMAP)) {
                pfn = vma->vm_pgoff +
                        ((hva - vma->vm_start) >> PAGE_SHIFT);
                is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                write_ok = vma->vm_flags & VM_WRITE;
        }
        up_read(&current->mm->mmap_sem);

        if (PageHuge(page)) {
                page = compound_head(page);
                pte_size <<= compound_order(page);
        }

        /* if the guest wants write access, see if that is OK */
        if (!writing && hpte_is_writable(r)) {
                /*
                 * We need to protect against page table destruction
                 * while looking up and updating the pte.
                 */
                rcu_read_lock_sched();
                ptep = find_linux_pte_or_hugepte(current->mm->pgd,
                if (ptep && pte_present(*ptep)) {
                        pte = kvmppc_read_update_linux_pte(ptep, 1);
                        if (pte_write(pte))
                                write_ok = 1;
                }
                rcu_read_unlock_sched();
        }
        pfn = page_to_pfn(page);

        if (psize > pte_size)

        /* Check WIMG vs. the actual page we're accessing */
        if (!hpte_cache_flags_ok(r, is_io)) {
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
        }

        /* Set the HPTE to point to pfn */
        r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);

        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
            rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */

        hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

        rmap = &memslot->rmap[gfn - memslot->base_gfn];

        /* Check if we might have been invalidated; let the guest retry if so */
        if (mmu_notifier_retry(vcpu, mmu_seq)) {

        /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
        r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

        if (hptep[0] & HPTE_V_VALID) {
                /* HPTE was previously valid, so we need to invalidate it */
                hptep[0] |= HPTE_V_ABSENT;
                kvmppc_invalidate_hpte(kvm, hptep, index);
                /* don't lose previous R and C bits */
                r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
        } else {
                kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
        }

        asm volatile("ptesync" : : : "memory");
        if (page && hpte_is_writable(r))

        /*
         * We drop pages[0] here, not page because page might
         * have been set to the head page of a compound, but
         * we have to drop the reference on the correct tail
         * page to match the get inside gup()
         */

        hptep[0] &= ~HPTE_V_HVLOCK;
}
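
/*
 * Walk the memslots, and for the one (if any) containing the given
 * host virtual address, invoke the supplied handler on the rmap chain
 * for the corresponding guest page.  This is the common driver for
 * the MMU notifier callbacks below.
 */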
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long gfn))
{
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

                        ret = handler(kvm, &memslot->rmap[gfn_offset],
                                      memslot->base_gfn + gfn_offset);
                        retval |= ret;
                }
        }

        return retval;
}
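
/*
 * MMU notifier "unmap" callback for one guest page: invalidate any
 * HPTE on the page's rmap chain that still maps it, and save the
 * harvested R and C bits back into the rmap entry and the guest view
 * of the HPTE.
 */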
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long h, i, j;
        unsigned long *hptep;
        unsigned long ptel, psize, rcbits;

        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {

        /*
         * To avoid an ABBA deadlock with the HPTE lock bit,
         * we can't spin on the HPTE lock while holding the
         * rmap chain lock.
         */
        i = *rmapp & KVMPPC_RMAP_INDEX;
        hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
        if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                /* unlock rmap before spinning on the HPTE lock */
                while (hptep[0] & HPTE_V_HVLOCK)
                        cpu_relax();

        /* chain is now empty */
        *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);

        /* remove i from chain */
        rev[i].forw = rev[i].back = i;
        *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;

        /* Now check and modify the HPTE */
        ptel = rev[i].guest_rpte;
        psize = hpte_page_size(hptep[0], ptel);
        if ((hptep[0] & HPTE_V_VALID) &&
            hpte_rpn(ptel, psize) == gfn) {
                hptep[0] |= HPTE_V_ABSENT;
                kvmppc_invalidate_hpte(kvm, hptep, i);
                /* Harvest R and C */
                rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
                *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
                rev[i].guest_rpte = ptel | rcbits;
        }
        hptep[0] &= ~HPTE_V_HVLOCK;
        return 0;
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        if (kvm->arch.using_mmu_notifiers)
                kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
        return 0;
}
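
/*
 * MMU notifier "age" callback for one guest page: test and clear the
 * referenced (R) bit in any HPTEs mapping the page, so the host can
 * tell whether the page has been used recently.
 */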
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;
        int ret = 0;

        if (*rmapp & KVMPPC_RMAP_REFERENCED) {
                *rmapp &= ~KVMPPC_RMAP_REFERENCED;
                ret = 1;
        }
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                return ret;
        }

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;

                /* If this HPTE isn't referenced, ignore it */
                if (!(hptep[1] & HPTE_R_R))
                        continue;

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                }

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
                        kvmppc_clear_ref_hpte(kvm, hptep, i);
                        rev[i].guest_rpte |= HPTE_R_R;
                        ret = 1;
                }
                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);

        return ret;
}
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}
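
/*
 * MMU notifier "test age" callback: report whether the page has its
 * referenced bit set anywhere, without clearing it.
 */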
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hp;

        if (*rmapp & KVMPPC_RMAP_REFERENCED)
                return 1;

        if (*rmapp & KVMPPC_RMAP_REFERENCED)
                return 1;

        if (*rmapp & KVMPPC_RMAP_PRESENT) {
                i = head = *rmapp & KVMPPC_RMAP_INDEX;
                do {
                        hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
                        j = rev[i].forw;
                        if (hp[1] & HPTE_R_R)
                                return 1;
                } while ((i = j) != head);
        }

        return 0;
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        if (!kvm->arch.using_mmu_notifiers)
                return;
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
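
/*
 * Test and clear the changed (C) bit for one guest page: if any HPTE
 * mapping the page has C set, make the HPTE temporarily absent so the
 * bit can be cleared safely, and remember it in the guest view of the
 * HPTE.  Returns nonzero if the page was dirty.
 */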
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;
        int ret = 0;

        if (*rmapp & KVMPPC_RMAP_CHANGED) {
                *rmapp &= ~KVMPPC_RMAP_CHANGED;
                ret = 1;
        }
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                return ret;
        }

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;

                if (!(hptep[1] & HPTE_R_C))
                        continue;

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                }

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
                        /* need to make it temporarily absent to clear C */
                        hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        hptep[1] &= ~HPTE_R_C;
                        hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
                        rev[i].guest_rpte |= HPTE_R_C;
                        ret = 1;
                }
                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);

        return ret;
}
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        unsigned long i;
        unsigned long *rmapp, *map;

        rmapp = memslot->rmap;
        map = memslot->dirty_bitmap;
        for (i = 0; i < memslot->npages; ++i) {
                if (kvm_test_clear_dirty(kvm, rmapp))
                        __set_bit_le(i, map);
                ++rmapp;
        }
        return 0;
}
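
/*
 * Pin a page of guest memory in the host and return a kernel-virtual
 * pointer to it, reporting via *nb_ret how many bytes are usable from
 * the returned offset to the end of the page.  Used when the host
 * needs direct access to a page of guest memory.
 */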
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
                            unsigned long *nb_ret)
{
        struct kvm_memory_slot *memslot;
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct page *page, *pages[1];
        int npages;
        unsigned long hva, psize, offset;
        unsigned long pa;
        unsigned long *physp;

        memslot = gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return NULL;
        if (!kvm->arch.using_mmu_notifiers) {
                physp = kvm->arch.slot_phys[memslot->id];
                if (!physp)
                        return NULL;
                physp += gfn - memslot->base_gfn;
                pa = *physp;
                if (!pa) {
                        if (kvmppc_get_guest_page(kvm, gfn, memslot,
                                                  PAGE_SIZE) < 0)
                                return NULL;
                        pa = *physp;
                }
                page = pfn_to_page(pa >> PAGE_SHIFT);
                get_page(page);
        } else {
                hva = gfn_to_hva_memslot(memslot, gfn);
                npages = get_user_pages_fast(hva, 1, 1, pages);
                if (npages < 1)
                        return NULL;
                page = pages[0];
        }
        psize = PAGE_SIZE;
        if (PageHuge(page)) {
                page = compound_head(page);
                psize <<= compound_order(page);
        }
        offset = gpa & (psize - 1);
        *nb_ret = psize - offset;
        return page_address(page) + offset;
}
void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
        struct page *page = virt_to_page(va);

        put_page(page);
}
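
/*
 * Per-vcpu MMU setup for HV guests: set the number of SLB entries the
 * guest sees and hook up the HV translation and MSR-reset callbacks.
 */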
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        if (cpu_has_feature(CPU_FTR_ARCH_206))
                vcpu->arch.slb_nr = 32;         /* POWER7 */
        else
                vcpu->arch.slb_nr = 64;

        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}