/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
        struct mm_struct *mm = arg;

        if (current->active_mm == mm) {
                clear_user_asce();
                set_user_asce(mm);
        }
        __tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm)
{
        unsigned long *table, *pgd;

        /* upgrade should only happen from 3 to 4 levels */
        BUG_ON(mm->context.asce_limit != (1UL << 42));

        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;

        spin_lock_bh(&mm->page_table_lock);
        pgd = (unsigned long *) mm->pgd;
        crst_table_init(table, _REGION2_ENTRY_EMPTY);
        pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
        mm->pgd = (pgd_t *) table;
        mm->context.asce_limit = 1UL << 53;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                           _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
        mm->task_size = mm->context.asce_limit;
        spin_unlock_bh(&mm->page_table_lock);

        on_each_cpu(__crst_table_upgrade, mm, 0);
        return 0;
}
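
/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as the arch mmap code might grow the address space when a requested
 * mapping no longer fits below the current ASCE limit. The function name
 * example_check_asce_limit and its error handling are assumptions for
 * illustration only.
 */
#if 0
static int example_check_asce_limit(struct mm_struct *mm, unsigned long addr,
                                    unsigned long len)
{
        int rc;

        if (addr + len > mm->context.asce_limit) {
                rc = crst_table_upgrade(mm);    /* 3 -> 4 levels */
                if (rc)
                        return rc;
        }
        return 0;
}
#endif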

void crst_table_downgrade(struct mm_struct *mm)
{
        pgd_t *pgd;

        /* downgrade should only happen from 3 to 2 levels (compat only) */
        BUG_ON(mm->context.asce_limit != (1UL << 42));

        if (current->active_mm == mm) {
                clear_user_asce();
                __tlb_flush_mm(mm);
        }

        pgd = mm->pgd;
        mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
        mm->context.asce_limit = 1UL << 31;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                           _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
        mm->task_size = mm->context.asce_limit;
        crst_table_free(mm, (unsigned long *) pgd);

        if (current->active_mm == mm)
                set_user_asce(mm);
}

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;
        unsigned long etype, atype;

        if (limit < (1UL << 31)) {
                limit = (1UL << 31) - 1;
                atype = _ASCE_TYPE_SEGMENT;
                etype = _SEGMENT_ENTRY_EMPTY;
        } else if (limit < (1UL << 42)) {
                limit = (1UL << 42) - 1;
                atype = _ASCE_TYPE_REGION3;
                etype = _REGION3_ENTRY_EMPTY;
        } else if (limit < (1UL << 53)) {
                limit = (1UL << 53) - 1;
                atype = _ASCE_TYPE_REGION2;
                etype = _REGION2_ENTRY_EMPTY;
        } else {
                limit = -1UL;
                atype = _ASCE_TYPE_REGION1;
                etype = _REGION1_ENTRY_EMPTY;
        }
        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
        INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                goto out_free;
        page->index = 0;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, etype);
        gmap->table = table;
        gmap->asce = atype | _ASCE_TABLE_LENGTH |
                _ASCE_USER_BITS | __pa(table);
        gmap->asce_end = limit;
        down_write(&mm->mmap_sem);
        list_add(&gmap->list, &mm->context.gmap_list);
        up_write(&mm->mmap_sem);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
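
/*
 * Illustrative sketch (not part of the original file): typical creation of
 * a guest address space by a hypervisor such as KVM. The function name and
 * the 1 TB limit are assumptions for illustration only.
 */
#if 0
static struct gmap *example_create_gmap(struct mm_struct *mm)
{
        /*
         * The limit is rounded up internally: a 1 TB limit selects a
         * 3-level (region-3) table and an asce_end of (1UL << 42) - 1.
         */
        return gmap_alloc(mm, 1UL << 40);
}
#endif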

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_asce(gmap->mm, gmap->asce);
        else
                __tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        radix_tree_delete(root, index);
                }
        } while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_asce(gmap->mm, gmap->asce);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
                __free_pages(page, 2);
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);
        down_write(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        up_write(&gmap->mm->mmap_sem);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
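
/*
 * Illustrative sketch (not part of the original file): a hypervisor would
 * bracket guest execution with gmap_enable()/gmap_disable() so the
 * low-level SIE entry code can find the active gmap in the lowcore.
 * example_run_guest is a hypothetical name.
 */
#if 0
static void example_run_guest(struct gmap *gmap)
{
        gmap_enable(gmap);
        /* ... enter SIE and execute the guest ... */
        gmap_disable(gmap);
}
#endif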

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
                            unsigned long init, unsigned long gaddr)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        spin_lock(&gmap->mm->page_table_lock);
        if (*table & _REGION_ENTRY_INVALID) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
                page->index = gaddr;
                page = NULL;
        }
        spin_unlock(&gmap->mm->page_table_lock);
        if (page)
                __free_pages(page, 2);
        return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
        struct page *page;
        unsigned long offset, mask;

        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
        page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
}
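
/*
 * Worked example (added for illustration): with PTRS_PER_PMD = 2048 and
 * 8-byte entries, a segment table spans 16K, so "mask" clears the low 14
 * bits of the entry address. An entry at byte offset 0x808 within its
 * table has index 0x101 (257), giving offset = 257 * PMD_SIZE = 257 MB.
 * page->index holds the guest address covered by the table's first entry
 * (set in gmap_alloc_table), so the sum is the guest segment address.
 */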

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
        unsigned long *entry;
        int flush = 0;

        spin_lock(&gmap->guest_table_lock);
        entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
        if (entry) {
                flush = (*entry != _SEGMENT_ENTRY_INVALID);
                *entry = _SEGMENT_ENTRY_INVALID;
        }
        spin_unlock(&gmap->guest_table_lock);
        return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE)
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len < from || to + len < to ||
            from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Remove old translation */
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
                /* Store new translation */
                if (radix_tree_insert(&gmap->guest_to_host,
                                      (to + off) >> PMD_SHIFT,
                                      (void *) from + off))
                        break;
        }
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        if (off >= len)
                return 0;
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
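
/*
 * Illustrative sketch (not part of the original file): backing the first
 * 16 MB of guest real memory with a PMD-aligned region of the host
 * process. All names and values are assumptions for illustration only.
 */
#if 0
static int example_map_guest_ram(struct gmap *gmap, unsigned long host_base)
{
        /* host_base and the length must be 1 MB (PMD_SIZE) aligned */
        return gmap_map_segment(gmap, host_base, 0x0UL, 16UL << 20);
}
#endif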

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long)
                radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
        return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_translate(gmap, gaddr);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                        unsigned long vmaddr)
{
        struct gmap *gmap;
        int flush;

        list_for_each_entry(gmap, &mm->context.gmap_list, list) {
                flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
                if (flush)
                        gmap_flush_tlb(gmap);
        }
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
        struct mm_struct *mm;
        unsigned long *table;
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int rc;

        /* Create higher level tables in the gmap page table */
        table = gmap->table;
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
                table += (gaddr >> 53) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
                                     gaddr & 0xffe0000000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
                table += (gaddr >> 42) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
                                     gaddr & 0xfffffc0000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
                table += (gaddr >> 31) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
                                     gaddr & 0xffffffff80000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        table += (gaddr >> 20) & 0x7ff;
        /* Walk the parent mm page table */
        mm = gmap->mm;
        pgd = pgd_offset(mm, vmaddr);
        VM_BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* large pmds cannot yet be handled */
        if (pmd_large(*pmd))
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
                return rc;
        ptl = pmd_lock(mm, pmd);
        spin_lock(&gmap->guest_table_lock);
        if (*table == _SEGMENT_ENTRY_INVALID) {
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc)
                        *table = pmd_val(*pmd);
        } else
                rc = 0;
        spin_unlock(&gmap->guest_table_lock);
        spin_unlock(ptl);
        radix_tree_preload_end();
        return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        unsigned long vmaddr;
        int rc;

        down_read(&gmap->mm->mmap_sem);
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr)) {
                rc = vmaddr;
                goto out_up;
        }
        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
                rc = -EFAULT;
                goto out_up;
        }
        rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
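
/*
 * Illustrative sketch (not part of the original file): resolving a guest
 * write fault, as a SIE interception handler might do. The function name
 * is hypothetical.
 */
#if 0
static int example_handle_guest_fault(struct gmap *gmap, unsigned long gaddr)
{
        /* Faults in the host page table and links the gmap segment */
        return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}
#endif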

static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
                struct page *page = migration_entry_to_page(entry);

                if (PageAnon(page))
                        dec_mm_counter(mm, MM_ANONPAGES);
                else
                        dec_mm_counter(mm, MM_FILEPAGES);
        }
        free_swap_and_cache(entry);
}

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr, ptev, pgstev;
        pte_t *ptep, pte;
        spinlock_t *ptl;
        pgste_t pgste;

        /* Find the vm address for the guest address */
        vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (!vmaddr)
                return;
        vmaddr |= gaddr & ~PMD_MASK;
        /* Get pointer to the page table entry */
        ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
        if (unlikely(!ptep))
                return;
        pte = *ptep;
        if (!pte_swap(pte))
                goto out_pte;
        /* Zap unused and logically-zero pages */
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        ptev = pte_val(pte);
        if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
            ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
                gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
                pte_clear(gmap->mm, vmaddr, ptep);
        }
        pgste_set_unlock(ptep, pgste);
out_pte:
        pte_unmap_unlock(ptep, ptl);
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
        unsigned long gaddr, vmaddr, size;
        struct vm_area_struct *vma;

        down_read(&gmap->mm->mmap_sem);
        for (gaddr = from; gaddr < to;
             gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
                /* Find the vm address for the guest address */
                vmaddr = (unsigned long)
                        radix_tree_lookup(&gmap->guest_to_host,
                                          gaddr >> PMD_SHIFT);
                if (!vmaddr)
                        continue;
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size, NULL);
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_add(&nb->list, &gmap_notifier_list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_del_init(&nb->list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
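
/*
 * Illustrative sketch (not part of the original file): registering a
 * notifier so the hypervisor learns when a pte marked with PGSTE_IN_BIT
 * is invalidated. The callback and variable names are hypothetical.
 */
#if 0
static void example_pte_invalidated(struct gmap *gmap, unsigned long gaddr)
{
        /* e.g. kick the vcpu that armed the notification for gaddr */
}

static struct gmap_notifier example_notifier = {
        .notifier_call = example_pte_invalidated,
};

static void example_register(void)
{
        gmap_register_ipte_notifier(&example_notifier);
}
#endif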

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
        unsigned long addr;
        spinlock_t *ptl;
        pte_t *ptep, entry;
        pgste_t pgste;
        int rc = 0;

        if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
                return -EINVAL;
        down_read(&gmap->mm->mmap_sem);
        while (len) {
                /* Convert gmap address and connect the page tables */
                addr = __gmap_translate(gmap, gaddr);
                if (IS_ERR_VALUE(addr)) {
                        rc = addr;
                        break;
                }
                /* Get the page mapped */
                if (fixup_user_fault(current, gmap->mm, addr,
                                     FAULT_FLAG_WRITE)) {
                        rc = -EFAULT;
                        break;
                }
                rc = __gmap_link(gmap, gaddr, addr);
                if (rc)
                        break;
                /* Walk the process page table, lock and get pte pointer */
                ptep = get_locked_pte(gmap->mm, addr, &ptl);
                VM_BUG_ON(!ptep);
                /* Set notification bit in the pgste of the pte */
                entry = *ptep;
                if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
                        pgste = pgste_get_lock(ptep);
                        pgste_val(pgste) |= PGSTE_IN_BIT;
                        pgste_set_unlock(ptep, pgste);
                        gaddr += PAGE_SIZE;
                        len -= PAGE_SIZE;
                }
                pte_unmap_unlock(ptep, ptl);
        }
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
        unsigned long offset, gaddr;
        unsigned long *table;
        struct gmap_notifier *nb;
        struct gmap *gmap;

        offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
        offset = offset * (4096 / sizeof(pte_t));
        spin_lock(&gmap_notifier_lock);
        list_for_each_entry(gmap, &mm->context.gmap_list, list) {
                table = radix_tree_lookup(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (!table)
                        continue;
                gaddr = __gmap_segment_gaddr(table) + offset;
                list_for_each_entry(nb, &gmap_notifier_list, list)
                        nb->notifier_call(gmap, gaddr);
        }
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq)
{
        spinlock_t *ptl;
        pgste_t old, new;
        pte_t *ptep;

        down_read(&mm->mmap_sem);
retry:
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        if (!(pte_val(*ptep) & _PAGE_INVALID) &&
             (pte_val(*ptep) & _PAGE_PROTECT)) {
                pte_unmap_unlock(ptep, ptl);
                if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                goto retry;
        }

        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
                            PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                unsigned long address, bits, skey;

                address = pte_val(*ptep) & PAGE_MASK;
                skey = (unsigned long) page_get_storage_key(address);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
                page_set_storage_key(address, skey, !nq);
                /* Merge host changed & referenced into pgste */
                pgste_val(new) |= bits << 52;
        }
        /* changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) &
            (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
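
/*
 * Illustrative sketch (not part of the original file): how a KVM ioctl
 * handler might set a guest storage key. The function name and the key
 * value (ACC = 3, FP/R/C cleared) are assumptions for illustration only.
 */
#if 0
static int example_set_key(struct mm_struct *mm, unsigned long addr)
{
        /* nq = false: perform the quiescing variant of the key update */
        return set_guest_storage_key(mm, addr, 0x30UL, false);
}
#endif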

unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;
        uint64_t physaddr;
        unsigned long key = 0;

        down_read(&mm->mmap_sem);
        ptep = get_locked_pte(mm, addr, &ptl);
        if (unlikely(!ptep)) {
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        pgste = pgste_get_lock(ptep);

        if (pte_val(*ptep) & _PAGE_INVALID) {
                key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
                key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
                key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
                key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
        } else {
                physaddr = pte_val(*ptep) & PAGE_MASK;
                key = page_get_storage_key(physaddr);

                /* Reflect guest's logical view, not physical */
                if (pgste_val(pgste) & PGSTE_GR_BIT)
                        key |= _PAGE_REFERENCED;
                if (pgste_val(pgste) & PGSTE_GC_BIT)
                        key |= _PAGE_CHANGED;
        }

        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        up_read(&mm->mmap_sem);
        return key;
}
EXPORT_SYMBOL(get_guest_storage_key);

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
        {
                .procname       = "allocate_pgste",
                .data           = &page_table_allocate_pgste,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO | S_IWUSR,
                .proc_handler   = proc_dointvec,
                .extra1         = &page_table_allocate_pgste_min,
                .extra2         = &page_table_allocate_pgste_max,
        },
        { }
};

static struct ctl_table page_table_sysctl_dir[] = {
        {
                .procname       = "vm",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = page_table_sysctl,
        },
        { }
};

static int __init page_table_register_sysctl(void)
{
        return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#else /* CONFIG_PGSTE */

static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                               unsigned long vmaddr)
{
}

#endif /* CONFIG_PGSTE */

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}
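
/*
 * Added note (illustration): page->_mapcount tracks the state of the two
 * 2K page table fragments within a 4K page as a small bit field:
 *
 *      bit 0/1 - fragment 0/1 is in use
 *      bit 4/5 - fragment 0/1 is pending an RCU grace period before free
 *
 * Example: allocating the second fragment of a half-used page flips bit 1,
 * so atomic_xor_bits(&page->_mapcount, 1U << 1) turns state 1 into 3 and
 * the page leaves mm->context.pgtable_list because it is now full.
 */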

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
        unsigned long *table;
        struct page *page;
        unsigned int mask, bit;

        /* Try to get a fragment of a 4K page as a 2K page table */
        if (!mm_alloc_pgste(mm)) {
                table = NULL;
                spin_lock_bh(&mm->context.list_lock);
                if (!list_empty(&mm->context.pgtable_list)) {
                        page = list_first_entry(&mm->context.pgtable_list,
                                                struct page, lru);
                        mask = atomic_read(&page->_mapcount);
                        mask = (mask | (mask >> 4)) & 3;
                        if (mask != 3) {
                                table = (unsigned long *) page_to_phys(page);
                                bit = mask & 1;         /* =1 -> second 2K */
                                if (bit)
                                        table += PTRS_PER_PTE;
                                atomic_xor_bits(&page->_mapcount, 1U << bit);
                                list_del(&page->lru);
                        }
                }
                spin_unlock_bh(&mm->context.list_lock);
                if (table)
                        return table;
        }
        /* Allocate a fresh page */
        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }
        /* Initialize page table */
        table = (unsigned long *) page_to_phys(page);
        if (mm_alloc_pgste(mm)) {
                /* Return 4K page table with PGSTEs */
                atomic_set(&page->_mapcount, 3);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
                clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        } else {
                /* Return the first 2K fragment of the page */
                atomic_set(&page->_mapcount, 1);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
                spin_unlock_bh(&mm->context.list_lock);
        }
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (!mm_alloc_pgste(mm)) {
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
                spin_lock_bh(&mm->context.list_lock);
                mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
                spin_unlock_bh(&mm->context.list_lock);
                if (mask != 0)
                        return;
        }

        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        __free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                         unsigned long vmaddr)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (mm_alloc_pgste(mm)) {
                gmap_unlink(mm, table, vmaddr);
                table = (unsigned long *) (__pa(table) | 3);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
        spin_lock_bh(&mm->context.list_lock);
        mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
        if (mask & 3)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        else
                list_del(&page->lru);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (1U << bit));
        tlb_remove_table(tlb, table);
}
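
/*
 * Added note (illustration): tables queued for RCU-delayed freeing carry
 * a tag in the two low bits of the (at least 2K aligned) table address:
 *
 *      0 - a 16K CRST table (pmd, pud or region table)
 *      1 - the lower 2K half of a 4K pte table
 *      2 - the upper 2K half of a 4K pte table
 *      3 - a full 4K pte table with PGSTEs
 *
 * __tlb_remove_table() below decodes the tag to pick the matching free
 * routine once the grace period has elapsed.
 */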

static void __tlb_remove_table(void *_table)
{
        unsigned int mask = (unsigned long) _table & 3;
        void *table = (void *)((unsigned long) _table ^ mask);
        struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

        switch (mask) {
        case 0:         /* pmd or pud */
                free_pages((unsigned long) table, 2);
                break;
        case 1:         /* lower 2K of a 4K page table */
        case 2:         /* higher 2K of a 4K page table */
                if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
                break;
        }
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely
         * on IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        tlb->mm->context.flush_mm = 1;
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm_lazy(tlb->mm);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_flush_mmu(tlb);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
        unsigned long addr;

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
                follow_page(vma, addr, FOLL_SPLIT);
}

static inline void thp_split_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
                thp_split_vma(vma);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
        }
        mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct mm_struct *mm = current->mm;

        /* Do we have pgstes? if yes, we are done */
        if (mm_has_pgste(mm))
                return 0;
        /* Fail if the page tables are 2K */
        if (!mm_alloc_pgste(mm))
                return -EINVAL;
        down_write(&mm->mmap_sem);
        mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        up_write(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
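
/*
 * Illustrative sketch (not part of the original file): a hypervisor
 * enables PGSTEs for the current process before creating a guest; with
 * the vm.allocate_pgste sysctl set, the process got 4K page tables with
 * PGSTEs from the start. The function name is hypothetical.
 */
#if 0
static int example_init_vm(void)
{
        int rc;

        rc = s390_enable_sie();  /* fails with -EINVAL on 2K page tables */
        if (rc)
                return rc;
        /* ... allocate the gmap and the vcpus ... */
        return 0;
}
#endif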

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        unsigned long ptev;
        pgste_t pgste;

        pgste = pgste_get_lock(pte);
        /*
         * Remove all zero page mappings; with a policy that forbids
         * zero page mappings, subsequent faults for such pages will get
         * fresh anonymous pages.
         */
        if (is_zero_pfn(pte_pfn(*pte))) {
                ptep_flush_direct(walk->mm, addr, pte);
                pte_val(*pte) = _PAGE_INVALID;
        }
        /* Clear storage key */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
                              PGSTE_GR_BIT | PGSTE_GC_BIT);
        ptev = pte_val(*pte);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
        pgste_set_unlock(pte, pgste);
        return 0;
}

int s390_enable_skey(void)
{
        struct mm_walk walk = { .pte_entry = __s390_enable_skey };
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc = 0;

        down_write(&mm->mmap_sem);
        if (mm_use_skey(mm))
                goto out_up;

        mm->context.use_skey = 1;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                MADV_UNMERGEABLE, &vma->vm_flags)) {
                        mm->context.use_skey = 0;
                        rc = -ENOMEM;
                        goto out_up;
                }
        }
        mm->def_flags &= ~VM_MERGEABLE;

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        pgste_t pgste;

        pgste = pgste_get_lock(pte);
        pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
        pgste_set_unlock(pte, pgste);
        return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
        struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

        down_write(&mm->mmap_sem);
        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
        up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);

/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
        pte_t *pte;
        spinlock_t *ptl;
        bool dirty = false;

        pte = get_locked_pte(gmap->mm, address, &ptl);
        if (unlikely(!pte))
                return false;

        if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
                dirty = true;

        spin_unlock(ptl);
        return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
                           pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        /*
         * No need to flush the TLB: on s390 the reference bits live in
         * the storage key, never in the TLB.
         */
        return pmdp_test_and_clear_young(vma, address, pmdp);
}

int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_same(*pmdp, entry))
                return 0;
        pmdp_invalidate(vma, address, pmdp);
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        return 1;
}

static void pmdp_splitting_flush_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
                              (unsigned long *) pmdp)) {
                /* need to serialize against gup-fast (IRQ disabled) */
                smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
        }
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
        pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */