/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;
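
/*
 * Early boot page table allocation: grab a zeroed, naturally aligned
 * block straight from memblock, before the slab allocator is available.
 */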
#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	mb();
#else
	smp_wmb();
#endif
	return 0;
}
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
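
/*
 * Common ioremap worker: page-align the request, pick a virtual address
 * (from the vmalloc allocator once the mm is up, otherwise by bumping
 * ioremap_bot) and install the mapping via __ioremap_at().
 */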
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the mm subsystem is up,
	 * we use the vmalloc allocator between ioremap_bot and
	 * IOREMAP_END. Before that, we hand out addresses going up
	 * from ioremap_bot ourselves.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
/*
 * Unmap an IO region and remove it from the vmalloc area.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
/*
 * For a hugepage we keep the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, the pmd holds a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));
#endif
	return virt_to_page(pmd_page_vaddr(pmd));
}
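
/*
 * With 64K pages a PTE table only needs PTE_FRAG_SIZE bytes, so a full
 * page is carved into PTE_FRAG_NR fragments. A per-mm cursor
 * (mm->context.pte_frag) hands out the next free fragment, and the
 * backing page's reference count tracks how many fragments are in use.
 */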
#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
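
/* Slow path: no cached fragment, so allocate a fresh page and seed the cache. */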
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread installed a fragment page meanwhile, leave the
	 * cache alone and hand out our page with a single fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->_count, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}
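
/* Try the per-mm fragment cache first, and fall back to a new page. */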
pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
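
/*
 * Page table pages freed via the mmu_gather path carry the table's index
 * size encoded in the low bits of the pointer (shift 0 means a PTE
 * fragment page, anything else a PGT_CACHE slab object). On SMP the free
 * is deferred through tlb_remove_table() so concurrent lockless walkers
 * are not surprised.
 */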
#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);
		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}
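
/*
 * Atomically clear and set bits in a huge PMD. With PTE_ATOMIC_UPDATES
 * this spins while _PAGE_BUSY is set so we never race with a parallel
 * hash insertion; any hash entries for the old PMD are flushed before
 * the previous value is returned.
 */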
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old, tmp;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
#else
	old = pmd_val(*pmdp);
	*pmdp = __pmd((old & ~clr) | set);
#endif
	trace_hugepage_update(addr, old, clr, set);
	if (old & _PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
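
/*
 * Clear a PMD and flush. This handles both the huge-PMD case and the
 * normal PMD case that khugepaged hits while collapsing small pages.
 */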
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (pmd_trans_huge(*pmdp)) {
		pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	} else {
		/*
		 * khugepaged calls this for normal pmd
		 */
		pmd = *pmdp;
		pmd_clear(pmdp);
		/*
		 * Wait for all pending hash_page to finish. This is needed
		 * in case of subpage collapse. When we collapse normal pages
		 * to hugepage, we first clear the pmd, then invalidate all
		 * the PTE entries. The assumption here is that any low level
		 * page fault will see a none pmd and take the slow path that
		 * will wait on mmap_sem. But we could very well be in a
		 * hash_page with local ptep pointer value. Such a hash page
		 * can result in adding new HPTE entries for normal subpages.
		 * That means we could be modifying the page content as we
		 * copy them to a huge page. So wait for parallel hash_page
		 * to finish before invalidating HPTE entries. We can do this
		 * by sending an IPI to all the cpus and executing a dummy
		 * function there.
		 */
		kick_all_cpus_sync();
		/*
		 * Now invalidate the hpte entries in the range
		 * covered by pmd. This makes sure we take a
		 * fault and find the pmd as none, which results
		 * in a major fault that takes mmap_sem and hence
		 * waits for the collapse to complete. Without this
		 * the __collapse_huge_page_copy can result in copying
		 * the old content.
		 */
		flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	}
	return pmd;
}
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * We mark the pmd splitting and invalidate all the hpte
 * entries for this hugepage.
 */
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	unsigned long old, tmp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		ori	%1,%0,%4 \n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
	: "cc" );
#else
	old = pmd_val(*pmdp);
	*pmdp = __pmd(old | _PAGE_SPLITTING);
#endif
	/*
	 * If the splitting flag wasn't already set, go and flush the
	 * HPTE entries.
	 */
	trace_hugepage_splitting(address, old);
	if (!(old & _PAGE_SPLITTING)) {
		/* We need to flush the hpte */
		if (old & _PAGE_HASHPTE)
			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
	}
	/*
	 * This ensures that generic code relying on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;
	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * expose the deposited pgtable to other cpus
	 * before we set the hugepage PTE at pmd level;
	 * hash fault code looks at the deposited pgtable
	 * to store hash index values.
	 */
	smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
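
/*
 * Install a brand-new huge PMD; updates of an existing entry must go
 * through pmd_hugepage_update() instead.
 */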
698 * set a new huge pmd. We should not be called for updating
699 * an existing pmd entry. That should go via pmd_hugepage_update.
701 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
702 pmd_t *pmdp, pmd_t pmd)
704 #ifdef CONFIG_DEBUG_VM
705 WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
706 assert_spin_locked(&mm->page_table_lock);
707 WARN_ON(!pmd_trans_huge(pmd));
709 trace_hugepage_set_pmd(addr, pmd);
710 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
713 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
716 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize, i;
	unsigned long s_addr;
	int max_hpte_count;
	unsigned int psize, valid;
	unsigned char *hpte_slot_array;
	unsigned long hidx, vpn, vsid, hash, shift, slot;

	/*
	 * Flush all the hptes mapping this hugepage
	 */
	s_addr = addr & HPAGE_PMD_MASK;
	hpte_slot_array = get_hpte_slot_array(pmdp);
	/*
	 * If we try to do a HUGE PTE update after a withdraw is done,
	 * we will find the pointer below NULL. This happens when we do
	 * split_huge_page_pmd.
	 */
	if (!hpte_slot_array)
		return;

	/* Get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, s_addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & _PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(s_addr)) {
		ssize = user_segment_size(s_addr);
		vsid = get_vsid(mm->context.id, s_addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (ppc_md.hugepage_invalidate)
		return ppc_md.hugepage_invalidate(vsid, s_addr,
						  hpte_slot_array,
						  psize, ssize);
	/*
	 * No bulk hpte removal support, invalidate each entry
	 */
	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = HPAGE_PMD_SIZE >> shift;
	for (i = 0; i < max_hpte_count; i++) {
		/*
		 * 8 bits per each hpte entry
		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
		 */
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		ppc_md.hpte_invalidate(slot, vpn, psize,
				       MMU_PAGE_16M, ssize, 0);
	}
}
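
/* Helpers for building huge-PMD values: fold pgprot bits into a pmd_t. */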
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	pmd_val(pmd) |= pgprot_val(pgprot);
	return pmd;
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	pmd_t pmd;
	/*
	 * For a valid pte, we would have _PAGE_PRESENT or _PAGE_FILE always
	 * set. We use this to check for a THP page at pmd level:
	 * a leaf pte for a huge page has bottom two bits != 00.
	 */
	pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
	pmd_val(pmd) |= _PAGE_THP_HUGE;
	pmd = pmd_set_protbits(pmd, pgprot);
	return pmd;
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _HPAGE_CHG_MASK;
	pmd = pmd_set_protbits(pmd, newprot);
	return pmd;
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}

pmd_t pmdp_get_and_clear(struct mm_struct *mm,
			 unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index information.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that
	 * the hash fault path doesn't look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return old_pmd;
}
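
/*
 * THP is only advertised when the hash MMU can actually encode a 16MB
 * page (the penc entries below) alongside the base page sizes we may be
 * using, 64K or 4K.
 */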
int has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */