/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
#define hugepd_none(hpd)	((hpd).pd == 0)
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
 *
 * Defined in such a way that we can optimize away code block at build time
 * if CONFIG_HUGETLB_PAGE=n.
 */
int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pgd_val(pgd) & 0x3) != 0x0);
}
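
/*
 * Illustration (the authoritative bit layout lives in the page-table
 * headers, not this comment): a pointer to a next-level table keeps
 * the bottom two bits clear, while a leaf huge-page PTE does not, so:
 *
 *	pointer entry:	(val & 0x3) == 0x0  ->  pmd_huge() returns 0
 *	huge leaf PTE:	(val & 0x3) != 0x0  ->  pmd_huge() returns 1
 *
 * The same low-bit test applies at the PUD and PGD levels.
 */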
#else
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pgd_huge(pgd_t pgd)
{
	return 0;
}
#endif
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = (unsigned long)new |
			   (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}
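
/*
 * Sketch of the two hugepd encodings stored above (illustrative; the
 * authoritative layout is defined by the headers):
 *
 *   BOOK3S 64:	 pd = (unsigned long)new | (psize index << 2)
 *		 the psize index sits above bit 1 and the bottom two
 *		 bits stay 00, so a walker cannot mistake the entry
 *		 for a leaf huge PTE (cf. pmd_huge() above).
 *
 *   FSL Book3E: pd = ((unsigned long)new & ~PD_HUGE) | pshift
 *		 the page shift rides in the low bits, which is why
 *		 the hugepte cache must be aligned to
 *		 HUGEPD_SHIFT_MASK + 1 (enforced by the BUG_ON()s).
 */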
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
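
/*
 * Size-to-level summary for the BOOK3S 64 path above (with the
 * PGD_INDEX_SIZE = 12 / PTE_INDEX_SIZE = 8 layout noted earlier):
 *
 *	pshift == PGDIR_SHIFT (16GB)	leaf PTE directly in the PGD
 *	pshift == PUD_SHIFT		leaf PTE in the PUD
 *	pshift == PMD_SHIFT (16MB)	leaf PTE in the PMD
 *	other sizes			hugepd table hung off the
 *					enclosing level
 */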
#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#endif
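
/*
 * On this path every huge mapping goes through a hugepd; the only
 * question is which level it hangs off.  When a huge page is larger
 * than the range covered by a single directory entry (pshift >
 * pdshift), __hugepte_alloc() fans the same hugepd out across
 * 1 << (pshift - pdshift) consecutive entries, e.g. a fan-out of 4
 * when the page spans four entries (illustrative arithmetic).
 */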
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}
/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];
static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
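
/*
 * Illustrative command line for the interleaving handled above:
 *
 *	hugepagesz=4G hugepages=2 hugepagesz=1G hugepages=8
 *
 * Each "hugepagesz=" latches a size into the static 'size'; the next
 * "hugepages=" files its count under that size's psize index in
 * gpage_npages[].  A "hugepages=" with no valid preceding size is
 * skipped.
 */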
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}
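
/*
 * Example of the reverse walk above (illustrative, typical-config
 * numbers): with "hugepagesz=1G hugepages=2" on a 4KB-base-page
 * system, MAX_ORDER + PAGE_SHIFT is well below 30, so the 1GB size is
 * reserved here via memblock (two naturally aligned 1GB blocks),
 * while huge sizes small enough for the buddy allocator break out of
 * the loop and are left to the normal hugetlb path.
 */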
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;

	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;

	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;

	return 1;
}
#endif
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	/* PMD sharing is not supported for hugepages here */
	return 0;
}
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = this_cpu_ptr(&hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#endif
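
/*
 * Design note on the batching above: if another CPU might be walking
 * this mm's page tables (mm_users >= 2 and the mm is live elsewhere),
 * a hugepte page cannot be freed immediately; pointers are instead
 * collected in a per-cpu page and released after a grace period via
 * call_rcu_sched().  With 4KB pages a batch holds roughly
 * (PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t)
 * entries, i.e. a few hundred, amortising the RCU cost (illustrative
 * arithmetic; the exact count depends on the configuration).
 */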
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
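
/*
 * The floor/ceiling logic above mirrors free_pgd_range(): the hugepd
 * is torn down only when the whole span covered by these directory
 * entries lies within [floor, ceiling).  Illustration: with
 * pdshift = 24, pdmask rounds 'start' down to a 16MB boundary; if
 * that rounds below 'floor', a neighbouring mapping still shares the
 * entry and it must be left in place.
 */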
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;
	/*
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
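
/*
 * Worked example (illustrative numbers): with sz = 16MB (0x1000000),
 * addr = 0x11234000 and end = 0x30000000,
 *
 *	__boundary = (addr + sz) & ~(sz - 1) = 0x12000000
 *
 * which is below 'end', so the walk advances to the next 16MB
 * boundary.  The "- 1" forms keep the comparison safe when the
 * boundary wraps to 0 at the top of the address space.
 */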
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
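
/*
 * Examples: is_power_of_4(16) is true (16 = 4^2, even log2);
 * is_power_of_4(8) is false (odd log2); non-powers-of-2 fail the
 * is_power_of_2() test outright.  FSL Book3E TLB entries encode only
 * power-of-4 sizes, hence the check in add_huge_page_size() below.
 */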
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
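
/*
 * Illustrative outcomes (subject to the sizes the hardware actually
 * reports in mmu_psize_defs[]):
 *
 *	add_huge_page_size(16 << 20)	16MB = 4^12	registered
 *	add_huge_page_size(8 << 20)	8MB, not 4^n	-EINVAL on FSL
 *	add_huge_page_size(16 << 20)	repeated	0, hstate exists
 */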
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.
 */
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pgdp = pgdir + pgd_index(ea);
	pgd  = ACCESS_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so it must be re-checked by the caller for those
	 * conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = ACCESS_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = ACCESS_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does a hpte invalidate.
			 *
			 * A hugepage split is captured by pmd_trans_splitting,
			 * because we mark the pmd trans splitting and do a
			 * hpte invalidate.
			 */
			if (pmd_none(pmd) || pmd_trans_splitting(pmd))
				return NULL;

			if (pmd_huge(pmd) || pmd_large(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = ACCESS_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
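
/*
 * Usage sketch (hypothetical caller, mirroring the generic
 * get_user_pages_fast() walk): gup_huge_pd() iterates one hugepte
 * slot at a time, and each gup_hugepte() call pins every
 * base-page-sized subpage of the huge page that overlaps [addr, end),
 * taking a single speculative reference on the head page for all of
 * them and only then re-checking that the PTE did not change
 * underneath.
 */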