/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index into the compound page by the base page offset. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

#endif
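
/*
 * Worked example of the _PAGE_PSE test above (illustrative values, not
 * from the original file): on x86 the PSE bit is bit 7 (0x080).  A PMD
 * that maps a 2 MB page directly might read 0x00000001d42000e7 -- bit 7
 * set -- so pmd_huge() returns 1, while a PMD that points to a page
 * table of 4 KB PTEs has bit 7 clear and pmd_huge() returns 0.
 */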

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
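
/*
 * Alignment arithmetic, worked through for 2 MB huge pages: PAGE_MASK
 * is ~0xfff and huge_page_mask(h) is ~0x1fffff, so align_mask becomes
 * 0x1ff000.  vm_unmapped_area() then only returns addresses whose bits
 * 12-20 are clear, i.e. addresses aligned to the huge page size.
 */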

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
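
/*
 * Minimal userspace sketch of a request that lands here (assumes huge
 * pages have been reserved, e.g. via /proc/sys/vm/nr_hugepages):
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With a NULL hint, mmap() ends up in hugetlb_get_unmapped_area()
 * above, which picks an aligned range bottom-up or top-down depending
 * on the process's mmap layout.
 */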

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif
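
/*
 * Example command line for the option parsed above (assuming a CPU
 * with 1 GB page support, i.e. cpu_has_gbpages):
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= registers an hstate via hugetlb_add_hstate(); the
 * hugepages= that follows it sizes the pool for that page size.
 */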