/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
        int i;
        int npages = (image->size) / PAGE_SIZE;

        BUG_ON(image->size % PAGE_SIZE != 0);
        for (i = 0; i < npages; i++)
                image->text_mapping.pages[i] =
                        virt_to_page(image->data + i*PAGE_SIZE);

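        /*
         * The vDSO image carries its own alternatives section; patch it
         * in place here, just as kernel text is patched at boot.
         */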
        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
        offset = get_random_int() & (PTRS_PER_PTE - 1);
        addr = start + (offset << PAGE_SHIFT);
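        /*
         * With 4K pages and PTRS_PER_PTE == 512 the random offset spans
         * at most ~2MB, so together with the clamp against 'end' below
         * the vdso stays under the PMD boundary just above the stack top.
         */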
        if (addr >= end)
                addr = end;

        /*
         * Page-align it here so that get_unmapped_area doesn't wrongly
         * align it again to the next page.  addr can arrive 4K-unaligned
         * here as a result of stack start randomization.
         */
        addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);

        return addr;
}

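/*
 * Map the vDSO text plus its trailing [vvar] area into the current
 * process: the text mapping covers [addr, addr + image->size) and the
 * remainder up to sym_end_mapping holds the vvar data page and, when
 * present, the HPET page.
 */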
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;
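        /*
         * The [vvar] area has no backing struct pages of its own; the
         * vvar data page and (optionally) the HPET page are remapped
         * into it by pfn below.
         */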
        static struct page *no_pages[] = {NULL};
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .pages = no_pages,
        };

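        /*
         * calculate_addr == true: place the vDSO near the stack, as the
         * 64-bit and x32 callers do; false leaves the choice entirely to
         * get_unmapped_area() (the 32-bit case).
         */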
        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->sym_end_mapping);
        } else {
                addr = 0;
        }

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void __user *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       addr,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &image->text_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr + image->size,
                                       image->sym_end_mapping - image->size,
                                       VM_READ,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

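        /* Expose the kernel's vvar data page read-only inside [vvar]. */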
        if (image->sym_vvar_page)
                ret = remap_pfn_range(vma,
                                      addr + image->sym_vvar_page,
                                      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_READONLY);

        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
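        /*
         * Map the HPET MMIO page uncached and read-only so the vDSO
         * clock code can read the counter directly from userspace.
         */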
        if (hpet_address && image->sym_hpet_page) {
                ret = io_remap_pfn_range(vma,
                        addr + image->sym_hpet_page,
                        hpet_address >> PAGE_SHIFT,
                        PAGE_SIZE,
                        pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
        int ret;

        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        ret = map_vdso(selected_vdso32, false);
        if (ret)
                return ret;

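        /*
         * If this vDSO variant provides a VDSO32_SYSENTER_RETURN symbol,
         * record where the SYSENTER/SYSEXIT path should return to in
         * userspace.
         */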
        if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
                current_thread_info()->sysenter_return =
                        current->mm->context.vdso +
                        selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

        return 0;
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso(&vdso_image_x32, true);
        }
#endif

        return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
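/*
 * "vdso=0" on the kernel command line disables the 64-bit vDSO; any
 * non-zero value leaves it enabled.
 */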
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif