/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
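
/*
 * Updates to the host address space are not issued one page at a time.
 * The page table walks below queue mmap/munmap/mprotect operations in a
 * host_vm_change so that adjacent pages can be merged and flushed to the
 * host in batches.
 */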
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
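		/*
		 * The op payload and the remaining host_vm_change fields are
		 * reconstructed here from their uses later in this file
		 * (op->u.mmap.*, hvc->ops/index/id/data/force); the size of
		 * the ops[] queue is an assumption of this sketch.
		 */
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int index;
	struct mm_id *id;
	void *data;
	int force;
};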

#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } },	\
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
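
/*
 * Flush the queued operations to the host through the skas map(),
 * unmap() and protect() helpers.  "finished" marks the last batch of a
 * flush so the lower layers know no further operations will follow.
 */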
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}
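
/*
 * Queue a host mmap of [virt, virt + len), backed by the host file that
 * phys_mapping() reports for the physical page.  If the new range simply
 * extends the previously queued mmap (same prot and fd, contiguous in
 * both virtual address and file offset), the two are merged; otherwise a
 * new op is queued, flushing the queue first if it is full.
 */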
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}
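
/* Queue a host munmap, merging it with an adjacent preceding munmap. */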
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}
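
/* Queue a host mprotect, merging adjacent ranges with the same protection. */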
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}
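
/*
 * ADD_ROUND(n, inc) is the next inc-aligned address strictly above n
 * (inc must be a power of two); it is used below to skip to the next
 * page table boundary.
 */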
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
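
/*
 * Walk the ptes of one pmd over [addr, end).  The stub pages are left
 * alone.  Host protections are derived from the r/w/x bits, with read and
 * write dropped for not-young ptes and write dropped for not-dirty ptes so
 * that access and dirty faults still occur.  New pages are (un)mapped,
 * protection changes are queued as mprotects, and each pte is marked up
 * to date.
 */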
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, hvc);
			else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
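
/*
 * Walk the pmds under one pud: absent entries flagged as new (or all of
 * them, under force) get their whole range unmapped on the host; present
 * entries recurse into update_pte_range().
 */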
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}
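
/* Same as update_pmd_range(), one level up. */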
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
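
/*
 * Bring the host mappings of [start_addr, end_addr) in mm into line with
 * the page tables, remapping everything when "force" is set.  A failure
 * to update the host address space is fatal for the process.
 */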
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0;

	hvc = INIT_HVC(mm, force);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}
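
/*
 * Kernel-mapping counterpart of fix_range_common(): walk init_mm's page
 * tables and apply the changes directly with os_unmap_memory(),
 * map_memory() and os_protect_memory(), panicking on failure.  Returns
 * nonzero if anything was changed.
 */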
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
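
/*
 * Single-page flush of a user address: recompute the host protection from
 * the pte and issue the map/unmap/protect for just that page, killing the
 * process if the host update fails.
 */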
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
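
/*
 * Trivial, non-static page table accessors for use by other parts of UML
 * that need to walk a process's page tables.
 */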
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
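
/*
 * The generic flush_tlb_* entry points below are thin wrappers around
 * fix_range_common() for user mappings and flush_tlb_kernel_range_common()
 * for kernel mappings.
 */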
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range(mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}
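
/*
 * Re-issue the host mappings for every VMA of the current address space,
 * regardless of whether the page tables look up to date.
 */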
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}