1 /*
2  *  linux/mm/nommu.c
3  *
4  *  Replacement code for mm functions to support CPUs that don't
5  *  have any form of memory management unit (thus no virtual memory).
6  *
7  *  See Documentation/nommu-mmap.txt
8  *
9  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
10  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
13  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
14  */
15
16 #include <linux/module.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/swap.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/pagemap.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/tracehook.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mount.h>
29 #include <linux/personality.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32 #include <linux/audit.h>
33
34 #include <asm/uaccess.h>
35 #include <asm/tlb.h>
36 #include <asm/tlbflush.h>
37 #include <asm/mmu_context.h>
38 #include "internal.h"
39
40 #if 0
41 #define kenter(FMT, ...) \
42         printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
43 #define kleave(FMT, ...) \
44         printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
45 #define kdebug(FMT, ...) \
46         printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
47 #else
48 #define kenter(FMT, ...) \
49         no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
50 #define kleave(FMT, ...) \
51         no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
52 #define kdebug(FMT, ...) \
53         no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
54 #endif
55
56 void *high_memory;
57 struct page *mem_map;
58 unsigned long max_mapnr;
59 unsigned long num_physpages;
60 unsigned long highest_memmap_pfn;
61 struct percpu_counter vm_committed_as;
62 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
63 int sysctl_overcommit_ratio = 50; /* default is 50% */
64 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66 int heap_stack_gap = 0;
67
68 atomic_long_t mmap_pages_allocated;
69
70 EXPORT_SYMBOL(mem_map);
71 EXPORT_SYMBOL(num_physpages);
72
73 /* list of mapped, potentially shareable regions */
74 static struct kmem_cache *vm_region_jar;
75 struct rb_root nommu_region_tree = RB_ROOT;
76 DECLARE_RWSEM(nommu_region_sem);
77
78 const struct vm_operations_struct generic_file_vm_ops = {
79 };
80
81 /*
82  * Return the total memory allocated for this pointer, not
83  * just what the caller asked for.
84  *
85  * Doesn't have to be accurate, i.e. may have races.
86  */
87 unsigned int kobjsize(const void *objp)
88 {
89         struct page *page;
90
91         /*
92          * If the object we have should not have ksize performed on it,
93          * return a size of 0
94          */
95         if (!objp || !virt_addr_valid(objp))
96                 return 0;
97
98         page = virt_to_head_page(objp);
99
100         /*
101          * If the allocator sets PageSlab, we know the pointer came from
102          * kmalloc().
103          */
104         if (PageSlab(page))
105                 return ksize(objp);
106
107         /*
108          * If it's not a compound page, see if we have a matching VMA
109          * region. This test is intentionally done in reverse order,
110          * so if there's no VMA, we still fall through and hand back
111          * PAGE_SIZE for 0-order pages.
112          */
113         if (!PageCompound(page)) {
114                 struct vm_area_struct *vma;
115
116                 vma = find_vma(current->mm, (unsigned long)objp);
117                 if (vma)
118                         return vma->vm_end - vma->vm_start;
119         }
120
121         /*
122          * The ksize() function is only guaranteed to work for pointers
123          * returned by kmalloc(). So handle arbitrary pointers here.
124          */
125         return PAGE_SIZE << compound_order(page);
126 }
127
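/*
 * pin a run of user pages: with no MMU there is nothing to fault in, so we
 * just find the VMA covering each address, check that it grants the access
 * requested, and take a reference on the page backing that address
 */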
128 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
129                      unsigned long start, int nr_pages, unsigned int foll_flags,
130                      struct page **pages, struct vm_area_struct **vmas,
131                      int *retry)
132 {
133         struct vm_area_struct *vma;
134         unsigned long vm_flags;
135         int i;
136
137         /* calculate required read or write permissions.
138          * If FOLL_FORCE is set, we only require the "MAY" flags.
139          */
140         vm_flags  = (foll_flags & FOLL_WRITE) ?
141                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
142         vm_flags &= (foll_flags & FOLL_FORCE) ?
143                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
144
145         for (i = 0; i < nr_pages; i++) {
146                 vma = find_vma(mm, start);
147                 if (!vma)
148                         goto finish_or_fault;
149
150                 /* protect what we can, including chardevs */
151                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
152                     !(vm_flags & vma->vm_flags))
153                         goto finish_or_fault;
154
155                 if (pages) {
156                         pages[i] = virt_to_page(start);
157                         if (pages[i])
158                                 page_cache_get(pages[i]);
159                 }
160                 if (vmas)
161                         vmas[i] = vma;
162                 start = (start + PAGE_SIZE) & PAGE_MASK;
163         }
164
165         return i;
166
167 finish_or_fault:
168         return i ? : -EFAULT;
169 }
170
171 /*
172  * get a list of pages in an address range belonging to the specified process
173  * and indicate the VMA that covers each page
174  * - this is potentially dodgy as we may end up incrementing the page count of a
175  *   slab page or a secondary page from a compound page
176  * - don't permit access to VMAs that don't support it, such as I/O mappings
177  */
178 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
179         unsigned long start, int nr_pages, int write, int force,
180         struct page **pages, struct vm_area_struct **vmas)
181 {
182         int flags = 0;
183
184         if (write)
185                 flags |= FOLL_WRITE;
186         if (force)
187                 flags |= FOLL_FORCE;
188
189         return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
190                                 NULL);
191 }
192 EXPORT_SYMBOL(get_user_pages);
193
194 /**
195  * follow_pfn - look up PFN at a user virtual address
196  * @vma: memory mapping
197  * @address: user virtual address
198  * @pfn: location to store found PFN
199  *
200  * Only IO mappings and raw PFN mappings are allowed.
201  *
202  * Returns zero and the pfn at @pfn on success, -ve otherwise.
203  */
204 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
205         unsigned long *pfn)
206 {
207         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
208                 return -EINVAL;
209
210         *pfn = address >> PAGE_SHIFT;
211         return 0;
212 }
213 EXPORT_SYMBOL(follow_pfn);
214
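/* MMU-style vmalloc bookkeeping, declared for code that expects these symbols;
 * nothing in this file ever adds entries to the list */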
215 DEFINE_RWLOCK(vmlist_lock);
216 struct vm_struct *vmlist;
217
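/* vmalloc() is backed by kmalloc() on NOMMU (see __vmalloc() below), so
 * vfree() reduces to kfree() */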
218 void vfree(const void *addr)
219 {
220         kfree(addr);
221 }
222 EXPORT_SYMBOL(vfree);
223
224 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
225 {
226         /*
227          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
228          * returns only a logical address.
229          */
230         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
231 }
232 EXPORT_SYMBOL(__vmalloc);
233
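/* allocate zeroed memory that may later be mapped to userspace: the covering
 * VMA is marked VM_USERMAP so that remap_vmalloc_range() will accept it */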
234 void *vmalloc_user(unsigned long size)
235 {
236         void *ret;
237
238         ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
239                         PAGE_KERNEL);
240         if (ret) {
241                 struct vm_area_struct *vma;
242
243                 down_write(&current->mm->mmap_sem);
244                 vma = find_vma(current->mm, (unsigned long)ret);
245                 if (vma)
246                         vma->vm_flags |= VM_USERMAP;
247                 up_write(&current->mm->mmap_sem);
248         }
249
250         return ret;
251 }
252 EXPORT_SYMBOL(vmalloc_user);
253
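/* with kmalloc()-backed allocations the ordinary virt-to-page translation applies */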
254 struct page *vmalloc_to_page(const void *addr)
255 {
256         return virt_to_page(addr);
257 }
258 EXPORT_SYMBOL(vmalloc_to_page);
259
260 unsigned long vmalloc_to_pfn(const void *addr)
261 {
262         return page_to_pfn(virt_to_page(addr));
263 }
264 EXPORT_SYMBOL(vmalloc_to_pfn);
265
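/* every kernel address is directly accessible without an MMU, so vread() and
 * vwrite() are plain copies */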
266 long vread(char *buf, char *addr, unsigned long count)
267 {
268         memcpy(buf, addr, count);
269         return count;
270 }
271
272 long vwrite(char *buf, char *addr, unsigned long count)
273 {
274         /* Don't allow overflow */
275         if ((unsigned long) addr + count < count)
276                 count = -(unsigned long) addr;
277
278         memcpy(addr, buf, count);
279         return(count);
280 }
281
282 /*
283  *      vmalloc  -  allocate virtually contiguous memory
284  *
285  *      @size:          allocation size
286  *
287  *      Allocate enough pages to cover @size from the page level
288  *      allocator and map them into contiguous kernel virtual space.
289  *
290  *      For tight control over page level allocator and protection flags
291  *      use __vmalloc() instead.
292  */
293 void *vmalloc(unsigned long size)
294 {
295        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
296 }
297 EXPORT_SYMBOL(vmalloc);
298
299 /*
300  *      vzalloc - allocate virtually contiguous memory with zero fill
301  *
302  *      @size:          allocation size
303  *
304  *      Allocate enough pages to cover @size from the page level
305  *      allocator and map them into contiguous kernel virtual space.
306  *      The memory allocated is set to zero.
307  *
308  *      For tight control over page level allocator and protection flags
309  *      use __vmalloc() instead.
310  */
311 void *vzalloc(unsigned long size)
312 {
313         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
314                         PAGE_KERNEL);
315 }
316 EXPORT_SYMBOL(vzalloc);
317
318 /**
319  * vmalloc_node - allocate memory on a specific node
320  * @size:       allocation size
321  * @node:       numa node
322  *
323  * Allocate enough pages to cover @size from the page level
324  * allocator and map them into contiguous kernel virtual space.
325  *
326  * For tight control over page level allocator and protection flags
327  * use __vmalloc() instead.
328  */
329 void *vmalloc_node(unsigned long size, int node)
330 {
331         return vmalloc(size);
332 }
333 EXPORT_SYMBOL(vmalloc_node);
334
335 /**
336  * vzalloc_node - allocate memory on a specific node with zero fill
337  * @size:       allocation size
338  * @node:       numa node
339  *
340  * Allocate enough pages to cover @size from the page level
341  * allocator and map them into contiguous kernel virtual space.
342  * The memory allocated is set to zero.
343  *
344  * For tight control over page level allocator and protection flags
345  * use __vmalloc() instead.
346  */
347 void *vzalloc_node(unsigned long size, int node)
348 {
349         return vzalloc(size);
350 }
351 EXPORT_SYMBOL(vzalloc_node);
352
353 #ifndef PAGE_KERNEL_EXEC
354 # define PAGE_KERNEL_EXEC PAGE_KERNEL
355 #endif
356
357 /**
358  *      vmalloc_exec  -  allocate virtually contiguous, executable memory
359  *      @size:          allocation size
360  *
361  *      Kernel-internal function to allocate enough pages to cover @size
362  *      from the page level allocator and map them into contiguous and
363  *      executable kernel virtual space.
364  *
365  *      For tight control over page level allocator and protection flags
366  *      use __vmalloc() instead.
367  */
368
369 void *vmalloc_exec(unsigned long size)
370 {
371         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
372 }
373
374 /**
375  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
376  *      @size:          allocation size
377  *
378  *      Allocate enough 32bit PA addressable pages to cover @size from the
379  *      page level allocator and map them into contiguous kernel virtual space.
380  */
381 void *vmalloc_32(unsigned long size)
382 {
383         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
384 }
385 EXPORT_SYMBOL(vmalloc_32);
386
387 /**
388  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
389  *      @size:          allocation size
390  *
391  * The resulting memory area is 32bit addressable and zeroed so it can be
392  * mapped to userspace without leaking data.
393  *
394  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
395  * remap_vmalloc_range() are permissible.
396  */
397 void *vmalloc_32_user(unsigned long size)
398 {
399         /*
400          * We'll have to sort out the ZONE_DMA bits for 64-bit,
401          * but for now this can simply use vmalloc_user() directly.
402          */
403         return vmalloc_user(size);
404 }
405 EXPORT_SYMBOL(vmalloc_32_user);
406
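/* mapping an arbitrary page array into contiguous kernel virtual space cannot
 * be done without an MMU, so these interfaces are BUG()ing stubs */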
407 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
408 {
409         BUG();
410         return NULL;
411 }
412 EXPORT_SYMBOL(vmap);
413
414 void vunmap(const void *addr)
415 {
416         BUG();
417 }
418 EXPORT_SYMBOL(vunmap);
419
420 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
421 {
422         BUG();
423         return NULL;
424 }
425 EXPORT_SYMBOL(vm_map_ram);
426
427 void vm_unmap_ram(const void *mem, unsigned int count)
428 {
429         BUG();
430 }
431 EXPORT_SYMBOL(vm_unmap_ram);
432
433 void vm_unmap_aliases(void)
434 {
435 }
436 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
437
438 /*
439  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
440  * have one.
441  */
442 void  __attribute__((weak)) vmalloc_sync_all(void)
443 {
444 }
445
446 /**
447  *      alloc_vm_area - allocate a range of kernel address space
448  *      @size:          size of the area
449  *
450  *      Returns:        NULL on failure, vm_struct on success
451  *
452  *      This function reserves a range of kernel address space, and
453  *      allocates pagetables to map that range.  No actual mappings
454  *      are created.  If the kernel address space is not shared
455  *      between processes, it syncs the pagetable across all
456  *      processes.
457  */
458 struct vm_struct *alloc_vm_area(size_t size)
459 {
460         BUG();
461         return NULL;
462 }
463 EXPORT_SYMBOL_GPL(alloc_vm_area);
464
465 void free_vm_area(struct vm_struct *area)
466 {
467         BUG();
468 }
469 EXPORT_SYMBOL_GPL(free_vm_area);
470
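/* inserting individual pages into a VMA is meaningless without page tables */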
471 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
472                    struct page *page)
473 {
474         return -EINVAL;
475 }
476 EXPORT_SYMBOL(vm_insert_page);
477
478 /*
479  *  sys_brk() for the most part doesn't need the global kernel
480  *  lock, except when an application is doing something nasty
481  *  like trying to un-brk an area that has already been mapped
482  *  to a regular file.  In this case, the unmapping will need
483  *  to invoke file system routines that need the global lock.
484  */
485 SYSCALL_DEFINE1(brk, unsigned long, brk)
486 {
487         struct mm_struct *mm = current->mm;
488
489         if (brk < mm->start_brk || brk > mm->context.end_brk)
490                 return mm->brk;
491
492         if (mm->brk == brk)
493                 return mm->brk;
494
495         /*
496          * Always allow shrinking brk
497          */
498         if (brk <= mm->brk) {
499                 mm->brk = brk;
500                 return brk;
501         }
502
503         /*
504          * Ok, looks good - let it rip.
505          */
506         flush_icache_range(mm->brk, brk);
507         return mm->brk = brk;
508 }
509
510 /*
511  * initialise the VMA and region record slabs
512  */
513 void __init mmap_init(void)
514 {
515         int ret;
516
517         ret = percpu_counter_init(&vm_committed_as, 0);
518         VM_BUG_ON(ret);
519         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
520 }
521
522 /*
523  * validate the region tree
524  * - the caller must hold the region lock
525  */
526 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
527 static noinline void validate_nommu_regions(void)
528 {
529         struct vm_region *region, *last;
530         struct rb_node *p, *lastp;
531
532         lastp = rb_first(&nommu_region_tree);
533         if (!lastp)
534                 return;
535
536         last = rb_entry(lastp, struct vm_region, vm_rb);
537         BUG_ON(unlikely(last->vm_end <= last->vm_start));
538         BUG_ON(unlikely(last->vm_top < last->vm_end));
539
540         while ((p = rb_next(lastp))) {
541                 region = rb_entry(p, struct vm_region, vm_rb);
542                 last = rb_entry(lastp, struct vm_region, vm_rb);
543
544                 BUG_ON(unlikely(region->vm_end <= region->vm_start));
545                 BUG_ON(unlikely(region->vm_top < region->vm_end));
546                 BUG_ON(unlikely(region->vm_start < last->vm_top));
547
548                 lastp = p;
549         }
550 }
551 #else
552 static void validate_nommu_regions(void)
553 {
554 }
555 #endif
556
557 /*
558  * add a region into the global tree
559  */
560 static void add_nommu_region(struct vm_region *region)
561 {
562         struct vm_region *pregion;
563         struct rb_node **p, *parent;
564
565         validate_nommu_regions();
566
567         parent = NULL;
568         p = &nommu_region_tree.rb_node;
569         while (*p) {
570                 parent = *p;
571                 pregion = rb_entry(parent, struct vm_region, vm_rb);
572                 if (region->vm_start < pregion->vm_start)
573                         p = &(*p)->rb_left;
574                 else if (region->vm_start > pregion->vm_start)
575                         p = &(*p)->rb_right;
576                 else if (pregion == region)
577                         return;
578                 else
579                         BUG();
580         }
581
582         rb_link_node(&region->vm_rb, parent, p);
583         rb_insert_color(&region->vm_rb, &nommu_region_tree);
584
585         validate_nommu_regions();
586 }
587
588 /*
589  * delete a region from the global tree
590  */
591 static void delete_nommu_region(struct vm_region *region)
592 {
593         BUG_ON(!nommu_region_tree.rb_node);
594
595         validate_nommu_regions();
596         rb_erase(&region->vm_rb, &nommu_region_tree);
597         validate_nommu_regions();
598 }
599
600 /*
601  * free a contiguous series of pages
602  */
603 static void free_page_series(unsigned long from, unsigned long to)
604 {
605         for (; from < to; from += PAGE_SIZE) {
606                 struct page *page = virt_to_page(from);
607
608                 kdebug("- free %lx", from);
609                 atomic_long_dec(&mmap_pages_allocated);
610                 if (page_count(page) != 1)
611                         kdebug("free page %p: refcount not one: %d",
612                                page, page_count(page));
613                 put_page(page);
614         }
615 }
616
617 /*
618  * release a reference to a region
619  * - the caller must hold the region semaphore for writing, which this releases
620  * - the region may not have been added to the tree yet, in which case vm_top
621  *   will equal vm_start
622  */
623 static void __put_nommu_region(struct vm_region *region)
624         __releases(nommu_region_sem)
625 {
626         kenter("%p{%d}", region, region->vm_usage);
627
628         BUG_ON(!nommu_region_tree.rb_node);
629
630         if (--region->vm_usage == 0) {
631                 if (region->vm_top > region->vm_start)
632                         delete_nommu_region(region);
633                 up_write(&nommu_region_sem);
634
635                 if (region->vm_file)
636                         fput(region->vm_file);
637
638                 /* IO memory and memory shared directly out of the pagecache
639                  * from ramfs/tmpfs mustn't be released here */
640                 if (region->vm_flags & VM_MAPPED_COPY) {
641                         kdebug("free series");
642                         free_page_series(region->vm_start, region->vm_top);
643                 }
644                 kmem_cache_free(vm_region_jar, region);
645         } else {
646                 up_write(&nommu_region_sem);
647         }
648 }
649
650 /*
651  * release a reference to a region
652  */
653 static void put_nommu_region(struct vm_region *region)
654 {
655         down_write(&nommu_region_sem);
656         __put_nommu_region(region);
657 }
658
659 /*
660  * update protection on a vma
661  */
662 static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
663 {
664 #ifdef CONFIG_MPU
665         struct mm_struct *mm = vma->vm_mm;
666         long start = vma->vm_start & PAGE_MASK;
667         while (start < vma->vm_end) {
668                 protect_page(mm, start, flags);
669                 start += PAGE_SIZE;
670         }
671         update_protections(mm);
672 #endif
673 }
674
675 /*
676  * add a VMA into a process's mm_struct in the appropriate place in the list
677  * and tree, and also add it to the address space's page tree if it is not an
678  * anonymous page
679  * - should be called with mm->mmap_sem held writelocked
680  */
681 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
682 {
683         struct vm_area_struct *pvma, *prev;
684         struct address_space *mapping;
685         struct rb_node **p, *parent, *rb_prev;
686
687         kenter(",%p", vma);
688
689         BUG_ON(!vma->vm_region);
690
691         mm->map_count++;
692         vma->vm_mm = mm;
693
694         protect_vma(vma, vma->vm_flags);
695
696         /* add the VMA to the mapping */
697         if (vma->vm_file) {
698                 mapping = vma->vm_file->f_mapping;
699
700                 mutex_lock(&mapping->i_mmap_mutex);
701                 flush_dcache_mmap_lock(mapping);
702                 vma_prio_tree_insert(vma, &mapping->i_mmap);
703                 flush_dcache_mmap_unlock(mapping);
704                 mutex_unlock(&mapping->i_mmap_mutex);
705         }
706
707         /* add the VMA to the tree */
708         parent = rb_prev = NULL;
709         p = &mm->mm_rb.rb_node;
710         while (*p) {
711                 parent = *p;
712                 pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
713
714                 /* sort by: start addr, end addr, VMA struct addr in that order
715                  * (the latter is necessary as we may get identical VMAs) */
716                 if (vma->vm_start < pvma->vm_start)
717                         p = &(*p)->rb_left;
718                 else if (vma->vm_start > pvma->vm_start) {
719                         rb_prev = parent;
720                         p = &(*p)->rb_right;
721                 } else if (vma->vm_end < pvma->vm_end)
722                         p = &(*p)->rb_left;
723                 else if (vma->vm_end > pvma->vm_end) {
724                         rb_prev = parent;
725                         p = &(*p)->rb_right;
726                 } else if (vma < pvma)
727                         p = &(*p)->rb_left;
728                 else if (vma > pvma) {
729                         rb_prev = parent;
730                         p = &(*p)->rb_right;
731                 } else
732                         BUG();
733         }
734
735         rb_link_node(&vma->vm_rb, parent, p);
736         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
737
738         /* add VMA to the VMA list also */
739         prev = NULL;
740         if (rb_prev)
741                 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
742
743         __vma_link_list(mm, vma, prev, parent);
744 }
745
746 /*
747  * delete a VMA from its owning mm_struct and address space
748  */
749 static void delete_vma_from_mm(struct vm_area_struct *vma)
750 {
751         struct address_space *mapping;
752         struct mm_struct *mm = vma->vm_mm;
753
754         kenter("%p", vma);
755
756         protect_vma(vma, 0);
757
758         mm->map_count--;
759         if (mm->mmap_cache == vma)
760                 mm->mmap_cache = NULL;
761
762         /* remove the VMA from the mapping */
763         if (vma->vm_file) {
764                 mapping = vma->vm_file->f_mapping;
765
766                 mutex_lock(&mapping->i_mmap_mutex);
767                 flush_dcache_mmap_lock(mapping);
768                 vma_prio_tree_remove(vma, &mapping->i_mmap);
769                 flush_dcache_mmap_unlock(mapping);
770                 mutex_unlock(&mapping->i_mmap_mutex);
771         }
772
773         /* remove from the MM's tree and list */
774         rb_erase(&vma->vm_rb, &mm->mm_rb);
775
776         if (vma->vm_prev)
777                 vma->vm_prev->vm_next = vma->vm_next;
778         else
779                 mm->mmap = vma->vm_next;
780
781         if (vma->vm_next)
782                 vma->vm_next->vm_prev = vma->vm_prev;
783
784         vma->vm_mm = NULL;
785 }
786
787 /*
788  * destroy a VMA record
789  */
790 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
791 {
792         kenter("%p", vma);
793         if (vma->vm_ops && vma->vm_ops->close)
794                 vma->vm_ops->close(vma);
795         if (vma->vm_file) {
796                 fput(vma->vm_file);
797                 if (vma->vm_flags & VM_EXECUTABLE)
798                         removed_exe_file_vma(mm);
799         }
800         put_nommu_region(vma->vm_region);
801         kmem_cache_free(vm_area_cachep, vma);
802 }
803
804 /*
805  * look up the first VMA in which addr resides, NULL if none
806  * - should be called with mm->mmap_sem at least held readlocked
807  */
808 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
809 {
810         struct vm_area_struct *vma;
811
812         /* check the cache first */
813         vma = mm->mmap_cache;
814         if (vma && vma->vm_start <= addr && vma->vm_end > addr)
815                 return vma;
816
817         /* trawl the list (there may be multiple mappings in which addr
818          * resides) */
819         for (vma = mm->mmap; vma; vma = vma->vm_next) {
820                 if (vma->vm_start > addr)
821                         return NULL;
822                 if (vma->vm_end > addr) {
823                         mm->mmap_cache = vma;
824                         return vma;
825                 }
826         }
827
828         return NULL;
829 }
830 EXPORT_SYMBOL(find_vma);
831
832 /*
833  * find a VMA
834  * - we don't extend stack VMAs under NOMMU conditions
835  */
836 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
837 {
838         return find_vma(mm, addr);
839 }
840
841 /*
842  * expand a stack to a given address
843  * - not supported under NOMMU conditions
844  */
845 int expand_stack(struct vm_area_struct *vma, unsigned long address)
846 {
847         return -ENOMEM;
848 }
849
850 /*
851  * look up the first VMA that exactly matches addr
852  * - should be called with mm->mmap_sem at least held readlocked
853  */
854 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
855                                              unsigned long addr,
856                                              unsigned long len)
857 {
858         struct vm_area_struct *vma;
859         unsigned long end = addr + len;
860
861         /* check the cache first */
862         vma = mm->mmap_cache;
863         if (vma && vma->vm_start == addr && vma->vm_end == end)
864                 return vma;
865
866         /* trawl the list (there may be multiple mappings in which addr
867          * resides) */
868         for (vma = mm->mmap; vma; vma = vma->vm_next) {
869                 if (vma->vm_start < addr)
870                         continue;
871                 if (vma->vm_start > addr)
872                         return NULL;
873                 if (vma->vm_end == end) {
874                         mm->mmap_cache = vma;
875                         return vma;
876                 }
877         }
878
879         return NULL;
880 }
881
882 /*
883  * determine whether a mapping should be permitted and, if so, what sort of
884  * mapping we're capable of supporting
885  */
886 static int validate_mmap_request(struct file *file,
887                                  unsigned long addr,
888                                  unsigned long len,
889                                  unsigned long prot,
890                                  unsigned long flags,
891                                  unsigned long pgoff,
892                                  unsigned long *_capabilities)
893 {
894         unsigned long capabilities, rlen;
895         unsigned long reqprot = prot;
896         int ret;
897
898         /* do the simple checks first */
899         if (flags & MAP_FIXED) {
900                 printk(KERN_DEBUG
901                        "%d: Can't do fixed-address/overlay mmap of RAM\n",
902                        current->pid);
903                 return -EINVAL;
904         }
905
906         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
907             (flags & MAP_TYPE) != MAP_SHARED)
908                 return -EINVAL;
909
910         if (!len)
911                 return -EINVAL;
912
913         /* Careful about overflows.. */
914         rlen = PAGE_ALIGN(len);
915         if (!rlen || rlen > TASK_SIZE)
916                 return -ENOMEM;
917
918         /* offset overflow? */
919         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
920                 return -EOVERFLOW;
921
922         if (file) {
923                 /* validate file mapping requests */
924                 struct address_space *mapping;
925
926                 /* files must support mmap */
927                 if (!file->f_op || !file->f_op->mmap)
928                         return -ENODEV;
929
930                 /* work out if what we've got could possibly be shared
931                  * - we support chardevs that provide their own "memory"
932                  * - we support files/blockdevs that are memory backed
933                  */
934                 mapping = file->f_mapping;
935                 if (!mapping)
936                         mapping = file->f_path.dentry->d_inode->i_mapping;
937
938                 capabilities = 0;
939                 if (mapping && mapping->backing_dev_info)
940                         capabilities = mapping->backing_dev_info->capabilities;
941
942                 if (!capabilities) {
943                         /* no explicit capabilities set, so assume some
944                          * defaults */
945                         switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
946                         case S_IFREG:
947                         case S_IFBLK:
948                                 capabilities = BDI_CAP_MAP_COPY;
949                                 break;
950
951                         case S_IFCHR:
952                                 capabilities =
953                                         BDI_CAP_MAP_DIRECT |
954                                         BDI_CAP_READ_MAP |
955                                         BDI_CAP_WRITE_MAP;
956                                 break;
957
958                         default:
959                                 return -EINVAL;
960                         }
961                 }
962
963                 /* eliminate any capabilities that we can't support on this
964                  * device */
965                 if (!file->f_op->get_unmapped_area)
966                         capabilities &= ~BDI_CAP_MAP_DIRECT;
967                 if (!file->f_op->read)
968                         capabilities &= ~BDI_CAP_MAP_COPY;
969
970                 /* The file shall have been opened with read permission. */
971                 if (!(file->f_mode & FMODE_READ))
972                         return -EACCES;
973
974                 if (flags & MAP_SHARED) {
975                         /* do checks for writing, appending and locking */
976                         if ((prot & PROT_WRITE) &&
977                             !(file->f_mode & FMODE_WRITE))
978                                 return -EACCES;
979
980                         if (IS_APPEND(file->f_path.dentry->d_inode) &&
981                             (file->f_mode & FMODE_WRITE))
982                                 return -EACCES;
983
984                         if (locks_verify_locked(file->f_path.dentry->d_inode))
985                                 return -EAGAIN;
986
987                         if (!(capabilities & BDI_CAP_MAP_DIRECT))
988                                 return -ENODEV;
989
990                         /* we mustn't privatise shared mappings */
991                         capabilities &= ~BDI_CAP_MAP_COPY;
992                 }
993                 else {
994                         /* we're going to read the file into private memory we
995                          * allocate */
996                         if (!(capabilities & BDI_CAP_MAP_COPY))
997                                 return -ENODEV;
998
999                         /* we don't permit a private writable mapping to be
1000                          * shared with the backing device */
1001                         if (prot & PROT_WRITE)
1002                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
1003                 }
1004
1005                 if (capabilities & BDI_CAP_MAP_DIRECT) {
1006                         if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
1007                             ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
1008                             ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
1009                             ) {
1010                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
1011                                 if (flags & MAP_SHARED) {
1012                                         printk(KERN_WARNING
1013                                                "MAP_SHARED not completely supported on !MMU\n");
1014                                         return -EINVAL;
1015                                 }
1016                         }
1017                 }
1018
1019                 /* handle executable mappings and implied executable
1020                  * mappings */
1021                 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1022                         if (prot & PROT_EXEC)
1023                                 return -EPERM;
1024                 }
1025                 else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1026                         /* handle implication of PROT_EXEC by PROT_READ */
1027                         if (current->personality & READ_IMPLIES_EXEC) {
1028                                 if (capabilities & BDI_CAP_EXEC_MAP)
1029                                         prot |= PROT_EXEC;
1030                         }
1031                 }
1032                 else if ((prot & PROT_READ) &&
1033                          (prot & PROT_EXEC) &&
1034                          !(capabilities & BDI_CAP_EXEC_MAP)
1035                          ) {
1036                         /* backing file is not executable, try to copy */
1037                         capabilities &= ~BDI_CAP_MAP_DIRECT;
1038                 }
1039         }
1040         else {
1041                 /* anonymous mappings are always memory backed and can be
1042                  * privately mapped
1043                  */
1044                 capabilities = BDI_CAP_MAP_COPY;
1045
1046                 /* handle PROT_EXEC implication by PROT_READ */
1047                 if ((prot & PROT_READ) &&
1048                     (current->personality & READ_IMPLIES_EXEC))
1049                         prot |= PROT_EXEC;
1050         }
1051
1052         /* allow the security API to have its say */
1053         ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1054         if (ret < 0)
1055                 return ret;
1056
1057         /* looks okay */
1058         *_capabilities = capabilities;
1059         return 0;
1060 }
1061
1062 /*
1063  * we've determined that we can make the mapping, now translate what we
1064  * now know into VMA flags
1065  */
1066 static unsigned long determine_vm_flags(struct file *file,
1067                                         unsigned long prot,
1068                                         unsigned long flags,
1069                                         unsigned long capabilities)
1070 {
1071         unsigned long vm_flags;
1072
1073         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1074         /* vm_flags |= mm->def_flags; */
1075
1076         if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1077                 /* attempt to share read-only copies of mapped file chunks */
1078                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1079                 if (file && !(prot & PROT_WRITE))
1080                         vm_flags |= VM_MAYSHARE;
1081         } else {
1082                 /* overlay a shareable mapping on the backing device or inode
1083                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1084                  * romfs/cramfs */
1085                 vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1086                 if (flags & MAP_SHARED)
1087                         vm_flags |= VM_SHARED;
1088         }
1089
1090         /* refuse to let anyone share private mappings with this process if
1091          * it's being traced - otherwise breakpoints set in it may interfere
1092          * with another untraced process
1093          */
1094         if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
1095                 vm_flags &= ~VM_MAYSHARE;
1096
1097         return vm_flags;
1098 }
1099
1100 /*
1101  * set up a shared mapping on a file (the driver or filesystem provides and
1102  * pins the storage)
1103  */
1104 static int do_mmap_shared_file(struct vm_area_struct *vma)
1105 {
1106         int ret;
1107
1108         ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1109         if (ret == 0) {
1110                 vma->vm_region->vm_top = vma->vm_region->vm_end;
1111                 return 0;
1112         }
1113         if (ret != -ENOSYS)
1114                 return ret;
1115
1116         /* getting -ENOSYS indicates that direct mmap isn't possible (as
1117          * opposed to tried but failed) so we can only give a suitable error as
1118          * it's not possible to make a private copy if MAP_SHARED was given */
1119         return -ENODEV;
1120 }
1121
1122 /*
1123  * set up a private mapping or an anonymous shared mapping
1124  */
1125 static int do_mmap_private(struct vm_area_struct *vma,
1126                            struct vm_region *region,
1127                            unsigned long len,
1128                            unsigned long capabilities)
1129 {
1130         struct page *pages;
1131         unsigned long total, point, n;
1132         void *base;
1133         int ret, order;
1134
1135         /* invoke the file's mapping function so that it can keep track of
1136          * shared mappings on devices or memory
1137          * - VM_MAYSHARE will be set if it may attempt to share
1138          */
1139         if (capabilities & BDI_CAP_MAP_DIRECT) {
1140                 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1141                 if (ret == 0) {
1142                         /* shouldn't return success if we're not sharing */
1143                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1144                         vma->vm_region->vm_top = vma->vm_region->vm_end;
1145                         return 0;
1146                 }
1147                 if (ret != -ENOSYS)
1148                         return ret;
1149
1150                 /* getting an ENOSYS error indicates that direct mmap isn't
1151                  * possible (as opposed to tried but failed) so we'll try to
1152                  * make a private copy of the data and map that instead */
1153         }
1154
1155
1156         /* allocate some memory to hold the mapping
1157          * - note that this may not return a page-aligned address if the object
1158          *   we're allocating is smaller than a page
1159          */
1160         order = get_order(len);
1161         kdebug("alloc order %d for %lx", order, len);
1162
1163         pages = alloc_pages(GFP_KERNEL, order);
1164         if (!pages)
1165                 goto enomem;
1166
1167         total = 1 << order;
1168         atomic_long_add(total, &mmap_pages_allocated);
1169
1170         point = len >> PAGE_SHIFT;
1171
1172         /* we allocated a power-of-2 sized page set, so we may want to trim off
1173          * the excess */
1174         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1175                 while (total > point) {
1176                         order = ilog2(total - point);
1177                         n = 1 << order;
1178                         kdebug("shave %lu/%lu @%lu", n, total - point, total);
1179                         atomic_long_sub(n, &mmap_pages_allocated);
1180                         total -= n;
1181                         set_page_refcounted(pages + total);
1182                         __free_pages(pages + total, order);
1183                 }
1184         }
1185
1186         for (point = 1; point < total; point++)
1187                 set_page_refcounted(&pages[point]);
1188
1189         base = page_address(pages);
1190         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1191         region->vm_start = (unsigned long) base;
1192         region->vm_end   = region->vm_start + len;
1193         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1194
1195         vma->vm_start = region->vm_start;
1196         vma->vm_end   = region->vm_start + len;
1197
1198         if (vma->vm_file) {
1199                 /* read the contents of a file into the copy */
1200                 mm_segment_t old_fs;
1201                 loff_t fpos;
1202
1203                 fpos = vma->vm_pgoff;
1204                 fpos <<= PAGE_SHIFT;
1205
1206                 old_fs = get_fs();
1207                 set_fs(KERNEL_DS);
1208                 ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1209                 set_fs(old_fs);
1210
1211                 if (ret < 0)
1212                         goto error_free;
1213
1214                 /* clear the last little bit */
1215                 if (ret < len)
1216                         memset(base + ret, 0, len - ret);
1217
1218         }
1219
1220         return 0;
1221
1222 error_free:
1223         free_page_series(region->vm_start, region->vm_top);
1224         region->vm_start = vma->vm_start = 0;
1225         region->vm_end   = vma->vm_end = 0;
1226         region->vm_top   = 0;
1227         return ret;
1228
1229 enomem:
1230         printk("Allocation of length %lu from process %d (%s) failed\n",
1231                len, current->pid, current->comm);
1232         show_free_areas(0);
1233         return -ENOMEM;
1234 }
1235
1236 /*
1237  * handle mapping creation for uClinux
1238  */
1239 unsigned long do_mmap_pgoff(struct file *file,
1240                             unsigned long addr,
1241                             unsigned long len,
1242                             unsigned long prot,
1243                             unsigned long flags,
1244                             unsigned long pgoff)
1245 {
1246         struct vm_area_struct *vma;
1247         struct vm_region *region;
1248         struct rb_node *rb;
1249         unsigned long capabilities, vm_flags, result;
1250         int ret;
1251
1252         kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
1253
1254         /* decide whether we should attempt the mapping, and if so what sort of
1255          * mapping */
1256         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1257                                     &capabilities);
1258         if (ret < 0) {
1259                 kleave(" = %d [val]", ret);
1260                 return ret;
1261         }
1262
1263         /* we ignore the address hint */
1264         addr = 0;
1265         len = PAGE_ALIGN(len);
1266
1267         /* we've determined that we can make the mapping, now translate what we
1268          * now know into VMA flags */
1269         vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1270
1271         /* we're going to need to record the mapping */
1272         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1273         if (!region)
1274                 goto error_getting_region;
1275
1276         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1277         if (!vma)
1278                 goto error_getting_vma;
1279
1280         region->vm_usage = 1;
1281         region->vm_flags = vm_flags;
1282         region->vm_pgoff = pgoff;
1283
1284         INIT_LIST_HEAD(&vma->anon_vma_chain);
1285         vma->vm_flags = vm_flags;
1286         vma->vm_pgoff = pgoff;
1287
1288         if (file) {
1289                 region->vm_file = file;
1290                 get_file(file);
1291                 vma->vm_file = file;
1292                 get_file(file);
1293                 if (vm_flags & VM_EXECUTABLE) {
1294                         added_exe_file_vma(current->mm);
1295                         vma->vm_mm = current->mm;
1296                 }
1297         }
1298
1299         down_write(&nommu_region_sem);
1300
1301         /* if we want to share, we need to check for regions created by other
1302          * mmap() calls that overlap with our proposed mapping
1303          * - we can only share with a superset match on most regular files
1304          * - shared mappings on character devices and memory backed files are
1305          *   permitted to overlap inexactly as far as we are concerned for in
1306          *   these cases, sharing is handled in the driver or filesystem rather
1307          *   than here
1308          */
1309         if (vm_flags & VM_MAYSHARE) {
1310                 struct vm_region *pregion;
1311                 unsigned long pglen, rpglen, pgend, rpgend, start;
1312
1313                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1314                 pgend = pgoff + pglen;
1315
1316                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1317                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1318
1319                         if (!(pregion->vm_flags & VM_MAYSHARE))
1320                                 continue;
1321
1322                         /* search for overlapping mappings on the same file */
1323                         if (pregion->vm_file->f_path.dentry->d_inode !=
1324                             file->f_path.dentry->d_inode)
1325                                 continue;
1326
1327                         if (pregion->vm_pgoff >= pgend)
1328                                 continue;
1329
1330                         rpglen = pregion->vm_end - pregion->vm_start;
1331                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1332                         rpgend = pregion->vm_pgoff + rpglen;
1333                         if (pgoff >= rpgend)
1334                                 continue;
1335
1336                         /* handle inexactly overlapping matches between
1337                          * mappings */
1338                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1339                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1340                                 /* new mapping is not a subset of the region */
1341                                 if (!(capabilities & BDI_CAP_MAP_DIRECT))
1342                                         goto sharing_violation;
1343                                 continue;
1344                         }
1345
1346                         /* we've found a region we can share */
1347                         pregion->vm_usage++;
1348                         vma->vm_region = pregion;
1349                         start = pregion->vm_start;
1350                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1351                         vma->vm_start = start;
1352                         vma->vm_end = start + len;
1353
1354                         if (pregion->vm_flags & VM_MAPPED_COPY) {
1355                                 kdebug("share copy");
1356                                 vma->vm_flags |= VM_MAPPED_COPY;
1357                         } else {
1358                                 kdebug("share mmap");
1359                                 ret = do_mmap_shared_file(vma);
1360                                 if (ret < 0) {
1361                                         vma->vm_region = NULL;
1362                                         vma->vm_start = 0;
1363                                         vma->vm_end = 0;
1364                                         pregion->vm_usage--;
1365                                         pregion = NULL;
1366                                         goto error_just_free;
1367                                 }
1368                         }
1369                         fput(region->vm_file);
1370                         kmem_cache_free(vm_region_jar, region);
1371                         region = pregion;
1372                         result = start;
1373                         goto share;
1374                 }
1375
1376                 /* obtain the address at which to make a shared mapping
1377                  * - this is the hook for quasi-memory character devices to
1378                  *   tell us the location of a shared mapping
1379                  */
1380                 if (capabilities & BDI_CAP_MAP_DIRECT) {
1381                         addr = file->f_op->get_unmapped_area(file, addr, len,
1382                                                              pgoff, flags);
1383                         if (IS_ERR_VALUE(addr)) {
1384                                 ret = addr;
1385                                 if (ret != -ENOSYS)
1386                                         goto error_just_free;
1387
1388                                 /* the driver refused to tell us where to site
1389                                  * the mapping so we'll have to attempt to copy
1390                                  * it */
1391                                 ret = -ENODEV;
1392                                 if (!(capabilities & BDI_CAP_MAP_COPY))
1393                                         goto error_just_free;
1394
1395                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
1396                         } else {
1397                                 vma->vm_start = region->vm_start = addr;
1398                                 vma->vm_end = region->vm_end = addr + len;
1399                         }
1400                 }
1401         }
1402
1403         vma->vm_region = region;
1404
1405         /* set up the mapping
1406          * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
1407          */
1408         if (file && vma->vm_flags & VM_SHARED)
1409                 ret = do_mmap_shared_file(vma);
1410         else
1411                 ret = do_mmap_private(vma, region, len, capabilities);
1412         if (ret < 0)
1413                 goto error_just_free;
1414         add_nommu_region(region);
1415
1416         /* clear anonymous mappings that don't ask for uninitialized data */
1417         if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1418                 memset((void *)region->vm_start, 0,
1419                        region->vm_end - region->vm_start);
1420
1421         /* okay... we have a mapping; now we have to register it */
1422         result = vma->vm_start;
1423
1424         current->mm->total_vm += len >> PAGE_SHIFT;
1425
1426 share:
1427         add_vma_to_mm(current->mm, vma);
1428
1429         /* we flush the region from the icache only when the first executable
1430          * mapping of it is made  */
1431         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1432                 flush_icache_range(region->vm_start, region->vm_end);
1433                 region->vm_icache_flushed = true;
1434         }
1435
1436         up_write(&nommu_region_sem);
1437
1438         kleave(" = %lx", result);
1439         return result;
1440
1441 error_just_free:
1442         up_write(&nommu_region_sem);
1443 error:
1444         if (region->vm_file)
1445                 fput(region->vm_file);
1446         kmem_cache_free(vm_region_jar, region);
1447         if (vma->vm_file)
1448                 fput(vma->vm_file);
1449         if (vma->vm_flags & VM_EXECUTABLE)
1450                 removed_exe_file_vma(vma->vm_mm);
1451         kmem_cache_free(vm_area_cachep, vma);
1452         kleave(" = %d", ret);
1453         return ret;
1454
1455 sharing_violation:
1456         up_write(&nommu_region_sem);
1457         printk(KERN_WARNING "Attempt to share mismatched mappings\n");
1458         ret = -EINVAL;
1459         goto error;
1460
1461 error_getting_vma:
1462         kmem_cache_free(vm_region_jar, region);
1463         printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1464                " from process %d failed\n",
1465                len, current->pid);
1466         show_free_areas(0);
1467         return -ENOMEM;
1468
1469 error_getting_region:
1470         printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1471                " from process %d failed\n",
1472                len, current->pid);
1473         show_free_areas(0);
1474         return -ENOMEM;
1475 }
1476 EXPORT_SYMBOL(do_mmap_pgoff);
1477
1478 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1479                 unsigned long, prot, unsigned long, flags,
1480                 unsigned long, fd, unsigned long, pgoff)
1481 {
1482         struct file *file = NULL;
1483         unsigned long retval = -EBADF;
1484
1485         audit_mmap_fd(fd, flags);
1486         if (!(flags & MAP_ANONYMOUS)) {
1487                 file = fget(fd);
1488                 if (!file)
1489                         goto out;
1490         }
1491
1492         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1493
1494         down_write(&current->mm->mmap_sem);
1495         retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1496         up_write(&current->mm->mmap_sem);
1497
1498         if (file)
1499                 fput(file);
1500 out:
1501         return retval;
1502 }
1503
1504 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1505 struct mmap_arg_struct {
1506         unsigned long addr;
1507         unsigned long len;
1508         unsigned long prot;
1509         unsigned long flags;
1510         unsigned long fd;
1511         unsigned long offset;
1512 };
1513
1514 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1515 {
1516         struct mmap_arg_struct a;
1517
1518         if (copy_from_user(&a, arg, sizeof(a)))
1519                 return -EFAULT;
1520         if (a.offset & ~PAGE_MASK)
1521                 return -EINVAL;
1522
1523         return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1524                               a.offset >> PAGE_SHIFT);
1525 }
1526 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1527
1528 /*
1529  * split a vma into two pieces at address 'addr'; a new vma is allocated either
1530  * for the first part or the tail.
1531  */
1532 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1533               unsigned long addr, int new_below)
1534 {
1535         struct vm_area_struct *new;
1536         struct vm_region *region;
1537         unsigned long npages;
1538
1539         kenter("");
1540
1541         /* we're only permitted to split anonymous regions (these should have
1542          * only a single usage on the region) */
1543         if (vma->vm_file)
1544                 return -ENOMEM;
1545
1546         if (mm->map_count >= sysctl_max_map_count)
1547                 return -ENOMEM;
1548
1549         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1550         if (!region)
1551                 return -ENOMEM;
1552
1553         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1554         if (!new) {
1555                 kmem_cache_free(vm_region_jar, region);
1556                 return -ENOMEM;
1557         }
1558
1559         /* most fields are the same, copy all, and then fixup */
1560         *new = *vma;
1561         *region = *vma->vm_region;
1562         new->vm_region = region;
1563
1564         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1565
1566         if (new_below) {
1567                 region->vm_top = region->vm_end = new->vm_end = addr;
1568         } else {
1569                 region->vm_start = new->vm_start = addr;
1570                 region->vm_pgoff = new->vm_pgoff += npages;
1571         }
1572
1573         if (new->vm_ops && new->vm_ops->open)
1574                 new->vm_ops->open(new);
1575
1576         delete_vma_from_mm(vma);
1577         down_write(&nommu_region_sem);
1578         delete_nommu_region(vma->vm_region);
1579         if (new_below) {
1580                 vma->vm_region->vm_start = vma->vm_start = addr;
1581                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1582         } else {
1583                 vma->vm_region->vm_end = vma->vm_end = addr;
1584                 vma->vm_region->vm_top = addr;
1585         }
1586         add_nommu_region(vma->vm_region);
1587         add_nommu_region(new->vm_region);
1588         up_write(&nommu_region_sem);
1589         add_vma_to_mm(mm, vma);
1590         add_vma_to_mm(mm, new);
1591         return 0;
1592 }
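/*
 * Editorial sketch, not part of the original file: what split_vma() above
 * leaves behind when asked to split below 'addr'.  'example_split_semantics'
 * is a made-up name; the VMA is assumed to be anonymous with [vm_start,
 * vm_end) straddling 'addr', and the caller is assumed to hold mm->mmap_sem.
 */
static void example_split_semantics(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    unsigned long addr)
{
        unsigned long old_end = vma->vm_end;

        /* new_below == 1: a freshly allocated VMA takes the lower part,
         * [old vm_start, addr), while 'vma' itself is trimmed so that it
         * now starts at 'addr' and keeps its original end */
        if (split_vma(mm, vma, addr, 1) == 0) {
                BUG_ON(vma->vm_start != addr);
                BUG_ON(vma->vm_end != old_end);
        }
}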
1593
1594 /*
1595  * shrink a VMA by removing the specified chunk from either the beginning or
1596  * the end
1597  */
1598 static int shrink_vma(struct mm_struct *mm,
1599                       struct vm_area_struct *vma,
1600                       unsigned long from, unsigned long to)
1601 {
1602         struct vm_region *region;
1603
1604         kenter("");
1605
1606         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1607          * and list */
1608         delete_vma_from_mm(vma);
1609         if (from > vma->vm_start)
1610                 vma->vm_end = from;
1611         else
1612                 vma->vm_start = to;
1613         add_vma_to_mm(mm, vma);
1614
1615         /* cut the backing region down to size */
1616         region = vma->vm_region;
1617         BUG_ON(region->vm_usage != 1);
1618
1619         down_write(&nommu_region_sem);
1620         delete_nommu_region(region);
1621         if (from > region->vm_start) {
1622                 to = region->vm_top;
1623                 region->vm_top = region->vm_end = from;
1624         } else {
1625                 region->vm_start = to;
1626         }
1627         add_nommu_region(region);
1628         up_write(&nommu_region_sem);
1629
1630         free_page_series(from, to);
1631         return 0;
1632 }
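/*
 * Editorial sketch, not part of the original file: how split_vma() and
 * shrink_vma() above combine when do_munmap() below removes the middle of
 * an anonymous VMA.  Assumes the caller holds mm->mmap_sem for writing and
 * that [start, end) lies strictly inside 'vma'; the name is hypothetical.
 */
static int example_punch_hole(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /* split off the head so that 'vma' itself begins at 'start'... */
        int ret = split_vma(mm, vma, start, 1);

        if (ret < 0)
                return ret;

        /* ...then chop [start, end) off the front of what remains */
        return shrink_vma(mm, vma, start, end);
}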
1633
1634 /*
1635  * release a mapping
1636  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1637  *   VMA, though it need not cover the whole VMA
1638  */
1639 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1640 {
1641         struct vm_area_struct *vma;
1642         unsigned long end;
1643         int ret;
1644
1645         kenter(",%lx,%zx", start, len);
1646
1647         len = PAGE_ALIGN(len);
1648         if (len == 0)
1649                 return -EINVAL;
1650
1651         end = start + len;
1652
1653         /* find the first potentially overlapping VMA */
1654         vma = find_vma(mm, start);
1655         if (!vma) {
1656                 static int limit = 0;
1657                 if (limit < 5) {
1658                         printk(KERN_WARNING
1659                                "munmap of memory not mmapped by process %d"
1660                                " (%s): 0x%lx-0x%lx\n",
1661                                current->pid, current->comm,
1662                                start, start + len - 1);
1663                         limit++;
1664                 }
1665                 return -EINVAL;
1666         }
1667
1668         /* we're allowed to split an anonymous VMA but not a file-backed one */
1669         if (vma->vm_file) {
1670                 do {
1671                         if (start > vma->vm_start) {
1672                                 kleave(" = -EINVAL [miss]");
1673                                 return -EINVAL;
1674                         }
1675                         if (end == vma->vm_end)
1676                                 goto erase_whole_vma;
1677                         vma = vma->vm_next;
1678                 } while (vma);
1679                 kleave(" = -EINVAL [split file]");
1680                 return -EINVAL;
1681         } else {
1682                 /* the chunk must be a subset of the VMA found */
1683                 if (start == vma->vm_start && end == vma->vm_end)
1684                         goto erase_whole_vma;
1685                 if (start < vma->vm_start || end > vma->vm_end) {
1686                         kleave(" = -EINVAL [superset]");
1687                         return -EINVAL;
1688                 }
1689                 if (start & ~PAGE_MASK) {
1690                         kleave(" = -EINVAL [unaligned start]");
1691                         return -EINVAL;
1692                 }
1693                 if (end != vma->vm_end && end & ~PAGE_MASK) {
1694                         kleave(" = -EINVAL [unaligned split]");
1695                         return -EINVAL;
1696                 }
1697                 if (start != vma->vm_start && end != vma->vm_end) {
1698                         ret = split_vma(mm, vma, start, 1);
1699                         if (ret < 0) {
1700                                 kleave(" = %d [split]", ret);
1701                                 return ret;
1702                         }
1703                 }
1704                 return shrink_vma(mm, vma, start, end);
1705         }
1706
1707 erase_whole_vma:
1708         delete_vma_from_mm(vma);
1709         delete_vma(mm, vma);
1710         kleave(" = 0");
1711         return 0;
1712 }
1713 EXPORT_SYMBOL(do_munmap);
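/*
 * Editorial sketch, not part of the original file: what the rule above means
 * for userspace on a NOMMU target.  Unmapping a strict sub-range of a
 * file-backed mapping is rejected; such a mapping has to be released in one
 * piece.  Hypothetical standalone example, error handling abbreviated.
 */
#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>

int example_munmap_rules(int fd)
{
        long page = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, 4 * page, PROT_READ, MAP_PRIVATE, fd, 0);

        if (p == MAP_FAILED)
                return -1;

        /* expected to fail with EINVAL: it would split a file-backed VMA */
        if (munmap((char *)p + page, page) == 0)
                return -1;

        /* releasing the whole mapping succeeds */
        return munmap(p, 4 * page);
}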
1714
1715 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1716 {
1717         int ret;
1718         struct mm_struct *mm = current->mm;
1719
1720         down_write(&mm->mmap_sem);
1721         ret = do_munmap(mm, addr, len);
1722         up_write(&mm->mmap_sem);
1723         return ret;
1724 }
1725
1726 /*
1727  * release all the mappings made in a process's VM space
1728  */
1729 void exit_mmap(struct mm_struct *mm)
1730 {
1731         struct vm_area_struct *vma;
1732
1733         if (!mm)
1734                 return;
1735
1736         kenter("");
1737
1738         mm->total_vm = 0;
1739
1740         while ((vma = mm->mmap)) {
1741                 mm->mmap = vma->vm_next;
1742                 delete_vma_from_mm(vma);
1743                 delete_vma(mm, vma);
1744                 cond_resched();
1745         }
1746
1747         kleave("");
1748 }
1749
1750 unsigned long do_brk(unsigned long addr, unsigned long len)
1751 {
1752         return -ENOMEM;
1753 }
1754
1755 /*
1756  * expand (or shrink) an existing mapping, potentially moving it at the same
1757  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1758  *
1759  * under NOMMU conditions, we only permit changing a mapping's size, and only
1760  * as long as it stays within the region allocated by do_mmap_private() and the
1761  * block is not shareable
1762  *
1763  * MREMAP_FIXED is not supported under NOMMU conditions
1764  */
1765 unsigned long do_mremap(unsigned long addr,
1766                         unsigned long old_len, unsigned long new_len,
1767                         unsigned long flags, unsigned long new_addr)
1768 {
1769         struct vm_area_struct *vma;
1770
1771         /* insanity checks first */
1772         old_len = PAGE_ALIGN(old_len);
1773         new_len = PAGE_ALIGN(new_len);
1774         if (old_len == 0 || new_len == 0)
1775                 return (unsigned long) -EINVAL;
1776
1777         if (addr & ~PAGE_MASK)
1778                 return -EINVAL;
1779
1780         if (flags & MREMAP_FIXED && new_addr != addr)
1781                 return (unsigned long) -EINVAL;
1782
1783         vma = find_vma_exact(current->mm, addr, old_len);
1784         if (!vma)
1785                 return (unsigned long) -EINVAL;
1786
1787         if (vma->vm_end != vma->vm_start + old_len)
1788                 return (unsigned long) -EFAULT;
1789
1790         if (vma->vm_flags & VM_MAYSHARE)
1791                 return (unsigned long) -EPERM;
1792
1793         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1794                 return (unsigned long) -ENOMEM;
1795
1796         /* all checks complete - do it */
1797         vma->vm_end = vma->vm_start + new_len;
1798         return vma->vm_start;
1799 }
1800 EXPORT_SYMBOL(do_mremap);
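/*
 * Editorial sketch, not part of the original file: what the restrictions
 * above mean for userspace.  mremap() on NOMMU can only resize a private
 * mapping in place, never move it, and never grow it beyond the region
 * allocated at mmap() time.  Hypothetical standalone example.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

void *example_resize_in_place(void *p, size_t old_len, size_t new_len)
{
        /* no MREMAP_MAYMOVE, no MREMAP_FIXED: the block stays put, so this
         * returns 'p' on success or MAP_FAILED on failure */
        return mremap(p, old_len, new_len, 0);
}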
1801
1802 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1803                 unsigned long, new_len, unsigned long, flags,
1804                 unsigned long, new_addr)
1805 {
1806         unsigned long ret;
1807
1808         down_write(&current->mm->mmap_sem);
1809         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1810         up_write(&current->mm->mmap_sem);
1811         return ret;
1812 }
1813
1814 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1815                         unsigned int foll_flags)
1816 {
1817         return NULL;
1818 }
1819
1820 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1821                 unsigned long pfn, unsigned long size, pgprot_t prot)
1822 {
1823         if (addr != (pfn << PAGE_SHIFT))
1824                 return -EINVAL;
1825
1826         vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1827         return 0;
1828 }
1829 EXPORT_SYMBOL(remap_pfn_range);
1830
1831 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1832                         unsigned long pgoff)
1833 {
1834         unsigned int size = vma->vm_end - vma->vm_start;
1835
1836         if (!(vma->vm_flags & VM_USERMAP))
1837                 return -EINVAL;
1838
1839         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1840         vma->vm_end = vma->vm_start + size;
1841
1842         return 0;
1843 }
1844 EXPORT_SYMBOL(remap_vmalloc_range);
1845
1846 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1847         unsigned long len, unsigned long pgoff, unsigned long flags)
1848 {
1849         return -ENOMEM;
1850 }
1851
1852 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1853 {
1854 }
1855
1856 void unmap_mapping_range(struct address_space *mapping,
1857                          loff_t const holebegin, loff_t const holelen,
1858                          int even_cows)
1859 {
1860 }
1861 EXPORT_SYMBOL(unmap_mapping_range);
1862
1863 /*
1864  * Check that a process has enough memory to allocate a new virtual
1865  * mapping. 0 means there is enough memory for the allocation to
1866  * succeed and -ENOMEM implies there is not.
1867  *
1868  * We currently support three overcommit policies, which are set via the
1869  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1870  *
1871  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1872  * Additional code 2002 Jul 20 by Robert Love.
1873  *
1874  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1875  *
1876  * Note this is a helper function intended to be used by LSMs which
1877  * wish to use this logic.
1878  */
1879 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1880 {
1881         unsigned long free, allowed;
1882
1883         vm_acct_memory(pages);
1884
1885         /*
1886          * Sometimes we want to use more memory than we have
1887          */
1888         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1889                 return 0;
1890
1891         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1892                 unsigned long n;
1893
1894                 free = global_page_state(NR_FILE_PAGES);
1895                 free += nr_swap_pages;
1896
1897                 /*
1898                  * Any slabs which are created with the
1899                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1900                  * which are reclaimable, under pressure.  The dentry
1901                  * cache and most inode caches should fall into this category.
1902                  */
1903                 free += global_page_state(NR_SLAB_RECLAIMABLE);
1904
1905                 /*
1906                  * Leave the last 3% for root
1907                  */
1908                 if (!cap_sys_admin)
1909                         free -= free / 32;
1910
1911                 if (free > pages)
1912                         return 0;
1913
1914                 /*
1915                  * nr_free_pages() is very expensive on large systems,
1916                  * so only call it if we're about to fail.
1917                  */
1918                 n = nr_free_pages();
1919
1920                 /*
1921                  * Leave out reserved pages: they are not available for anonymous use.
1922                  */
1923                 if (n <= totalreserve_pages)
1924                         goto error;
1925                 else
1926                         n -= totalreserve_pages;
1927
1928                 /*
1929                  * Leave the last 3% for root
1930                  */
1931                 if (!cap_sys_admin)
1932                         n -= n / 32;
1933                 free += n;
1934
1935                 if (free > pages)
1936                         return 0;
1937
1938                 goto error;
1939         }
1940
1941         allowed = totalram_pages * sysctl_overcommit_ratio / 100;
1942         /*
1943          * Leave the last 3% for root
1944          */
1945         if (!cap_sys_admin)
1946                 allowed -= allowed / 32;
1947         allowed += total_swap_pages;
1948
1949         /* Don't let a single process grow too big:
1950            leave 3% of the size of this process for other processes */
1951         if (mm)
1952                 allowed -= mm->total_vm / 32;
1953
1954         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1955                 return 0;
1956
1957 error:
1958         vm_unacct_memory(pages);
1959
1960         return -ENOMEM;
1961 }
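/*
 * Editorial sketch, not part of the original file: the OVERCOMMIT_NEVER
 * ceiling computed above, pulled out as a standalone helper with made-up
 * inputs.  For example, 16384 pages of RAM, no swap, a 50% ratio and a
 * non-root task with total_vm of 2048 pages gives 16384 * 50 / 100 = 8192,
 * minus 8192 / 32 = 256 kept back for root, minus 2048 / 32 = 64 per-process
 * headroom: 7872 committable pages.
 */
static unsigned long example_never_limit(unsigned long ram_pages,
                                         unsigned long swap_pages,
                                         unsigned long ratio,
                                         unsigned long task_vm_pages,
                                         int cap_sys_admin)
{
        unsigned long allowed = ram_pages * ratio / 100;

        if (!cap_sys_admin)
                allowed -= allowed / 32;        /* keep 3% back for root */
        allowed += swap_pages;
        allowed -= task_vm_pages / 32;          /* 3% headroom per process */
        return allowed;
}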
1962
1963 int in_gate_area_no_mm(unsigned long addr)
1964 {
1965         return 0;
1966 }
1967
1968 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1969 {
1970         BUG();
1971         return 0;
1972 }
1973 EXPORT_SYMBOL(filemap_fault);
1974
1975 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1976                 unsigned long addr, void *buf, int len, int write)
1977 {
1978         struct vm_area_struct *vma;
1979
1980         down_read(&mm->mmap_sem);
1981
1982         /* the access must start within one of the target process's mappings */
1983         vma = find_vma(mm, addr);
1984         if (vma) {
1985                 /* don't overrun this mapping */
1986                 if (addr + len >= vma->vm_end)
1987                         len = vma->vm_end - addr;
1988
1989                 /* only read or write mappings where it is permitted */
1990                 if (write && vma->vm_flags & VM_MAYWRITE)
1991                         copy_to_user_page(vma, NULL, addr,
1992                                          (void *) addr, buf, len);
1993                 else if (!write && vma->vm_flags & VM_MAYREAD)
1994                         copy_from_user_page(vma, NULL, addr,
1995                                             buf, (void *) addr, len);
1996                 else
1997                         len = 0;
1998         } else {
1999                 len = 0;
2000         }
2001
2002         up_read(&mm->mmap_sem);
2003
2004         return len;
2005 }
2006
2007 /**
2008  * access_remote_vm - access another process' address space
2009  * @mm:         the mm_struct of the target address space
2010  * @addr:       start address to access
2011  * @buf:        source or destination buffer
2012  * @len:        number of bytes to transfer
2013  * @write:      whether the access is a write
2014  *
2015  * The caller must hold a reference on @mm.
2016  */
2017 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2018                 void *buf, int len, int write)
2019 {
2020         return __access_remote_vm(NULL, mm, addr, buf, len, write);
2021 }
2022
2023 /*
2024  * Access another process' address space.
2025  * - source/target buffer must be kernel space
2026  */
2027 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2028 {
2029         struct mm_struct *mm;
2030
2031         if (addr + len < addr)
2032                 return 0;
2033
2034         mm = get_task_mm(tsk);
2035         if (!mm)
2036                 return 0;
2037
2038         len = __access_remote_vm(tsk, mm, addr, buf, len, write);
2039
2040         mmput(mm);
2041         return len;
2042 }
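/*
 * Editorial sketch, not part of the original file: a typical in-kernel
 * caller, reading a few bytes out of another task's address space much as
 * the ptrace core does.  'tsk' is assumed to be a valid task_struct pinned
 * by the caller; the function name is hypothetical.
 */
static int example_peek_remote(struct task_struct *tsk, unsigned long addr,
                               unsigned long *value)
{
        int copied = access_process_vm(tsk, addr, value, sizeof(*value), 0);

        return copied == (int)sizeof(*value) ? 0 : -EIO;
}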
2043
2044 /**
2045  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
2046  * @inode: The inode to check
2047  * @size: The current filesize of the inode
2048  * @newsize: The proposed filesize of the inode
2049  *
2050  * Check the shared mappings on an inode on behalf of a shrinking truncate to
2051  * make sure that any outstanding VMAs aren't broken, and then shrink any
2052  * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
2053  * automatically grant mappings that are too large.
2054  */
2055 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2056                                 size_t newsize)
2057 {
2058         struct vm_area_struct *vma;
2059         struct prio_tree_iter iter;
2060         struct vm_region *region;
2061         pgoff_t low, high;
2062         size_t r_size, r_top;
2063
2064         low = newsize >> PAGE_SHIFT;
2065         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2066
2067         down_write(&nommu_region_sem);
2068         mutex_lock(&inode->i_mapping->i_mmap_mutex);
2069
2070         /* search for VMAs that fall within the dead zone */
2071         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
2072                               low, high) {
2073                 /* found one - only interested if it's shared out of the page
2074                  * cache */
2075                 if (vma->vm_flags & VM_SHARED) {
2076                         mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2077                         up_write(&nommu_region_sem);
2078                         return -ETXTBSY; /* not quite true, but near enough */
2079                 }
2080         }
2081
2082         /* reduce any regions that overlap the dead zone - if in existence,
2083          * these will be pointed to by VMAs that don't overlap the dead zone
2084          *
2085          * we don't check for any regions that start beyond the EOF as there
2086          * shouldn't be any
2087          */
2088         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
2089                               0, ULONG_MAX) {
2090                 if (!(vma->vm_flags & VM_SHARED))
2091                         continue;
2092
2093                 region = vma->vm_region;
2094                 r_size = region->vm_top - region->vm_start;
2095                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2096
2097                 if (r_top > newsize) {
2098                         region->vm_top -= r_top - newsize;
2099                         if (region->vm_end > region->vm_top)
2100                                 region->vm_end = region->vm_top;
2101                 }
2102         }
2103
2104         mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2105         up_write(&nommu_region_sem);
2106         return 0;
2107 }
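/*
 * Editorial sketch, not part of the original file: how a NOMMU-aware
 * filesystem's shrinking-truncate path might use the helper above, refusing
 * the shrink while a shared mapping still covers the doomed range.  The
 * function name and calling convention are assumptions, not an existing API.
 */
static int example_fs_shrink(struct inode *inode, loff_t newsize)
{
        loff_t size = i_size_read(inode);
        int ret = 0;

        if (newsize < size)
                ret = nommu_shrink_inode_mappings(inode, size, newsize);
        if (ret == 0)
                truncate_setsize(inode, newsize);
        return ret;
}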