2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
29 #include <asm/pgtable.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
38 int hugepages_treat_as_movable;
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
44 * Minimum page order among possible hugepage sizes, set to a proper value at boot time.
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
49 __initdata LIST_HEAD(huge_boot_pages);
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
60 DEFINE_SPINLOCK(hugetlb_lock);
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
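/*
 * Illustrative sketch (not part of the original source): a fault path
 * serializes against concurrent faults on the same logical page by
 * hashing to one of these mutexes, roughly:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * hugetlb_fault_mutex_hash() is assumed here; it is defined elsewhere
 * in this file.
 */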
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
76 spin_unlock(&spool->lock);
78 /* If no pages are used, and no other handles to the subpool
79 * remain, give up any reservations based on minimum size and free the subpool. */
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
92 struct hugepage_subpool *spool;
94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
98 spin_lock_init(&spool->lock);
100 spool->max_hpages = max_hpages;
102 spool->min_hpages = min_hpages;
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
108 spool->rsv_hpages = min_hpages;
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
118 unlock_or_release_subpool(spool);
122 * Subpool accounting for allocating and reserving pages.
123 * Return -ENOMEM if there are not enough resources to satisfy
124 * the request. Otherwise, return the number of pages by which the
125 * global pools must be adjusted (upward). The returned value may
126 * only be different than the passed value (delta) in the case where
127 * a subpool minimum size must be maintained.
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
137 spin_lock(&spool->lock);
139 if (spool->max_hpages != -1) { /* maximum size accounting */
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
148 if (spool->min_hpages != -1) { /* minimum size accounting */
149 if (delta > spool->rsv_hpages) {
151 * Asking for more reserves than those already taken on
152 * behalf of subpool. Return difference.
154 ret = delta - spool->rsv_hpages;
155 spool->rsv_hpages = 0;
157 ret = 0; /* reserves already accounted for */
158 spool->rsv_hpages -= delta;
163 spin_unlock(&spool->lock);
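/*
 * Worked example (illustrative, assuming a subpool created with
 * min_hpages = 10 and no maximum): immediately after creation
 * rsv_hpages == 10, so hugepage_subpool_get_pages(spool, 3) returns 0
 * (the pages were already accounted when the minimum was reserved) and
 * leaves rsv_hpages == 7. A later call with delta = 9 exceeds the
 * remaining reserve, so it returns 9 - 7 = 2 and the global pools must
 * be charged for 2 pages.
 */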
168 * Subpool accounting for freeing and unreserving pages.
169 * Return the number of global page reservations that must be dropped.
170 * The return value may only be different than the passed value (delta)
171 * in the case where a subpool minimum size must be maintained.
173 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
181 spin_lock(&spool->lock);
183 if (spool->max_hpages != -1) /* maximum size accounting */
184 spool->used_hpages -= delta;
186 if (spool->min_hpages != -1) { /* minimum size accounting */
187 if (spool->rsv_hpages + delta <= spool->min_hpages)
190 ret = spool->rsv_hpages + delta - spool->min_hpages;
192 spool->rsv_hpages += delta;
193 if (spool->rsv_hpages > spool->min_hpages)
194 spool->rsv_hpages = spool->min_hpages;
198 * If hugetlbfs_put_super couldn't free spool due to an outstanding
199 * quota reference, free it now.
201 unlock_or_release_subpool(spool);
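/*
 * Worked example (illustrative, continuing the sketch above with
 * min_hpages = 10): if rsv_hpages == 7 when 5 pages are returned,
 * hugepage_subpool_put_pages(spool, 5) returns 12 - 10 = 2 global
 * reservations to drop and caps rsv_hpages back at the minimum of 10.
 * If rsv_hpages + delta stays at or below the minimum, all returned
 * pages are kept as subpool reserves and 0 is returned.
 */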
206 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
208 return HUGETLBFS_SB(inode->i_sb)->spool;
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
213 return subpool_inode(file_inode(vma->vm_file));
217 * Region tracking -- allows tracking of reservations and instantiated pages
218 * across the pages in a mapping.
220 * The region data structures are embedded into a resv_map and protected
221 * by a resv_map's lock. The set of regions within the resv_map represent
222 * reservations for huge pages, or huge pages that have already been
223 * instantiated within the map. The from and to elements are huge page
224 * indices into the associated mapping. from indicates the starting index
225 * of the region. to represents the first index past the end of the region.
227 * For example, a file region structure with from == 0 and to == 4 represents
228 * four huge pages in a mapping. It is important to note that the to element
229 * represents the first element past the end of the region. This is used in
230 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
232 * Interval notation of the form [from, to) will be used to indicate that
233 * the endpoint from is inclusive and to is exclusive.
236 struct list_head link;
242 * Add the huge page range represented by [f, t) to the reserve
243 * map. In the normal case, existing regions will be expanded
244 * to accommodate the specified range. Sufficient regions should
245 * exist for expansion due to the previous call to region_chg
246 * with the same range. However, it is possible that region_del
247 * could have been called after region_chg and modified the map
248 * in such a way that no region exists to be expanded. In this
249 * case, pull a region descriptor from the cache associated with
250 * the map and use that for the new range.
252 * Return the number of new huge pages added to the map. This
253 * number is greater than or equal to zero.
255 static long region_add(struct resv_map *resv, long f, long t)
257 struct list_head *head = &resv->regions;
258 struct file_region *rg, *nrg, *trg;
261 spin_lock(&resv->lock);
262 /* Locate the region we are either in or before. */
263 list_for_each_entry(rg, head, link)
268 * If no region exists which can be expanded to include the
269 * specified range, the list must have been modified by an
270 * interleaving call to region_del(). Pull a region descriptor
271 * from the cache and use it for this range.
273 if (&rg->link == head || t < rg->from) {
274 VM_BUG_ON(resv->region_cache_count <= 0);
276 resv->region_cache_count--;
277 nrg = list_first_entry(&resv->region_cache, struct file_region,
279 list_del(&nrg->link);
283 list_add(&nrg->link, rg->link.prev);
289 /* Round our left edge to the current segment if it encloses us. */
293 /* Check for and consume any regions we now overlap with. */
295 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
296 if (&rg->link == head)
301 /* If this area reaches higher, then extend our area to
302 * include it completely. If this is not the first area
303 * which we intend to reuse, free it. */
307 /* Decrement return value by the deleted range.
308 * Another range will span this area so that by
309 * the end of the routine, add will be >= zero
311 add -= (rg->to - rg->from);
317 add += (nrg->from - f); /* Added to beginning of region */
319 add += t - nrg->to; /* Added to end of region */
323 resv->adds_in_progress--;
324 spin_unlock(&resv->lock);
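/*
 * Worked example (illustrative): for a reserve map holding the regions
 * {[0, 2), [5, 7)}, region_chg(resv, 1, 6) returns 3, since pages 2, 3
 * and 4 of [1, 6) are not yet represented. The matching
 * region_add(resv, 1, 6) then merges everything into the single region
 * [0, 7) and likewise returns 3 newly added pages.
 */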
330 * Examine the existing reserve map and determine how many
331 * huge pages in the specified range [f, t) are NOT currently
332 * represented. This routine is called before a subsequent
333 * call to region_add that will actually modify the reserve
334 * map to add the specified range [f, t). region_chg does
335 * not change the number of huge pages represented by the
336 * map. However, if the existing regions in the map can not
337 * be expanded to represent the new range, a new file_region
338 * structure is added to the map as a placeholder. This is
339 * so that the subsequent region_add call will have all the
340 * regions it needs and will not fail.
342 * Upon entry, region_chg will also examine the cache of region descriptors
343 * associated with the map. If there are not enough descriptors cached, one
344 * will be allocated for the in progress add operation.
346 * Returns the number of huge pages that need to be added to the existing
347 * reservation map for the range [f, t). This number is greater or equal to
348 * zero. -ENOMEM is returned if a new file_region structure or cache entry
349 * is needed and can not be allocated.
351 static long region_chg(struct resv_map *resv, long f, long t)
353 struct list_head *head = &resv->regions;
354 struct file_region *rg, *nrg = NULL;
358 spin_lock(&resv->lock);
360 resv->adds_in_progress++;
363 * Check for sufficient descriptors in the cache to accommodate
364 * the number of in progress add operations.
366 if (resv->adds_in_progress > resv->region_cache_count) {
367 struct file_region *trg;
369 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
370 /* Must drop lock to allocate a new descriptor. */
371 resv->adds_in_progress--;
372 spin_unlock(&resv->lock);
374 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
378 spin_lock(&resv->lock);
379 list_add(&trg->link, &resv->region_cache);
380 resv->region_cache_count++;
384 /* Locate the region we are before or in. */
385 list_for_each_entry(rg, head, link)
389 /* If we are below the current region then a new region is required.
390 * Subtle: allocate a new region at the position but make it zero
391 * size such that we can guarantee to record the reservation. */
392 if (&rg->link == head || t < rg->from) {
394 resv->adds_in_progress--;
395 spin_unlock(&resv->lock);
396 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
402 INIT_LIST_HEAD(&nrg->link);
406 list_add(&nrg->link, rg->link.prev);
411 /* Round our left edge to the current segment if it encloses us. */
416 /* Check for and consume any regions we now overlap with. */
417 list_for_each_entry(rg, rg->link.prev, link) {
418 if (&rg->link == head)
423 /* We overlap with this area; if it extends further than
424 * us then we must extend ourselves. Account for its
425 * existing reservation. */
430 chg -= rg->to - rg->from;
434 spin_unlock(&resv->lock);
435 /* We already know we raced and no longer need the new region */
439 spin_unlock(&resv->lock);
444 * Abort the in progress add operation. The adds_in_progress field
445 * of the resv_map keeps track of the operations in progress between
446 * calls to region_chg and region_add. Operations are sometimes
447 * aborted after the call to region_chg. In such cases, region_abort
448 * is called to decrement the adds_in_progress counter.
450 * NOTE: The range arguments [f, t) are not needed or used in this
451 * routine. They are kept to make reading the calling code easier as
452 * arguments will match the associated region_chg call.
454 static void region_abort(struct resv_map *resv, long f, long t)
456 spin_lock(&resv->lock);
457 VM_BUG_ON(!resv->region_cache_count);
458 resv->adds_in_progress--;
459 spin_unlock(&resv->lock);
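/*
 * Illustrative lifecycle (a sketch, not actual kernel code;
 * "allocation_failed" is a hypothetical condition): a reservation
 * first calls region_chg(), then either region_add() on success or
 * region_abort() on failure:
 *
 *	chg = region_chg(resv, f, t);
 *	if (chg < 0)
 *		return chg;
 *	if (allocation_failed)
 *		region_abort(resv, f, t);
 *	else
 *		added = region_add(resv, f, t);
 */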
463 * Delete the specified range [f, t) from the reserve map. If the
464 * t parameter is LONG_MAX, this indicates that ALL regions after f
465 * should be deleted. Locate the regions which intersect [f, t)
466 * and either trim, delete or split the existing regions.
468 * Returns the number of huge pages deleted from the reserve map.
469 * In the normal case, the return value is zero or more. In the
470 * case where a region must be split, a new region descriptor must
471 * be allocated. If the allocation fails, -ENOMEM will be returned.
472 * NOTE: If the parameter t == LONG_MAX, then we will never split
473 * a region and possibly return -ENOMEM. Callers specifying
474 * t == LONG_MAX do not need to check for -ENOMEM error.
476 static long region_del(struct resv_map *resv, long f, long t)
478 struct list_head *head = &resv->regions;
479 struct file_region *rg, *trg;
480 struct file_region *nrg = NULL;
484 spin_lock(&resv->lock);
485 list_for_each_entry_safe(rg, trg, head, link) {
491 if (f > rg->from && t < rg->to) { /* Must split region */
493 * Check for an entry in the cache before dropping
494 * lock and attempting allocation.
497 resv->region_cache_count > resv->adds_in_progress) {
498 nrg = list_first_entry(&resv->region_cache,
501 list_del(&nrg->link);
502 resv->region_cache_count--;
506 spin_unlock(&resv->lock);
507 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
515 /* New entry for end of split region */
518 INIT_LIST_HEAD(&nrg->link);
520 /* Original entry is trimmed */
523 list_add(&nrg->link, &rg->link);
528 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
529 del += rg->to - rg->from;
535 if (f <= rg->from) { /* Trim beginning of region */
538 } else { /* Trim end of region */
544 spin_unlock(&resv->lock);
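/*
 * Worked example (illustrative): with a reserve map holding {[0, 10)},
 * region_del(resv, 3, 5) must split the region into [0, 3) and
 * [5, 10), consuming one cached descriptor for the new entry, and
 * returns 2 deleted pages. region_del(resv, 3, LONG_MAX) would instead
 * simply trim the region to [0, 3) and return 7.
 */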
550 * Count and return the number of huge pages in the reserve map
551 * that intersect with the range [f, t).
553 static long region_count(struct resv_map *resv, long f, long t)
555 struct list_head *head = &resv->regions;
556 struct file_region *rg;
559 spin_lock(&resv->lock);
560 /* Locate each segment we overlap with, and count that overlap. */
561 list_for_each_entry(rg, head, link) {
570 seg_from = max(rg->from, f);
571 seg_to = min(rg->to, t);
573 chg += seg_to - seg_from;
575 spin_unlock(&resv->lock);
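/*
 * Worked example (illustrative): with a reserve map holding
 * {[0, 2), [5, 7)}, region_count(resv, 1, 6) sums the overlaps
 * [1, 2) and [5, 6) and returns 2.
 */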
581 * Convert the address within this vma to the page offset within
582 * the mapping, in pagecache page units; huge pages here.
584 static pgoff_t vma_hugecache_offset(struct hstate *h,
585 struct vm_area_struct *vma, unsigned long address)
587 return ((address - vma->vm_start) >> huge_page_shift(h)) +
588 (vma->vm_pgoff >> huge_page_order(h));
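/*
 * Worked example (illustrative, assuming 2MB huge pages on a 4KB base
 * page system, i.e. huge_page_shift == 21 and huge_page_order == 9):
 * an address 4MB past vma->vm_start contributes (4MB >> 21) == 2, and
 * vm_pgoff, kept in base-page units, is scaled down by >> 9 to huge
 * page units before being added.
 */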
591 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
592 unsigned long address)
594 return vma_hugecache_offset(hstate_vma(vma), vma, address);
598 * Return the size of the pages allocated when backing a VMA. In the majority
599 * of cases this will be the same size as that used by the page table entries.
601 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
603 struct hstate *hstate;
605 if (!is_vm_hugetlb_page(vma))
608 hstate = hstate_vma(vma);
610 return 1UL << huge_page_shift(hstate);
612 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
615 * Return the page size being used by the MMU to back a VMA. In the majority
616 * of cases, the page size used by the kernel matches the MMU size. On
617 * architectures where it differs, an architecture-specific version of this
618 * function is required.
620 #ifndef vma_mmu_pagesize
621 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
623 return vma_kernel_pagesize(vma);
628 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
629 * bits of the reservation map pointer, which are always clear due to alignment.
632 #define HPAGE_RESV_OWNER (1UL << 0)
633 #define HPAGE_RESV_UNMAPPED (1UL << 1)
634 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
637 * These helpers are used to track how many pages are reserved for
638 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
639 * is guaranteed to have its future faults succeed.
641 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
642 * the reserve counters are updated with the hugetlb_lock held. It is safe
643 * to reset the VMA at fork() time as it is not in use yet and there is no
644 * chance of the global counters getting corrupted as a result of the values.
646 * The private mapping reservation is represented in a subtly different
647 * manner to a shared mapping. A shared mapping has a region map associated
648 * with the underlying file, this region map represents the backing file
649 * pages which have ever had a reservation assigned, and this persists even
650 * after the page is instantiated. A private mapping has a region map
651 * associated with the original mmap which is attached to all VMAs which
652 * reference it, this region map represents those offsets which have consumed
653 * reservation, i.e. where pages have been instantiated.
655 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
657 return (unsigned long)vma->vm_private_data;
660 static void set_vma_private_data(struct vm_area_struct *vma,
663 vma->vm_private_data = (void *)value;
666 struct resv_map *resv_map_alloc(void)
668 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
669 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
671 if (!resv_map || !rg) {
677 kref_init(&resv_map->refs);
678 spin_lock_init(&resv_map->lock);
679 INIT_LIST_HEAD(&resv_map->regions);
681 resv_map->adds_in_progress = 0;
683 INIT_LIST_HEAD(&resv_map->region_cache);
684 list_add(&rg->link, &resv_map->region_cache);
685 resv_map->region_cache_count = 1;
690 void resv_map_release(struct kref *ref)
692 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
693 struct list_head *head = &resv_map->region_cache;
694 struct file_region *rg, *trg;
696 /* Clear out any active regions before we release the map. */
697 region_del(resv_map, 0, LONG_MAX);
699 /* ... and any entries left in the cache */
700 list_for_each_entry_safe(rg, trg, head, link) {
705 VM_BUG_ON(resv_map->adds_in_progress);
710 static inline struct resv_map *inode_resv_map(struct inode *inode)
712 return inode->i_mapping->private_data;
715 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
717 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
718 if (vma->vm_flags & VM_MAYSHARE) {
719 struct address_space *mapping = vma->vm_file->f_mapping;
720 struct inode *inode = mapping->host;
722 return inode_resv_map(inode);
725 return (struct resv_map *)(get_vma_private_data(vma) &
730 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
732 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
733 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
735 set_vma_private_data(vma, (get_vma_private_data(vma) &
736 HPAGE_RESV_MASK) | (unsigned long)map);
739 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
741 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
742 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
744 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
747 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
749 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
751 return (get_vma_private_data(vma) & flag) != 0;
754 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
755 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
757 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
758 if (!(vma->vm_flags & VM_MAYSHARE))
759 vma->vm_private_data = (void *)0;
762 /* Returns true if the VMA has associated reserve pages */
763 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
765 if (vma->vm_flags & VM_NORESERVE) {
767 * This address is already reserved by another process (chg == 0),
768 * so we should decrement the reserved count. Without decrementing,
769 * the reserve count remains after releasing the inode, because this
770 * allocated page will go into the page cache and be regarded as
771 * coming from the reserved pool in the releasing step. Currently, we
772 * don't have any other solution to deal with this situation
773 * properly, so add a work-around here.
775 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
781 /* Shared mappings always use reserves */
782 if (vma->vm_flags & VM_MAYSHARE)
786 * Only the process that called mmap() has reserves for
789 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
795 static void enqueue_huge_page(struct hstate *h, struct page *page)
797 int nid = page_to_nid(page);
798 list_move(&page->lru, &h->hugepage_freelists[nid]);
799 h->free_huge_pages++;
800 h->free_huge_pages_node[nid]++;
803 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
807 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
808 if (!is_migrate_isolate_page(page))
811 * If a non-isolated free hugepage is not found on the list,
812 * the allocation fails.
814 if (&h->hugepage_freelists[nid] == &page->lru)
816 list_move(&page->lru, &h->hugepage_activelist);
817 set_page_refcounted(page);
818 h->free_huge_pages--;
819 h->free_huge_pages_node[nid]--;
823 /* Movability of hugepages depends on migration support. */
824 static inline gfp_t htlb_alloc_mask(struct hstate *h)
826 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
827 return GFP_HIGHUSER_MOVABLE;
832 static struct page *dequeue_huge_page_vma(struct hstate *h,
833 struct vm_area_struct *vma,
834 unsigned long address, int avoid_reserve,
837 struct page *page = NULL;
838 struct mempolicy *mpol;
839 nodemask_t *nodemask;
840 struct zonelist *zonelist;
843 unsigned int cpuset_mems_cookie;
846 * A child process with MAP_PRIVATE mappings created by its parent
847 * has no page reserves. This check ensures that reservations are
848 * not "stolen". The child may still get SIGKILLed.
850 if (!vma_has_reserves(vma, chg) &&
851 h->free_huge_pages - h->resv_huge_pages == 0)
854 /* If reserves cannot be used, ensure enough pages are in the pool */
855 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
859 cpuset_mems_cookie = read_mems_allowed_begin();
860 zonelist = huge_zonelist(vma, address,
861 htlb_alloc_mask(h), &mpol, &nodemask);
863 for_each_zone_zonelist_nodemask(zone, z, zonelist,
864 MAX_NR_ZONES - 1, nodemask) {
865 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
866 page = dequeue_huge_page_node(h, zone_to_nid(zone));
870 if (!vma_has_reserves(vma, chg))
873 SetPagePrivate(page);
874 h->resv_huge_pages--;
881 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
890 * common helper functions for hstate_next_node_to_{alloc|free}.
891 * We may have allocated or freed a huge page based on a different
892 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
893 * be outside of *nodes_allowed. Ensure that we use an allowed
894 * node for alloc or free.
896 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
898 nid = next_node(nid, *nodes_allowed);
899 if (nid == MAX_NUMNODES)
900 nid = first_node(*nodes_allowed);
901 VM_BUG_ON(nid >= MAX_NUMNODES);
906 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
908 if (!node_isset(nid, *nodes_allowed))
909 nid = next_node_allowed(nid, nodes_allowed);
914 * returns the previously saved node ["this node"] from which to
915 * allocate a persistent huge page for the pool and advances the
916 * next node from which to allocate, handling wrap at end of node mask.
920 nodemask_t *nodes_allowed)
924 VM_BUG_ON(!nodes_allowed);
926 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
927 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
933 * helper for free_pool_huge_page() - return the previously saved
934 * node ["this node"] from which to free a huge page. Advance the
935 * next node id whether or not we find a free huge page to free so
936 * that the next attempt to free addresses the next node.
938 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
942 VM_BUG_ON(!nodes_allowed);
944 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
945 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
950 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
951 for (nr_nodes = nodes_weight(*mask); \
953 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
956 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
957 for (nr_nodes = nodes_weight(*mask); \
959 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
962 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
963 static void destroy_compound_gigantic_page(struct page *page,
967 int nr_pages = 1 << order;
968 struct page *p = page + 1;
970 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
972 set_page_refcounted(p);
973 p->first_page = NULL;
976 set_compound_order(page, 0);
977 __ClearPageHead(page);
980 static void free_gigantic_page(struct page *page, unsigned order)
982 free_contig_range(page_to_pfn(page), 1 << order);
985 static int __alloc_gigantic_page(unsigned long start_pfn,
986 unsigned long nr_pages)
988 unsigned long end_pfn = start_pfn + nr_pages;
989 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
992 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
993 unsigned long nr_pages)
995 unsigned long i, end_pfn = start_pfn + nr_pages;
998 for (i = start_pfn; i < end_pfn; i++) {
1002 page = pfn_to_page(i);
1004 if (PageReserved(page))
1007 if (page_count(page) > 0)
1017 static bool zone_spans_last_pfn(const struct zone *zone,
1018 unsigned long start_pfn, unsigned long nr_pages)
1020 unsigned long last_pfn = start_pfn + nr_pages - 1;
1021 return zone_spans_pfn(zone, last_pfn);
1024 static struct page *alloc_gigantic_page(int nid, unsigned order)
1026 unsigned long nr_pages = 1 << order;
1027 unsigned long ret, pfn, flags;
1030 z = NODE_DATA(nid)->node_zones;
1031 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1032 spin_lock_irqsave(&z->lock, flags);
1034 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1035 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1036 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1038 * We release the zone lock here because
1039 * alloc_contig_range() will also lock the zone
1040 * at some point. If there's an allocation
1041 * spinning on this lock, it may win the race
1042 * and cause alloc_contig_range() to fail...
1044 spin_unlock_irqrestore(&z->lock, flags);
1045 ret = __alloc_gigantic_page(pfn, nr_pages);
1047 return pfn_to_page(pfn);
1048 spin_lock_irqsave(&z->lock, flags);
1053 spin_unlock_irqrestore(&z->lock, flags);
1059 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1060 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
1062 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1066 page = alloc_gigantic_page(nid, huge_page_order(h));
1068 prep_compound_gigantic_page(page, huge_page_order(h));
1069 prep_new_huge_page(h, page, nid);
1075 static int alloc_fresh_gigantic_page(struct hstate *h,
1076 nodemask_t *nodes_allowed)
1078 struct page *page = NULL;
1081 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1082 page = alloc_fresh_gigantic_page_node(h, node);
1090 static inline bool gigantic_page_supported(void) { return true; }
1092 static inline bool gigantic_page_supported(void) { return false; }
1093 static inline void free_gigantic_page(struct page *page, unsigned order) { }
1094 static inline void destroy_compound_gigantic_page(struct page *page,
1095 unsigned long order) { }
1096 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1097 nodemask_t *nodes_allowed) { return 0; }
1100 static void update_and_free_page(struct hstate *h, struct page *page)
1104 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1108 h->nr_huge_pages_node[page_to_nid(page)]--;
1109 for (i = 0; i < pages_per_huge_page(h); i++) {
1110 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1111 1 << PG_referenced | 1 << PG_dirty |
1112 1 << PG_active | 1 << PG_private |
1115 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1116 set_compound_page_dtor(page, NULL);
1117 set_page_refcounted(page);
1118 if (hstate_is_gigantic(h)) {
1119 destroy_compound_gigantic_page(page, huge_page_order(h));
1120 free_gigantic_page(page, huge_page_order(h));
1122 __free_pages(page, huge_page_order(h));
1126 struct hstate *size_to_hstate(unsigned long size)
1130 for_each_hstate(h) {
1131 if (huge_page_size(h) == size)
1138 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1139 * to hstate->hugepage_activelist).
1141 * This function can be called for tail pages, but never returns true for them.
1143 bool page_huge_active(struct page *page)
1145 VM_BUG_ON_PAGE(!PageHuge(page), page);
1146 return PageHead(page) && PagePrivate(&page[1]);
1149 /* never called for tail page */
1150 static void set_page_huge_active(struct page *page)
1152 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1153 SetPagePrivate(&page[1]);
1156 static void clear_page_huge_active(struct page *page)
1158 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1159 ClearPagePrivate(&page[1]);
1162 void free_huge_page(struct page *page)
1165 * Can't pass hstate in here because it is called from the
1166 * compound page destructor.
1168 struct hstate *h = page_hstate(page);
1169 int nid = page_to_nid(page);
1170 struct hugepage_subpool *spool =
1171 (struct hugepage_subpool *)page_private(page);
1172 bool restore_reserve;
1174 set_page_private(page, 0);
1175 page->mapping = NULL;
1176 BUG_ON(page_count(page));
1177 BUG_ON(page_mapcount(page));
1178 restore_reserve = PagePrivate(page);
1179 ClearPagePrivate(page);
1182 * A return code of zero implies that the subpool will be under its
1183 * minimum size if the reservation is not restored after page is free.
1184 * Therefore, force restore_reserve operation.
1186 if (hugepage_subpool_put_pages(spool, 1) == 0)
1187 restore_reserve = true;
1189 spin_lock(&hugetlb_lock);
1190 clear_page_huge_active(page);
1191 hugetlb_cgroup_uncharge_page(hstate_index(h),
1192 pages_per_huge_page(h), page);
1193 if (restore_reserve)
1194 h->resv_huge_pages++;
1196 if (h->surplus_huge_pages_node[nid]) {
1197 /* remove the page from active list */
1198 list_del(&page->lru);
1199 update_and_free_page(h, page);
1200 h->surplus_huge_pages--;
1201 h->surplus_huge_pages_node[nid]--;
1203 arch_clear_hugepage_flags(page);
1204 enqueue_huge_page(h, page);
1206 spin_unlock(&hugetlb_lock);
1209 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1211 INIT_LIST_HEAD(&page->lru);
1212 set_compound_page_dtor(page, free_huge_page);
1213 spin_lock(&hugetlb_lock);
1214 set_hugetlb_cgroup(page, NULL);
1216 h->nr_huge_pages_node[nid]++;
1217 spin_unlock(&hugetlb_lock);
1218 put_page(page); /* free it into the hugepage allocator */
1221 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
1224 int nr_pages = 1 << order;
1225 struct page *p = page + 1;
1227 /* we rely on prep_new_huge_page to set the destructor */
1228 set_compound_order(page, order);
1229 __SetPageHead(page);
1230 __ClearPageReserved(page);
1231 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1233 * For gigantic hugepages allocated through bootmem at
1234 * boot, it's safer to be consistent with the not-gigantic
1235 * hugepages and clear the PG_reserved bit from all tail pages
1236 * too. Otherwise drivers using get_user_pages() to access tail
1237 * pages may get the reference counting wrong if they see
1238 * PG_reserved set on a tail page (despite the head page not
1239 * having PG_reserved set). Enforcing this consistency between
1240 * head and tail pages allows drivers to optimize away a check
1241 * on the head page when they need to know if put_page() is needed
1242 * after get_user_pages().
1244 __ClearPageReserved(p);
1245 set_page_count(p, 0);
1246 p->first_page = page;
1247 /* Make sure p->first_page is always valid for PageTail() */
1254 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1255 * transparent huge pages. See the PageTransHuge() documentation for more details.
1258 int PageHuge(struct page *page)
1260 if (!PageCompound(page))
1263 page = compound_head(page);
1264 return get_compound_page_dtor(page) == free_huge_page;
1266 EXPORT_SYMBOL_GPL(PageHuge);
1269 * PageHeadHuge() only returns true for a hugetlbfs head page, but not for
1270 * normal or transparent huge pages.
1272 int PageHeadHuge(struct page *page_head)
1274 if (!PageHead(page_head))
1277 return get_compound_page_dtor(page_head) == free_huge_page;
1280 pgoff_t __basepage_index(struct page *page)
1282 struct page *page_head = compound_head(page);
1283 pgoff_t index = page_index(page_head);
1284 unsigned long compound_idx;
1286 if (!PageHuge(page_head))
1287 return page_index(page);
1289 if (compound_order(page_head) >= MAX_ORDER)
1290 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1292 compound_idx = page - page_head;
1294 return (index << compound_order(page_head)) + compound_idx;
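/*
 * Worked example (illustrative, 2MB huge page, compound_order == 9):
 * for a tail page at offset 5 within a head page whose index is 3, the
 * base-page index is (3 << 9) + 5 == 1541.
 */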
1297 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1301 page = alloc_pages_exact_node(nid,
1302 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1303 __GFP_REPEAT|__GFP_NOWARN,
1304 huge_page_order(h));
1306 prep_new_huge_page(h, page, nid);
1312 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1318 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1319 page = alloc_fresh_huge_page_node(h, node);
1327 count_vm_event(HTLB_BUDDY_PGALLOC);
1329 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1335 * Free huge page from pool from next node to free.
1336 * Attempt to keep persistent huge pages more or less
1337 * balanced over allowed nodes.
1338 * Called with hugetlb_lock locked.
1340 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1346 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1348 * If we're returning unused surplus pages, only examine
1349 * nodes with surplus pages.
1351 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1352 !list_empty(&h->hugepage_freelists[node])) {
1354 list_entry(h->hugepage_freelists[node].next,
1356 list_del(&page->lru);
1357 h->free_huge_pages--;
1358 h->free_huge_pages_node[node]--;
1360 h->surplus_huge_pages--;
1361 h->surplus_huge_pages_node[node]--;
1363 update_and_free_page(h, page);
1373 * Dissolve a given free hugepage into free buddy pages. This function does
1374 * nothing for in-use (including surplus) hugepages.
1376 static void dissolve_free_huge_page(struct page *page)
1378 spin_lock(&hugetlb_lock);
1379 if (PageHuge(page) && !page_count(page)) {
1380 struct hstate *h = page_hstate(page);
1381 int nid = page_to_nid(page);
1382 list_del(&page->lru);
1383 h->free_huge_pages--;
1384 h->free_huge_pages_node[nid]--;
1385 update_and_free_page(h, page);
1387 spin_unlock(&hugetlb_lock);
1391 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1392 * make specified memory blocks removable from the system.
1393 * Note that start_pfn should be aligned with (minimum) hugepage size.
1395 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1399 if (!hugepages_supported())
1402 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1403 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1404 dissolve_free_huge_page(pfn_to_page(pfn));
1407 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1412 if (hstate_is_gigantic(h))
1416 * Assume we will successfully allocate the surplus page to
1417 * prevent racing processes from causing the surplus to exceed overcommit.
1420 * This however introduces a different race, where a process B
1421 * tries to grow the static hugepage pool while alloc_pages() is
1422 * called by process A. B will only examine the per-node
1423 * counters in determining if surplus huge pages can be
1424 * converted to normal huge pages in adjust_pool_surplus(). A
1425 * won't be able to increment the per-node counter, until the
1426 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1427 * no more huge pages can be converted from surplus to normal
1428 * state (and doesn't try to convert again). Thus, we have a
1429 * case where a surplus huge page exists, the pool is grown, and
1430 * the surplus huge page still exists after, even though it
1431 * should just have been converted to a normal huge page. This
1432 * does not leak memory, though, as the hugepage will be freed
1433 * once it is out of use. It also does not allow the counters to
1434 * go out of whack in adjust_pool_surplus() as we don't modify
1435 * the node values until we've gotten the hugepage and only the
1436 * per-node value is checked there.
1438 spin_lock(&hugetlb_lock);
1439 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1440 spin_unlock(&hugetlb_lock);
1444 h->surplus_huge_pages++;
1446 spin_unlock(&hugetlb_lock);
1448 if (nid == NUMA_NO_NODE)
1449 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1450 __GFP_REPEAT|__GFP_NOWARN,
1451 huge_page_order(h));
1453 page = alloc_pages_exact_node(nid,
1454 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1455 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1457 spin_lock(&hugetlb_lock);
1459 INIT_LIST_HEAD(&page->lru);
1460 r_nid = page_to_nid(page);
1461 set_compound_page_dtor(page, free_huge_page);
1462 set_hugetlb_cgroup(page, NULL);
1464 * We incremented the global counters already
1466 h->nr_huge_pages_node[r_nid]++;
1467 h->surplus_huge_pages_node[r_nid]++;
1468 __count_vm_event(HTLB_BUDDY_PGALLOC);
1471 h->surplus_huge_pages--;
1472 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1474 spin_unlock(&hugetlb_lock);
1480 * This allocation function is useful in the context where vma is irrelevant.
1481 * E.g. soft-offlining uses this function because it only cares about the
1482 * physical address of the error page.
1484 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1486 struct page *page = NULL;
1488 spin_lock(&hugetlb_lock);
1489 if (h->free_huge_pages - h->resv_huge_pages > 0)
1490 page = dequeue_huge_page_node(h, nid);
1491 spin_unlock(&hugetlb_lock);
1494 page = alloc_buddy_huge_page(h, nid);
1500 * Increase the hugetlb pool such that it can accommodate a reservation of size 'delta'.
1503 static int gather_surplus_pages(struct hstate *h, int delta)
1505 struct list_head surplus_list;
1506 struct page *page, *tmp;
1508 int needed, allocated;
1509 bool alloc_ok = true;
1511 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1513 h->resv_huge_pages += delta;
1518 INIT_LIST_HEAD(&surplus_list);
1522 spin_unlock(&hugetlb_lock);
1523 for (i = 0; i < needed; i++) {
1524 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1529 list_add(&page->lru, &surplus_list);
1534 * After retaking hugetlb_lock, we need to recalculate 'needed'
1535 * because either resv_huge_pages or free_huge_pages may have changed.
1537 spin_lock(&hugetlb_lock);
1538 needed = (h->resv_huge_pages + delta) -
1539 (h->free_huge_pages + allocated);
1544 * We were not able to allocate enough pages to
1545 * satisfy the entire reservation so we free what
1546 * we've allocated so far.
1551 * The surplus_list now contains _at_least_ the number of extra pages
1552 * needed to accommodate the reservation. Add the appropriate number
1553 * of pages to the hugetlb pool and free the extras back to the buddy
1554 * allocator. Commit the entire reservation here to prevent another
1555 * process from stealing the pages as they are added to the pool but
1556 * before they are reserved.
1558 needed += allocated;
1559 h->resv_huge_pages += delta;
1562 /* Free the needed pages to the hugetlb pool */
1563 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1567 * This page is now managed by the hugetlb allocator and has
1568 * no users -- drop the buddy allocator's reference.
1570 put_page_testzero(page);
1571 VM_BUG_ON_PAGE(page_count(page), page);
1572 enqueue_huge_page(h, page);
1575 spin_unlock(&hugetlb_lock);
1577 /* Free unnecessary surplus pages to the buddy allocator */
1578 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1580 spin_lock(&hugetlb_lock);
1586 * When releasing a hugetlb pool reservation, any surplus pages that were
1587 * allocated to satisfy the reservation must be explicitly freed if they were never used.
1589 * Called with hugetlb_lock held.
1591 static void return_unused_surplus_pages(struct hstate *h,
1592 unsigned long unused_resv_pages)
1594 unsigned long nr_pages;
1596 /* Uncommit the reservation */
1597 h->resv_huge_pages -= unused_resv_pages;
1599 /* Cannot return gigantic pages currently */
1600 if (hstate_is_gigantic(h))
1603 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1606 * We want to release as many surplus pages as possible, spread
1607 * evenly across all nodes with memory. Iterate across these nodes
1608 * until we can no longer free unreserved surplus pages. This occurs
1609 * when the nodes with surplus pages have no free pages.
1610 * free_pool_huge_page() will balance the freed pages across the
1611 * on-line nodes with memory and will handle the hstate accounting.
1613 while (nr_pages--) {
1614 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1616 cond_resched_lock(&hugetlb_lock);
1622 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1623 * are used by the huge page allocation routines to manage reservations.
1625 * vma_needs_reservation is called to determine if the huge page at addr
1626 * within the vma has an associated reservation. If a reservation is
1627 * needed, the value 1 is returned. The caller is then responsible for
1628 * managing the global reservation and subpool usage counts. After
1629 * the huge page has been allocated, vma_commit_reservation is called
1630 * to add the page to the reservation map. If the page allocation fails,
1631 * the reservation must be ended instead of committed. vma_end_reservation
1632 * is called in such cases.
1634 * In the normal case, vma_commit_reservation returns the same value
1635 * as the preceding vma_needs_reservation call. The only time this
1636 * is not the case is if a reserve map was changed between calls. It
1637 * is the responsibility of the caller to notice the difference and
1638 * take appropriate action.
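/*
 * Illustrative calling pattern (a sketch; the real consumer,
 * alloc_huge_page() below, additionally handles subpool and cgroup
 * accounting):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...dequeue or allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	commit = vma_commit_reservation(h, vma, addr);
 */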
1640 enum vma_resv_mode {
1645 static long __vma_reservation_common(struct hstate *h,
1646 struct vm_area_struct *vma, unsigned long addr,
1647 enum vma_resv_mode mode)
1649 struct resv_map *resv;
1653 resv = vma_resv_map(vma);
1657 idx = vma_hugecache_offset(h, vma, addr);
1659 case VMA_NEEDS_RESV:
1660 ret = region_chg(resv, idx, idx + 1);
1662 case VMA_COMMIT_RESV:
1663 ret = region_add(resv, idx, idx + 1);
1666 region_abort(resv, idx, idx + 1);
1673 if (vma->vm_flags & VM_MAYSHARE)
1676 return ret < 0 ? ret : 0;
1679 static long vma_needs_reservation(struct hstate *h,
1680 struct vm_area_struct *vma, unsigned long addr)
1682 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1685 static long vma_commit_reservation(struct hstate *h,
1686 struct vm_area_struct *vma, unsigned long addr)
1688 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1691 static void vma_end_reservation(struct hstate *h,
1692 struct vm_area_struct *vma, unsigned long addr)
1694 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1697 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1698 unsigned long addr, int avoid_reserve)
1700 struct hugepage_subpool *spool = subpool_vma(vma);
1701 struct hstate *h = hstate_vma(vma);
1705 struct hugetlb_cgroup *h_cg;
1707 idx = hstate_index(h);
1709 * Processes that did not create the mapping will have no
1710 * reserves and will not have accounted against the subpool
1711 * limit. Check that the subpool limit can be made before
1712 * satisfying the allocation. MAP_NORESERVE mappings may also
1713 * need pages and subpool limit allocated if no reserve mapping overlaps.
1716 chg = vma_needs_reservation(h, vma, addr);
1718 return ERR_PTR(-ENOMEM);
1719 if (chg || avoid_reserve)
1720 if (hugepage_subpool_get_pages(spool, 1) < 0) {
1721 vma_end_reservation(h, vma, addr);
1722 return ERR_PTR(-ENOSPC);
1725 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1727 goto out_subpool_put;
1729 spin_lock(&hugetlb_lock);
1730 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1732 spin_unlock(&hugetlb_lock);
1733 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1735 goto out_uncharge_cgroup;
1737 spin_lock(&hugetlb_lock);
1738 list_move(&page->lru, &h->hugepage_activelist);
1741 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1742 spin_unlock(&hugetlb_lock);
1744 set_page_private(page, (unsigned long)spool);
1746 commit = vma_commit_reservation(h, vma, addr);
1747 if (unlikely(chg > commit)) {
1749 * The page was added to the reservation map between
1750 * vma_needs_reservation and vma_commit_reservation.
1751 * This indicates a race with hugetlb_reserve_pages.
1752 * Adjust for the subpool count incremented above AND
1753 * in hugetlb_reserve_pages for the same page. Also,
1754 * the reservation count added in hugetlb_reserve_pages
1755 * no longer applies.
1759 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1760 hugetlb_acct_memory(h, -rsv_adjust);
1764 out_uncharge_cgroup:
1765 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1767 if (chg || avoid_reserve)
1768 hugepage_subpool_put_pages(spool, 1);
1769 vma_end_reservation(h, vma, addr);
1770 return ERR_PTR(-ENOSPC);
1774 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1775 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1776 * where no ERR_VALUE is expected to be returned.
1778 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1779 unsigned long addr, int avoid_reserve)
1781 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1787 int __weak alloc_bootmem_huge_page(struct hstate *h)
1789 struct huge_bootmem_page *m;
1792 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1795 addr = memblock_virt_alloc_try_nid_nopanic(
1796 huge_page_size(h), huge_page_size(h),
1797 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1800 * Use the beginning of the huge page to store the
1801 * huge_bootmem_page struct (until gather_bootmem
1802 * puts them into the mem_map).
1811 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1812 /* Put them into a private list first because mem_map is not up yet */
1813 list_add(&m->list, &huge_boot_pages);
1818 static void __init prep_compound_huge_page(struct page *page, int order)
1820 if (unlikely(order > (MAX_ORDER - 1)))
1821 prep_compound_gigantic_page(page, order);
1823 prep_compound_page(page, order);
1826 /* Put bootmem huge pages into the standard lists after mem_map is up */
1827 static void __init gather_bootmem_prealloc(void)
1829 struct huge_bootmem_page *m;
1831 list_for_each_entry(m, &huge_boot_pages, list) {
1832 struct hstate *h = m->hstate;
1835 #ifdef CONFIG_HIGHMEM
1836 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1837 memblock_free_late(__pa(m),
1838 sizeof(struct huge_bootmem_page));
1840 page = virt_to_page(m);
1842 WARN_ON(page_count(page) != 1);
1843 prep_compound_huge_page(page, h->order);
1844 WARN_ON(PageReserved(page));
1845 prep_new_huge_page(h, page, page_to_nid(page));
1847 * If we had gigantic hugepages allocated at boot time, we need
1848 * to restore the 'stolen' pages to totalram_pages in order to
1849 * fix confusing memory reports from free(1) and other
1850 * side-effects, like CommitLimit going negative.
1852 if (hstate_is_gigantic(h))
1853 adjust_managed_page_count(page, 1 << h->order);
1857 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1861 for (i = 0; i < h->max_huge_pages; ++i) {
1862 if (hstate_is_gigantic(h)) {
1863 if (!alloc_bootmem_huge_page(h))
1865 } else if (!alloc_fresh_huge_page(h,
1866 &node_states[N_MEMORY]))
1869 h->max_huge_pages = i;
1872 static void __init hugetlb_init_hstates(void)
1876 for_each_hstate(h) {
1877 if (minimum_order > huge_page_order(h))
1878 minimum_order = huge_page_order(h);
1880 /* oversize hugepages were initialized in early boot */
1881 if (!hstate_is_gigantic(h))
1882 hugetlb_hstate_alloc_pages(h);
1884 VM_BUG_ON(minimum_order == UINT_MAX);
1887 static char * __init memfmt(char *buf, unsigned long n)
1889 if (n >= (1UL << 30))
1890 sprintf(buf, "%lu GB", n >> 30);
1891 else if (n >= (1UL << 20))
1892 sprintf(buf, "%lu MB", n >> 20);
1894 sprintf(buf, "%lu KB", n >> 10);
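/*
 * Example outputs (illustrative): memfmt(buf, 2UL << 20) yields "2 MB"
 * and memfmt(buf, 1UL << 30) yields "1 GB"; buf is assumed large
 * enough for the formatted string.
 */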
1898 static void __init report_hugepages(void)
1902 for_each_hstate(h) {
1904 pr_info("HugeTLB registered %s page size, pre-allocated %lu pages\n",
1905 memfmt(buf, huge_page_size(h)),
1906 h->free_huge_pages);
1910 #ifdef CONFIG_HIGHMEM
1911 static void try_to_free_low(struct hstate *h, unsigned long count,
1912 nodemask_t *nodes_allowed)
1916 if (hstate_is_gigantic(h))
1919 for_each_node_mask(i, *nodes_allowed) {
1920 struct page *page, *next;
1921 struct list_head *freel = &h->hugepage_freelists[i];
1922 list_for_each_entry_safe(page, next, freel, lru) {
1923 if (count >= h->nr_huge_pages)
1925 if (PageHighMem(page))
1927 list_del(&page->lru);
1928 update_and_free_page(h, page);
1929 h->free_huge_pages--;
1930 h->free_huge_pages_node[page_to_nid(page)]--;
1935 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1936 nodemask_t *nodes_allowed)
1942 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1943 * balanced by operating on them in a round-robin fashion.
1944 * Returns 1 if an adjustment was made.
1946 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1951 VM_BUG_ON(delta != -1 && delta != 1);
1954 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1955 if (h->surplus_huge_pages_node[node])
1959 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1960 if (h->surplus_huge_pages_node[node] <
1961 h->nr_huge_pages_node[node])
1968 h->surplus_huge_pages += delta;
1969 h->surplus_huge_pages_node[node] += delta;
1973 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1974 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1975 nodemask_t *nodes_allowed)
1977 unsigned long min_count, ret;
1979 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1980 return h->max_huge_pages;
1983 * Increase the pool size
1984 * First take pages out of surplus state. Then make up the
1985 * remaining difference by allocating fresh huge pages.
1987 * We might race with alloc_buddy_huge_page() here and be unable
1988 * to convert a surplus huge page to a normal huge page. That is
1989 * not critical, though, it just means the overall size of the
1990 * pool might be one hugepage larger than it needs to be, but
1991 * within all the constraints specified by the sysctls.
1993 spin_lock(&hugetlb_lock);
1994 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1995 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1999 while (count > persistent_huge_pages(h)) {
2001 * If this allocation races such that we no longer need the
2002 * page, free_huge_page will handle it by freeing the page
2003 * and reducing the surplus.
2005 spin_unlock(&hugetlb_lock);
2006 if (hstate_is_gigantic(h))
2007 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2009 ret = alloc_fresh_huge_page(h, nodes_allowed);
2010 spin_lock(&hugetlb_lock);
2014 /* Bail for signals. Probably ctrl-c from user */
2015 if (signal_pending(current))
2020 * Decrease the pool size
2021 * First return free pages to the buddy allocator (being careful
2022 * to keep enough around to satisfy reservations). Then place
2023 * pages into surplus state as needed so the pool will shrink
2024 * to the desired size as pages become free.
2026 * By placing pages into the surplus state independent of the
2027 * overcommit value, we are allowing the surplus pool size to
2028 * exceed overcommit. There are few sane options here. Since
2029 * alloc_buddy_huge_page() is checking the global counter,
2030 * though, we'll note that we're not allowed to exceed surplus
2031 * and won't grow the pool anywhere else. Not until one of the
2032 * sysctls are changed, or the surplus pages go out of use.
2034 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2035 min_count = max(count, min_count);
2036 try_to_free_low(h, min_count, nodes_allowed);
2037 while (min_count < persistent_huge_pages(h)) {
2038 if (!free_pool_huge_page(h, nodes_allowed, 0))
2040 cond_resched_lock(&hugetlb_lock);
2042 while (count < persistent_huge_pages(h)) {
2043 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2047 ret = persistent_huge_pages(h);
2048 spin_unlock(&hugetlb_lock);
2052 #define HSTATE_ATTR_RO(_name) \
2053 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2055 #define HSTATE_ATTR(_name) \
2056 static struct kobj_attribute _name##_attr = \
2057 __ATTR(_name, 0644, _name##_show, _name##_store)
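/*
 * For example (illustrative expansion), HSTATE_ATTR(nr_hugepages)
 * becomes:
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 */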
2059 static struct kobject *hugepages_kobj;
2060 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2062 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2064 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2068 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2069 if (hstate_kobjs[i] == kobj) {
2071 *nidp = NUMA_NO_NODE;
2075 return kobj_to_node_hstate(kobj, nidp);
2078 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2079 struct kobj_attribute *attr, char *buf)
2082 unsigned long nr_huge_pages;
2085 h = kobj_to_hstate(kobj, &nid);
2086 if (nid == NUMA_NO_NODE)
2087 nr_huge_pages = h->nr_huge_pages;
2089 nr_huge_pages = h->nr_huge_pages_node[nid];
2091 return sprintf(buf, "%lu\n", nr_huge_pages);
2094 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2095 struct hstate *h, int nid,
2096 unsigned long count, size_t len)
2099 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2101 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2106 if (nid == NUMA_NO_NODE) {
2108 * global hstate attribute
2110 if (!(obey_mempolicy &&
2111 init_nodemask_of_mempolicy(nodes_allowed))) {
2112 NODEMASK_FREE(nodes_allowed);
2113 nodes_allowed = &node_states[N_MEMORY];
2115 } else if (nodes_allowed) {
2117 * per node hstate attribute: adjust count to global,
2118 * but restrict alloc/free to the specified node.
2120 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2121 init_nodemask_of_node(nodes_allowed, nid);
2123 nodes_allowed = &node_states[N_MEMORY];
2125 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2127 if (nodes_allowed != &node_states[N_MEMORY])
2128 NODEMASK_FREE(nodes_allowed);
2132 NODEMASK_FREE(nodes_allowed);
2136 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2137 struct kobject *kobj, const char *buf,
2141 unsigned long count;
2145 err = kstrtoul(buf, 10, &count);
2149 h = kobj_to_hstate(kobj, &nid);
2150 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2153 static ssize_t nr_hugepages_show(struct kobject *kobj,
2154 struct kobj_attribute *attr, char *buf)
2156 return nr_hugepages_show_common(kobj, attr, buf);
2159 static ssize_t nr_hugepages_store(struct kobject *kobj,
2160 struct kobj_attribute *attr, const char *buf, size_t len)
2162 return nr_hugepages_store_common(false, kobj, buf, len);
2164 HSTATE_ATTR(nr_hugepages);
2169 * hstate attribute for optionally mempolicy-based constraint on persistent
2170 * huge page alloc/free.
2172 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2173 struct kobj_attribute *attr, char *buf)
2175 return nr_hugepages_show_common(kobj, attr, buf);
2178 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2179 struct kobj_attribute *attr, const char *buf, size_t len)
2181 return nr_hugepages_store_common(true, kobj, buf, len);
2183 HSTATE_ATTR(nr_hugepages_mempolicy);
2187 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2188 struct kobj_attribute *attr, char *buf)
2190 struct hstate *h = kobj_to_hstate(kobj, NULL);
2191 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2194 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2195 struct kobj_attribute *attr, const char *buf, size_t count)
2198 unsigned long input;
2199 struct hstate *h = kobj_to_hstate(kobj, NULL);
2201 if (hstate_is_gigantic(h))
2204 err = kstrtoul(buf, 10, &input);
2208 spin_lock(&hugetlb_lock);
2209 h->nr_overcommit_huge_pages = input;
2210 spin_unlock(&hugetlb_lock);
2214 HSTATE_ATTR(nr_overcommit_hugepages);
2216 static ssize_t free_hugepages_show(struct kobject *kobj,
2217 struct kobj_attribute *attr, char *buf)
2220 unsigned long free_huge_pages;
2223 h = kobj_to_hstate(kobj, &nid);
2224 if (nid == NUMA_NO_NODE)
2225 free_huge_pages = h->free_huge_pages;
2227 free_huge_pages = h->free_huge_pages_node[nid];
2229 return sprintf(buf, "%lu\n", free_huge_pages);
2231 HSTATE_ATTR_RO(free_hugepages);
2233 static ssize_t resv_hugepages_show(struct kobject *kobj,
2234 struct kobj_attribute *attr, char *buf)
2236 struct hstate *h = kobj_to_hstate(kobj, NULL);
2237 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2239 HSTATE_ATTR_RO(resv_hugepages);
2241 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2242 struct kobj_attribute *attr, char *buf)
2245 unsigned long surplus_huge_pages;
2248 h = kobj_to_hstate(kobj, &nid);
2249 if (nid == NUMA_NO_NODE)
2250 surplus_huge_pages = h->surplus_huge_pages;
2252 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2254 return sprintf(buf, "%lu\n", surplus_huge_pages);
2256 HSTATE_ATTR_RO(surplus_hugepages);
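/*
 * Illustrative usage: the three read-only attributes above expose pool
 * state for monitoring, e.g.:
 *
 *   cat /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *   cat /sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *   cat /sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */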
2258 static struct attribute *hstate_attrs[] = {
2259 &nr_hugepages_attr.attr,
2260 &nr_overcommit_hugepages_attr.attr,
2261 &free_hugepages_attr.attr,
2262 &resv_hugepages_attr.attr,
2263 &surplus_hugepages_attr.attr,
2265 &nr_hugepages_mempolicy_attr.attr,
2270 static struct attribute_group hstate_attr_group = {
2271 .attrs = hstate_attrs,
2274 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2275 struct kobject **hstate_kobjs,
2276 struct attribute_group *hstate_attr_group)
2279 int hi = hstate_index(h);
2281 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2282 if (!hstate_kobjs[hi])
2285 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2287 kobject_put(hstate_kobjs[hi]);
2292 static void __init hugetlb_sysfs_init(void)
2297 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2298 if (!hugepages_kobj)
2301 for_each_hstate(h) {
2302 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2303 hstate_kobjs, &hstate_attr_group);
2305 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2312 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2313 * with node devices in node_devices[] using a parallel array. The array
2314 * index of a node device or node_hstate entry == the node id.
2315 * This is here to avoid any static dependency of the node device driver, in
2316 * the base kernel, on the hugetlb module.
2318 struct node_hstate {
2319 struct kobject *hugepages_kobj;
2320 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2322 struct node_hstate node_hstates[MAX_NUMNODES];
2325 * A subset of global hstate attributes for node devices
2327 static struct attribute *per_node_hstate_attrs[] = {
2328 &nr_hugepages_attr.attr,
2329 &free_hugepages_attr.attr,
2330 &surplus_hugepages_attr.attr,
2334 static struct attribute_group per_node_hstate_attr_group = {
2335 .attrs = per_node_hstate_attrs,
2339 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2340 * Returns node id via non-NULL nidp.
2342 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2346 for (nid = 0; nid < nr_node_ids; nid++) {
2347 struct node_hstate *nhs = &node_hstates[nid];
2349 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2350 if (nhs->hstate_kobjs[i] == kobj) {
2362 * Unregister hstate attributes from a single node device.
2363 * No-op if no hstate attributes attached.
2365 static void hugetlb_unregister_node(struct node *node)
2368 struct node_hstate *nhs = &node_hstates[node->dev.id];
2370 if (!nhs->hugepages_kobj)
2371 return; /* no hstate attributes */
2373 for_each_hstate(h) {
2374 int idx = hstate_index(h);
2375 if (nhs->hstate_kobjs[idx]) {
2376 kobject_put(nhs->hstate_kobjs[idx]);
2377 nhs->hstate_kobjs[idx] = NULL;
2381 kobject_put(nhs->hugepages_kobj);
2382 nhs->hugepages_kobj = NULL;
2386 * hugetlb module exit: unregister hstate attributes from node devices
2389 static void hugetlb_unregister_all_nodes(void)
2394 * disable node device registrations.
2396 register_hugetlbfs_with_node(NULL, NULL);
2399 * remove hstate attributes from any nodes that have them.
2401 for (nid = 0; nid < nr_node_ids; nid++)
2402 hugetlb_unregister_node(node_devices[nid]);
2406 * Register hstate attributes for a single node device.
2407 * No-op if attributes already registered.
2409 static void hugetlb_register_node(struct node *node)
2412 struct node_hstate *nhs = &node_hstates[node->dev.id];
2415 if (nhs->hugepages_kobj)
2416 return; /* already allocated */
2418 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2420 if (!nhs->hugepages_kobj)
2423 for_each_hstate(h) {
2424 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2426 &per_node_hstate_attr_group);
2428 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2429 h->name, node->dev.id);
2430 hugetlb_unregister_node(node);
2437 * hugetlb init time: register hstate attributes for all registered node
2438 * devices of nodes that have memory. All on-line nodes should have
2439 * registered their associated device by this time.
2441 static void __init hugetlb_register_all_nodes(void)
2445 for_each_node_state(nid, N_MEMORY) {
2446 struct node *node = node_devices[nid];
2447 if (node->dev.id == nid)
2448 hugetlb_register_node(node);
2452 * Let the node device driver know we're here so it can
2453 * [un]register hstate attributes on node hotplug.
2455 register_hugetlbfs_with_node(hugetlb_register_node,
2456 hugetlb_unregister_node);
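/*
 * Illustrative usage: once registered, each memory node exposes the
 * per-node subset of attributes, e.g. for node 0:
 *
 *   echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which reaches __nr_hugepages_store_common() with nid == 0, so the
 * adjustment is restricted to that node's pool.
 */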
2458 #else /* !CONFIG_NUMA */
2460 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2468 static void hugetlb_unregister_all_nodes(void) { }
2470 static void hugetlb_register_all_nodes(void) { }
2474 static void __exit hugetlb_exit(void)
2478 hugetlb_unregister_all_nodes();
2480 for_each_hstate(h) {
2481 kobject_put(hstate_kobjs[hstate_index(h)]);
2484 kobject_put(hugepages_kobj);
2485 kfree(hugetlb_fault_mutex_table);
2487 module_exit(hugetlb_exit);
2489 static int __init hugetlb_init(void)
2493 if (!hugepages_supported())
2496 if (!size_to_hstate(default_hstate_size)) {
2497 default_hstate_size = HPAGE_SIZE;
2498 if (!size_to_hstate(default_hstate_size))
2499 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2501 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2502 if (default_hstate_max_huge_pages)
2503 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2505 hugetlb_init_hstates();
2506 gather_bootmem_prealloc();
2509 hugetlb_sysfs_init();
2510 hugetlb_register_all_nodes();
2511 hugetlb_cgroup_file_init();
2514 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2516 num_fault_mutexes = 1;
2518 hugetlb_fault_mutex_table =
2519 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2520 BUG_ON(!hugetlb_fault_mutex_table);
2522 for (i = 0; i < num_fault_mutexes; i++)
2523 mutex_init(&hugetlb_fault_mutex_table[i]);
2526 module_init(hugetlb_init);
2528 /* Should be called on processing a hugepagesz=... option */
2529 void __init hugetlb_add_hstate(unsigned order)
2534 if (size_to_hstate(PAGE_SIZE << order)) {
2535 pr_warning("hugepagesz= specified twice, ignoring\n");
2538 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2540 h = &hstates[hugetlb_max_hstate++];
2542 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2543 h->nr_huge_pages = 0;
2544 h->free_huge_pages = 0;
2545 for (i = 0; i < MAX_NUMNODES; ++i)
2546 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2547 INIT_LIST_HEAD(&h->hugepage_activelist);
2548 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2549 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2550 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2551 huge_page_size(h)/1024);
2556 static int __init hugetlb_nrpages_setup(char *s)
2559 static unsigned long *last_mhp;
2562 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2563 * so this hugepages= parameter goes to the "default hstate".
2565 if (!hugetlb_max_hstate)
2566 mhp = &default_hstate_max_huge_pages;
2568 mhp = &parsed_hstate->max_huge_pages;
2570 if (mhp == last_mhp) {
2571 pr_warning("hugepages= specified twice without "
2572 "interleaving hugepagesz=, ignoring\n");
2576 if (sscanf(s, "%lu", mhp) <= 0)
2580 * Global state is always initialized later in hugetlb_init.
2581 * But we need to allocate >= MAX_ORDER hstates here early to still
2582 * use the bootmem allocator.
2584 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2585 hugetlb_hstate_alloc_pages(parsed_hstate);
2591 __setup("hugepages=", hugetlb_nrpages_setup);
2593 static int __init hugetlb_default_setup(char *s)
2595 default_hstate_size = memparse(s, &s);
2598 __setup("default_hugepagesz=", hugetlb_default_setup);
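/*
 * Illustrative boot command line (values are examples): hugepages= applies
 * to the most recently parsed hugepagesz=, or to the default hstate when no
 * hugepagesz= has been seen yet, e.g. on x86_64:
 *
 *   default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Pool pages of order >= MAX_ORDER are allocated from bootmem immediately
 * in hugetlb_nrpages_setup(); everything else waits for hugetlb_init().
 */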
2600 static unsigned int cpuset_mems_nr(unsigned int *array)
2603 unsigned int nr = 0;
2605 for_each_node_mask(node, cpuset_current_mems_allowed)
2611 #ifdef CONFIG_SYSCTL
2612 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2613 struct ctl_table *table, int write,
2614 void __user *buffer, size_t *length, loff_t *ppos)
2616 struct hstate *h = &default_hstate;
2617 unsigned long tmp = h->max_huge_pages;
2620 if (!hugepages_supported())
2624 table->maxlen = sizeof(unsigned long);
2625 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2630 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2631 NUMA_NO_NODE, tmp, *length);
2636 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2637 void __user *buffer, size_t *length, loff_t *ppos)
2640 return hugetlb_sysctl_handler_common(false, table, write,
2641 buffer, length, ppos);
2645 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2646 void __user *buffer, size_t *length, loff_t *ppos)
2648 return hugetlb_sysctl_handler_common(true, table, write,
2649 buffer, length, ppos);
2651 #endif /* CONFIG_NUMA */
2653 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2654 void __user *buffer,
2655 size_t *length, loff_t *ppos)
2657 struct hstate *h = &default_hstate;
2661 if (!hugepages_supported())
2664 tmp = h->nr_overcommit_huge_pages;
2666 if (write && hstate_is_gigantic(h))
2670 table->maxlen = sizeof(unsigned long);
2671 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2676 spin_lock(&hugetlb_lock);
2677 h->nr_overcommit_huge_pages = tmp;
2678 spin_unlock(&hugetlb_lock);
2684 #endif /* CONFIG_SYSCTL */
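/*
 * Illustrative sysctl usage for the handlers above (values are examples):
 *
 *   sysctl -w vm.nr_hugepages=256
 *   sysctl -w vm.nr_hugepages_mempolicy=256   (NUMA kernels only)
 *   sysctl -w vm.nr_overcommit_hugepages=32
 *
 * These operate on the default hstate only; per-size control goes through
 * the sysfs attributes above.
 */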
2686 void hugetlb_report_meminfo(struct seq_file *m)
2688 struct hstate *h = &default_hstate;
2689 if (!hugepages_supported())
2692 "HugePages_Total: %5lu\n"
2693 "HugePages_Free: %5lu\n"
2694 "HugePages_Rsvd: %5lu\n"
2695 "HugePages_Surp: %5lu\n"
2696 "Hugepagesize: %8lu kB\n",
2700 h->surplus_huge_pages,
2701 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
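/*
 * Sample /proc/meminfo output produced by the format above (numbers are
 * illustrative):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      384
 *   HugePages_Rsvd:       64
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */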
2704 int hugetlb_report_node_meminfo(int nid, char *buf)
2706 struct hstate *h = &default_hstate;
2707 if (!hugepages_supported())
2710 "Node %d HugePages_Total: %5u\n"
2711 "Node %d HugePages_Free: %5u\n"
2712 "Node %d HugePages_Surp: %5u\n",
2713 nid, h->nr_huge_pages_node[nid],
2714 nid, h->free_huge_pages_node[nid],
2715 nid, h->surplus_huge_pages_node[nid]);
2718 void hugetlb_show_meminfo(void)
2723 if (!hugepages_supported())
2726 for_each_node_state(nid, N_MEMORY)
2728 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2730 h->nr_huge_pages_node[nid],
2731 h->free_huge_pages_node[nid],
2732 h->surplus_huge_pages_node[nid],
2733 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2736 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2737 unsigned long hugetlb_total_pages(void)
2740 unsigned long nr_total_pages = 0;
2743 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2744 return nr_total_pages;
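/*
 * Worked example: with 4 huge pages of 2 MB on a system with a 4 KB
 * PAGE_SIZE, pages_per_huge_page() is 512, so hugetlb_total_pages()
 * returns 4 * 512 = 2048 base pages.
 */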
2747 static int hugetlb_acct_memory(struct hstate *h, long delta)
2751 spin_lock(&hugetlb_lock);
2753 * When cpusets are configured, they break the strict hugetlb page
2754 * reservation as the accounting is done on a global variable. Such
2755 * a reservation is meaningless in the presence of cpusets because it
2756 * is not checked against page availability for the current cpuset.
2757 * An application can still be OOM-killed by the kernel if there are
2758 * no free hugetlb pages in the cpuset that the task belongs to.
2759 * Enforcing strict accounting with cpusets is almost impossible
2760 * (or too ugly) because cpusets are too fluid: tasks and memory
2761 * nodes can be moved between cpusets dynamically.
2763 * Changing the semantics of shared hugetlb mappings under cpusets is
2764 * undesirable. However, in order to preserve some of the semantics,
2765 * we fall back to checking current free page availability as a
2766 * best effort, hopefully minimizing the impact of cpusets'
2767 * changed semantics.
2770 if (gather_surplus_pages(h, delta) < 0)
2773 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2774 return_unused_surplus_pages(h, delta);
2781 return_unused_surplus_pages(h, (unsigned long) -delta);
2784 spin_unlock(&hugetlb_lock);
2788 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2790 struct resv_map *resv = vma_resv_map(vma);
2793 * This new VMA should share its sibling's reservation map if present.
2794 * The VMA will only ever have a valid reservation map pointer where
2795 * it is being copied for another still existing VMA. As that VMA
2796 * has a reference to the reservation map it cannot disappear until
2797 * after this open call completes. It is therefore safe to take a
2798 * new reference here without additional locking.
2800 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2801 kref_get(&resv->refs);
2804 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2806 struct hstate *h = hstate_vma(vma);
2807 struct resv_map *resv = vma_resv_map(vma);
2808 struct hugepage_subpool *spool = subpool_vma(vma);
2809 unsigned long reserve, start, end;
2812 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2815 start = vma_hugecache_offset(h, vma, vma->vm_start);
2816 end = vma_hugecache_offset(h, vma, vma->vm_end);
2818 reserve = (end - start) - region_count(resv, start, end);
2820 kref_put(&resv->refs, resv_map_release);
2824 * Decrement reserve counts. The global reserve count may be
2825 * adjusted if the subpool has a minimum size.
2827 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2828 hugetlb_acct_memory(h, -gbl_reserve);
2833 * We cannot handle pagefaults against hugetlb pages at all. They cause
2834 * handle_mm_fault() to try to instantiate regular-sized pages in the
2835 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2838 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2844 const struct vm_operations_struct hugetlb_vm_ops = {
2845 .fault = hugetlb_vm_op_fault,
2846 .open = hugetlb_vm_op_open,
2847 .close = hugetlb_vm_op_close,
2850 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2856 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2857 vma->vm_page_prot)));
2859 entry = huge_pte_wrprotect(mk_huge_pte(page,
2860 vma->vm_page_prot));
2862 entry = pte_mkyoung(entry);
2863 entry = pte_mkhuge(entry);
2864 entry = arch_make_huge_pte(entry, vma, page, writable);
2869 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2870 unsigned long address, pte_t *ptep)
2874 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2875 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2876 update_mmu_cache(vma, address, ptep);
2879 static int is_hugetlb_entry_migration(pte_t pte)
2883 if (huge_pte_none(pte) || pte_present(pte))
2885 swp = pte_to_swp_entry(pte);
2886 if (non_swap_entry(swp) && is_migration_entry(swp))
2892 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2896 if (huge_pte_none(pte) || pte_present(pte))
2898 swp = pte_to_swp_entry(pte);
2899 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2905 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2906 struct vm_area_struct *vma)
2908 pte_t *src_pte, *dst_pte, entry;
2909 struct page *ptepage;
2912 struct hstate *h = hstate_vma(vma);
2913 unsigned long sz = huge_page_size(h);
2914 unsigned long mmun_start; /* For mmu_notifiers */
2915 unsigned long mmun_end; /* For mmu_notifiers */
2918 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2920 mmun_start = vma->vm_start;
2921 mmun_end = vma->vm_end;
2923 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2925 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2926 spinlock_t *src_ptl, *dst_ptl;
2927 src_pte = huge_pte_offset(src, addr);
2930 dst_pte = huge_pte_alloc(dst, addr, sz);
2936 /* If the pagetables are shared don't copy or take references */
2937 if (dst_pte == src_pte)
2940 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2941 src_ptl = huge_pte_lockptr(h, src, src_pte);
2942 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2943 entry = huge_ptep_get(src_pte);
2944 if (huge_pte_none(entry)) { /* skip none entry */
2946 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2947 is_hugetlb_entry_hwpoisoned(entry))) {
2948 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2950 if (is_write_migration_entry(swp_entry) && cow) {
2952 * COW mappings require pages in both
2953 * parent and child to be marked read-only.
2955 make_migration_entry_read(&swp_entry);
2956 entry = swp_entry_to_pte(swp_entry);
2957 set_huge_pte_at(src, addr, src_pte, entry);
2959 set_huge_pte_at(dst, addr, dst_pte, entry);
2962 huge_ptep_set_wrprotect(src, addr, src_pte);
2963 mmu_notifier_invalidate_range(src, mmun_start,
2966 entry = huge_ptep_get(src_pte);
2967 ptepage = pte_page(entry);
2969 page_dup_rmap(ptepage);
2970 set_huge_pte_at(dst, addr, dst_pte, entry);
2972 spin_unlock(src_ptl);
2973 spin_unlock(dst_ptl);
2977 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2982 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2983 unsigned long start, unsigned long end,
2984 struct page *ref_page)
2986 int force_flush = 0;
2987 struct mm_struct *mm = vma->vm_mm;
2988 unsigned long address;
2993 struct hstate *h = hstate_vma(vma);
2994 unsigned long sz = huge_page_size(h);
2995 const unsigned long mmun_start = start; /* For mmu_notifiers */
2996 const unsigned long mmun_end = end; /* For mmu_notifiers */
2998 WARN_ON(!is_vm_hugetlb_page(vma));
2999 BUG_ON(start & ~huge_page_mask(h));
3000 BUG_ON(end & ~huge_page_mask(h));
3002 tlb_start_vma(tlb, vma);
3003 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3006 for (; address < end; address += sz) {
3007 ptep = huge_pte_offset(mm, address);
3011 ptl = huge_pte_lock(h, mm, ptep);
3012 if (huge_pmd_unshare(mm, &address, ptep))
3015 pte = huge_ptep_get(ptep);
3016 if (huge_pte_none(pte))
3020 * A migrating or HWPoisoned hugepage is already
3021 * unmapped and its refcount has been dropped, so just clear the pte here.
3023 if (unlikely(!pte_present(pte))) {
3024 huge_pte_clear(mm, address, ptep);
3028 page = pte_page(pte);
3030 * If a reference page is supplied, it is because a specific
3031 * page is being unmapped, not a range. Ensure the page we
3032 * are about to unmap is the actual page of interest.
3035 if (page != ref_page)
3039 * Mark the VMA as having unmapped its page so that
3040 * future faults in this VMA will fail rather than
3041 * looking like data was lost
3043 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3046 pte = huge_ptep_get_and_clear(mm, address, ptep);
3047 tlb_remove_tlb_entry(tlb, ptep, address);
3048 if (huge_pte_dirty(pte))
3049 set_page_dirty(page);
3051 page_remove_rmap(page);
3052 force_flush = !__tlb_remove_page(tlb, page);
3058 /* Bail out after unmapping reference page if supplied */
3067 * mmu_gather ran out of room to batch pages; we break out of
3068 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3069 * and page-free while holding it.
3074 if (address < end && !ref_page)
3077 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3078 tlb_end_vma(tlb, vma);
3081 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3082 struct vm_area_struct *vma, unsigned long start,
3083 unsigned long end, struct page *ref_page)
3085 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3088 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3089 * test will fail on a vma being torn down, and not grab a page table
3090 * on its way out. We're lucky that the flag has such an appropriate
3091 * name, and can in fact be safely cleared here. We could clear it
3092 * before the __unmap_hugepage_range above, but all that's necessary
3093 * is to clear it before releasing the i_mmap_rwsem. This works
3094 * because in the context this is called, the VMA is about to be
3095 * destroyed and the i_mmap_rwsem is held.
3097 vma->vm_flags &= ~VM_MAYSHARE;
3100 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3101 unsigned long end, struct page *ref_page)
3103 struct mm_struct *mm;
3104 struct mmu_gather tlb;
3108 tlb_gather_mmu(&tlb, mm, start, end);
3109 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3110 tlb_finish_mmu(&tlb, start, end);
3114 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3115 * mapping it owns the reserve page for. The intention is to unmap the page
3116 * from other VMAs and let the children be SIGKILLed if they are faulting the
3119 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3120 struct page *page, unsigned long address)
3122 struct hstate *h = hstate_vma(vma);
3123 struct vm_area_struct *iter_vma;
3124 struct address_space *mapping;
3128 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3129 * from page cache lookup which is in HPAGE_SIZE units.
3131 address = address & huge_page_mask(h);
3132 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3134 mapping = file_inode(vma->vm_file)->i_mapping;
3137 * Take the mapping lock for the duration of the table walk. As
3138 * this mapping should be shared between all the VMAs,
3139 * __unmap_hugepage_range() is called as the lock is already held
3141 i_mmap_lock_write(mapping);
3142 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3143 /* Do not unmap the current VMA */
3144 if (iter_vma == vma)
3148 * Unmap the page from other VMAs without their own reserves.
3149 * They get marked to be SIGKILLed if they fault in these
3150 * areas. This is because a future no-page fault on this VMA
3151 * could insert a zeroed page instead of the data existing
3152 * from the time of fork. This would look like data corruption
3154 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3155 unmap_hugepage_range(iter_vma, address,
3156 address + huge_page_size(h), page);
3158 i_mmap_unlock_write(mapping);
3162 * hugetlb_cow() should be called with the page lock of the original hugepage held.
3163 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3164 * cannot race with other handlers or page migration.
3165 * Keep the pte_same checks anyway to make transition from the mutex easier.
3167 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3168 unsigned long address, pte_t *ptep, pte_t pte,
3169 struct page *pagecache_page, spinlock_t *ptl)
3171 struct hstate *h = hstate_vma(vma);
3172 struct page *old_page, *new_page;
3173 int ret = 0, outside_reserve = 0;
3174 unsigned long mmun_start; /* For mmu_notifiers */
3175 unsigned long mmun_end; /* For mmu_notifiers */
3177 old_page = pte_page(pte);
3180 /* If no-one else is actually using this page, avoid the copy
3181 * and just make the page writable */
3182 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3183 page_move_anon_rmap(old_page, vma, address);
3184 set_huge_ptep_writable(vma, address, ptep);
3189 * If the process that created a MAP_PRIVATE mapping is about to
3190 * perform a COW due to a shared page count, attempt to satisfy
3191 * the allocation without using the existing reserves. The pagecache
3192 * page is used to determine if the reserve at this address was
3193 * consumed or not. If reserves were used, a partial faulted mapping
3194 * at the time of fork() could consume its reserves on COW instead
3195 * of the full address range.
3197 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3198 old_page != pagecache_page)
3199 outside_reserve = 1;
3201 page_cache_get(old_page);
3204 * Drop page table lock as buddy allocator may be called. It will
3205 * be acquired again before returning to the caller, as expected.
3208 new_page = alloc_huge_page(vma, address, outside_reserve);
3210 if (IS_ERR(new_page)) {
3212 * If a process owning a MAP_PRIVATE mapping fails to COW,
3213 * it is due to references held by a child and an insufficient
3214 * huge page pool. To guarantee the original mapper's
3215 * reliability, unmap the page from child processes. The child
3216 * may get SIGKILLed if it later faults.
3218 if (outside_reserve) {
3219 page_cache_release(old_page);
3220 BUG_ON(huge_pte_none(pte));
3221 unmap_ref_private(mm, vma, old_page, address);
3222 BUG_ON(huge_pte_none(pte));
3224 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3226 pte_same(huge_ptep_get(ptep), pte)))
3227 goto retry_avoidcopy;
3229 * race occurs while re-acquiring page table
3230 * lock, and our job is done.
3235 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3236 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3237 goto out_release_old;
3241 * When the original hugepage is a shared one, it does not have
3242 * anon_vma prepared.
3244 if (unlikely(anon_vma_prepare(vma))) {
3246 goto out_release_all;
3249 copy_user_huge_page(new_page, old_page, address, vma,
3250 pages_per_huge_page(h));
3251 __SetPageUptodate(new_page);
3252 set_page_huge_active(new_page);
3254 mmun_start = address & huge_page_mask(h);
3255 mmun_end = mmun_start + huge_page_size(h);
3256 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3259 * Retake the page table lock to check for racing updates
3260 * before the page tables are altered
3263 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3264 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3265 ClearPagePrivate(new_page);
3268 huge_ptep_clear_flush(vma, address, ptep);
3269 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3270 set_huge_pte_at(mm, address, ptep,
3271 make_huge_pte(vma, new_page, 1));
3272 page_remove_rmap(old_page);
3273 hugepage_add_new_anon_rmap(new_page, vma, address);
3274 /* Make the old page be freed below */
3275 new_page = old_page;
3278 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3280 page_cache_release(new_page);
3282 page_cache_release(old_page);
3284 spin_lock(ptl); /* Caller expects lock to be held */
3288 /* Return the pagecache page at a given address within a VMA */
3289 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3290 struct vm_area_struct *vma, unsigned long address)
3292 struct address_space *mapping;
3295 mapping = vma->vm_file->f_mapping;
3296 idx = vma_hugecache_offset(h, vma, address);
3298 return find_lock_page(mapping, idx);
3302 * Return whether there is a pagecache page to back the given address within the VMA.
3303 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3305 static bool hugetlbfs_pagecache_present(struct hstate *h,
3306 struct vm_area_struct *vma, unsigned long address)
3308 struct address_space *mapping;
3312 mapping = vma->vm_file->f_mapping;
3313 idx = vma_hugecache_offset(h, vma, address);
3315 page = find_get_page(mapping, idx);
3318 return page != NULL;
3321 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3322 struct address_space *mapping, pgoff_t idx,
3323 unsigned long address, pte_t *ptep, unsigned int flags)
3325 struct hstate *h = hstate_vma(vma);
3326 int ret = VM_FAULT_SIGBUS;
3334 * Currently, we are forced to kill the process in the event the
3335 * original mapper has unmapped pages from the child due to a failed
3336 * COW. Warn that such a situation has occurred as it may not be obvious
3338 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3339 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3345 * Use page lock to guard against racing truncation
3346 * before we get page_table_lock.
3349 page = find_lock_page(mapping, idx);
3351 size = i_size_read(mapping->host) >> huge_page_shift(h);
3354 page = alloc_huge_page(vma, address, 0);
3356 ret = PTR_ERR(page);
3360 ret = VM_FAULT_SIGBUS;
3363 clear_huge_page(page, address, pages_per_huge_page(h));
3364 __SetPageUptodate(page);
3365 set_page_huge_active(page);
3367 if (vma->vm_flags & VM_MAYSHARE) {
3369 struct inode *inode = mapping->host;
3371 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3378 ClearPagePrivate(page);
3380 spin_lock(&inode->i_lock);
3381 inode->i_blocks += blocks_per_huge_page(h);
3382 spin_unlock(&inode->i_lock);
3385 if (unlikely(anon_vma_prepare(vma))) {
3387 goto backout_unlocked;
3393 * If a memory error occurs between mmap() and fault, some processes
3394 * don't have a hwpoisoned swap entry for the errored virtual address,
3395 * so we need to block the hugepage fault with a PG_hwpoison bit check.
3397 if (unlikely(PageHWPoison(page))) {
3398 ret = VM_FAULT_HWPOISON |
3399 VM_FAULT_SET_HINDEX(hstate_index(h));
3400 goto backout_unlocked;
3405 * If we are going to COW a private mapping later, we examine the
3406 * pending reservations for this page now. This will ensure that
3407 * any allocations necessary to record that reservation occur outside
3410 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3411 if (vma_needs_reservation(h, vma, address) < 0) {
3413 goto backout_unlocked;
3415 /* Just decrements count, does not deallocate */
3416 vma_end_reservation(h, vma, address);
3419 ptl = huge_pte_lockptr(h, mm, ptep);
3421 size = i_size_read(mapping->host) >> huge_page_shift(h);
3426 if (!huge_pte_none(huge_ptep_get(ptep)))
3430 ClearPagePrivate(page);
3431 hugepage_add_new_anon_rmap(page, vma, address);
3433 page_dup_rmap(page);
3434 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3435 && (vma->vm_flags & VM_SHARED)));
3436 set_huge_pte_at(mm, address, ptep, new_pte);
3438 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3439 /* Optimization, do the COW without a second fault */
3440 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3457 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3458 struct vm_area_struct *vma,
3459 struct address_space *mapping,
3460 pgoff_t idx, unsigned long address)
3462 unsigned long key[2];
3465 if (vma->vm_flags & VM_SHARED) {
3466 key[0] = (unsigned long) mapping;
3469 key[0] = (unsigned long) mm;
3470 key[1] = address >> huge_page_shift(h);
3473 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3475 return hash & (num_fault_mutexes - 1);
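/*
 * Worked example (illustrative): two threads faulting the same index of
 * the same shared mapping build the same key pair, hash identically, and
 * contend on the same mutex, serializing instantiation; faults on other
 * indices usually spread across the table. num_fault_mutexes is a power
 * of two, so the mask above is a cheap modulo.
 */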
3479 * For uniprocessor systems we always use a single mutex, so just
3480 * return 0 and avoid the hashing overhead.
3482 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3483 struct vm_area_struct *vma,
3484 struct address_space *mapping,
3485 pgoff_t idx, unsigned long address)
3491 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3492 unsigned long address, unsigned int flags)
3499 struct page *page = NULL;
3500 struct page *pagecache_page = NULL;
3501 struct hstate *h = hstate_vma(vma);
3502 struct address_space *mapping;
3503 int need_wait_lock = 0;
3505 address &= huge_page_mask(h);
3507 ptep = huge_pte_offset(mm, address);
3509 entry = huge_ptep_get(ptep);
3510 if (unlikely(is_hugetlb_entry_migration(entry))) {
3511 migration_entry_wait_huge(vma, mm, ptep);
3513 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3514 return VM_FAULT_HWPOISON_LARGE |
3515 VM_FAULT_SET_HINDEX(hstate_index(h));
3518 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3520 return VM_FAULT_OOM;
3522 mapping = vma->vm_file->f_mapping;
3523 idx = vma_hugecache_offset(h, vma, address);
3526 * Serialize hugepage allocation and instantiation, so that we don't
3527 * get spurious allocation failures if two CPUs race to instantiate
3528 * the same page in the page cache.
3530 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3531 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3533 entry = huge_ptep_get(ptep);
3534 if (huge_pte_none(entry)) {
3535 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3542 * entry could be a migration/hwpoison entry at this point, so this
3543 * check prevents the kernel from proceeding below on the assumption
3544 * that we have an active hugepage in the pagecache. This goto expects the 2nd page fault,
3545 * where the is_hugetlb_entry_(migration|hwpoisoned) check will properly
3548 if (!pte_present(entry))
3552 * If we are going to COW the mapping later, we examine the pending
3553 * reservations for this page now. This will ensure that any
3554 * allocations necessary to record that reservation occur outside the
3555 * spinlock. For private mappings, we also lookup the pagecache
3556 * page now as it is used to determine if a reservation has been
3559 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3560 if (vma_needs_reservation(h, vma, address) < 0) {
3564 /* Just decrements count, does not deallocate */
3565 vma_end_reservation(h, vma, address);
3567 if (!(vma->vm_flags & VM_MAYSHARE))
3568 pagecache_page = hugetlbfs_pagecache_page(h,
3572 ptl = huge_pte_lock(h, mm, ptep);
3574 /* Check for a racing update before calling hugetlb_cow */
3575 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3579 * hugetlb_cow() requires page locks of pte_page(entry) and
3580 * pagecache_page, so here we need to take the former one
3581 * when page != pagecache_page or !pagecache_page.
3583 page = pte_page(entry);
3584 if (page != pagecache_page)
3585 if (!trylock_page(page)) {
3592 if (flags & FAULT_FLAG_WRITE) {
3593 if (!huge_pte_write(entry)) {
3594 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3595 pagecache_page, ptl);
3598 entry = huge_pte_mkdirty(entry);
3600 entry = pte_mkyoung(entry);
3601 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3602 flags & FAULT_FLAG_WRITE))
3603 update_mmu_cache(vma, address, ptep);
3605 if (page != pagecache_page)
3611 if (pagecache_page) {
3612 unlock_page(pagecache_page);
3613 put_page(pagecache_page);
3616 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3618 * Generally it's safe to hold a refcount while waiting for the page lock. But
3619 * here we just wait to defer the next page fault and avoid a busy loop;
3620 * the page is not used after being unlocked before we return from the current
3621 * page fault. So we are safe from accessing a freed page, even if we wait
3622 * here without taking a refcount.
3625 wait_on_page_locked(page);
3629 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3630 struct page **pages, struct vm_area_struct **vmas,
3631 unsigned long *position, unsigned long *nr_pages,
3632 long i, unsigned int flags)
3634 unsigned long pfn_offset;
3635 unsigned long vaddr = *position;
3636 unsigned long remainder = *nr_pages;
3637 struct hstate *h = hstate_vma(vma);
3639 while (vaddr < vma->vm_end && remainder) {
3641 spinlock_t *ptl = NULL;
3646 * If we have a pending SIGKILL, don't keep faulting pages and
3647 * potentially allocating memory.
3649 if (unlikely(fatal_signal_pending(current))) {
3655 * Some archs (sparc64, sh*) have multiple pte_t entries for
3656 * each hugepage. We have to make sure we get the
3657 * first, for the page indexing below to work.
3659 * Note that page table lock is not held when pte is null.
3661 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3663 ptl = huge_pte_lock(h, mm, pte);
3664 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3667 * When coredumping, it suits get_dump_page if we just return
3668 * an error where there's an empty slot with no huge pagecache
3669 * to back it. This way, we avoid allocating a hugepage, and
3670 * the sparse dumpfile avoids allocating disk blocks, but its
3671 * huge holes still show up with zeroes where they need to be.
3673 if (absent && (flags & FOLL_DUMP) &&
3674 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3682 * We need to call hugetlb_fault for both hugepages under migration
3683 * (in which case hugetlb_fault waits for the migration) and
3684 * hwpoisoned hugepages (in which case we need to prevent the
3685 * caller from accessing them). In order to do this, we use
3686 * is_swap_pte here instead of is_hugetlb_entry_migration and
3687 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3688 * both cases, and because we can't follow correct pages
3689 * directly from any kind of swap entry.
3691 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3692 ((flags & FOLL_WRITE) &&
3693 !huge_pte_write(huge_ptep_get(pte)))) {
3698 ret = hugetlb_fault(mm, vma, vaddr,
3699 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3700 if (!(ret & VM_FAULT_ERROR))
3707 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3708 page = pte_page(huge_ptep_get(pte));
3711 pages[i] = mem_map_offset(page, pfn_offset);
3712 get_page_foll(pages[i]);
3722 if (vaddr < vma->vm_end && remainder &&
3723 pfn_offset < pages_per_huge_page(h)) {
3725 * We use pfn_offset to avoid touching the pageframes
3726 * of this compound page.
3732 *nr_pages = remainder;
3735 return i ? i : -EFAULT;
3738 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3739 unsigned long address, unsigned long end, pgprot_t newprot)
3741 struct mm_struct *mm = vma->vm_mm;
3742 unsigned long start = address;
3745 struct hstate *h = hstate_vma(vma);
3746 unsigned long pages = 0;
3748 BUG_ON(address >= end);
3749 flush_cache_range(vma, address, end);
3751 mmu_notifier_invalidate_range_start(mm, start, end);
3752 i_mmap_lock_write(vma->vm_file->f_mapping);
3753 for (; address < end; address += huge_page_size(h)) {
3755 ptep = huge_pte_offset(mm, address);
3758 ptl = huge_pte_lock(h, mm, ptep);
3759 if (huge_pmd_unshare(mm, &address, ptep)) {
3764 pte = huge_ptep_get(ptep);
3765 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3769 if (unlikely(is_hugetlb_entry_migration(pte))) {
3770 swp_entry_t entry = pte_to_swp_entry(pte);
3772 if (is_write_migration_entry(entry)) {
3775 make_migration_entry_read(&entry);
3776 newpte = swp_entry_to_pte(entry);
3777 set_huge_pte_at(mm, address, ptep, newpte);
3783 if (!huge_pte_none(pte)) {
3784 pte = huge_ptep_get_and_clear(mm, address, ptep);
3785 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3786 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3787 set_huge_pte_at(mm, address, ptep, pte);
3793 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3794 * may have cleared our pud entry and done put_page on the page table:
3795 * once we release i_mmap_rwsem, another task can do the final put_page
3796 * and that page table may then be reused and filled with junk.
3798 flush_tlb_range(vma, start, end);
3799 mmu_notifier_invalidate_range(mm, start, end);
3800 i_mmap_unlock_write(vma->vm_file->f_mapping);
3801 mmu_notifier_invalidate_range_end(mm, start, end);
3803 return pages << h->order;
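/*
 * Worked example: entries are counted in huge pages and the return value
 * is in base pages; for a 2 MB hstate (order 9), changing 3 huge PTEs
 * yields 3 << 9 = 1536 base pages.
 */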
3806 int hugetlb_reserve_pages(struct inode *inode,
3808 struct vm_area_struct *vma,
3809 vm_flags_t vm_flags)
3812 struct hstate *h = hstate_inode(inode);
3813 struct hugepage_subpool *spool = subpool_inode(inode);
3814 struct resv_map *resv_map;
3818 * Only apply hugepage reservation if asked. At fault time, an
3819 * attempt will be made for VM_NORESERVE to allocate a page
3820 * without using reserves
3822 if (vm_flags & VM_NORESERVE)
3826 * Shared mappings base their reservation on the number of pages that
3827 * are already allocated on behalf of the file. Private mappings need
3828 * to reserve the full area even if read-only as mprotect() may be
3829 * called to make the mapping read-write. Assume !vma is a shm mapping
3831 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3832 resv_map = inode_resv_map(inode);
3834 chg = region_chg(resv_map, from, to);
3837 resv_map = resv_map_alloc();
3843 set_vma_resv_map(vma, resv_map);
3844 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3853 * There must be enough pages in the subpool for the mapping. If
3854 * the subpool has a minimum size, there may be some global
3855 * reservations already in place (gbl_reserve).
3857 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3858 if (gbl_reserve < 0) {
3864 * Check enough hugepages are available for the reservation.
3865 * Hand the pages back to the subpool if there are not
3867 ret = hugetlb_acct_memory(h, gbl_reserve);
3869 /* put back original number of pages, chg */
3870 (void)hugepage_subpool_put_pages(spool, chg);
3875 * Account for the reservations made. Shared mappings record regions
3876 * that have reservations as they are shared by multiple VMAs.
3877 * When the last VMA disappears, the region map says how much
3878 * the reservation was and the page cache tells how much of
3879 * the reservation was consumed. Private mappings are per-VMA and
3880 * only the consumed reservations are tracked. When the VMA
3881 * disappears, the original reservation is the VMA size and the
3882 * consumed reservations are stored in the map. Hence, nothing
3883 * else has to be done for private mappings here
3885 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3886 long add = region_add(resv_map, from, to);
3888 if (unlikely(chg > add)) {
3890 * pages in this range were added to the reserve
3891 * map between region_chg and region_add. This
3892 * indicates a race with alloc_huge_page. Adjust
3893 * the subpool and reserve counts modified above
3894 * based on the difference.
3898 rsv_adjust = hugepage_subpool_put_pages(spool,
3900 hugetlb_acct_memory(h, -rsv_adjust);
3905 if (!vma || vma->vm_flags & VM_MAYSHARE)
3906 region_abort(resv_map, from, to);
3907 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3908 kref_put(&resv_map->refs, resv_map_release);
3912 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3914 struct hstate *h = hstate_inode(inode);
3915 struct resv_map *resv_map = inode_resv_map(inode);
3917 struct hugepage_subpool *spool = subpool_inode(inode);
3921 chg = region_del(resv_map, offset, LONG_MAX);
3922 spin_lock(&inode->i_lock);
3923 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3924 spin_unlock(&inode->i_lock);
3927 * If the subpool has a minimum size, the number of global
3928 * reservations to be released may be adjusted.
3930 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3931 hugetlb_acct_memory(h, -gbl_reserve);
3934 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3935 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3936 struct vm_area_struct *vma,
3937 unsigned long addr, pgoff_t idx)
3939 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3941 unsigned long sbase = saddr & PUD_MASK;
3942 unsigned long s_end = sbase + PUD_SIZE;
3944 /* Allow segments to share if only one is marked locked */
3945 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3946 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3949 * match the virtual addresses, permissions and the alignment of the page table page.
3952 if (pmd_index(addr) != pmd_index(saddr) ||
3953 vm_flags != svm_flags ||
3954 sbase < svma->vm_start || svma->vm_end < s_end)
3960 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3962 unsigned long base = addr & PUD_MASK;
3963 unsigned long end = base + PUD_SIZE;
3966 * check on proper vm_flags and page table alignment
3968 if (vma->vm_flags & VM_MAYSHARE &&
3969 vma->vm_start <= base && end <= vma->vm_end)
3975 * Search for a shareable pmd page for hugetlb. In any case this calls pmd_alloc()
3976 * and returns the corresponding pte. While this is not necessary for the
3977 * !shared pmd case because we can allocate the pmd later as well, it makes the
3978 * code much cleaner. pmd allocation is essential for the shared case because
3979 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3980 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3981 * bad pmd for sharing.
3983 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3985 struct vm_area_struct *vma = find_vma(mm, addr);
3986 struct address_space *mapping = vma->vm_file->f_mapping;
3987 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3989 struct vm_area_struct *svma;
3990 unsigned long saddr;
3995 if (!vma_shareable(vma, addr))
3996 return (pte_t *)pmd_alloc(mm, pud, addr);
3998 i_mmap_lock_write(mapping);
3999 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4003 saddr = page_table_shareable(svma, vma, addr, idx);
4005 spte = huge_pte_offset(svma->vm_mm, saddr);
4008 get_page(virt_to_page(spte));
4017 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4019 if (pud_none(*pud)) {
4020 pud_populate(mm, pud,
4021 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4023 put_page(virt_to_page(spte));
4028 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4029 i_mmap_unlock_write(mapping);
4034 * Unmap a huge page backed by a shared pte.
4036 * The hugetlb pte page is refcounted at the time of mapping. If the pte is
4037 * shared, as indicated by page_count > 1, unmapping is achieved by clearing
4038 * the pud and decrementing the refcount. If count == 1, the pte page is not shared.
4040 * Called with the page table lock held.
4042 * Returns: 1 - successfully unmapped a shared pte page
4043 *          0 - the underlying pte page is not shared, or it is the last user
4045 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4047 pgd_t *pgd = pgd_offset(mm, *addr);
4048 pud_t *pud = pud_offset(pgd, *addr);
4050 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4051 if (page_count(virt_to_page(ptep)) == 1)
4055 put_page(virt_to_page(ptep));
4057 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4060 #define want_pmd_share() (1)
4061 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4062 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4067 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4071 #define want_pmd_share() (0)
4072 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4074 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4075 pte_t *huge_pte_alloc(struct mm_struct *mm,
4076 unsigned long addr, unsigned long sz)
4082 pgd = pgd_offset(mm, addr);
4083 pud = pud_alloc(mm, pgd, addr);
4085 if (sz == PUD_SIZE) {
4088 BUG_ON(sz != PMD_SIZE);
4089 if (want_pmd_share() && pud_none(*pud))
4090 pte = huge_pmd_share(mm, addr, pud);
4092 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4095 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4100 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4106 pgd = pgd_offset(mm, addr);
4107 if (pgd_present(*pgd)) {
4108 pud = pud_offset(pgd, addr);
4109 if (pud_present(*pud)) {
4111 return (pte_t *)pud;
4112 pmd = pmd_offset(pud, addr);
4115 return (pte_t *) pmd;
4118 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4121 * These functions are overwritable if your architecture needs its own behavior.
4124 struct page * __weak
4125 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4128 return ERR_PTR(-EINVAL);
4131 struct page * __weak
4132 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4133 pmd_t *pmd, int flags)
4135 struct page *page = NULL;
4138 ptl = pmd_lockptr(mm, pmd);
4141 * make sure that the address range covered by this pmd is not
4142 * unmapped by other threads.
4144 if (!pmd_huge(*pmd))
4146 if (pmd_present(*pmd)) {
4147 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4148 if (flags & FOLL_GET)
4151 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4153 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4157 * hwpoisoned entry is treated as no_page_table in
4158 * follow_page_mask().
4166 struct page * __weak
4167 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4168 pud_t *pud, int flags)
4170 if (flags & FOLL_GET)
4173 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4176 #ifdef CONFIG_MEMORY_FAILURE
4179 * This function is called from memory failure code.
4180 * Assume the caller holds the page lock of the head page.
4182 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4184 struct hstate *h = page_hstate(hpage);
4185 int nid = page_to_nid(hpage);
4188 spin_lock(&hugetlb_lock);
4190 * Just checking !page_huge_active is not enough, because that could be
4191 * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4193 if (!page_huge_active(hpage) && !page_count(hpage)) {
4195 * A hwpoisoned hugepage isn't linked to the activelist or freelist,
4196 * but a dangling hpage->lru can trigger list-debug warnings
4197 * (this happens when we call unpoison_memory() on it),
4198 * so let it point to itself with list_del_init().
4200 list_del_init(&hpage->lru);
4201 set_page_refcounted(hpage);
4202 h->free_huge_pages--;
4203 h->free_huge_pages_node[nid]--;
4206 spin_unlock(&hugetlb_lock);
4211 bool isolate_huge_page(struct page *page, struct list_head *list)
4215 VM_BUG_ON_PAGE(!PageHead(page), page);
4216 spin_lock(&hugetlb_lock);
4217 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4221 clear_page_huge_active(page);
4222 list_move_tail(&page->lru, list);
4224 spin_unlock(&hugetlb_lock);
4228 void putback_active_hugepage(struct page *page)
4230 VM_BUG_ON_PAGE(!PageHead(page), page);
4231 spin_lock(&hugetlb_lock);
4232 set_page_huge_active(page);
4233 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4234 spin_unlock(&hugetlb_lock);