/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
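/*
 * Illustrative sketch (not part of this file): from userspace the policies
 * above are selected with the set_mempolicy(2)/mbind(2) syscalls, e.g. via
 * the raw syscall interface with a hand-built nodemask covering nodes 0
 * and 1:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	if (syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &mask,
 *		    8 * sizeof(mask)) < 0)
 *		perror("set_mempolicy");
 */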
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;
/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
        struct mempolicy *pol = p->mempolicy;
        int node;

        if (pol)
                return pol;

        node = numa_node_id();
        if (node != NUMA_NO_NODE) {
                pol = &preferred_node_policy[node];
                /* preferred_node_policy is not initialised early in boot */
                if (pol->mode)
                        return pol;
        }

        return &default_policy;
}
static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        /*
         * If read-side task has no lock to protect task->mempolicy, write-side
         * task will rebind the task->mempolicy in two steps. The first step is
         * setting all the newly-added nodes, and the second step is clearing
         * all the disallowed nodes. In this way, we can avoid finding no node
         * to alloc memory.
         * If we have a lock to protect task->mempolicy in read-side, we do
         * rebind work at once.
         *
         * step:
         *	MPOL_REBIND_ONCE  - do rebind work at once
         *	MPOL_REBIND_STEP1 - set all the newly-added nodes
         *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
         */
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
                        enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
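/*
 * Worked example (illustrative): rebinding an MPOL_BIND policy from
 * nodes {0,1} to nodes {2,3} without a read-side lock is done as
 * STEP1: pol->v.nodes = {0,1,2,3} (old | new, so it is never empty),
 * then STEP2: pol->v.nodes = {2,3} (the disallowed nodes are cleared).
 */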
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;

        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
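/*
 * Worked example (illustrative): for *orig = {0,1} relative to
 * *rel = {4,5,6} (weight 3), nodes_fold() wraps *orig into the low
 * three positions, still {0,1}, and nodes_onto() maps position i onto
 * the i-th set bit of *rel, so *ret = {4,5}.
 */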
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;	/* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;			/* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
        int ret;

        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
        if (pol == NULL)
                return 0;
        /* Check N_MEMORY */
        nodes_and(nsc->mask1,
                  cpuset_current_mems_allowed, node_states[N_MEMORY]);

        VM_BUG_ON(!nodes);
        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
                nodes = NULL;	/* explicit local allocation */
        else {
                if (pol->flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
                else
                        nodes_and(nsc->mask2, *nodes, nsc->mask1);

                if (mpol_store_user_nodemask(pol))
                        pol->w.user_nodemask = *nodes;
                else
                        pol->w.cpuset_mems_allowed =
                                                cpuset_current_mems_allowed;
        }

        if (nodes)
                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
        else
                ret = mpol_ops[pol->mode].create(pol, NULL);
        return ret;
}
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
        } else if (mode == MPOL_LOCAL) {
                if (!nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        return policy;
}
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
                                enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly-added nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
                                 enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                /*
                 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
                 * result
                 */
                if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
                        nodes_remap(tmp, pol->v.nodes,
                                        pol->w.cpuset_mems_allowed, *nodes);
                        pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
                } else if (step == MPOL_REBIND_STEP2) {
                        tmp = pol->w.cpuset_mems_allowed;
                        pol->w.cpuset_mems_allowed = *nodes;
                } else
                        BUG();
        }

        if (nodes_empty(tmp))
                tmp = *nodes;

        if (step == MPOL_REBIND_STEP1)
                nodes_or(pol->v.nodes, pol->v.nodes, tmp);
        else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
                pol->v.nodes = tmp;
        else
                BUG();

        if (!node_isset(current->il_next, tmp)) {
                current->il_next = next_node(current->il_next, tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = first_node(tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = numa_node_id();
        }
}
static void mpol_rebind_preferred(struct mempolicy *pol,
                                  const nodemask_t *nodes,
                                  enum mpol_rebind_step step)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If read-side task has no lock to protect task->mempolicy, write-side
 * task will rebind the task->mempolicy in two steps. The first step is
 * setting all the newly-added nodes, and the second step is clearing all
 * the disallowed nodes. In this way, we can avoid finding no node to
 * alloc memory.
 * If we have a lock to protect task->mempolicy in read-side, we do
 * rebind work at once.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly-added nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
                                enum mpol_rebind_step step)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;

        if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
                return;

        if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
                BUG();

        if (step == MPOL_REBIND_STEP1)
                pol->flags |= MPOL_F_REBINDING;
        else if (step == MPOL_REBIND_STEP2)
                pol->flags &= ~MPOL_F_REBINDING;
        else if (step >= MPOL_REBIND_NSTEP)
                BUG();

        mpol_ops[pol->mode].rebind(pol, newmask, step);
}
/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                        enum mpol_rebind_step step)
{
        mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
        up_write(&mm->mmap_sem);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pte_t *orig_pte;
        pte_t *pte;
        spinlock_t *ptl;

        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
                int nid;

                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * vm_normal_page() filters out zero pages, but there might
                 * still be PageReserved pages to skip, perhaps in a VDSO.
                 */
                if (PageReserved(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;

                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return addr != end;
}
static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
                pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
                void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;
        int nid;

        ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
        entry = huge_ptep_get((pte_t *)pmd);
        if (!pte_present(entry))
                goto unlock;
        page = pte_page(entry);
        nid = page_to_nid(page);
        if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                goto unlock;
        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
        if (flags & (MPOL_MF_MOVE_ALL) ||
            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
                isolate_huge_page(page, private);
unlock:
        spin_unlock(ptl);
#else
        BUG();
#endif
}
static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd))
                        continue;
                if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
                        queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
                                                flags, private);
                        continue;
                }
                split_huge_page_pmd(vma, addr, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;
                if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pmd++, addr = next, addr != end);
        return 0;
}
static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
                        continue;
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pgd++, addr = next, addr != end);
        return 0;
}
#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        int nr_updated;

        nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
        if (nr_updated)
                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

        return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued onto the pagelist,
 * which is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
{
        int err = 0;
        struct vm_area_struct *vma, *prev;

        vma = find_vma(mm, start);
        if (!vma)
                return -EFAULT;
        prev = NULL;
        for (; vma && vma->vm_start < end; vma = vma->vm_next) {
                unsigned long endvma = vma->vm_end;

                if (endvma > end)
                        endvma = end;
                if (vma->vm_start > start)
                        start = vma->vm_start;

                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return -EFAULT;
                        if (prev && prev->vm_end < vma->vm_start)
                                return -EFAULT;
                }

                if (flags & MPOL_MF_LAZY) {
                        /* Similar to task_numa_work, skip inaccessible VMAs */
                        if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
                                change_prot_numa(vma, start, endvma);
                        goto next;
                }

                if ((flags & MPOL_MF_STRICT) ||
                    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                     vma_migratable(vma))) {
                        err = queue_pages_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
                        if (err)
                                break;
                }
next:
                prev = vma;
        }
        return err;
}
/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
                                                struct mempolicy *pol)
{
        int err;
        struct mempolicy *old;
        struct mempolicy *new;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        new = mpol_dup(pol);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (vma->vm_ops && vma->vm_ops->set_policy) {
                err = vma->vm_ops->set_policy(vma, new);
                if (err)
                        goto err_out;
        }

        old = vma->vm_policy;
        vma->vm_policy = new; /* protected by mmap_sem */
        mpol_put(old);

        return 0;
 err_out:
        mpol_put(new);
        return err;
}
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
{
        struct vm_area_struct *next;
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
        pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;

        vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;

        prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;

        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);

                if (mpol_equal(vma_policy(vma), new_pol))
                        continue;

                pgoff = vma->vm_pgoff +
                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                                 vma->anon_vma, vma->vm_file, pgoff,
                                 new_pol);
                if (prev) {
                        vma = prev;
                        next = vma->vm_next;
                        if (mpol_equal(vma_policy(vma), new_pol))
                                continue;
                        /* vma_merge() joined vma && vma->next, case 8 */
                        goto replace;
                }
                if (vma->vm_start != vmstart) {
                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
                        if (err)
                                goto out;
                }
                if (vma->vm_end != vmend) {
                        err = split_vma(vma->vm_mm, vma, vmend, 0);
                        if (err)
                                goto out;
                }
replace:
                err = vma_replace_policy(vma, new_pol);
                if (err)
                        goto out;
        }

out:
        return err;
}
/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new, *old;
        NODEMASK_SCRATCH(scratch);
        int ret;

        if (!scratch)
                return -ENOMEM;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new)) {
                ret = PTR_ERR(new);
                goto out;
        }

        task_lock(current);
        ret = mpol_set_nodemask(new, nodes, scratch);
        if (ret) {
                task_unlock(current);
                mpol_put(new);
                goto out;
        }
        old = current->mempolicy;
        current->mempolicy = new;
        if (new && new->mode == MPOL_INTERLEAVE &&
            nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
        task_unlock(current);
        mpol_put(old);
        ret = 0;
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return ret;
}
/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}
/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;	/* just so it's initialized */
                task_lock(current);
                *nmask = cpuset_current_mems_allowed;
                task_unlock(current);
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL. We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;	/* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask) {
                if (mpol_store_user_nodemask(pol)) {
                        *nmask = pol->w.user_nodemask;
                } else {
                        task_lock(current);
                        get_policy_nodemask(pol, nmask);
                        task_unlock(current);
                }
        }

 out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}
#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));
                }
        }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        if (PageHuge(page))
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                        node);
        else
                return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}
/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        /*
         * This does not "check" the range but isolates all pages that
         * need migration. Between passing in the full user address
         * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
         */
        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, NULL, dest,
                                        MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }

        return err;
}
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
         * bit in 'tmp', and return that <source, dest> pair for migration.
         * The pair of nodemasks 'to' and 'from' define the map.
         *
         * If no pair of bits is found that way, fallback to picking some
         * pair of 'source' and 'dest' bits that are not the same. If the
         * 'source' and 'dest' bits are the same, this represents a node
         * that will be migrating to itself, so no pages need move.
         *
         * If no bits are left in 'tmp', or if all remaining bits left
         * in 'tmp' correspond to the same bit in 'to', return false
         * (nothing left to migrate).
         *
         * This lets us pick a pair of nodes to migrate between, such that
         * if possible the dest node is not already occupied by some other
         * source node, minimizing the risk of overloading the memory on a
         * node that would happen if we migrated incoming memory to a node
         * before migrating outgoing memory sourced from that same node.
         *
         * A single scan of tmp is sufficient. As we go, we remember the
         * most recent <s, d> pair that moved (s != d). If we find a pair
         * that not only moved, but what's better, moved to an empty slot
         * (d is not set in tmp), then we break out then, with that pair.
         * Otherwise when we finish scanning tmp, we at least have the
         * most recent <s, d> pair that moved. If we get all the way through
         * the scan of tmp without finding any node that moved, much less
         * moved to an empty node, then there is nothing left worth migrating.
         */
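        /*
         * Worked example (illustrative): with *from = {0,1} and *to = {1,2},
         * the first scan prefers <s,d> = <1,2> because dest node 2 is not in
         * the remaining source set, so node 1 is drained into node 2 before
         * node 0 is migrated into the now-vacated node 1.
         */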
        tmp = *from;
        while (!nodes_empty(tmp)) {
                int s,d;
                int source = NUMA_NO_NODE;
                int dest = 0;

                for_each_node_mask(s, tmp) {

                        /*
                         * do_migrate_pages() tries to maintain the relative
                         * node relationship of the pages established between
                         * threads and memory areas.
                         *
                         * However if the number of source nodes is not equal to
                         * the number of destination nodes we can not preserve
                         * this node relative relationship. In that case, skip
                         * copying memory from a node that is in the destination
                         * mask.
                         *
                         * Example: [2,3,4] -> [3,4,5] moves everything.
                         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
                         */

                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
                                                (node_isset(s, *to)))
                                continue;

                        d = node_remap(s, *from, *to);
                        if (s == d)
                                continue;

                        source = s;	/* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == NUMA_NO_NODE)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;
}
/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
        struct vm_area_struct *vma;
        unsigned long uninitialized_var(address);

        vma = find_vma(current->mm, start);
        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
                        break;
                vma = vma->vm_next;
        }

        if (PageHuge(page)) {
                BUG_ON(!vma);
                return alloc_huge_page_noerr(vma, address, 1);
        }
        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
{
        return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
        return NULL;
}
#endif
static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        LIST_HEAD(pagelist);

        if (flags & ~(unsigned long)MPOL_MF_VALID)
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;

        new = mpol_new(mode, mode_flags, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);

        if (flags & MPOL_MF_LAZY)
                new->flags |= MPOL_F_MOF;

        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
         */
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;

        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
                 start, start + len, mode, mode_flags,
                 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

                err = migrate_prep();
                if (err)
                        goto mpol_out;
        }
        {
                NODEMASK_SCRATCH(scratch);
                if (scratch) {
                        down_write(&mm->mmap_sem);
                        task_lock(current);
                        err = mpol_set_nodemask(new, nmask, scratch);
                        task_unlock(current);
                        if (err)
                                up_write(&mm->mmap_sem);
                } else
                        err = -ENOMEM;
                NODEMASK_SCRATCH_FREE(scratch);
        }
        if (err)
                goto mpol_out;

        err = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
        if (!err)
                err = mbind_range(mm, start, end, new);

        if (!err) {
                int nr_failed = 0;

                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
                        nr_failed = migrate_pages(&pagelist, new_page, NULL,
                                start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }

                if (nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        } else
                putback_movable_pages(&pagelist);

        up_write(&mm->mmap_sem);
 mpol_out:
        mpol_put(new);
        return err;
}
/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
                     unsigned long maxnode)
{
        unsigned long k;
        unsigned long nlongs;
        unsigned long endmask;

        --maxnode;
        nodes_clear(*nodes);
        if (maxnode == 0 || !nmask)
                return 0;
        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
                return -EINVAL;

        nlongs = BITS_TO_LONGS(maxnode);
        if ((maxnode % BITS_PER_LONG) == 0)
                endmask = ~0UL;
        else
                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

        /* When the user specified more nodes than supported just check
           if the unsupported part is all zero. */
        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
                if (nlongs > PAGE_SIZE/sizeof(long))
                        return -EINVAL;
                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
                        unsigned long t;
                        if (get_user(t, nmask + k))
                                return -EFAULT;
                        if (k == nlongs - 1) {
                                if (t & endmask)
                                        return -EINVAL;
                        } else if (t)
                                return -EINVAL;
                }
                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
                endmask = ~0UL;
        }

        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
                return -EFAULT;
        nodes_addr(*nodes)[nlongs-1] &= endmask;
        return 0;
}
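/*
 * Worked example (illustrative): a caller passing maxnode == 5 describes
 * bits 0..3 after the --maxnode above, so nlongs == 1 and
 * endmask == (1UL << 4) - 1 == 0xf; only the low four bits of the user
 * bitmap are honoured and any higher bits are masked off.
 */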
/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                              nodemask_t *nodes)
{
        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

        if (copy > nbytes) {
                if (copy > PAGE_SIZE)
                        return -EINVAL;
                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
                        return -EFAULT;
                copy = nbytes;
        }
        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
                unsigned long, mode, const unsigned long __user *, nmask,
                unsigned long, maxnode, unsigned, flags)
{
        nodemask_t nodes;
        int err;
        unsigned short mode_flags;

        mode_flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if (mode >= MPOL_MAX)
                return -EINVAL;
        if ((mode_flags & MPOL_F_STATIC_NODES) &&
            (mode_flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
                unsigned long, maxnode)
{
        int err;
        nodemask_t nodes;
        unsigned short flags;

        flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if ((unsigned int)mode >= MPOL_MAX)
                return -EINVAL;
        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_set_mempolicy(mode, flags, &nodes);
}
SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
                const unsigned long __user *, old_nodes,
                const unsigned long __user *, new_nodes)
{
        const struct cred *cred = current_cred(), *tcred;
        struct mm_struct *mm = NULL;
        struct task_struct *task;
        nodemask_t task_nodes;
        int err;
        nodemask_t *old;
        nodemask_t *new;
        NODEMASK_SCRATCH(scratch);

        if (!scratch)
                return -ENOMEM;

        old = &scratch->mask1;
        new = &scratch->mask2;

        err = get_nodes(old, old_nodes, maxnode);
        if (err)
                goto out;

        err = get_nodes(new, new_nodes, maxnode);
        if (err)
                goto out;

        /* Find the mm_struct */
        rcu_read_lock();
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                rcu_read_unlock();
                err = -ESRCH;
                goto out;
        }
        get_task_struct(task);

        err = -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        tcred = __task_cred(task);
        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
            !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                err = -EPERM;
                goto out_put;
        }
        rcu_read_unlock();

        task_nodes = cpuset_mems_allowed(task);
        /* Is the user allowed to access the target nodes? */
        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out_put;
        }

        if (!nodes_subset(*new, node_states[N_MEMORY])) {
                err = -EINVAL;
                goto out_put;
        }

        err = security_task_movememory(task);
        if (err)
                goto out_put;

        mm = get_task_mm(task);
        put_task_struct(task);

        if (!mm) {
                err = -EINVAL;
                goto out;
        }

        err = do_migrate_pages(mm, old, new,
                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

        mmput(mm);
out:
        NODEMASK_SCRATCH_FREE(scratch);
        return err;

out_put:
        put_task_struct(task);
        goto out;
}
/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
                unsigned long __user *, nmask, unsigned long, maxnode,
                unsigned long, addr, unsigned long, flags)
{
        int err;
        int uninitialized_var(pval);
        nodemask_t nodes;

        if (nmask != NULL && maxnode < MAX_NUMNODES)
                return -EINVAL;

        err = do_get_mempolicy(&pval, &nodes, addr, flags);
        if (err)
                return err;

        if (policy && put_user(pval, policy))
                return -EFAULT;

        if (nmask)
                err = copy_nodes_to_user(nmask, maxnode, &nodes);

        return err;
}
#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
                       compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode,
                       compat_ulong_t, addr, compat_ulong_t, flags)
{
        long err;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask)
                nm = compat_alloc_user_space(alloc_size);

        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

        if (!err && nmask) {
                unsigned long copy_size;
                copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
                err = copy_from_user(bm, nm, copy_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
        }

        return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(bm, nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, bm, alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
                       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
{
        struct mempolicy *pol = NULL;

        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
                        pol = vma->vm_ops->get_policy(vma, addr);
                } else if (vma->vm_policy) {
                        pol = vma->vm_policy;

                        /*
                         * shmem_alloc_page() passes MPOL_F_SHARED policy with
                         * a pseudo vma whose vma->vm_ops=NULL. Take a reference
                         * count on these policies which will be dropped by
                         * mpol_cond_put() later
                         */
                        if (mpol_needs_cond_ref(pol))
                                mpol_get(pol);
                }
        }

        return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                                unsigned long addr)
{
        struct mempolicy *pol = __get_vma_policy(vma, addr);

        if (!pol)
                pol = get_task_policy(current);

        return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
        struct mempolicy *pol;

        if (vma->vm_ops && vma->vm_ops->get_policy) {
                bool ret = false;

                pol = vma->vm_ops->get_policy(vma, vma->vm_start);
                if (pol && (pol->flags & MPOL_F_MOF))
                        ret = true;
                mpol_cond_put(pol);

                return ret;
        }

        pol = vma->vm_policy;
        if (!pol)
                pol = get_task_policy(current);

        return pol->flags & MPOL_F_MOF;
}
static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
        enum zone_type dynamic_policy_zone = policy_zone;

        BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

        /*
         * if policy->v.nodes has movable memory only,
         * we apply policy only when gfp_zone(gfp) is ZONE_MOVABLE.
         *
         * policy->v.nodes is intersected with node_states[N_MEMORY],
         * so if the following test fails, it implies
         * policy->v.nodes has movable memory only.
         */
        if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
                dynamic_policy_zone = ZONE_MOVABLE;

        return zone >= dynamic_policy_zone;
}
/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
        /* Lower zones don't get a nodemask applied for MPOL_BIND */
        if (unlikely(policy->mode == MPOL_BIND) &&
                        apply_policy_zone(policy, gfp_zone(gfp)) &&
                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
                return &policy->v.nodes;

        return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
        int nd)
{
        switch (policy->mode) {
        case MPOL_PREFERRED:
                if (!(policy->flags & MPOL_F_LOCAL))
                        nd = policy->v.preferred_node;
                break;
        case MPOL_BIND:
                /*
                 * Normally, MPOL_BIND allocations are node-local within the
                 * allowed nodemask. However, if __GFP_THISNODE is set and the
                 * current node isn't part of the mask, we use the zonelist for
                 * the first node in the mask instead.
                 */
                if (unlikely(gfp & __GFP_THISNODE) &&
                                unlikely(!node_isset(nd, policy->v.nodes)))
                        nd = first_node(policy->v.nodes);
                break;
        default:
                BUG();
        }
        return node_zonelist(nd, gfp);
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
        unsigned nid, next;
        struct task_struct *me = current;

        nid = me->il_next;
        next = next_node(nid, policy->v.nodes);
        if (next >= MAX_NUMNODES)
                next = first_node(policy->v.nodes);
        if (next < MAX_NUMNODES)
                me->il_next = next;
        return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
        struct mempolicy *policy;
        int node = numa_mem_id();

        if (in_interrupt())
                return node;

        policy = current->mempolicy;
        if (!policy || policy->flags & MPOL_F_LOCAL)
                return node;

        switch (policy->mode) {
        case MPOL_PREFERRED:
                /*
                 * handled MPOL_F_LOCAL above
                 */
                return policy->v.preferred_node;

        case MPOL_INTERLEAVE:
                return interleave_nodes(policy);

        case MPOL_BIND: {
                /*
                 * Follow bind policy behavior and start allocation at the
                 * first node.
                 */
                struct zonelist *zonelist;
                struct zone *zone;
                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
                zonelist = &NODE_DATA(node)->node_zonelists[0];
                (void)first_zones_zonelist(zonelist, highest_zoneidx,
                                                        &policy->v.nodes,
                                                        &zone);
                return zone ? zone->node : node;
        }

        default:
                BUG();
        }
}
/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
                struct vm_area_struct *vma, unsigned long off)
{
        unsigned nnodes = nodes_weight(pol->v.nodes);
        unsigned target;
        int c;
        int nid = NUMA_NO_NODE;

        if (!nnodes)
                return numa_node_id();
        target = (unsigned int)off % nnodes;
        c = 0;
        do {
                nid = next_node(nid, pol->v.nodes);
                c++;
        } while (c <= target);
        return nid;
}
/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
                 struct vm_area_struct *vma, unsigned long addr, int shift)
{
        if (vma) {
                unsigned long off;

                /*
                 * for small pages, there is no difference between
                 * shift and PAGE_SHIFT, so the bit-shift is safe.
                 * for huge pages, since vm_pgoff is in units of small
                 * pages, we need to shift off the always 0 bits to get
                 * a useful offset.
                 */
                BUG_ON(shift < PAGE_SHIFT);
                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
                off += (addr - vma->vm_start) >> shift;
                return offset_il_node(pol, vma, off);
        } else
                return interleave_nodes(pol);
}
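/*
 * Worked example (illustrative): interleaving 2MB huge pages (shift == 21)
 * over pol->v.nodes = {0,2,5}: huge-page offsets 0,1,2,3,... give
 * off % 3 == 0,1,2,0,..., which offset_il_node() turns into nodes
 * 0, 2, 5, 0, ... in turn.
 */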
/*
 * Return the bit number of a random bit set in the nodemask.
 * (returns NUMA_NO_NODE if nodemask is empty)
 */
int node_random(const nodemask_t *maskp)
{
        int w, bit = NUMA_NO_NODE;

        w = nodes_weight(*maskp);
        if (w)
                bit = bitmap_ord_to_pos(maskp->bits,
                        get_random_int() % w, MAX_NUMNODES);
        return bit;
}
#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                                gfp_t gfp_flags, struct mempolicy **mpol,
                                nodemask_t **nodemask)
{
        struct zonelist *zl;

        *mpol = get_vma_policy(vma, addr);
        *nodemask = NULL;	/* assume !MPOL_BIND */

        if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                                huge_page_shift(hstate_vma(vma))), gfp_flags);
        } else {
                zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
                if ((*mpol)->mode == MPOL_BIND)
                        *nodemask = &(*mpol)->v.nodes;
        }
        return zl;
}
/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
        struct mempolicy *mempolicy;
        int nid;

        if (!(mask && current->mempolicy))
                return false;

        task_lock(current);
        mempolicy = current->mempolicy;
        switch (mempolicy->mode) {
        case MPOL_PREFERRED:
                if (mempolicy->flags & MPOL_F_LOCAL)
                        nid = numa_node_id();
                else
                        nid = mempolicy->v.preferred_node;
                init_nodemask_of_node(mask, nid);
                break;

        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *mask = mempolicy->v.nodes;
                break;

        default:
                BUG();
        }
        task_unlock(current);

        return true;
}
#endif
/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy. Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        struct mempolicy *mempolicy;
        bool ret = true;

        if (!mask)
                return ret;
        task_lock(tsk);
        mempolicy = tsk->mempolicy;
        if (!mempolicy)
                goto out;

        switch (mempolicy->mode) {
        case MPOL_PREFERRED:
                /*
                 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
                 * allocate from, they may fallback to other nodes when oom.
                 * Thus, it's possible for tsk to have allocated memory from
                 * nodes in mask.
                 */
                break;
        case MPOL_BIND:
        case MPOL_INTERLEAVE:
                ret = nodes_intersects(mempolicy->v.nodes, *mask);
                break;
        default:
                BUG();
        }
out:
        task_unlock(tsk);
        return ret;
}
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
                                        unsigned nid)
{
        struct zonelist *zl;
        struct page *page;

        zl = node_zonelist(nid, gfp);
        page = __alloc_pages(gfp, order, zl);
        if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
        return page;
}
/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: for hugepages try only the preferred node if possible
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into user space. Returns
 * NULL when no page can be allocated.
 */
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                unsigned long addr, int node, bool hugepage)
{
        struct mempolicy *pol;
        struct page *page;
        unsigned int cpuset_mems_cookie;
        struct zonelist *zl;
        nodemask_t *nmask;

retry_cpuset:
        pol = get_vma_policy(vma, addr);
        cpuset_mems_cookie = read_mems_allowed_begin();

        if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage &&
                                        pol->mode != MPOL_INTERLEAVE)) {
                /*
                 * For hugepage allocation and non-interleave policy which
                 * allows the current node, we only try to allocate from the
                 * current node and don't fall back to other nodes, as the
                 * cost of remote accesses would likely offset THP benefits.
                 *
                 * If the policy is interleave, or does not allow the current
                 * node in its nodemask, we allocate the standard way.
                 */
                nmask = policy_nodemask(gfp, pol);
                if (!nmask || node_isset(node, *nmask)) {
                        mpol_cond_put(pol);
                        page = alloc_pages_exact_node(node, gfp, order);
                        goto out;
                }
        }

        if (pol->mode == MPOL_INTERLEAVE) {
                unsigned nid;

                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
                goto out;
        }

        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
        mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;
}
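/*
 * Usage sketch (illustrative, not from this file): an anonymous-page fault
 * handler would typically allocate with something like
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *
 * so the VMA's policy (or the faulting task's policy) decides placement.
 */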
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool. When not in
 * interrupt context, apply the current process NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
        struct mempolicy *pol = &default_policy;
        struct page *page;
        unsigned int cpuset_mems_cookie;

        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
                pol = get_task_policy(current);

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();

        /*
         * No reference counting needed for current->mempolicy
         * nor system default_policy
         */
        if (pol->mode == MPOL_INTERLEAVE)
                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
        else
                page = __alloc_pages_nodemask(gfp, order,
                                policy_zonelist(gfp, pol, numa_node_id()),
                                policy_nodemask(gfp, pol));

        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;

        return page;
}
EXPORT_SYMBOL(alloc_pages_current);
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        struct mempolicy *pol = mpol_dup(vma_policy(src));

        if (IS_ERR(pol))
                return PTR_ERR(pol);
        dst->vm_policy = pol;
        return 0;
}
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after its cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by the other task (the task that changes
 * cpuset's mems), so we needn't do rebind work for current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

        if (!new)
                return ERR_PTR(-ENOMEM);

        /* task's mempolicy is protected by alloc_lock */
        if (old == current->mempolicy) {
                task_lock(current);
                *new = *old;
                task_unlock(current);
        } else
                *new = *old;

        if (current_cpuset_is_being_rebound()) {
                nodemask_t mems = cpuset_mems_allowed(current);
                if (new->flags & MPOL_F_REBINDING)
                        mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
                else
                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
        }
        atomic_set(&new->refcnt, 1);
        return new;
}
/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (!a || !b)
                return false;
        if (a->mode != b->mode)
                return false;
        if (a->flags != b->flags)
                return false;
        if (mpol_store_user_nodemask(a))
                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
                        return false;

        switch (a->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                return !!nodes_equal(a->v.nodes, b->v.nodes);
        case MPOL_PREFERRED:
                return a->v.preferred_node == b->v.preferred_node;
        default:
                BUG();
                return false;
        }
}
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */
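/*
 * Illustrative example: a tmpfs file with an interleaved policy on its
 * first sixteen pages and a bind policy on the next sixteen stores two
 * sp_node ranges, [0,16) and [16,32), keyed by page offset; a fault at
 * index 20 then finds the second range via sp_lookup() below.
 */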
/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
        struct rb_node *n = sp->root.rb_node;

        while (n) {
                struct sp_node *p = rb_entry(n, struct sp_node, nd);

                if (start >= p->end)
                        n = n->rb_right;
                else if (end <= p->start)
                        n = n->rb_left;
                else
                        break;
        }
        if (!n)
                return NULL;
        for (;;) {
                struct sp_node *w = NULL;
                struct rb_node *prev = rb_prev(n);
                if (!prev)
                        break;
                w = rb_entry(prev, struct sp_node, nd);
                if (w->end <= start)
                        break;
                n = prev;
        }
        return rb_entry(n, struct sp_node, nd);
}
/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
        struct rb_node **p = &sp->root.rb_node;
        struct rb_node *parent = NULL;
        struct sp_node *nd;

        while (*p) {
                parent = *p;
                nd = rb_entry(parent, struct sp_node, nd);
                if (new->start < nd->start)
                        p = &(*p)->rb_left;
                else if (new->end > nd->end)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new->nd, parent, p);
        rb_insert_color(&new->nd, &sp->root);
        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
                 new->policy ? new->policy->mode : 0);
}
/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        struct mempolicy *pol = NULL;
        struct sp_node *sn;

        if (!sp->root.rb_node)
                return NULL;
        spin_lock(&sp->lock);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
        spin_unlock(&sp->lock);
        return pol;
}

static void sp_free(struct sp_node *n)
{
        mpol_put(n->policy);
        kmem_cache_free(sn_cache, n);
}
/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
 *
 * Returns:
 *	-1	- not misplaced, page is in the right node
 *	node	- node id where the page should be
 *
 * Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
        struct mempolicy *pol;
        struct zone *zone;
        int curnid = page_to_nid(page);
        unsigned long pgoff;
        int thiscpu = raw_smp_processor_id();
        int thisnid = cpu_to_node(thiscpu);
        int polnid = -1;
        int ret = -1;

        BUG_ON(!vma);

        pol = get_vma_policy(vma, addr);
        if (!(pol->flags & MPOL_F_MOF))
                goto out;

        switch (pol->mode) {
        case MPOL_INTERLEAVE:
                BUG_ON(addr >= vma->vm_end);
                BUG_ON(addr < vma->vm_start);

                pgoff = vma->vm_pgoff;
                pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
                polnid = offset_il_node(pol, vma, pgoff);
                break;

        case MPOL_PREFERRED:
                if (pol->flags & MPOL_F_LOCAL)
                        polnid = numa_node_id();
                else
                        polnid = pol->v.preferred_node;
                break;

        case MPOL_BIND:
                /*
                 * allows binding to multiple nodes.
                 * use current page if in policy nodemask,
                 * else select nearest allowed node, if any.
                 * If no allowed nodes, use current [!misplaced].
                 */
                if (node_isset(curnid, pol->v.nodes))
                        goto out;
                (void)first_zones_zonelist(
                                node_zonelist(numa_node_id(), GFP_HIGHUSER),
                                gfp_zone(GFP_HIGHUSER),
                                &pol->v.nodes, &zone);
                polnid = zone->node;
                break;

        default:
                BUG();
        }

        /* Migrate the page towards the node whose CPU is referencing it */
        if (pol->flags & MPOL_F_MORON) {
                polnid = thisnid;

                if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
                        goto out;
        }

        if (curnid != polnid)
                ret = polnid;
out:
        mpol_cond_put(pol);

        return ret;
}
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
        pr_debug("deleting %lx-%lx\n", n->start, n->end);
        rb_erase(&n->nd, &sp->root);
        sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
                        unsigned long end, struct mempolicy *pol)
{
        node->start = start;
        node->end = end;
        node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
                                struct mempolicy *pol)
{
        struct sp_node *n;
        struct mempolicy *newpol;

        n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
        if (!n)
                return NULL;

        newpol = mpol_dup(pol);
        if (IS_ERR(newpol)) {
                kmem_cache_free(sn_cache, n);
                return NULL;
        }
        newpol->flags |= MPOL_F_SHARED;
        sp_node_init(n, start, end, newpol);

        return n;
}
/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                                 unsigned long end, struct sp_node *new)
{
        struct sp_node *n;
        struct sp_node *n_new = NULL;
        struct mempolicy *mpol_new = NULL;
        int ret = 0;

restart:
        spin_lock(&sp->lock);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
                struct rb_node *next = rb_next(&n->nd);
                if (n->start >= start) {
                        if (n->end <= end)
                                sp_delete(sp, n);
                        else
                                n->start = end;
                } else {
                        /* Old policy spanning whole new range. */
                        if (n->end > end) {
                                if (!n_new)
                                        goto alloc_new;

                                *mpol_new = *n->policy;
                                atomic_set(&mpol_new->refcnt, 1);
                                sp_node_init(n_new, end, n->end, mpol_new);
                                n->end = start;
                                sp_insert(sp, n_new);
                                n_new = NULL;
                                mpol_new = NULL;
                                break;
                        } else
                                n->end = start;
                }
                if (!next)
                        break;
                n = rb_entry(next, struct sp_node, nd);
        }
        if (new)
                sp_insert(sp, new);
        spin_unlock(&sp->lock);
        ret = 0;

err_out:
        if (mpol_new)
                mpol_put(mpol_new);
        if (n_new)
                kmem_cache_free(sn_cache, n_new);

        return ret;

alloc_new:
        spin_unlock(&sp->lock);
        ret = -ENOMEM;
        n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
        if (!n_new)
                goto err_out;
        mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!mpol_new)
                goto err_out;
        goto restart;
}
2456 * mpol_shared_policy_init - initialize shared policy for inode
2457 * @sp: pointer to inode shared policy
2458 * @mpol: struct mempolicy to install
2460 * Install non-NULL @mpol in inode's shared policy rb-tree.
2461 * On entry, the current task has a reference on a non-NULL @mpol.
2462 * This must be released on exit.
2463 * This is called at get_inode() calls and we can use GFP_KERNEL.
2465 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2469 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2470 spin_lock_init(&sp->lock);
2473 struct vm_area_struct pvma;
2474 struct mempolicy *new;
2475 NODEMASK_SCRATCH(scratch);
2479 /* contextualize the tmpfs mount point mempolicy */
2480 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2482 goto free_scratch; /* no valid nodemask intersection */
2485 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2486 task_unlock(current);
2490 /* Create pseudo-vma that contains just the policy */
2491 memset(&pvma, 0, sizeof(struct vm_area_struct));
2492 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2493 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2496 mpol_put(new); /* drop initial ref */
2498 NODEMASK_SCRATCH_FREE(scratch);
2500 mpol_put(mpol); /* drop our incoming ref on sb mpol */
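/*
 * Illustrative caller sketch (not part of this file): tmpfs seeds the
 * per-inode shared policy from the mount's mempolicy, roughly as
 * mm/shmem.c does when creating an inode:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * shmem_get_sbmpol() hands back a referenced mpol, satisfying the
 * "current task has a reference on @mpol" contract above.
 */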
2504 int mpol_set_shared_policy(struct shared_policy *info,
2505 struct vm_area_struct *vma, struct mempolicy *npol)
2508 struct sp_node *new = NULL;
2509 unsigned long sz = vma_pages(vma);
2511 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2513 sz, npol ? npol->mode : -1,
2514 npol ? npol->flags : -1,
2515 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2518 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2522 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2528 /* Free a backing policy store on inode delete. */
2529 void mpol_free_shared_policy(struct shared_policy *p)
2532 struct rb_node *next;
2534 if (!p->root.rb_node)
2536 spin_lock(&p->lock);
2537 next = rb_first(&p->root);
2539 n = rb_entry(next, struct sp_node, nd);
2540 next = rb_next(&n->nd);
2543 spin_unlock(&p->lock);
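/*
 * Shared policy lifecycle, for reference: mpol_shared_policy_init()
 * seeds the rb-tree at inode creation, mpol_set_shared_policy()
 * installs per-range policies (e.g. via mbind() on a mapped tmpfs
 * file), mpol_shared_policy_lookup() resolves a policy at fault time,
 * and mpol_free_shared_policy() above tears it all down on inode
 * deletion.
 */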
2546 #ifdef CONFIG_NUMA_BALANCING
2547 static int __initdata numabalancing_override;
2549 static void __init check_numabalancing_enable(void)
2551 bool numabalancing_default = false;
2553 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2554 numabalancing_default = true;
2556 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2557 if (numabalancing_override)
2558 set_numabalancing_state(numabalancing_override == 1);
2560 if (nr_node_ids > 1 && !numabalancing_override) {
2561 pr_info("%s automatic NUMA balancing. "
2562 "Configure with numa_balancing= or the "
2563 "kernel.numa_balancing sysctl",
2564 numabalancing_default ? "Enabling" : "Disabling");
2565 set_numabalancing_state(numabalancing_default);
2569 static int __init setup_numabalancing(char *str)
2575 if (!strcmp(str, "enable")) {
2576 numabalancing_override = 1;
2578 } else if (!strcmp(str, "disable")) {
2579 numabalancing_override = -1;
2584 pr_warn("Unable to parse numa_balancing=\n");
2588 __setup("numa_balancing=", setup_numabalancing);
2590 static inline void __init check_numabalancing_enable(void)
2593 #endif /* CONFIG_NUMA_BALANCING */
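/*
 * Example usage of the knobs above: pass "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line to override the
 * default, or flip it at runtime with the sysctl:
 *
 *	sysctl kernel.numa_balancing=1
 */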
2595 /* assumes fs == KERNEL_DS */
2596 void __init numa_policy_init(void)
2598 nodemask_t interleave_nodes;
2599 unsigned long largest = 0;
2600 int nid, prefer = 0;
2602 policy_cache = kmem_cache_create("numa_policy",
2603 sizeof(struct mempolicy),
2604 0, SLAB_PANIC, NULL);
2606 sn_cache = kmem_cache_create("shared_policy_node",
2607 sizeof(struct sp_node),
2608 0, SLAB_PANIC, NULL);
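/* SLAB_PANIC: both caches are essential, so fail the boot loudly. */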
2610 for_each_node(nid) {
2611 preferred_node_policy[nid] = (struct mempolicy) {
2612 .refcnt = ATOMIC_INIT(1),
2613 .mode = MPOL_PREFERRED,
2614 .flags = MPOL_F_MOF | MPOL_F_MORON,
2615 .v = { .preferred_node = nid, },
2620 * Set interleaving policy for system init. Interleaving is only
2621 * enabled across suitably sized nodes (>= 16MB by default); if
2622 * they are all smaller, fall back to the largest node.
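*
* For example, with 4KB pages a node needs at least 4096 present
* pages (4096 << 12 == 16MB) to be included in the boot-time
* interleave set.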
2624 nodes_clear(interleave_nodes);
2625 for_each_node_state(nid, N_MEMORY) {
2626 unsigned long total_pages = node_present_pages(nid);
2628 /* Preserve the largest node */
2629 if (largest < total_pages) {
2630 largest = total_pages;
2634 /* Interleave this node? */
2635 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2636 node_set(nid, interleave_nodes);
2639 /* All too small, use the largest */
2640 if (unlikely(nodes_empty(interleave_nodes)))
2641 node_set(prefer, interleave_nodes);
2643 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2644 pr_err("%s: interleaving failed\n", __func__);
2646 check_numabalancing_enable();
2649 /* Reset policy of current process to default */
2650 void numa_default_policy(void)
2652 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
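/*
 * The userspace equivalent (see set_mempolicy(2)) is:
 *
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 *
 * which likewise drops any task policy and reverts to local
 * allocation.
 */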
2656 * Parse and format mempolicy from/to strings
2660 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2662 static const char * const policy_modes[] =
2664 [MPOL_DEFAULT] = "default",
2665 [MPOL_PREFERRED] = "prefer",
2666 [MPOL_BIND] = "bind",
2667 [MPOL_INTERLEAVE] = "interleave",
2668 [MPOL_LOCAL] = "local",
2674 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2675 * @str: string containing mempolicy to parse
2676 * @mpol: pointer to struct mempolicy pointer, returned on success.
2679 * <mode>[=<flags>][:<nodelist>]
2681 * On success, returns 0, else 1
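*
* Illustrative examples of accepted strings:
*	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
*	"bind=static:1,3"	MPOL_BIND to nodes 1 and 3, static nodemask
*	"prefer:2"		MPOL_PREFERRED, preferred node 2
*	"local"			MPOL_PREFERRED with MPOL_F_LOCAL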
2683 int mpol_parse_str(char *str, struct mempolicy **mpol)
2685 struct mempolicy *new = NULL;
2686 unsigned short mode;
2687 unsigned short mode_flags;
2689 char *nodelist = strchr(str, ':');
2690 char *flags = strchr(str, '=');
2694 /* NUL-terminate mode or flags string */
2696 if (nodelist_parse(nodelist, nodes))
2698 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2704 *flags++ = '\0'; /* terminate mode string */
2706 for (mode = 0; mode < MPOL_MAX; mode++) {
2707 if (!strcmp(str, policy_modes[mode])) {
2711 if (mode >= MPOL_MAX)
2715 case MPOL_PREFERRED:
2717 * Insist on a nodelist of one node only
2720 char *rest = nodelist;
2721 while (isdigit(*rest))
2727 case MPOL_INTERLEAVE:
2729 * Default to online nodes with memory if no nodelist
2732 nodes = node_states[N_MEMORY];
2736 * Don't allow a nodelist; mpol_new() checks flags
2740 mode = MPOL_PREFERRED;
2744 * Insist on an empty nodelist
2751 * Insist on a nodelist
2760 * Currently, we only support two mutually exclusive
2763 if (!strcmp(flags, "static"))
2764 mode_flags |= MPOL_F_STATIC_NODES;
2765 else if (!strcmp(flags, "relative"))
2766 mode_flags |= MPOL_F_RELATIVE_NODES;
2771 new = mpol_new(mode, mode_flags, &nodes);
2776 * Save nodes for mpol_to_str() to show the tmpfs mount options
2777 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2779 if (mode != MPOL_PREFERRED)
2780 new->v.nodes = nodes;
2782 new->v.preferred_node = first_node(nodes);
2784 new->flags |= MPOL_F_LOCAL;
2787 * Save nodes for contextualization: this will be used to "clone"
2788 * the mempolicy in a specific context [cpuset] at a later time.
2790 new->w.user_nodemask = nodes;
2795 /* Restore string for error message */
2804 #endif /* CONFIG_TMPFS */
2807 * mpol_to_str - format a mempolicy structure for printing
2808 * @buffer: to contain formatted mempolicy string
2809 * @maxlen: length of @buffer
2810 * @pol: pointer to mempolicy to be formatted
2812 * Convert @pol into a string. If @buffer is too short, truncate the string.
2813 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2814 * longest flag, "relative", and to display at least a few node ids.
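*
* Example outputs, mirroring the mpol_parse_str() format above:
* "default", "prefer:2", "bind=static:1,3", "interleave=relative:0-3",
* "local".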
2816 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2819 nodemask_t nodes = NODE_MASK_NONE;
2820 unsigned short mode = MPOL_DEFAULT;
2821 unsigned short flags = 0;
2823 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2831 case MPOL_PREFERRED:
2832 if (flags & MPOL_F_LOCAL)
2835 node_set(pol->v.preferred_node, nodes);
2838 case MPOL_INTERLEAVE:
2839 nodes = pol->v.nodes;
2843 snprintf(p, maxlen, "unknown");
2847 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2849 if (flags & MPOL_MODE_FLAGS) {
2850 p += snprintf(p, buffer + maxlen - p, "=");
2853 * Currently, the only defined flags are mutually exclusive
2855 if (flags & MPOL_F_STATIC_NODES)
2856 p += snprintf(p, buffer + maxlen - p, "static");
2857 else if (flags & MPOL_F_RELATIVE_NODES)
2858 p += snprintf(p, buffer + maxlen - p, "relative");
2861 if (!nodes_empty(nodes)) {
2862 p += snprintf(p, buffer + maxlen - p, ":");
2863 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
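/*
 * Typical use (illustrative, modeled on shmem's show_options code):
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), mpol);
 *	seq_printf(seq, ",mpol=%s", buffer);
 */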