#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#include "internal.h"
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}
/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcount on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}
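/*
 * Illustrative sketch (not part of gup.c): a typical lookup through the
 * follow_page() wrapper from <linux/mm.h>, in the style of KSM's break_ksm().
 * The helper name is hypothetical; NULL means "not mapped" and an ERR_PTR
 * means "mapped, but not by a normal page descriptor".
 */
static struct page *example_lookup_one(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
	if (IS_ERR_OR_NULL(page))
		return NULL;

	/* caller now holds a reference and must put_page() when done */
	return page;
}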
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}
/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}
/**
 * __get_user_pages() - pin user pages in memory
 * @tsk: task_struct of target task
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @gup_flags: flags modifying pin behaviour
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long. Or NULL, if caller
 *	only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *	Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0. Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * the function.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;

				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;

			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
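/*
 * Illustrative sketch (not part of gup.c): driving __get_user_pages()
 * directly when a particular combination of gup_flags is needed.  The helper
 * name is hypothetical; only the calling convention documented above is
 * assumed (mmap_sem held for read, put_page() on each returned page later).
 */
static long example_pin_with_flags(struct mm_struct *mm, unsigned long start,
				   unsigned long nr_pages, struct page **pages)
{
	long ret;

	down_read(&mm->mmap_sem);
	/* FOLL_GET takes a reference on each page; FOLL_TOUCH marks it used */
	ret = __get_user_pages(current, mm, start, nr_pages,
			       FOLL_GET | FOLL_TOUCH, pages, NULL, NULL);
	up_read(&mm->mmap_sem);

	return ret;	/* pages[0..ret-1] must each be released with put_page() */
}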
/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk: the task_struct to use for page fault accounting, or
 *	NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @address: user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section): the access returns -EFAULT, and we want to
 * resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference from get_user_pages() is that this function will
 * unconditionally call handle_mm_fault(), which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
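/*
 * Illustrative sketch (not part of gup.c): the futex-style retry pattern the
 * comment above describes.  An atomic user access under pagefault_disable()
 * fails with -EFAULT, the fault is resolved with fixup_user_fault(), and the
 * access is retried.  The helper name is hypothetical and <linux/uaccess.h>
 * is assumed for pagefault_disable()/__put_user().
 */
static int example_atomic_user_write(u32 __user *uaddr, u32 val)
{
	struct mm_struct *mm = current->mm;
	int ret;

	for (;;) {
		pagefault_disable();
		ret = __put_user(val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		/* fault the page in writably, then retry the atomic write */
		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&mm->mmap_sem);
		if (ret)
			return ret;
	}
}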
/**
 * get_user_pages() - pin user pages in memory
 * @tsk: the task_struct to use for page fault accounting, or
 *	NULL if faults are not to be recorded.
 * @mm: mm_struct of target mm
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to by the caller
 * @force: whether to force access even when user mapping is currently
 *	protected (but never forces write access to shared mapping).
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long. Or NULL, if caller
 *	only intends to ensure the pages are faulted in.
 * @vmas: array of pointers to vmas corresponding to each page.
 *	Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
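/*
 * Illustrative sketch (not part of gup.c): the "fewer-copy IO" pattern the
 * comment above describes - pin user pages, access them through the kernel
 * mapping, then dirty and release them.  The function name is hypothetical
 * and <linux/highmem.h> is assumed for kmap()/kunmap().
 */
static long example_zero_user_range(unsigned long start, unsigned long nr_pages,
				    struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long got, i;

	down_read(&mm->mmap_sem);
	got = get_user_pages(current, mm, start, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	for (i = 0; i < got; i++) {
		void *kaddr = kmap(pages[i]);

		memset(kaddr, 0, PAGE_SIZE);	/* access via the kernel mapping */
		flush_dcache_page(pages[i]);	/* keep the user view coherent */
		kunmap(pages[i]);
		set_page_dirty_lock(pages[i]);	/* we wrote to the page */
		put_page(pages[i]);
	}
	return got;	/* may be fewer than nr_pages, or -errno */
}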
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
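/*
 * Illustrative sketch (not part of gup.c): how a coredump writer consumes
 * get_dump_page(), in the style of fs/binfmt_elf.c.  A NULL return is not an
 * error; it asks for a hole in the corefile.  dump_emit()/dump_skip() are the
 * fs/coredump.c helpers; treat their exact signatures here as an assumption,
 * as is <linux/highmem.h> for kmap()/kunmap().
 */
static int example_dump_user_range(struct coredump_params *cprm,
				   unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);
			int stop = !dump_emit(cprm, kaddr, PAGE_SIZE);

			kunmap(page);
			put_page(page);
			if (stop)
				return 0;
		} else if (!dump_skip(cprm, PAGE_SIZE)) {
			return 0;	/* hole could not be written */
		}
	}
	return 1;
}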
/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *     pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 *     pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path
		 */
		if (!pte_present(pte) || pte_special(pte) ||
		    pte_numa(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					pages, nr))
				return 0;

		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(pgdp, addr);
	do {
		pud_t pud = ACCESS_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (pud_huge(pud)) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
				start, len)))
		return 0;

	/*
	 * Disable interrupts. We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(*pgdp))
			break;
		else if (!gup_pud_range(pgdp, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *	Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
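/*
 * Illustrative sketch (not part of gup.c): typical driver-style use of
 * get_user_pages_fast() to pin a user buffer, e.g. before mapping it for
 * DMA.  The helper name is hypothetical; only the documented contract is
 * assumed (put_page() each page, and set_page_dirty_lock() first if the
 * device wrote to it).
 */
static int example_pin_user_buffer(unsigned long uaddr, size_t len,
				   int writable, struct page **pages)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
	int nr_pages = last - first + 1;
	int pinned;

	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				     writable, pages);
	if (pinned < nr_pages) {
		/* partial pin: release what we got and report failure */
		while (pinned > 0)
			put_page(pages[--pinned]);
		return -EFAULT;
	}
	return nr_pages;
}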