mm: fix use-after-free in sys_remap_file_pages
[firefly-linux-kernel-4.4.55.git] / mm / memory.c
index 61a262b08e53efa2f69c1387e488bea6f87bb0f9..4b60011907d7dbe9fe4de17196b8d7697f106ae1 100644
@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  *     tear-down from @mm. The @fullmm argument is used when @mm is without
  *     users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
        tlb->mm = mm;
 
-       tlb->fullmm     = fullmm;
+       /* Is it from 0 to ~0? */
+       tlb->fullmm     = !(start | (end+1));
        tlb->need_flush_all = 0;
-       tlb->start      = -1UL;
-       tlb->end        = 0;
+       tlb->start      = start;
+       tlb->end        = end;
        tlb->need_flush = 0;
        tlb->local.next = NULL;
        tlb->local.nr   = 0;
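
The fullmm test above relies on a whole-mm teardown being described as start == 0 and end == ~0UL: end + 1 then wraps to zero, so the OR of both values is zero only for the full address space. A standalone illustration of that bit trick (plain user-space C; is_fullmm() is an invented name, not kernel code):

#include <stdio.h>

/* Mirrors tlb->fullmm = !(start | (end + 1)): true only for the 0..~0UL range. */
static int is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        printf("%d\n", is_fullmm(0UL, ~0UL));           /* 1: exit/execve-style full teardown */
        printf("%d\n", is_fullmm(0x1000UL, 0x5000UL));  /* 0: bounded range, as in munmap */
        return 0;
}

The zap_page_range() and zap_page_range_single() hunks further down show the call sites being converted to pass the real range instead of the old fullmm flag.
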
@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
        struct mmu_gather_batch *batch, *next;
 
-       tlb->start = start;
-       tlb->end   = end;
        tlb_flush_mmu(tlb);
 
        /* keep the page table cache within bounds */
@@ -1203,13 +1202,23 @@ again:
         * and page-free while holding it.
         */
        if (force_flush) {
+               unsigned long old_end;
+
                force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = addr;
-               tlb->end = end;
-#endif
+               /*
+                * Flush the TLB just for the previous segment,
+                * then update the range to be the remaining
+                * TLB range.
+                */
+               old_end = tlb->end;
+               tlb->end = addr;
+
                tlb_flush_mmu(tlb);
+
+               tlb->start = addr;
+               tlb->end = old_end;
+
                if (addr != end)
                        goto again;
        }
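
The force_flush path now narrows tlb->end to the address reached so far, flushes, and only then restores the range so the retried pass covers just what is left. A hedged, self-contained sketch of that split-and-resume pattern; range_gather, unmap_some() and flush_gather() are invented names, not the mmu_gather API:

struct range_gather {
        unsigned long start;
        unsigned long end;
};

/* Unmaps pages starting at addr, may stop early when a batch fills; returns where it stopped. */
unsigned long unmap_some(struct range_gather *g, unsigned long addr, unsigned long end);
/* Flushes the TLB for [g->start, g->end). */
void flush_gather(struct range_gather *g);

static void unmap_range(struct range_gather *g, unsigned long start, unsigned long end)
{
        unsigned long addr = start;

        g->start = start;
        g->end   = end;

        while (addr < end) {
                addr = unmap_some(g, addr, end);
                if (addr != end) {
                        unsigned long old_end = g->end;

                        g->end = addr;          /* flush only the segment already torn down */
                        flush_gather(g);

                        g->start = addr;        /* the remainder becomes the new range */
                        g->end   = old_end;
                }
        }
}

The point of the split is that pages already removed from the page tables get their TLB entries flushed before the loop continues, while the untouched tail keeps its pending range for the final flush.
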
@@ -1396,7 +1405,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end = start + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, start, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, start, end);
        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1422,7 +1431,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
        unsigned long end = address + size;
 
        lru_add_drain();
-       tlb_gather_mmu(&tlb, mm, 0);
+       tlb_gather_mmu(&tlb, mm, address, end);
        update_hiwater_rss(mm);
        mmu_notifier_invalidate_range_start(mm, address, end);
        unmap_single_vma(&tlb, vma, address, end, details);
@@ -3516,12 +3525,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-                               unsigned long addr, int current_nid)
+                               unsigned long addr, int page_nid)
 {
        get_page(page);
 
        count_vm_numa_event(NUMA_HINT_FAULTS);
-       if (current_nid == numa_node_id())
+       if (page_nid == numa_node_id())
                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
        return mpol_misplaced(page, vma, addr);
@@ -3532,7 +3541,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *page = NULL;
        spinlock_t *ptl;
-       int current_nid = -1;
+       int page_nid = -1;
        int target_nid;
        bool migrated = false;
 
@@ -3562,15 +3571,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                return 0;
        }
 
-       current_nid = page_to_nid(page);
-       target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+       page_nid = page_to_nid(page);
+       target_nid = numa_migrate_prep(page, vma, addr, page_nid);
        pte_unmap_unlock(ptep, ptl);
        if (target_nid == -1) {
-               /*
-                * Account for the fault against the current node if it not
-                * being replaced regardless of where the page is located.
-                */
-               current_nid = numa_node_id();
                put_page(page);
                goto out;
        }
@@ -3578,11 +3582,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Migrate to the requested node */
        migrated = migrate_misplaced_page(page, target_nid);
        if (migrated)
-               current_nid = target_nid;
+               page_nid = target_nid;
 
 out:
-       if (current_nid != -1)
-               task_numa_fault(current_nid, 1, migrated);
+       if (page_nid != -1)
+               task_numa_fault(page_nid, 1, migrated);
        return 0;
 }
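
With the rename, the fault is accounted against the node the page actually sat on, or against the target node once a migration succeeds, instead of being silently credited to the local node when no migration target was chosen. Restated as a small hypothetical helper (not kernel code):

/* -1 means "nothing to account"; mirrors the page_nid/target_nid handling above. */
static int nid_to_account(int page_nid, int target_nid, int migrated)
{
        if (migrated)
                return target_nid;      /* the page now lives on the target node */
        return page_nid;                /* otherwise charge the node it was found on */
}

A caller would then do: if (nid != -1) task_numa_fault(nid, 1, migrated);
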
 
@@ -3597,7 +3601,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long offset;
        spinlock_t *ptl;
        bool numa = false;
-       int local_nid = numa_node_id();
 
        spin_lock(&mm->page_table_lock);
        pmd = *pmdp;
@@ -3620,9 +3623,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
        for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
                pte_t pteval = *pte;
                struct page *page;
-               int curr_nid = local_nid;
+               int page_nid = -1;
                int target_nid;
-               bool migrated;
+               bool migrated = false;
+
                if (!pte_present(pteval))
                        continue;
                if (!pte_numa(pteval))
@@ -3644,25 +3648,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(page_mapcount(page) != 1))
                        continue;
 
-               /*
-                * Note that the NUMA fault is later accounted to either
-                * the node that is currently running or where the page is
-                * migrated to.
-                */
-               curr_nid = local_nid;
-               target_nid = numa_migrate_prep(page, vma, addr,
-                                              page_to_nid(page));
-               if (target_nid == -1) {
+               page_nid = page_to_nid(page);
+               target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+               pte_unmap_unlock(pte, ptl);
+               if (target_nid != -1) {
+                       migrated = migrate_misplaced_page(page, target_nid);
+                       if (migrated)
+                               page_nid = target_nid;
+               } else {
                        put_page(page);
-                       continue;
                }
 
-               /* Migrate to the requested node */
-               pte_unmap_unlock(pte, ptl);
-               migrated = migrate_misplaced_page(page, target_nid);
-               if (migrated)
-                       curr_nid = target_nid;
-               task_numa_fault(curr_nid, 1, migrated);
+               if (page_nid != -1)
+                       task_numa_fault(page_nid, 1, migrated);
 
                pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
        }
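
The restructured loop also makes the locking uniform: the pte lock is dropped before migrate_misplaced_page() on every path that reaches it and re-taken at the bottom of the loop, matching do_numa_page() above. Reduced to a hedged, shape-only sketch with invented names (lock_table, needs_work, handle_one), since migration may block and must not run under a spinlock:

void lock_table(void);
void unlock_table(void);
int needs_work(unsigned long addr);     /* cheap checks, safe under the lock */
void handle_one(unsigned long addr);    /* may block, e.g. page migration */

static void scan_range(unsigned long start, unsigned long end, unsigned long step)
{
        unsigned long addr;

        lock_table();
        for (addr = start; addr < end; addr += step) {
                if (!needs_work(addr))
                        continue;               /* stay locked for entries that need nothing */
                unlock_table();                 /* blocking work runs without the lock */
                handle_one(addr);
                lock_table();                   /* re-take before touching the next entry */
        }
        unlock_table();
}
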
@@ -4065,6 +4063,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 
        return len;
 }
+EXPORT_SYMBOL_GPL(generic_access_phys);
 #endif
 
 /*
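
Exporting generic_access_phys() as GPL-only lets modular drivers that mmap MMIO regions hook it up as the .access callback of their vm_operations_struct, which is what makes ptrace() and /proc/<pid>/mem reads of such mappings work. A minimal module-side sketch; my_mmio_vm_ops and my_mmio_mmap() are hypothetical, only the .access wiring is the point:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct my_mmio_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys,  /* now reachable from GPL modules */
#endif
};

static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &my_mmio_vm_ops;
        /* the driver's io_remap_pfn_range() of its device memory would go here */
        return 0;
}

This mirrors how drivers/char/mem.c already uses generic_access_phys for /dev/mem mappings.
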