mm: don't call pte_unmap() against an improper pte
author Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Mon, 26 Oct 2009 23:50:23 +0000 (16:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 29 Oct 2009 14:39:32 +0000 (07:39 -0700)
There are some places where we do something like this:

pte = pte_map();
do {
(may break out under some conditions)
} while (pte++, ...);
pte_unmap(pte - 1);

But if the loop breaks during the first iteration, pte_unmap() is called on an invalid pte (the entry just before the pte that was actually mapped).

This patch fixes the problem: copy_pte_range() now remembers the originally mapped ptes and unmaps those, and apply_to_pte_range() increments the pte inside the loop body, so the final "pte - 1" always points at an entry that was really mapped.
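
For illustration only, here is a minimal userspace sketch of the bug and of the two corrected patterns. The pte_t typedef, fake_pte_map(), fake_pte_unmap() and fn() below are made-up stand-ins for the real kernel helpers, used purely to show why "pte_unmap(pte - 1)" is wrong when the body breaks before the first pte++:

	/* Minimal userspace sketch, not kernel code: pte_t, fake_pte_map(),
	 * fake_pte_unmap() and fn() are stand-ins, used only to illustrate
	 * the off-by-one fixed by this patch.
	 */
	#include <assert.h>
	#include <stdio.h>

	typedef unsigned long pte_t;	/* stand-in for the kernel's pte_t */

	static pte_t table[4];		/* pretend page-table page */
	static pte_t *mapped;		/* pointer last handed out by "map" */

	static pte_t *fake_pte_map(void)	/* stand-in for pte_offset_map() */
	{
		mapped = &table[0];
		return mapped;
	}

	static void fake_pte_unmap(pte_t *pte)	/* stand-in for pte_unmap() */
	{
		assert(pte == mapped);	/* must be the pointer that was mapped */
	}

	static int fn(pte_t *pte)	/* callback that fails on the first entry */
	{
		(void)pte;
		return -1;
	}

	int main(void)
	{
		int i;

		/* Buggy pattern: pte only advances when an iteration completes,
		 * so breaking on the very first entry leaves pte == mapped and
		 * "pte - 1" points in front of the page table.
		 */
		pte_t *pte = fake_pte_map();
		i = 0;
		do {
			if (fn(pte))
				break;
		} while (pte++, ++i < 4);
		printf("buggy unmap offset: %td (should be 0)\n", (pte - 1) - mapped);

		/* Fix used in copy_pte_range(): remember the originally mapped pte. */
		pte_t *orig_pte, *pte2;
		orig_pte = pte2 = fake_pte_map();
		i = 0;
		do {
			if (fn(pte2))
				break;
		} while (pte2++, ++i < 4);
		fake_pte_unmap(orig_pte);

		/* Fix used in apply_to_pte_range(): advance pte inside the body,
		 * so "pte - 1" is always the entry that was just processed.
		 */
		pte_t *pte3 = fake_pte_map();
		i = 0;
		do {
			if (fn(pte3++))
				break;
		} while (++i < 4);
		fake_pte_unmap(pte3 - 1);

		return 0;
	}
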

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory.c

index 7e91b5f9f690e4ae9d7c3e58bc1530c995311089..60ea601e03ea59f485bc7c82b32885acab9ec16d 100644
@@ -641,6 +641,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
 {
+       pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
@@ -654,6 +655,8 @@ again:
        src_pte = pte_offset_map_nested(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+       orig_src_pte = src_pte;
+       orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();
 
        do {
@@ -677,9 +680,9 @@ again:
 
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
-       pte_unmap_nested(src_pte - 1);
+       pte_unmap_nested(orig_src_pte);
        add_mm_rss(dst_mm, rss[0], rss[1]);
-       pte_unmap_unlock(dst_pte - 1, dst_ptl);
+       pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
        if (addr != end)
                goto again;
@@ -1820,10 +1823,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        token = pmd_pgtable(*pmd);
 
        do {
-               err = fn(pte, token, addr, data);
+               err = fn(pte++, token, addr, data);
                if (err)
                        break;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();