mm: thp: fix up pmd_trans_unstable() locations
author     Andrea Arcangeli <aarcange@redhat.com>
           Wed, 28 Mar 2012 21:42:40 +0000 (14:42 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 29 Mar 2012 00:14:35 +0000 (17:14 -0700)
pmd_trans_unstable() should be called before pte_offset_map() in the
locations where the mmap_sem is held for reading.
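
As a reminder of the intended ordering, here is a minimal sketch (not
part of this patch; example_pmd_range() is a made-up name and the
trans huge pmd handling is elided) of a pagewalk-style pmd handler
following this rule:

/*
 * Illustrative only: with just mmap_sem held for reading, a trans
 * huge pmd can materialize or be split under us, so the pmd must be
 * revalidated before it is treated as a pte page table.
 */
static int example_pmd_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	/* ... handle a stable trans huge pmd here and return ... */

	/*
	 * Not a hugepage handled above: bail out if the pmd is
	 * none/trans-huge/unstable instead of calling
	 * pte_offset_map_lock() on something that is not a pte table.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... examine *pte ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}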

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 fs/proc/task_mmu.c | 5 ++---
 mm/memcontrol.c    | 4 ++++
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9694cc2835115f18c5c6f9719d8ab9a332631901..c283832d411d4ea79170210e68fb27f008aa49ea 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -781,9 +781,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        int err = 0;
        pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);
 
-       if (pmd_trans_unstable(pmd))
-               return 0;
-
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        spin_lock(&walk->mm->page_table_lock);
@@ -802,6 +799,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                return err;
        }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
        for (; addr != end; addr += PAGE_SIZE) {
 
                /* check to see if we've left 'vma' behind
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b2ee6df0e9bb31eebd3b2f1528cbbcccb2b9c43e..7d698df4a067ce591fd661f45e098610e1a027db 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5306,6 +5306,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
                return 0;
        }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                if (get_mctgt_type(vma, addr, *pte, NULL))
@@ -5502,6 +5504,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                return 0;
        }
 
+       if (pmd_trans_unstable(pmd))
+               return 0;
 retry:
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {