thp: mincore transparent hugepage support
author     Johannes Weiner <hannes@cmpxchg.org>
           Thu, 13 Jan 2011 23:47:02 +0000 (15:47 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 14 Jan 2011 01:32:44 +0000 (17:32 -0800)
Handle transparent huge page pmd entries natively instead of splitting
them into subpages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/mincore.c

include/linux/huge_mm.h
index 43a694ef8904f3fabd8ffb26159a9bd569dbb239..25125fb6acf755942e52a17531952cd32c81b95c 100644
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+                       unsigned long addr, unsigned long end,
+                       unsigned char *vec);
 
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
mm/huge_memory.c
index ae2bf08b1099eca82c2a73852f6a38aea4bd09a6..37e89a32a0b1d229c1ccfba9b990ca6c79532d42 100644
@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        return ret;
 }
 
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+               unsigned long addr, unsigned long end,
+               unsigned char *vec)
+{
+       int ret = 0;
+
+       spin_lock(&vma->vm_mm->page_table_lock);
+       if (likely(pmd_trans_huge(*pmd))) {
+               ret = !pmd_trans_splitting(*pmd);
+               spin_unlock(&vma->vm_mm->page_table_lock);
+               if (unlikely(!ret))
+                       wait_split_huge_page(vma->anon_vma, pmd);
+               else {
+                       /*
+                        * All logical pages in the range are present
+                        * if backed by a huge page.
+                        */
+                       memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+               }
+       } else
+               spin_unlock(&vma->vm_mm->page_table_lock);
+
+       return ret;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
                              struct mm_struct *mm,
                              unsigned long address,
mm/mincore.c
index 9959bb41570e64fc446c86d186737d1ba17baeec..a4e6b9d75c76198be4f04d41e996f2067a97db27 100644
@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               split_huge_page_pmd(vma->vm_mm, pmd);
+               if (pmd_trans_huge(*pmd)) {
+                       if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+                               vec += (next - addr) >> PAGE_SHIFT;
+                               continue;
+                       }
+                       /* fall through */
+               }
                if (pmd_none_or_clear_bad(pmd))
                        mincore_unmapped_range(vma, addr, next, vec);
                else
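
For context, a minimal userspace sketch (not part of this commit) of how the change is observable through mincore(2): once a 2 MiB-aligned anonymous range is backed by a transparent huge page, mincore_huge_pmd() fills one vec byte per 4 KiB subpage in a single memset, so the whole range reports resident without the pmd being split. The alignment trick, the page-size constants, and the assumption that THP is enabled and actually granted are illustrative only.

/*
 * Hedged sketch, not from this commit: probe residency of a THP-backed
 * anonymous region with mincore(2).  Assumes a 4 KiB base page and a
 * 2 MiB huge page (x86-64), and that THP is enabled on the system.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t huge = 2UL << 20;             /* 2 MiB: one pmd mapping    */
	size_t page = sysconf(_SC_PAGESIZE); /* typically 4 KiB           */
	unsigned char *vec;
	unsigned char *buf;
	size_t i, resident = 0;

	/* Over-allocate so a 2 MiB-aligned start address can be chosen. */
	buf = mmap(NULL, 2 * huge, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	buf = (unsigned char *)(((unsigned long)buf + huge - 1) & ~(huge - 1));

	madvise(buf, huge, MADV_HUGEPAGE);   /* request a huge page       */
	memset(buf, 0xaa, huge);             /* fault the range in        */

	vec = malloc(huge / page);           /* one byte per base page    */
	if (!vec || mincore(buf, huge, vec))
		return 1;

	for (i = 0; i < huge / page; i++)
		resident += vec[i] & 1;

	/* With a huge pmd backing the range, all 512 entries are set. */
	printf("%zu of %zu pages resident\n", resident, huge / page);
	free(vec);
	return 0;
}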