Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f815e146d8ae5fea8eefae2dcaf95ea..2e7addfd9803559ca03a2527aaab161006e3b482 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
@@ -7,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
@@ -119,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
-               return NULL;
+               return ERR_PTR(-ESRCH);
 
        mm = mm_for_maps(priv->task);
-       if (!mm)
-               return NULL;
+       if (!mm || IS_ERR(mm))
+               return mm;
        down_read(&mm->mmap_sem);
 
-       tail_vma = get_gate_vma(priv->task);
+       tail_vma = get_gate_vma(priv->task->mm);
        priv->tail_vma = tail_vma;
 
        /* Start with last addr hint */
@@ -180,7 +182,8 @@ static void m_stop(struct seq_file *m, void *v)
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
 
-       vma_stop(priv, vma);
+       if (!IS_ERR(vma))
+               vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
 }
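
The two hunks above change the seq_file iterator's error behaviour: m_start() used to return NULL on any failure, which reads back as an empty file, while returning ERR_PTR() lets the failure reach userspace as a real errno. Because the seq_file core hands whatever ->start() returned back to ->stop(), m_stop() must now test IS_ERR() before treating the value as a VMA. A minimal sketch of the pattern (get_the_mm() and first_vma() are hypothetical stand-ins):

	#include <linux/err.h>

	static void *example_start(struct seq_file *m, loff_t *pos)
	{
		struct mm_struct *mm = get_the_mm();	/* hypothetical helper */

		/* NULL ends the iteration silently; an ERR_PTR value
		 * propagates as -ESRCH, -EACCES, etc. to the read() caller. */
		if (!mm || IS_ERR(mm))
			return mm;
		return first_vma(mm, *pos);		/* hypothetical helper */
	}

get_gate_vma() also changed signature in this series to take an mm rather than a task, hence the get_gate_vma(priv->task->mm) call here and the matching updates in show_map() and show_smap() below.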
@@ -249,8 +252,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
-                               if (vma->vm_start <= mm->start_brk &&
-                                               vma->vm_end >= mm->brk) {
+                               if (vma->vm_start <= mm->brk &&
+                                               vma->vm_end >= mm->start_brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
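
The [heap] test above is relaxed from containment to intersection: the old condition only matched a VMA that covered the entire [start_brk, brk] range, while the new one matches any VMA that overlaps it. The underlying identity is the usual interval test, sketched here:

	/* Ranges [s1, e1] and [s2, e2] intersect iff each starts before
	 * the other ends.  The old form (s1 <= s2 && e1 >= e2) required
	 * full containment as well, so a VMA covering only part of the
	 * brk range was never labelled "[heap]". */
	static int ranges_overlap(unsigned long s1, unsigned long e1,
				  unsigned long s2, unsigned long e2)
	{
		return s1 <= e2 && e1 >= s2;
	}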
@@ -277,7 +280,8 @@ static int show_map(struct seq_file *m, void *v)
        show_map_vma(m, vma);
 
        if (m->count < m->size)  /* vma is copied successfully */
-               m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+               m->version = (vma != get_gate_vma(task->mm))
+                       ? vma->vm_start : 0;
        return 0;
 }
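
m->version is seq_file state that survives across read() calls on an open file; storing vma->vm_start here is what feeds the "last addr hint" consumed at the top of m_start(). Approximately (the resume logic itself is outside this diff):

	/* in m_start(), roughly: */
	unsigned long last_addr = m->version;

	if (last_addr)			/* resume where the previous read stopped */
		vma = find_vma(mm, last_addr);

The gate vma stores 0 instead, since it lives outside the mm's VMA list and cannot be looked up again with find_vma().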
 
@@ -329,58 +333,86 @@ struct mem_size_stats {
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
+       unsigned long anonymous_thp;
        unsigned long swap;
        u64 pss;
 };
 
-static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-                          struct mm_walk *walk)
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+               unsigned long ptent_size, struct mm_walk *walk)
 {
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
-       pte_t *pte, ptent;
-       spinlock_t *ptl;
        struct page *page;
        int mapcount;
 
-       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       for (; addr != end; pte++, addr += PAGE_SIZE) {
-               ptent = *pte;
-
-               if (is_swap_pte(ptent)) {
-                       mss->swap += PAGE_SIZE;
-                       continue;
-               }
+       if (is_swap_pte(ptent)) {
+               mss->swap += ptent_size;
+               return;
+       }
 
-               if (!pte_present(ptent))
-                       continue;
+       if (!pte_present(ptent))
+               return;
+
+       page = vm_normal_page(vma, addr, ptent);
+       if (!page)
+               return;
+
+       if (PageAnon(page))
+               mss->anonymous += ptent_size;
+
+       mss->resident += ptent_size;
+       /* Accumulate the size in pages that have been accessed. */
+       if (pte_young(ptent) || PageReferenced(page))
+               mss->referenced += ptent_size;
+       mapcount = page_mapcount(page);
+       if (mapcount >= 2) {
+               if (pte_dirty(ptent) || PageDirty(page))
+                       mss->shared_dirty += ptent_size;
+               else
+                       mss->shared_clean += ptent_size;
+               mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
+       } else {
+               if (pte_dirty(ptent) || PageDirty(page))
+                       mss->private_dirty += ptent_size;
+               else
+                       mss->private_clean += ptent_size;
+               mss->pss += (ptent_size << PSS_SHIFT);
+       }
+}
 
-               page = vm_normal_page(vma, addr, ptent);
-               if (!page)
-                       continue;
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+                          struct mm_walk *walk)
+{
+       struct mem_size_stats *mss = walk->private;
+       struct vm_area_struct *vma = mss->vma;
+       pte_t *pte;
+       spinlock_t *ptl;
 
-               if (PageAnon(page))
-                       mss->anonymous += PAGE_SIZE;
-
-               mss->resident += PAGE_SIZE;
-               /* Accumulate the size in pages that have been accessed. */
-               if (pte_young(ptent) || PageReferenced(page))
-                       mss->referenced += PAGE_SIZE;
-               mapcount = page_mapcount(page);
-               if (mapcount >= 2) {
-                       if (pte_dirty(ptent) || PageDirty(page))
-                               mss->shared_dirty += PAGE_SIZE;
-                       else
-                               mss->shared_clean += PAGE_SIZE;
-                       mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+       spin_lock(&walk->mm->page_table_lock);
+       if (pmd_trans_huge(*pmd)) {
+               if (pmd_trans_splitting(*pmd)) {
+                       spin_unlock(&walk->mm->page_table_lock);
+                       wait_split_huge_page(vma->anon_vma, pmd);
                } else {
-                       if (pte_dirty(ptent) || PageDirty(page))
-                               mss->private_dirty += PAGE_SIZE;
-                       else
-                               mss->private_clean += PAGE_SIZE;
-                       mss->pss += (PAGE_SIZE << PSS_SHIFT);
+                       smaps_pte_entry(*(pte_t *)pmd, addr,
+                                       HPAGE_PMD_SIZE, walk);
+                       spin_unlock(&walk->mm->page_table_lock);
+                       mss->anonymous_thp += HPAGE_PMD_SIZE;
+                       return 0;
                }
+       } else {
+               spin_unlock(&walk->mm->page_table_lock);
        }
+       /*
+        * The mmap_sem held all the way back in m_start() is what
+        * keeps khugepaged out of here and from collapsing things
+        * in here.
+        */
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE)
+               smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
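
Two things to note in the rewritten walker. First, the per-pte accounting is factored out into smaps_pte_entry() with an explicit size so a transparent huge page can be accounted in one shot: when the pmd is huge (and not mid-split), the pmd entry itself is handed in as if it were a single pte covering HPAGE_PMD_SIZE, and the range is additionally counted as AnonHugePages. Second, PSS is kept in fixed point: PSS_SHIFT (12 in this file) widens the value before the division by mapcount, and show_smap() shifts it back out. A standalone illustration of the arithmetic:

	#include <stdio.h>

	#define PSS_SHIFT 12

	int main(void)
	{
		unsigned long long pss = 0;
		unsigned long page_size = 4096;	/* one 4 KiB page */
		int mapcount = 3;		/* shared by three processes */

		/* same computation as smaps_pte_entry() */
		pss += ((unsigned long long)page_size << PSS_SHIFT) / mapcount;
		printf("%llu\n", pss >> PSS_SHIFT);	/* prints 1365 (bytes) */
		return 0;
	}

For a single page the plain division would round 4096/3 down to 1365 anyway; the fixed point matters once many fractional contributions are summed before the final shift in show_smap().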
@@ -416,6 +448,7 @@ static int show_smap(struct seq_file *m, void *v)
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
+                  "AnonHugePages:  %8lu kB\n"
                   "Swap:           %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
@@ -429,6 +462,7 @@ static int show_smap(struct seq_file *m, void *v)
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
+                  mss.anonymous_thp >> 10,
                   mss.swap >> 10,
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10,
@@ -436,7 +470,8 @@ static int show_smap(struct seq_file *m, void *v)
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
        if (m->count < m->size)  /* vma is copied successfully */
-               m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+               m->version = (vma != get_gate_vma(task->mm))
+                       ? vma->vm_start : 0;
        return 0;
 }
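
With the new field, the block rendered by this seq_printf() gains one line; with illustrative values for a VMA backed by a single 2 MiB transparent huge page, the relevant part of /proc/<pid>/smaps would read roughly:

	Referenced:         2048 kB
	Anonymous:          2048 kB
	AnonHugePages:      2048 kB
	Swap:                  0 kB

All counters are accumulated in bytes and converted to kB with ">> 10" (divide by 1024) at print time.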
 
@@ -467,6 +502,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        spinlock_t *ptl;
        struct page *page;
 
+       split_huge_page_pmd(walk->mm, pmd);
+
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
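
clear_refs_pte_range() here and pagemap_pte_range() in the next hunk take the opposite approach to smaps: rather than teaching the pte loop about huge pmds, they call split_huge_page_pmd() up front, so that by the time pte_offset_map_lock() runs only normal 4 KiB ptes remain. split_huge_page_pmd() is a no-op when the pmd is not huge, so the common case costs a single check; splitting is the simplest way to keep these walkers correct under THP, whereas smaps accounts the huge pmd in place to avoid breaking pages up just to report statistics. A sketch of the pattern, with the per-pte work elided:

	static int example_pte_range(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		pte_t *pte;
		spinlock_t *ptl;

		split_huge_page_pmd(walk->mm, pmd);	/* flatten any THP first */

		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			;	/* per-pte work goes here */
		pte_unmap_unlock(pte - 1, ptl);
		return 0;
	}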
@@ -623,6 +660,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        pte_t *pte;
        int err = 0;
 
+       split_huge_page_pmd(walk->mm, pmd);
+
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        for (; addr != end; addr += PAGE_SIZE) {
@@ -728,8 +767,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!task)
                goto out;
 
-       ret = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+       mm = mm_for_maps(task);
+       ret = PTR_ERR(mm);
+       if (!mm || IS_ERR(mm))
                goto out_task;
 
        ret = -EINVAL;
@@ -742,10 +782,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!count)
                goto out_task;
 
-       mm = get_task_mm(task);
-       if (!mm)
-               goto out_task;
-
        pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
        ret = -ENOMEM;
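
The removed ptrace_may_access() + get_task_mm() pair is folded into mm_for_maps(), which performs the access check and takes the mm reference together, closing a window between the two steps that the open-coded sequence left open. It can return a valid mm, NULL (a task with no address space, e.g. a kernel thread), or an ERR_PTR; note that PTR_ERR(NULL) is 0, so the NULL case makes this read() return 0 bytes rather than an error. A rough, unlocked sketch of the semantics only (the real helper lives in fs/proc/base.c and serializes against exec):

	/* sketch only -- the real mm_for_maps() does this under locking */
	static struct mm_struct *mm_for_maps_sketch(struct task_struct *task)
	{
		if (!ptrace_may_access(task, PTRACE_MODE_READ))
			return ERR_PTR(-EACCES);
		return get_task_mm(task);	/* NULL for kernel threads */
	}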