vmscan: report vm_flags in page_referenced()
author		Wu Fengguang <fengguang.wu@intel.com>
		Tue, 16 Jun 2009 22:33:05 +0000 (15:33 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 17 Jun 2009 02:47:44 +0000 (19:47 -0700)
Collect vma->vm_flags of the VMAs that actually referenced the page.

This prepares for more informed reclaim heuristics, e.g. to protect
executable file pages more aggressively.  For now only the VM_EXEC bit
will be used by the caller, along the lines sketched below.
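
As an illustration of the intended use (a hypothetical sketch, not part
of this patch: page_is_file_cache() is the existing file/anon test, and
the l_active list is assumed caller state), the active-list scan could
consume the reported flags roughly like this:

	unsigned long vm_flags;

	/* page_referenced() clears PageReferenced and fills vm_flags */
	if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags) &&
	    (vm_flags & VM_EXEC) && page_is_file_cache(page)) {
		/* referenced executable file page: keep it active */
		list_add(&page->lru, &l_active);
		continue;
	}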

Thanks to Johannes, Peter and Minchan for all the good tips.

Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/rmap.c
mm/vmscan.c

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 619379a1dd98508136c5e5edc3c2f02ef4849aad..216d024f830d2f272d4b38a768819c00bc7239d1 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+                       struct mem_cgroup *cnt, unsigned long *vm_flags);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -117,7 +118,7 @@ int try_to_munlock(struct page *);
 #define anon_vma_prepare(vma)  (0)
 #define anon_vma_link(vma)     do {} while (0)
 
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
diff --git a/mm/rmap.c b/mm/rmap.c
index 316c9d6930ad204b9e8ebae20a84d2a97a0ad67c..c9ccc1a72dc32652827c0d6d81add911f59abbfb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -333,7 +333,9 @@ static int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 static int page_referenced_one(struct page *page,
-       struct vm_area_struct *vma, unsigned int *mapcount)
+                              struct vm_area_struct *vma,
+                              unsigned int *mapcount,
+                              unsigned long *vm_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -381,11 +383,14 @@ out_unmap:
        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
 out:
+       if (referenced)
+               *vm_flags |= vma->vm_flags;
        return referenced;
 }
 
 static int page_referenced_anon(struct page *page,
-                               struct mem_cgroup *mem_cont)
+                               struct mem_cgroup *mem_cont,
+                               unsigned long *vm_flags)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -405,7 +410,8 @@ static int page_referenced_anon(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma, &mapcount);
+               referenced += page_referenced_one(page, vma,
+                                                 &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }
@@ -418,6 +424,7 @@ static int page_referenced_anon(struct page *page,
  * page_referenced_file - referenced check for object-based rmap
  * @page: the page we're checking references on.
  * @mem_cont: target memory controller
+ * @vm_flags: collect the vm_flags of the VMAs that actually referenced the page
  *
  * For an object-based mapped page, find all the places it is mapped and
  * check/clear the referenced flag.  This is done by following the page->mapping
@@ -427,7 +434,8 @@ static int page_referenced_anon(struct page *page,
  * This function is only called from page_referenced for object-based pages.
  */
 static int page_referenced_file(struct page *page,
-                               struct mem_cgroup *mem_cont)
+                               struct mem_cgroup *mem_cont,
+                               unsigned long *vm_flags)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -467,7 +475,8 @@ static int page_referenced_file(struct page *page,
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
-               referenced += page_referenced_one(page, vma, &mapcount);
+               referenced += page_referenced_one(page, vma,
+                                                 &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }
@@ -481,29 +490,35 @@ static int page_referenced_file(struct page *page,
  * @page: the page to test
  * @is_locked: caller holds lock on the page
  * @mem_cont: target memory controller
+ * @vm_flags: collect the vm_flags of the VMAs that actually referenced the page
  *
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked,
-                       struct mem_cgroup *mem_cont)
+int page_referenced(struct page *page,
+                   int is_locked,
+                   struct mem_cgroup *mem_cont,
+                   unsigned long *vm_flags)
 {
        int referenced = 0;
 
        if (TestClearPageReferenced(page))
                referenced++;
 
+       *vm_flags = 0;
        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page, mem_cont);
+                       referenced += page_referenced_anon(page, mem_cont,
+                                                               vm_flags);
                else if (is_locked)
-                       referenced += page_referenced_file(page, mem_cont);
+                       referenced += page_referenced_file(page, mem_cont,
+                                                               vm_flags);
                else if (!trylock_page(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced +=
-                                       page_referenced_file(page, mem_cont);
+                               referenced += page_referenced_file(page,
+                                                       mem_cont, vm_flags);
                        unlock_page(page);
                }
        }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 52339dd7bf858856f623516f836a653ad7542033..6be2068f61c83071748d890546b4fd9febebcee3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -577,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        struct pagevec freed_pvec;
        int pgactivate = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long vm_flags;
 
        cond_resched();
 
@@ -627,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1, sc->mem_cgroup);
+               referenced = page_referenced(page, 1,
+                                               sc->mem_cgroup, &vm_flags);
                /* In active use or really unfreeable?  Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
@@ -1208,6 +1210,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 {
        unsigned long pgmoved;
        unsigned long pgscanned;
+       unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);
        struct page *page;
@@ -1248,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup))
+                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
                        pgmoved++;
 
                list_add(&page->lru, &l_inactive);
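
Design note: page_referenced() zeroes *vm_flags up front, and
page_referenced_one() ORs in vma->vm_flags only when that VMA's pte was
actually referenced, so the caller sees the union of flags from the
referencing VMAs (and 0 when nothing referenced the page).  Note that
the !CONFIG_MMU stub macro ignores the flags argument entirely and
never writes to it.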