mm/rmap: use rmap_walk() in page_mkclean()
author     Joonsoo Kim <iamjoonsoo.kim@lge.com>
           Tue, 21 Jan 2014 23:49:55 +0000 (15:49 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 22 Jan 2014 00:19:46 +0000 (16:19 -0800)
Now we have an infrastructure in rmap_walk() to handle the differences
between the variants of the rmap traversal functions.

So just use it in page_mkclean().

In this patch, I change the following things:

1. Remove a variant of the rmap traversal functions, page_mkclean_file().
2. Mechanically convert page_mkclean() to use rmap_walk(); see the
   sketch of the generic file-backed walk after this list.
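
For context, the traversal that replaces page_mkclean_file() lives behind
rmap_walk(). A simplified sketch of its file-backed leg, based on the
mm/rmap.c of this era (not a verbatim copy; corner cases elided):

	static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
	{
		struct address_space *mapping = page->mapping;
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		struct vm_area_struct *vma;
		int ret = SWAP_AGAIN;

		if (!mapping)
			return ret;
		mutex_lock(&mapping->i_mmap_mutex);
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			unsigned long address = vma_address(page, vma);

			/* Let the caller veto uninteresting VMAs. */
			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			ret = rwc->rmap_one(page, vma, address, rwc->arg);
			if (ret != SWAP_AGAIN)
				break;
		}
		mutex_unlock(&mapping->i_mmap_mutex);
		return ret;
	}

The walker owns the i_mmap locking and interval-tree iteration once;
callers only supply the callbacks and an opaque cookie.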

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/rmap.c

index 080413036406e732410fefb5393d6cb4566a8d23..962e2a1e13a0f2a9a049c3d9a391591a4244bbdf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -812,12 +812,13 @@ int page_referenced(struct page *page,
 }
 
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
-                           unsigned long address)
+                           unsigned long address, void *arg)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;
+       int *cleaned = arg;
 
        pte = page_check_address(page, mm, address, &ptl, 1);
        if (!pte)
@@ -836,44 +837,44 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
        pte_unmap_unlock(pte, ptl);
 
-       if (ret)
+       if (ret) {
                mmu_notifier_invalidate_page(mm, address);
+               (*cleaned)++;
+       }
 out:
-       return ret;
+       return SWAP_AGAIN;
 }
 
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
+static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-       struct vm_area_struct *vma;
-       int ret = 0;
-
-       BUG_ON(PageAnon(page));
+       if (vma->vm_flags & VM_SHARED)
+               return 0;
 
-       mutex_lock(&mapping->i_mmap_mutex);
-       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-               if (vma->vm_flags & VM_SHARED) {
-                       unsigned long address = vma_address(page, vma);
-                       ret += page_mkclean_one(page, vma, address);
-               }
-       }
-       mutex_unlock(&mapping->i_mmap_mutex);
-       return ret;
+       return 1;
 }
 
 int page_mkclean(struct page *page)
 {
-       int ret = 0;
+       int cleaned = 0;
+       struct address_space *mapping;
+       struct rmap_walk_control rwc = {
+               .arg = (void *)&cleaned,
+               .rmap_one = page_mkclean_one,
+               .invalid_vma = invalid_mkclean_vma,
+       };
 
        BUG_ON(!PageLocked(page));
 
-       if (page_mapped(page)) {
-               struct address_space *mapping = page_mapping(page);
-               if (mapping)
-                       ret = page_mkclean_file(mapping, page);
-       }
+       if (!page_mapped(page))
+               return 0;
 
-       return ret;
+       mapping = page_mapping(page);
+       if (!mapping)
+               return 0;
+
+       rmap_walk(page, &rwc);
+
+       return cleaned;
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
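
One design note on the converted callback: page_mkclean_one() now always
returns SWAP_AGAIN so the walk visits every mapping of the page, and the
clean count travels back through the opaque ->arg cookie rather than the
return value. A minimal hypothetical walker built on the same contract
(count_mappings() and count_one() are invented names for illustration,
not kernel functions):

	/* Hypothetical example of the rmap_walk_control contract. */
	static int count_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
	{
		int *count = arg;

		(*count)++;		/* per-walk state travels through ->arg */
		return SWAP_AGAIN;	/* keep visiting the other mappings */
	}

	static int count_mappings(struct page *page)
	{
		int count = 0;
		struct rmap_walk_control rwc = {
			.arg = (void *)&count,
			.rmap_one = count_one,
		};

		rmap_walk(page, &rwc);
		return count;
	}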