mm: non-atomically mark page accessed during page cache allocation where possible
index 1fb25f8bb1553663cfbec690732265b3b6ea99e7..9e8e3472248bb8dfa10107fb212974e1343ffa4a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -614,6 +614,17 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
+/*
+ * Mark a page accessed before it becomes visible to other CPUs, i.e.
+ * while it is still safe to use non-atomic ops.
+ */
+void init_page_accessed(struct page *page)
+{
+       if (!PageReferenced(page))
+               __SetPageReferenced(page);
+}
+EXPORT_SYMBOL(init_page_accessed);
+
 static void __lru_cache_add(struct page *page)
 {
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
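
For reference, here is a minimal sketch of how a page cache allocation path might use the new helper. The wrapper name alloc_page_cache_page() is invented for illustration; __page_cache_alloc(), add_to_page_cache_lru() and page_cache_release() are existing kernel APIs of this era. The point is that init_page_accessed() runs before the page is published to the radix tree or the LRU, so the non-atomic flag set cannot race with another CPU.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/*
 * Hypothetical caller: allocate a page destined for the page cache and
 * mark it referenced while it is still private to this thread.
 */
static struct page *alloc_page_cache_page(struct address_space *mapping,
					  pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);

	if (!page)
		return NULL;

	/* Not yet in the radix tree or on the LRU: non-atomic set is safe. */
	init_page_accessed(page);

	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
		/* Insertion failed; drop the allocation reference. */
		page_cache_release(page);
		return NULL;
	}
	return page;
}

The design point is that SetPageReferenced() is a locked bit operation, while __SetPageReferenced() is a plain store; the latter is cheaper but only legal while no other CPU can see the page, which is exactly the window init_page_accessed() is meant for.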