rk312x DDR: correct ddr cap print error, rx dll set and ddr dll bypass freq
[firefly-linux-kernel-4.4.55.git] / mm/swap_state.c
index 46680461785bef647c3618bc7d493d1909aea80f..f24ab0dff554262e1da6866b866a4584871f8d9c 100644
@@ -6,7 +6,6 @@
  *
  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
  */
-#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/kernel_stat.h>
@@ -14,8 +13,8 @@
 #include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
+#include <linux/blkdev.h>
 #include <linux/pagevec.h>
 #include <linux/migrate.h>
 #include <linux/page_cgroup.h>
@@ -28,7 +27,7 @@
  */
 static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
-       .set_page_dirty = __set_page_dirty_nobuffers,
+       .set_page_dirty = swap_set_page_dirty,
        .migratepage    = migrate_page,
 };
 
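The new set_page_dirty handler comes from the swap-over-NFS side of this series and lives in mm/page_io.c; a rough sketch of its shape (hedged, since only the a_ops wiring is visible in this hunk): pages backed by a swapfile on a filesystem are dirtied through that filesystem's a_ops, while block-device swap keeps the no-writeback behaviour.

int swap_set_page_dirty(struct page *page)
{
        struct swap_info_struct *sis = page_swap_info(page);

        if (sis->flags & SWP_FILE) {
                /* Swapfile on a filesystem (e.g. NFS): use its a_ops. */
                struct address_space *mapping = sis->swap_file->f_mapping;
                return mapping->a_ops->set_page_dirty(page);
        } else {
                /* Block-device swap: mark dirty without buffer_heads. */
                return __set_page_dirty_no_writeback(page);
        }
}
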
@@ -37,12 +36,12 @@ static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
 };
 
-struct address_space swapper_space = {
-       .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-       .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
-       .a_ops          = &swap_aops,
-       .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
-       .backing_dev_info = &swap_backing_dev_info,
+struct address_space swapper_spaces[MAX_SWAPFILES] = {
+       [0 ... MAX_SWAPFILES - 1] = {
+               .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
+               .a_ops          = &swap_aops,
+               .backing_dev_info = &swap_backing_dev_info,
+       }
 };
 
 #define INC_CACHE_INFO(x)      do { swap_cache_info.x++; } while (0)
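With one address_space per swap type, a swap entry picks its space by the type encoded in it. A sketch of the selector this series pairs with the array on the include/linux/swap.h side; note the .tree_lock initializer disappears from the static initializer above, presumably set up at boot with spin_lock_init() for each element instead:

/* Sketch: the swap type encoded in an entry indexes swapper_spaces[]. */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
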
@@ -54,13 +53,24 @@ static struct {
        unsigned long find_total;
 } swap_cache_info;
 
+unsigned long total_swapcache_pages(void)
+{
+       int i;
+       unsigned long ret = 0;
+
+       for (i = 0; i < MAX_SWAPFILES; i++)
+               ret += swapper_spaces[i].nrpages;
+       return ret;
+}
+
 void show_swap_cache_info(void)
 {
-       printk("%lu pages in swap cache\n", total_swapcache_pages);
+       printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
-       printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+       printk("Free swap  = %ldkB\n",
+               get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
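get_nr_swap_pages() exists because nr_swap_pages is no longer a plain long; elsewhere in this series it becomes an atomic_long_t so it can be updated without holding swap_lock. A minimal sketch of the helper, assuming that conversion:

extern atomic_long_t nr_swap_pages;

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}
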
@@ -68,9 +78,10 @@ void show_swap_cache_info(void)
  * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
        int error;
+       struct address_space *address_space;
 
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
@@ -80,14 +91,16 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
        SetPageSwapCache(page);
        set_page_private(page, entry.val);
 
-       spin_lock_irq(&swapper_space.tree_lock);
-       error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+       address_space = swap_address_space(entry);
+       spin_lock_irq(&address_space->tree_lock);
+       error = radix_tree_insert(&address_space->page_tree,
+                                       entry.val, page);
        if (likely(!error)) {
-               total_swapcache_pages++;
+               address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
-       spin_unlock_irq(&swapper_space.tree_lock);
+       spin_unlock_irq(&address_space->tree_lock);
 
        if (unlikely(error)) {
                /*
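For context, the add_to_swap_cache() wrapper just below this hunk is unchanged and shows why dropping the static above is useful: the radix-tree preload happens outside, the insertion under the per-space tree_lock inside, so callers outside this file (the function gains a declaration in swap.h) can bring their own preload. Roughly, noting the exact preload variant differs across kernel versions:

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
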
@@ -123,14 +136,19 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
  */
 void __delete_from_swap_cache(struct page *page)
 {
+       swp_entry_t entry;
+       struct address_space *address_space;
+
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));
 
-       radix_tree_delete(&swapper_space.page_tree, page_private(page));
+       entry.val = page_private(page);
+       address_space = swap_address_space(entry);
+       radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
-       total_swapcache_pages--;
+       address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
 }
@@ -142,7 +160,7 @@ void __delete_from_swap_cache(struct page *page)
  * Allocate swap space for the page and add the page to the
  * swap cache.  Caller needs to hold the page lock. 
  */
-int add_to_swap(struct page *page)
+int add_to_swap(struct page *page, struct list_head *list)
 {
        swp_entry_t entry;
        int err;
@@ -155,7 +173,7 @@ int add_to_swap(struct page *page)
                return 0;
 
        if (unlikely(PageTransHuge(page)))
-               if (unlikely(split_huge_page(page))) {
+               if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry, NULL);
                        return 0;
                }
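The new list parameter lets the reclaim path thread its own page list through to split_huge_page_to_list(), so the tail pages of a split THP land on the list currently being reclaimed rather than on the global LRU. A hypothetical call-site sketch, with names assumed from mm/vmscan.c's shrink_page_list():

/* Hypothetical sketch: page_list is the batch being reclaimed;
 * tail pages produced by the split stay visible to this pass. */
if (PageAnon(page) && !PageSwapCache(page)) {
        if (!add_to_swap(page, page_list))
                goto activate_locked;
        may_enter_fs = 1;
}
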
@@ -196,12 +214,14 @@ int add_to_swap(struct page *page)
 void delete_from_swap_cache(struct page *page)
 {
        swp_entry_t entry;
+       struct address_space *address_space;
 
        entry.val = page_private(page);
 
-       spin_lock_irq(&swapper_space.tree_lock);
+       address_space = swap_address_space(entry);
+       spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
-       spin_unlock_irq(&swapper_space.tree_lock);
+       spin_unlock_irq(&address_space->tree_lock);
 
        swapcache_free(entry, page);
        page_cache_release(page);
@@ -264,7 +284,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 {
        struct page *page;
 
-       page = find_get_page(&swapper_space, entry.val);
+       page = find_get_page(swap_address_space(entry), entry.val);
 
        if (page)
                INC_CACHE_INFO(find_success);
@@ -291,7 +311,8 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
-               found_page = find_get_page(&swapper_space, entry.val);
+               found_page = find_get_page(swap_address_space(entry),
+                                       entry.val);
                if (found_page)
                        break;
 
@@ -315,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
-               if (err == -EEXIST) {   /* seems racy */
+               if (err == -EEXIST) {
                        radix_tree_preload_end();
+                       /*
+                        * We might race against get_swap_page() and stumble
+                        * across a SWAP_HAS_CACHE swap_map entry whose page
+                        * has not been brought into the swapcache yet, while
+                        * the other end is scheduled away waiting on discard
+                        * I/O completion at scan_swap_map().
+                        *
+                        * In order to avoid turning this transitory state
+                        * into a permanent loop around this -EEXIST case
+                        * if !CONFIG_PREEMPT and the I/O completion happens
+                        * to be waiting on the CPU waitqueue where we are now
+                        * busy looping, we just conditionally invoke the
+                        * scheduler here, if there are some more important
+                        * tasks to run.
+                        */
+                       cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
@@ -374,27 +411,29 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
 {
-       int nr_pages;
        struct page *page;
-       unsigned long offset;
-       unsigned long end_offset;
-
-       /*
-        * Get starting offset for readaround, and number of pages to read.
-        * Adjust starting address by readbehind (for NUMA interleave case)?
-        * No, it's very unlikely that swap layout would follow vma layout,
-        * more likely that neighbouring swap pages came from the same node:
-        * so use the same "addr" to choose the same node for each swap read.
-        */
-       nr_pages = valid_swaphandles(entry, &offset);
-       for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+       unsigned long offset = swp_offset(entry);
+       unsigned long start_offset, end_offset;
+       unsigned long mask = (1UL << page_cluster) - 1;
+       struct blk_plug plug;
+
+       /* Read a page_cluster sized and aligned cluster around offset. */
+       start_offset = offset & ~mask;
+       end_offset = offset | mask;
+       if (!start_offset)      /* First page is swap header. */
+               start_offset++;
+
+       blk_start_plug(&plug);
+       for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
-                       break;
+                       continue;
                page_cache_release(page);
        }
+       blk_finish_plug(&plug);
+
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
 }
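
A quick worked example of the cluster arithmetic, with illustrative values only:

/*
 * Illustration, page_cluster = 3 (the default):
 *   mask         = (1UL << 3) - 1  = 0x7
 *   offset       = 0x1234
 *   start_offset = 0x1234 & ~0x7   = 0x1230
 *   end_offset   = 0x1234 |  0x7   = 0x1237
 * Eight aligned slots are read under one blk_plug, so the block
 * layer can merge adjacent requests before blk_finish_plug()
 * submits them; the trailing synchronous read then typically hits
 * the already-cached target page.
 */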