swap: change swap_list_head to plist, add swap_avail_head
diff --git a/mm/frontswap.c b/mm/frontswap.c
index fae11602e8a958f5361ba5a534b6c6d1583aae5a..c30eec536f03fb7148e3c7a08538f6a2c3571857 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
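This commit replaces the plain swap_list_head list with a priority-sorted plist named swap_active_head (and adds swap_avail_head for the allocation path), so frontswap's walks over all active devices switch from list_for_each_entry() to plist_for_each_entry(). For context, a minimal sketch of the plist API from include/linux/plist.h that the patch switches to; the demo_* names are hypothetical:

    #include <linux/plist.h>

    struct demo_entry {
            int payload;
            struct plist_node node;         /* keeps entries sorted by node.prio */
    };

    static PLIST_HEAD(demo_head);           /* statically initialized empty plist */

    static void demo_add(struct demo_entry *e, int prio)
    {
            plist_node_init(&e->node, prio);        /* lower prio sorts first */
            plist_add(&e->node, &demo_head);        /* insert at sorted position */
    }

    static int demo_sum(void)
    {
            struct demo_entry *e;
            int sum = 0;

            /* same iteration shape as the frontswap loops below */
            plist_for_each_entry(e, &demo_head, node)
                    sum += e->payload;
            return sum;
    }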
@@ -331,7 +331,7 @@ static unsigned long __frontswap_curr_pages(void)
        struct swap_info_struct *si = NULL;
 
        assert_spin_locked(&swap_lock);
-       list_for_each_entry(si, &swap_list_head, list)
+       plist_for_each_entry(si, &swap_active_head, list)
                totalpages += atomic_read(&si->frontswap_pages);
        return totalpages;
 }
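The head iterated above is kept ordered by swap priority. Since plist sorts ascending (lowest prio first) while swap wants the highest-priority device tried first, the activation path stores the negated priority. A hedged sketch of that hookup; demo_activate() is hypothetical and the exact insertion sequence in mm/swapfile.c may differ:

    #include <linux/plist.h>
    #include <linux/swap.h>

    extern struct plist_head swap_active_head;      /* defined in mm/swapfile.c */

    static void demo_activate(struct swap_info_struct *si, int prio)
    {
            si->prio = prio;
            plist_node_init(&si->list, -prio);      /* negate: plist is ascending */
            plist_add(&si->list, &swap_active_head);
    }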
@@ -346,7 +346,7 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
        unsigned long pages = 0, pages_to_unuse = 0;
 
        assert_spin_locked(&swap_lock);
-       list_for_each_entry(si, &swap_list_head, list) {
+       plist_for_each_entry(si, &swap_active_head, list) {
                si_frontswap_pages = atomic_read(&si->frontswap_pages);
                if (total_pages_to_unuse < si_frontswap_pages) {
                        pages = pages_to_unuse = total_pages_to_unuse;
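This hunk walks active devices in priority order and carves the unuse target out of the first (highest-priority) device. A hedged, simplified sketch of that selection, dropping the memory-accounting checks of the real __frontswap_unuse_pages(); demo_pick_device() is hypothetical, and a returned *unused of 0 tells try_to_unuse() to pull back every page on the device:

    static int demo_pick_device(unsigned long total, unsigned long *unused,
                                int *swapid)
    {
            struct swap_info_struct *si;

            assert_spin_locked(&swap_lock);
            plist_for_each_entry(si, &swap_active_head, list) {
                    unsigned long have = atomic_read(&si->frontswap_pages);

                    /* 0 = "unuse everything on this device" */
                    *unused = total < have ? total : 0;
                    *swapid = si->type;
                    return 0;
            }
            return -EINVAL;                 /* no active swap devices */
    }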
@@ -408,7 +408,7 @@ void frontswap_shrink(unsigned long target_pages)
        /*
         * we don't want to hold swap_lock while doing a very
         * lengthy try_to_unuse, but swap_list may change
-        * so restart scan from swap_list_head each time
+        * so restart scan from swap_active_head each time
         */
        spin_lock(&swap_lock);
        ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
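The pattern the comment describes, restated as a hedged sketch (this mirrors the body of frontswap_shrink() with error handling trimmed): hold swap_lock only long enough to pick a device and a page count, then run the potentially very slow try_to_unuse() unlocked; each call rescans from swap_active_head, so nothing stale is carried across the unlocked section:

    void demo_shrink(unsigned long target_pages)
    {
            unsigned long pages_to_unuse = 0;
            int type, ret;

            spin_lock(&swap_lock);
            ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
            spin_unlock(&swap_lock);
            if (ret == 0)
                    try_to_unuse(type, true, pages_to_unuse);   /* may sleep */
    }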