slub: move discard_slab out of node lock
author    Shaohua Li <shaohua.li@intel.com>
          Mon, 14 Nov 2011 05:34:13 +0000 (13:34 +0800)
committer Pekka Enberg <penberg@kernel.org>
          Tue, 15 Nov 2011 18:41:00 +0000 (20:41 +0200)
Lockdep reports a potential deadlock on the slub node list_lock:
discard_slab() is called with the lock held in unfreeze_partials(),
which could trigger a slab allocation, which could take the lock
again.

discard_slab() doesn't actually need to hold the lock, since the slab
has already been removed from the partial list.

Acked-by: Christoph Lameter <cl@linux.com>
Reported-and-tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-and-tested-by: Julie Sullivan <kernelmail.jms@gmail.com>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
mm/slub.c

index 60e16c43f88c89ab1832b03070d73f6c6e697967..00efbb56a268e239bec9641ebd24d108794e7f90 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 {
        struct kmem_cache_node *n = NULL;
        struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-       struct page *page;
+       struct page *page, *discard_page = NULL;
 
        while ((page = c->partial)) {
                enum slab_modes { M_PARTIAL, M_FREE };
@@ -1916,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
                                "unfreezing slab"));
 
                if (m == M_FREE) {
-                       stat(s, DEACTIVATE_EMPTY);
-                       discard_slab(s, page);
-                       stat(s, FREE_SLAB);
+                       page->next = discard_page;
+                       discard_page = page;
                }
        }
 
        if (n)
                spin_unlock(&n->list_lock);
+
+       while (discard_page) {
+               page = discard_page;
+               discard_page = discard_page->next;
+
+               stat(s, DEACTIVATE_EMPTY);
+               discard_slab(s, page);
+               stat(s, FREE_SLAB);
+       }
 }
 
 /*
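
For reference, a minimal stand-alone sketch of the pattern the patch
applies: unlink empty entries onto a local singly-linked list while the
lock is held, then free them only after the lock is dropped, so the free
path can never recurse into the lock. All names below (struct item,
list_lock, reap_empty_items) are illustrative, using pthreads/libc rather
than kernel API.

#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int empty;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *partial_list;

static void reap_empty_items(void)
{
	struct item *it, *discard = NULL, **pp = &partial_list;

	pthread_mutex_lock(&list_lock);
	while ((it = *pp)) {
		if (it->empty) {
			*pp = it->next;      /* unlink while the lock is held */
			it->next = discard;  /* defer the actual free */
			discard = it;
		} else {
			pp = &it->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* Freeing may re-enter the allocator, so do it without the lock. */
	while (discard) {
		it = discard;
		discard = discard->next;
		free(it);
	}
}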