lightnvm: fix locking and mempool in rrpc_lun_gc
author Wenwei Tao <ww.tao0320@gmail.com>
Tue, 12 Jan 2016 06:49:25 +0000 (07:49 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 15 Sep 2016 06:27:40 +0000 (08:27 +0200)
[ Upstream commit b262924be03d5d2ae735bc9a4b37eb2c613f61f8 ]

This patch fixes two issues in rrpc_lun_gc:

1. prio_list is protected by the rrpc_lun's lock, not the nvm_lun's, so
acquire rlun's lock instead of lun's before operating on the list.

2. We used to delete a block from prio_list before allocating the gcb,
but the gcb allocation may fail and the function then breaks out without
putting the block back on the list, so that block would never be
reclaimed. To solve this, delete the block only after the gcb has been
allocated (a short sketch of this ordering follows the list).
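For readers less familiar with the driver, here is a minimal user-space
sketch of the ordering described in item 2. The names (victim, gc_work,
queue_gc) are made up for illustration and are not the rrpc code itself;
the point is only that the fallible allocation happens before the entry
is unlinked from the shared list.

    /*
     * Illustrative sketch: allocate the work item first, and unlink the
     * entry from the list only once allocation has succeeded, so a failed
     * allocation never loses the entry.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct victim {
            struct victim *next;
            int id;
    };

    struct gc_work {
            struct victim *v;
    };

    static int queue_gc(struct victim **headp)
    {
            struct victim *v = *headp;
            struct gc_work *w;

            if (!v)
                    return -1;

            /* allocate before unlinking: on failure the entry stays listed */
            w = malloc(sizeof(*w));
            if (!w)
                    return -1;

            *headp = v->next;      /* unlink only after allocation succeeded */
            v->next = NULL;

            w->v = v;
            printf("queued block %d for GC\n", w->v->id);
            free(w);               /* a real driver would hand w to a workqueue */
            return 0;
    }

    int main(void)
    {
            struct victim blk = { .next = NULL, .id = 7 };
            struct victim *prio_head = &blk;

            return queue_gc(&prio_head) ? 1 : 0;
    }

The hunks below apply the same allocate-then-unlink ordering to the real
gcb mempool allocation and prio_list, and switch the lock to rlun's.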

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/lightnvm/rrpc.c

index 748cab499580914f57f56f405abb13d3415438c1..a9859489acf6c46ce9f88f26bb6db616ad5bf2bf 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -429,7 +429,7 @@ static void rrpc_lun_gc(struct work_struct *work)
        if (nr_blocks_need < rrpc->nr_luns)
                nr_blocks_need = rrpc->nr_luns;
 
-       spin_lock(&lun->lock);
+       spin_lock(&rlun->lock);
        while (nr_blocks_need > lun->nr_free_blocks &&
                                        !list_empty(&rlun->prio_list)) {
                struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -438,16 +438,16 @@ static void rrpc_lun_gc(struct work_struct *work)
                if (!rblock->nr_invalid_pages)
                        break;
 
+               gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+               if (!gcb)
+                       break;
+
                list_del_init(&rblock->prio);
 
                BUG_ON(!block_is_full(rrpc, rblock));
 
                pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
 
-               gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
-               if (!gcb)
-                       break;
-
                gcb->rrpc = rrpc;
                gcb->rblk = rblock;
                INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -456,7 +456,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 
                nr_blocks_need--;
        }
-       spin_unlock(&lun->lock);
+       spin_unlock(&rlun->lock);
 
        /* TODO: Hint that request queue can be started again */
 }