Revert "dm: do not allocate any mempools for blk-mq request-based DM"
author Mike Snitzer <snitzer@redhat.com>
Fri, 26 Jun 2015 13:42:57 +0000 (09:42 -0400)
committer Mike Snitzer <snitzer@redhat.com>
Fri, 26 Jun 2015 14:11:07 +0000 (10:11 -0400)
This reverts commit cbc4e3c1350beb47beab8f34ad9be3d34a20c705.

Reported-by: Junichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-table.c
drivers/md/dm.c

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 85e1d39e9a38a568b7c05c1e4d49eda2e9b118fe..a5f94125ad01f6b3a3a43fd7ae78e9bad0747995 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -964,8 +964,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
                return -EINVAL;
        }
 
-       if (IS_ERR(t->mempools))
-               return PTR_ERR(t->mempools);
+       if (!t->mempools)
+               return -ENOMEM;
 
        return 0;
 }
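
The dm-table.c hunk restores the older error convention: dm_table_alloc_md_mempools() once again treats a NULL return from the mempool allocators as -ENOMEM, rather than decoding an errno embedded in the pointer with IS_ERR()/PTR_ERR(). The sketch below contrasts the two conventions in plain userspace C; the ERR_PTR/IS_ERR/PTR_ERR helpers are minimal stand-ins modelled on the kernel's <linux/err.h>, not the real ones.

/*
 * Minimal userspace sketch of the two error-return conventions touched
 * by this hunk.  ERR_PTR/IS_ERR/PTR_ERR are stand-ins kept only to make
 * the example compile.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Convention restored by the revert: plain NULL on allocation failure. */
static void *alloc_pools_null(int fail)
{
        return fail ? NULL : malloc(64);
}

/* Convention being reverted: encode the exact errno in the pointer. */
static void *alloc_pools_errptr(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : malloc(64);
}

int main(void)
{
        void *p = alloc_pools_null(1);
        if (!p)                         /* caller only learns "it failed" */
                printf("NULL convention: assume -ENOMEM\n");

        p = alloc_pools_errptr(1);
        if (IS_ERR(p))                  /* caller recovers the exact errno */
                printf("ERR_PTR convention: %ld\n", PTR_ERR(p));
        return 0;
}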
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 90dc49e3c78f67d3096c4c9745b817dcdcd0a60f..492181e16c6945cbfb463c99bd3af55c27f122b6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2349,52 +2349,39 @@ static void free_dev(struct mapped_device *md)
        kfree(md);
 }
 
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
-       if (type == DM_TYPE_BIO_BASED)
-               return type;
-
-       return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
        struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-       switch (filter_md_type(dm_table_get_type(t), md)) {
-       case DM_TYPE_BIO_BASED:
-               if (md->bs && md->io_pool) {
+       if (md->bs) {
+               /* The md already has necessary mempools. */
+               if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
                        /*
-                        * This bio-based md already has necessary mempools.
                         * Reload bioset because front_pad may have changed
                         * because a different table was loaded.
                         */
                        bioset_free(md->bs);
                        md->bs = p->bs;
                        p->bs = NULL;
-                       goto out;
                }
-               break;
-       case DM_TYPE_REQUEST_BASED:
-               if (md->rq_pool && md->io_pool)
-                       /*
-                        * This request-based md already has necessary mempools.
-                        */
-                       goto out;
-               break;
-       case DM_TYPE_MQ_REQUEST_BASED:
-               BUG_ON(p); /* No mempools needed */
-               return;
+               /*
+                * There's no need to reload with request-based dm
+                * because the size of front_pad doesn't change.
+                * Note for future: If you are to reload bioset,
+                * prep-ed requests in the queue may refer
+                * to bio from the old bioset, so you must walk
+                * through the queue to unprep.
+                */
+               goto out;
        }
 
-       BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
-
        md->io_pool = p->io_pool;
        p->io_pool = NULL;
        md->rq_pool = p->rq_pool;
        p->rq_pool = NULL;
        md->bs = p->bs;
        p->bs = NULL;
+
 out:
        /* mempool bind completed, no longer need any mempools in the table */
        dm_table_free_md_mempools(t);
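
The restored __bind_mempools() transfers ownership of the table's mempools to the mapped_device by copying each pointer and immediately clearing it in the dm_md_mempools structure, so the dm_table_free_md_mempools() call at the end cannot free what the md now owns. Below is a minimal sketch of that move-and-clear handoff, with illustrative struct names rather than the DM ones.

/*
 * Sketch of the move-and-clear handoff used above: the destination takes
 * the resource and the source pointer is NULLed, so the source's free
 * path becomes a harmless no-op.  Struct names are illustrative only.
 */
#include <stdlib.h>

struct table_pools { void *io_pool; };   /* stands in for dm_md_mempools */
struct mapped_dev  { void *io_pool; };   /* stands in for mapped_device  */

static void bind_pools(struct mapped_dev *md, struct table_pools *p)
{
        md->io_pool = p->io_pool;        /* md now owns the pool */
        p->io_pool = NULL;               /* table must not free it later */
}

static void free_table_pools(struct table_pools *p)
{
        free(p->io_pool);                /* free(NULL) is safe: no double free */
}

int main(void)
{
        struct table_pools table = { .io_pool = malloc(32) };
        struct mapped_dev md = { 0 };

        bind_pools(&md, &table);
        free_table_pools(&table);        /* no-op: ownership already moved */
        free(md.io_pool);
        return 0;
}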
@@ -2774,6 +2761,14 @@ out_tag_set:
        return err;
 }
 
+static unsigned filter_md_type(unsigned type, struct mapped_device *md)
+{
+       if (type == DM_TYPE_BIO_BASED)
+               return type;
+
+       return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
+}
+
 /*
  * Setup the DM device's queue based on md's type
  */
@@ -3495,7 +3490,7 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
 
        pools = kzalloc(sizeof(*pools), GFP_KERNEL);
        if (!pools)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) +
                offsetof(struct dm_target_io, clone);
@@ -3514,26 +3509,24 @@ struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity,
        return pools;
 out:
        dm_free_md_mempools(pools);
-       return ERR_PTR(-ENOMEM);
+       return NULL;
 }
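
dm_alloc_bio_mempools() sizes the bioset's front_pad from the target's per-bio data size plus the offset of the embedded clone, which is why __bind_mempools() above reloads the bioset when a different table is bound. The sketch below walks through that arithmetic with a hypothetical stand-in struct, not the real struct dm_target_io layout.

/*
 * Sketch of the front_pad arithmetic shown above, with a hypothetical
 * stand-in struct instead of the real struct dm_target_io.  front_pad is
 * the space reserved in front of every bio cloned from the bioset.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_target_io {                  /* illustrative layout only */
        unsigned long magic;
        void *ti;
        char clone[1];                   /* placeholder for the embedded clone bio */
};

#define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        size_t per_bio_data_size = 24;   /* assumed target-specific size */
        size_t front_pad =
                ROUNDUP(per_bio_data_size, __alignof__(struct fake_target_io)) +
                offsetof(struct fake_target_io, clone);

        /* A different table (different per_bio_data_size) changes front_pad,
         * hence the bioset reload in __bind_mempools(). */
        printf("front_pad = %zu\n", front_pad);
        return 0;
}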
 
 struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
                                            unsigned type)
 {
-       unsigned int pool_size;
+       unsigned int pool_size = dm_get_reserved_rq_based_ios();
        struct dm_md_mempools *pools;
 
-       if (filter_md_type(type, md) == DM_TYPE_MQ_REQUEST_BASED)
-               return NULL; /* No mempools needed */
-
-       pool_size = dm_get_reserved_rq_based_ios();
        pools = kzalloc(sizeof(*pools), GFP_KERNEL);
        if (!pools)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
-       pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-       if (!pools->rq_pool)
-               goto out;
+       if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) {
+               pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
+               if (!pools->rq_pool)
+                       goto out;
+       }
 
        pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache);
        if (!pools->io_pool)
@@ -3542,7 +3535,7 @@ struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md,
        return pools;
 out:
        dm_free_md_mempools(pools);
-       return ERR_PTR(-ENOMEM);
+       return NULL;
 }
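
With the revert, dm_alloc_rq_mempools() again reserves dm_get_reserved_rq_based_ios() request and tio objects up front so request-based DM can make forward progress under memory pressure; only the blk-mq case skips the rq_pool. The toy pool below is a rough userspace analogue of that pre-reservation idea, not the kernel's mempool_create_slab_pool() implementation.

/*
 * Rough userspace analogue of pre-reserving I/O objects: a fixed reserve
 * allocated up front so later "allocations" from it cannot fail.  This is
 * a toy free-list, not the kernel's mempool implementation.
 */
#include <stdlib.h>

struct reserve_pool {
        void **elems;
        size_t nr_free;
};

static struct reserve_pool *pool_create(size_t min_nr, size_t obj_size)
{
        struct reserve_pool *p = calloc(1, sizeof(*p));
        if (!p)
                return NULL;             /* plain NULL on failure, as in the revert */
        p->elems = calloc(min_nr, sizeof(void *));
        if (!p->elems)
                goto out;
        for (p->nr_free = 0; p->nr_free < min_nr; p->nr_free++) {
                p->elems[p->nr_free] = malloc(obj_size);
                if (!p->elems[p->nr_free])
                        goto out;
        }
        return p;
out:
        while (p->nr_free)
                free(p->elems[--p->nr_free]);
        free(p->elems);
        free(p);
        return NULL;
}

static void *pool_alloc(struct reserve_pool *p)
{
        return p->nr_free ? p->elems[--p->nr_free] : NULL;
}

/* Only objects previously taken from this pool may be returned to it. */
static void pool_free(struct reserve_pool *p, void *obj)
{
        p->elems[p->nr_free++] = obj;
}

int main(void)
{
        struct reserve_pool *p = pool_create(16, 64);
        if (!p)
                return 1;
        void *obj = pool_alloc(p);       /* succeeds: the reserve is full */
        pool_free(p, obj);
        return 0;
}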
 
 void dm_free_md_mempools(struct dm_md_mempools *pools)