[firefly-linux-kernel-4.4.55.git]
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b7a26e3c73ac78e963ce47ed25448..397f0454100b02247d6dbc44b340d876f3f5a2f5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
-       if (run_queue) {
-               if (md->queue->mq_ops)
-                       blk_mq_run_hw_queues(md->queue, true);
-               else
-                       blk_run_queue_async(md->queue);
-       }
+       if (!md->queue->mq_ops && run_queue)
+               blk_run_queue_async(md->queue);
 
        /*
         * dm_put() must be at the end of this function. See the comment above
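
The hunk above drops the blk-mq branch of the queue re-run: rq_completed() now only kicks the legacy ->request_fn() queue, via blk_run_queue_async(). The diff does not show where blk-mq re-running went, so the rationale is inferred: mq hardware queues are restarted from the mq dispatch/completion paths and no longer need a manual blk_mq_run_hw_queues() here. The comment retained above the change still explains the legacy side: re-entering ->request_fn() synchronously would deadlock on the queue lock, hence the async variant.
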
@@ -1191,6 +1187,8 @@ static void dm_unprep_request(struct request *rq)
 
        if (clone)
                free_rq_clone(clone);
+       else if (!tio->md->queue->mq_ops)
+               free_rq_tio(tio);
 }
 
 /*
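
The new else branch plugs what looks like a dm_rq_target_io leak on the legacy path: when unprep runs before a clone was ever allocated, nothing else frees the tio. The !tio->md->queue->mq_ops guard matters because on blk-mq the tio lives in the request's per-request payload and must not be freed separately.
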
@@ -1212,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
 {
        int rw = rq_data_dir(rq);
 
+       rq_end_stats(md, rq);
        dm_unprep_request(rq);
 
-       rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                old_requeue_request(rq);
        else {
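
This ordering change follows from the previous hunk: dm_unprep_request() can now free the tio, and rq_end_stats() reads accounting state stored in that tio, so statistics have to be finalized before unprep rather than after. In the old order this would now be a use-after-free.
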
@@ -1334,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
        struct dm_rq_target_io *tio = tio_from_request(rq);
 
        tio->error = error;
-       blk_complete_request(rq);
+       if (!rq->q->mq_ops)
+               blk_complete_request(rq);
+       else
+               blk_mq_complete_request(rq, error);
 }
 
 /*
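
Completion is now dispatched by queue type: legacy requests keep using blk_complete_request(), while blk-mq requests must go through blk_mq_complete_request() (the two-argument variant in this kernel, which carries the error). Either way the error is stashed in tio->error first, for the completion handler to pick up.
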
@@ -1466,11 +1467,62 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
+/*
+ * Flush current->bio_list when the target map method blocks.
+ * This fixes deadlocks in snapshot and possibly in other targets.
+ */
+struct dm_offload {
+       struct blk_plug plug;
+       struct blk_plug_cb cb;
+};
+
+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
+{
+       struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+       struct bio_list list;
+       struct bio *bio;
+
+       INIT_LIST_HEAD(&o->cb.list);
+
+       if (unlikely(!current->bio_list))
+               return;
+
+       list = *current->bio_list;
+       bio_list_init(current->bio_list);
+
+       while ((bio = bio_list_pop(&list))) {
+               struct bio_set *bs = bio->bi_pool;
+               if (unlikely(!bs) || bs == fs_bio_set) {
+                       bio_list_add(current->bio_list, bio);
+                       continue;
+               }
+
+               spin_lock(&bs->rescue_lock);
+               bio_list_add(&bs->rescue_list, bio);
+               queue_work(bs->rescue_workqueue, &bs->rescue_work);
+               spin_unlock(&bs->rescue_lock);
+       }
+}
+
+static void dm_offload_start(struct dm_offload *o)
+{
+       blk_start_plug(&o->plug);
+       o->cb.callback = flush_current_bio_list;
+       list_add(&o->cb.list, &current->plug->cb_list);
+}
+
+static void dm_offload_end(struct dm_offload *o)
+{
+       list_del(&o->cb.list);
+       blk_finish_plug(&o->plug);
+}
+
 static void __map_bio(struct dm_target_io *tio)
 {
        int r;
        sector_t sector;
        struct mapped_device *md;
+       struct dm_offload o;
        struct bio *clone = &tio->clone;
        struct dm_target *ti = tio->ti;
 
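This is the heart of the deadlock fix. If a target's map method blocks, any bios the current task has stacked on current->bio_list can never be submitted, and (as with snapshot) the map method may be waiting on exactly those bios. Registering a blk_plug_cb makes the block core call flush_current_bio_list() when the plugged task schedules out; the callback hands each stranded bio to its bio_set's rescue workqueue, except bios with no bio_set or from fs_bio_set, which have no rescuer and are put back. One subtlety: the block core (flush_plug_callbacks()) removes a callback from the plug list before invoking it, which is why the callback re-initializes o->cb.list, keeping the unconditional list_del() in dm_offload_end() safe.

For orientation, here is the same plug-callback pattern outside dm. This is a hedged sketch, not code from this tree: my_unplug(), my_blocking_section() and my_offloaded_call() are made-up names; only the blk_plug/cb_list handling mirrors dm_offload_start()/dm_offload_end() above.

	#include <linux/blkdev.h>
	#include <linux/list.h>
	#include <linux/sched.h>

	static void my_blocking_section(void);	/* hypothetical; may sleep */

	/* Runs if the task schedules out while this cb is on the
	 * plug's cb_list (from_schedule == true); blk_finish_plug()
	 * would also fire it, but the caller unlists the cb first.
	 * Re-arm the list head: the core list_del()s the cb before
	 * calling us, and the caller list_del()s it again. */
	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
	{
		INIT_LIST_HEAD(&cb->list);
		/* drain whatever the blocked task left queued */
	}

	static void my_offloaded_call(void)
	{
		struct blk_plug plug;
		struct blk_plug_cb cb = { .callback = my_unplug };

		blk_start_plug(&plug);
		list_add(&cb.list, &current->plug->cb_list);

		my_blocking_section();	/* if this schedules, my_unplug() runs */

		list_del(&cb.list);
		blk_finish_plug(&plug);
	}

The attraction of the plug callback is that it costs nothing on the fast path; work is punted to the rescue workqueues only if the wrapped call actually blocks.
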
@@ -1483,7 +1535,11 @@ static void __map_bio(struct dm_target_io *tio)
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_iter.bi_sector;
+
+       dm_offload_start(&o);
        r = ti->type->map(ti, clone);
+       dm_offload_end(&o);
+
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */
 
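And here is the consumer: the offload window brackets exactly the ti->type->map() call, so the rescue callback is armed only while potentially blocking target code is running.
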
@@ -2259,8 +2315,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->bs)
                bioset_free(md->bs);
 
-       cleanup_srcu_struct(&md->io_barrier);
-
        if (md->disk) {
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
@@ -2272,6 +2326,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->queue)
                blk_cleanup_queue(md->queue);
 
+       cleanup_srcu_struct(&md->io_barrier);
+
        if (md->bdev) {
                bdput(md->bdev);
                md->bdev = NULL;
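
These two hunks (this one and the previous) move cleanup_srcu_struct(&md->io_barrier) from before blk_cleanup_queue() to after it. Requests still draining while the queue is torn down can enter SRCU read sections on io_barrier, so the SRCU state has to outlive the queue. The reader in question is dm's live-table lookup, which looks roughly like this (a from-memory paraphrase for context, not part of this diff):

	struct dm_table *dm_get_live_table(struct mapped_device *md,
					   int *srcu_idx)
	{
		*srcu_idx = srcu_read_lock(&md->io_barrier);
		return srcu_dereference(md->map, &md->io_barrier);
	}
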
@@ -2868,6 +2924,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
+       struct request_queue *q = dm_get_md_queue(md);
        struct dm_table *map;
        int srcu_idx;
 
@@ -2878,6 +2935,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
        set_bit(DMF_FREEING, &md->flags);
        spin_unlock(&_minor_lock);
 
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
+       spin_unlock_irq(q->queue_lock);
+
        if (dm_request_based(md) && md->kworker_task)
                flush_kthread_worker(&md->kworker);
 
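__dm_destroy() now grabs the queue pointer up front via dm_get_md_queue() and marks the queue dying, under the queue lock, as soon as DMF_FREEING is set. QUEUE_FLAG_DYING is the same flag blk_cleanup_queue() sets later; raising it early makes new request allocation fail for the whole teardown window instead of only at the very end, so I/O cannot race into a device that is being destroyed.
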
@@ -3077,7 +3138,8 @@ static void unlock_fs(struct mapped_device *md)
  * Caller must hold md->suspend_lock
  */
 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
-                       unsigned suspend_flags, int interruptible)
+                       unsigned suspend_flags, int interruptible,
+                       int dmf_suspended_flag)
 {
        bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
        bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
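
__dm_suspend() now takes the DMF flag to set on success as a parameter, so the external and internal suspend paths can each have their bit set inside the helper at a well-defined point; the next hunk shows where.
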
@@ -3144,6 +3206,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
         * to finish.
         */
        r = dm_wait_for_completion(md, interruptible);
+       if (!r)
+               set_bit(dmf_suspended_flag, &md->flags);
 
        if (noflush)
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
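
The flag is set immediately after a successful wait and, crucially, before DMF_NOFLUSH_SUSPENDING is cleared just below. Previously the callers set their DMF_SUSPENDED* bit only after __dm_suspend() returned, leaving a window in which neither bit was observable; code that tests "noflush suspending or suspended" (dm-mpath's push-back logic is the usual example, though that is inferred context, not shown in this diff) could misjudge the device's state in that window.
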
@@ -3205,12 +3269,10 @@ retry:
 
        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
 
-       r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
+       r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
        if (r)
                goto out_unlock;
 
-       set_bit(DMF_SUSPENDED, &md->flags);
-
        dm_table_postsuspend_targets(map);
 
 out_unlock:
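
Caller-side counterpart: dm_suspend() passes DMF_SUSPENDED into __dm_suspend() and drops its own set_bit(), since the helper now sets the flag at the earlier, race-free point.
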
@@ -3243,10 +3305,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
 
 int dm_resume(struct mapped_device *md)
 {
-       int r = -EINVAL;
+       int r;
        struct dm_table *map = NULL;
 
 retry:
+       r = -EINVAL;
        mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
 
        if (!dm_suspended_md(md))
@@ -3270,8 +3333,6 @@ retry:
                goto out;
 
        clear_bit(DMF_SUSPENDED, &md->flags);
-
-       r = 0;
 out:
        mutex_unlock(&md->suspend_lock);
 
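Two small fixes to dm_resume(). The retry branch (in context elided between these hunks) waits with r = wait_on_bit(...), so after a successful wait r holds 0; without resetting r to -EINVAL at the top of each pass, a subsequent !dm_suspended_md() check would return 0 instead of -EINVAL. And with r taken directly from __dm_resume(), the trailing r = 0 was redundant, so it goes.
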
@@ -3304,9 +3365,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
         * would require changing .presuspend to return an error -- avoid this
         * until there is a need for more elaborate variants of internal suspend.
         */
-       (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
-
-       set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+       (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
+                           DMF_SUSPENDED_INTERNALLY);
 
        dm_table_postsuspend_targets(map);
 }
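
Internal suspend gets the same treatment: DMF_SUSPENDED_INTERNALLY is passed into __dm_suspend() so it, too, is set before DMF_NOFLUSH_SUSPENDING is cleared, and the caller-side set_bit() disappears. The return value remains deliberately ignored, as the retained comment above explains.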