Merge branch 'v3.10/topic/pinctrl' of git://git.linaro.org/kernel/linux-linaro-stable...
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index df44b60e66f289880c177e98d78f1aa04ce08e7a..1a75869d3a825dd646bec2940ffd4eea3d0435ad 100644 (file)
@@ -151,6 +151,9 @@ struct cache {
        atomic_t nr_migrations;
        wait_queue_head_t migration_wait;
 
+       wait_queue_head_t quiescing_wait;
+       atomic_t quiescing_ack;
+
        /*
         * cache_size entries, dirty if set
         */
@@ -742,8 +745,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
 
 static void cleanup_migration(struct dm_cache_migration *mg)
 {
-       dec_nr_migrations(mg->cache);
+       struct cache *cache = mg->cache;
        free_migration(mg);
+       dec_nr_migrations(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
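
A note on the reordering in cleanup_migration(): dec_nr_migrations() can wake a waiter sleeping in wait_for_migrations(), and once the count reaches zero the target may proceed to tear down, taking the migration mempool with it. Since free_migration() returns the migration to that pool, the decrement must not be published until the free has completed; the patch therefore saves the cache pointer, frees first, and decrements last. A minimal userspace sketch of the hazard, with pthreads and C11 atomics standing in for the kernel primitives (none of these identifiers are kernel APIs, they only mirror the patch's names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int nr_migrations;
static void *pool;              /* stands in for cache->migration_pool */

/* Returning a migration to the pool dereferences the pool, so the
 * pool must still exist when this runs. */
static void free_migration(void *mg)
{
        if (!pool)
                fprintf(stderr, "use-after-free: pool already destroyed\n");
        free(mg);
}

/* wait_for_migrations() stand-in: once the count hits zero, the
 * waiter assumes nothing still needs the pool and tears it down. */
static void *waiter(void *arg)
{
        (void)arg;
        while (atomic_load(&nr_migrations))
                ;               /* the kernel sleeps on migration_wait */
        free(pool);
        pool = NULL;
        return NULL;
}

int main(void)
{
        pthread_t t;
        void *mg = malloc(64);

        pool = malloc(64);
        atomic_store(&nr_migrations, 1);
        pthread_create(&t, NULL, waiter, NULL);

        /* The patched ordering: release the migration (which still
         * uses the pool) first, and only then publish the decrement
         * that lets the waiter proceed to teardown. */
        free_migration(mg);
        atomic_fetch_sub(&nr_migrations, 1);

        pthread_join(t, NULL);
        return 0;
}

With the original order (decrement first), the waiter can destroy the pool while the free is still in progress.
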
@@ -857,12 +861,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
        int r;
        struct dm_io_region o_region, c_region;
        struct cache *cache = mg->cache;
+       sector_t cblock = from_cblock(mg->cblock);
 
        o_region.bdev = cache->origin_dev->bdev;
        o_region.count = cache->sectors_per_block;
 
        c_region.bdev = cache->cache_dev->bdev;
-       c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+       c_region.sector = cblock * cache->sectors_per_block;
        c_region.count = cache->sectors_per_block;
 
        if (mg->writeback || mg->demote) {
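
The new cblock local in issue_copy_real() guards the block-to-sector conversion: from_cblock() returns a 32-bit block number, and sectors_per_block is likewise a 32-bit field in struct cache, so the old expression multiplied two 32-bit operands in 32-bit arithmetic and only widened the already-wrapped result when assigning to c_region.sector. Promoting one operand to sector_t first forces a 64-bit multiply. A standalone demonstration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical geometry: a block number whose sector offset
         * exceeds 32 bits */
        uint32_t cblock = 5000000;              /* from_cblock() result */
        uint32_t sectors_per_block = 2048;      /* 1MiB blocks, 512B sectors */

        uint64_t wrong = cblock * sectors_per_block;    /* 32-bit multiply, wraps */
        uint64_t right = (uint64_t)cblock * sectors_per_block;

        printf("wrong: %llu\nright: %llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}

This prints 1650065408 for the unwidened product versus the correct 10240000000.
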
@@ -1340,34 +1345,51 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 /*----------------------------------------------------------------
  * Main worker loop
  *--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+static bool is_quiescing(struct cache *cache)
 {
+       int r;
        unsigned long flags;
 
        spin_lock_irqsave(&cache->lock, flags);
-       cache->quiescing = 1;
+       r = cache->quiescing;
        spin_unlock_irqrestore(&cache->lock, flags);
+
+       return r;
 }
 
-static void stop_quiescing(struct cache *cache)
+static void ack_quiescing(struct cache *cache)
+{
+       if (is_quiescing(cache)) {
+               atomic_inc(&cache->quiescing_ack);
+               wake_up(&cache->quiescing_wait);
+       }
+}
+
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+       wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
+static void start_quiescing(struct cache *cache)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&cache->lock, flags);
-       cache->quiescing = 0;
+       cache->quiescing = true;
        spin_unlock_irqrestore(&cache->lock, flags);
+
+       wait_for_quiescing_ack(cache);
 }
 
-static bool is_quiescing(struct cache *cache)
+static void stop_quiescing(struct cache *cache)
 {
-       int r;
        unsigned long flags;
 
        spin_lock_irqsave(&cache->lock, flags);
-       r = cache->quiescing;
+       cache->quiescing = false;
        spin_unlock_irqrestore(&cache->lock, flags);
 
-       return r;
+       atomic_set(&cache->quiescing_ack, 0);
 }
 
 static void wait_for_migrations(struct cache *cache)
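
Taken together, these functions form an explicit handshake. start_quiescing() now raises cache->quiescing and then blocks in wait_for_quiescing_ack() until the worker has demonstrably observed the flag; ack_quiescing() is the worker's half, bumping quiescing_ack and waking the waiter only once the flag is seen; stop_quiescing() clears the flag and resets the ack for the next cycle. Previously start_quiescing() returned immediately, so teardown could race with a worker pass still in flight. A userspace analog, with a condition variable in place of the waitqueue and a busy-wait standing in for the requeued worker (illustrative sketch only; stop_quiescing() is omitted):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiescing_wait = PTHREAD_COND_INITIALIZER;
static bool quiescing;                  /* cache->quiescing     */
static atomic_int quiescing_ack;        /* cache->quiescing_ack */

static bool is_quiescing(void)
{
        bool r;

        pthread_mutex_lock(&lock);      /* spin_lock_irqsave() stand-in */
        r = quiescing;
        pthread_mutex_unlock(&lock);
        return r;
}

/* Worker side: acknowledge only once the flag has been observed. */
static void ack_quiescing(void)
{
        if (is_quiescing()) {
                atomic_store(&quiescing_ack, 1);
                pthread_mutex_lock(&lock);
                pthread_cond_broadcast(&quiescing_wait);  /* wake_up() */
                pthread_mutex_unlock(&lock);
        }
}

/* Core side: raise the flag, then sleep until the worker acks. */
static void start_quiescing(void)
{
        pthread_mutex_lock(&lock);
        quiescing = true;
        while (!atomic_load(&quiescing_ack))    /* wait_event() stand-in */
                pthread_cond_wait(&quiescing_wait, &lock);
        pthread_mutex_unlock(&lock);
}

/* The worker keeps taking passes, as do_worker() is requeued in the
 * kernel; each pass ends with an ack attempt. */
static void *worker(void *arg)
{
        (void)arg;
        while (!atomic_load(&quiescing_ack))
                ack_quiescing();
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        start_quiescing();      /* returns only after the worker acks */
        puts("quiesced: worker acknowledged");
        pthread_join(t, NULL);
        return 0;
}

In the do_worker() hunks below, the same pattern shows up as ack_quiescing() at the bottom of the loop, so the ack is only given after a complete pass with the flag visible and no new deferred work picked up.
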
@@ -1414,16 +1436,15 @@ static void do_worker(struct work_struct *ws)
        struct cache *cache = container_of(ws, struct cache, worker);
 
        do {
-               if (!is_quiescing(cache))
+               if (!is_quiescing(cache)) {
+                       writeback_some_dirty_blocks(cache);
+                       process_deferred_writethrough_bios(cache);
                        process_deferred_bios(cache);
+               }
 
                process_migrations(cache, &cache->quiesced_migrations, issue_copy);
                process_migrations(cache, &cache->completed_migrations, complete_migration);
 
-               writeback_some_dirty_blocks(cache);
-
-               process_deferred_writethrough_bios(cache);
-
                if (commit_if_needed(cache)) {
                        process_deferred_flush_bios(cache, false);
 
@@ -1436,6 +1457,9 @@ static void do_worker(struct work_struct *ws)
                        process_migrations(cache, &cache->need_commit_migrations,
                                           migration_success_post_commit);
                }
+
+               ack_quiescing(cache);
+
        } while (more_work(cache));
 }
 
@@ -1998,6 +2022,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        atomic_set(&cache->nr_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
+       init_waitqueue_head(&cache->quiescing_wait);
+       atomic_set(&cache->quiescing_ack, 0);
+
        r = -ENOMEM;
        cache->nr_dirty = 0;
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
@@ -2148,20 +2175,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
        bool discarded_block;
        struct dm_bio_prison_cell *cell;
        struct policy_result lookup_result;
-       struct per_bio_data *pb;
+       struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
 
-       if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+       if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
                /*
                 * This can only occur if the io goes to a partial block at
                 * the end of the origin device.  We don't cache these.
                 * Just remap to the origin and carry on.
                 */
-               remap_to_origin_clear_discard(cache, bio, block);
+               remap_to_origin(cache, bio);
                return DM_MAPIO_REMAPPED;
        }
 
-       pb = init_per_bio_data(bio, pb_data_size);
-
        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
                defer_bio(cache, bio);
                return DM_MAPIO_SUBMITTED;
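
On the cache_map() hunk: with origin_blocks complete blocks, the valid indices are 0 .. origin_blocks - 1, so the old "greater than" test let a bio aimed at the partial tail block (block == origin_blocks) fall through into the caching path, where remap_to_origin_clear_discard() would update per-block discard state for a block that does not exist. The patched code rejects it with ">=" and remaps with the plain remap_to_origin(), which touches no per-block state; initializing pb at its declaration also means this early-return path runs with initialized per-bio data. A toy check with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* hypothetical geometry: 8 complete blocks, so valid block
         * indices are 0..7 */
        unsigned int origin_blocks = 8;
        unsigned int block = origin_blocks;     /* io to the partial tail */

        if (block > origin_blocks)              /* old check misses block == 8 */
                puts("old: remap to origin");
        else
                printf("old: falls through; clear_discard() would index "
                       "per-block state at %u (valid: 0..%u)\n",
                       block, origin_blocks - 1);

        if (block >= origin_blocks)             /* patched check */
                puts("new: remap_to_origin() and return");
        return 0;
}
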