atomic_t nr_migrations;
wait_queue_head_t migration_wait;
+ wait_queue_head_t quiescing_wait;
+ atomic_t quiescing_ack;
+
/*
* cache_size entries, dirty if set
*/
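The two new fields form a handshake between the suspend/table-swap path and the worker thread: the suspend path raises cache->quiescing and then sleeps on quiescing_wait until the worker bumps quiescing_ack, as the hunks further down spell out.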
static void cleanup_migration(struct dm_cache_migration *mg)
{
- dec_nr_migrations(mg->cache);
+ struct cache *cache = mg->cache;
free_migration(mg);
+ dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
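The reordering in cleanup_migration() is not cosmetic: dec_nr_migrations() is what a suspending table waits on, so the migration has to be released before the count is allowed to reach zero. Paraphrased from the surrounding target code (for context only, not part of this patch), the pair it races against looks roughly like this:

    /* Paraphrase of the counterpart helpers. The waiter watches nothing but
     * the counter, so anything that still touches mg inside free_migration()
     * must happen before nr_migrations can drop to zero. */
    static void dec_nr_migrations(struct cache *cache)
    {
            atomic_dec(&cache->nr_migrations);

            /* Wake the worker in case we're suspending the target. */
            wake_up(&cache->migration_wait);
    }

    static void wait_for_migrations(struct cache *cache)
    {
            wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
    }

With the old order, a suspend waiting in wait_for_migrations() could see the count hit zero and start tearing the cache down while free_migration() was still putting mg back, presumably into a mempool owned by the cache. The next hunk, in issue_copy_real(), widens the cache-block index to a sector_t before the multiplication, presumably so cblock * sectors_per_block is computed in 64 bits and cannot be truncated on a large cache device.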
int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
+ sector_t cblock = from_cblock(mg->cblock);
o_region.bdev = cache->origin_dev->bdev;
o_region.count = cache->sectors_per_block;
c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.sector = cblock * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
if (mg->writeback || mg->demote) {
/*----------------------------------------------------------------
* Main worker loop
*--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+static bool is_quiescing(struct cache *cache)
{
+ int r;
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 1;
+ r = cache->quiescing;
spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
}
-static void stop_quiescing(struct cache *cache)
+static void ack_quiescing(struct cache *cache)
+{
+ if (is_quiescing(cache)) {
+ atomic_inc(&cache->quiescing_ack);
+ wake_up(&cache->quiescing_wait);
+ }
+}
+
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+ wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
+static void start_quiescing(struct cache *cache)
{
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- cache->quiescing = 0;
+ cache->quiescing = true;
spin_unlock_irqrestore(&cache->lock, flags);
+
+ wait_for_quiescing_ack(cache);
}
-static bool is_quiescing(struct cache *cache)
+static void stop_quiescing(struct cache *cache)
{
- int r;
unsigned long flags;
spin_lock_irqsave(&cache->lock, flags);
- r = cache->quiescing;
+ cache->quiescing = false;
spin_unlock_irqrestore(&cache->lock, flags);
- return r;
+ atomic_set(&cache->quiescing_ack, 0);
}
static void wait_for_migrations(struct cache *cache)
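The quiescing flag used to be fire-and-forget: start_quiescing() set it and returned immediately, so a worker pass that had already tested is_quiescing() could keep queuing new migrations behind the suspend path's back. The rewrite turns it into a handshake: start_quiescing() raises the flag and then blocks in wait_for_quiescing_ack() until the worker, at the bottom of its loop, calls ack_quiescing(); stop_quiescing() drops the flag and re-arms the ack for the next suspend. Something along these lines is presumably what drives it from the postsuspend path (paraphrased for context, not part of this patch):

    /* Paraphrase of the presumable caller; the ordering is the point: only
     * after the worker has acknowledged the flag is it safe to wait for the
     * in-flight migrations, because no new ones can be queued behind us. */
    static void cache_postsuspend(struct dm_target *ti)
    {
            struct cache *cache = ti->private;

            start_quiescing(cache);         /* raise ->quiescing, wait for the ack */
            wait_for_migrations(cache);     /* drain what is already in flight */
            stop_worker(cache);
            requeue_deferred_io(cache);
            stop_quiescing(cache);          /* clear the flag, reset ->quiescing_ack */

            (void) sync_metadata(cache);
    }

The worker loop (do_worker()) is reworked to match: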
struct cache *cache = container_of(ws, struct cache, worker);
do {
- if (!is_quiescing(cache))
+ if (!is_quiescing(cache)) {
+ writeback_some_dirty_blocks(cache);
+ process_deferred_writethrough_bios(cache);
process_deferred_bios(cache);
+ }
process_migrations(cache, &cache->quiesced_migrations, issue_copy);
process_migrations(cache, &cache->completed_migrations, complete_migration);
- writeback_some_dirty_blocks(cache);
-
- process_deferred_writethrough_bios(cache);
-
if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
process_migrations(cache, &cache->need_commit_migrations,
migration_success_post_commit);
}
+
+ ack_quiescing(cache);
+
} while (more_work(cache));
}
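Two things are going on in the worker hunk. First, writeback_some_dirty_blocks() and process_deferred_writethrough_bios() move inside the !is_quiescing() branch: both can generate fresh work, and once the flag is observed the worker must stop creating it while still pushing already-quiesced and completed migrations through and committing metadata. Second, ack_quiescing() sits at the very bottom of the loop, so the acknowledgement is only raised after a complete pass in which the sources of new migrations were skipped; combined with start_quiescing() not returning until that ack, the suspend path knows wait_for_migrations() only has to drain copies that were already in flight. The constructor hunk below just initialises the new waitqueue and the ack counter.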
atomic_set(&cache->nr_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
+ init_waitqueue_head(&cache->quiescing_wait);
+ atomic_set(&cache->quiescing_ack, 0);
+
r = -ENOMEM;
cache->nr_dirty = 0;
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
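The remaining hunks are in cache_map(), the target's map function, and tighten the handling of bios that fall off the end of the origin device: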
bool discarded_block;
struct dm_bio_prison_cell *cell;
struct policy_result lookup_result;
- struct per_bio_data *pb;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
* the end of the origin device. We don't cache these.
* Just remap to the origin and carry on.
*/
- remap_to_origin_clear_discard(cache, bio, block);
+ remap_to_origin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
-
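Three related fixes here. The per-bio data is now initialised before the early-return path, presumably because the end_io side expects it to be valid even for bios that were remapped straight to the origin (hence the two removed lines above). The bounds test becomes >= rather than >: with a 64-sector block size and a 1000-sector origin, origin_blocks is 15 and full blocks 0-14 cover sectors 0-959, so a bio into the 40-sector tail computes block 15, which the old test (15 > 15) let through as if it were a cacheable block while the new test catches it. And the tail is now remapped with remap_to_origin() instead of remap_to_origin_clear_discard(), presumably because the discard state only tracks in-range origin blocks, so there is nothing to clear for an out-of-range block and indexing the discard bitset with it would run past the end.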
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;