dm snapshot: trigger exceptions in remaining snapshots during merge
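
Before a chunk is merged back from the COW device to the origin, any
other (non-merging) snapshot of the same origin must be given the
chance to take its own copy.  origin_write_extent() does this by
calling __origin_write() with a NULL bio for each extent being
merged; the merge then waits, using a global count of completed
pending exceptions, until all of those reallocations have finished.
Writes overlapping the chunks currently being merged are queued on
s->bios_queued_during_merge and released once the merge of those
chunks has been committed.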
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7ddee7c0c518fd79336708b9928c773d187c3b76..1498704467a7e12dcf33da9dd10ecd020e35efe5 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -106,8 +106,30 @@ struct dm_snapshot {
        mempool_t *tracked_chunk_pool;
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
+       /* Wait for events based on state_bits */
+       unsigned long state_bits;
+
+       /* Range of chunks currently being merged. */
+       chunk_t first_merging_chunk;
+       int num_merging_chunks;
+
+       /*
+        * Incoming bios that overlap with chunks being merged must wait
+        * for them to be committed.
+        */
+       struct bio_list bios_queued_during_merge;
 };
 
+/*
+ * state_bits:
+ *   RUNNING_MERGE  - Merge operation is in progress.
+ *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ *                    cleared afterwards.
+ */
+#define RUNNING_MERGE          0
+#define SHUTDOWN_MERGE         1
+
 struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
 {
        return s->cow;
@@ -248,6 +270,10 @@ struct origin {
 static struct list_head *_origins;
 static struct rw_semaphore _origins_lock;
 
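+/*
+ * One global counter and wait queue serve all snapshots: a merging
+ * snapshot only needs to learn that some pending exception, anywhere
+ * in the system, has completed so it can re-check its extent.
+ */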
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
 static int init_origin_hash(void)
 {
        int i;
@@ -386,6 +412,13 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
                return -EINVAL;
        }
 
+       if (!snap_src->store->type->prepare_merge ||
+           !snap_src->store->type->commit_merge) {
+               snap->ti->error = "Snapshot exception store does not "
+                                 "support snapshot-merge.";
+               return -EINVAL;
+       }
+
        return 1;
 }
 
@@ -721,6 +754,260 @@ static int init_hash_tables(struct dm_snapshot *s)
        return 0;
 }
 
+static void merge_shutdown(struct dm_snapshot *s)
+{
+       clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
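+       /*
+        * wake_up_bit() checks the waitqueue locklessly, so a full
+        * memory barrier is needed between clearing the bit and the
+        * wakeup (see the wake_up_bit() documentation).
+        */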
+       smp_mb__after_clear_bit();
+       wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
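+/* Caller must hold s->lock for write; both call sites take down_write(). */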
+static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
+{
+       s->first_merging_chunk = 0;
+       s->num_merging_chunks = 0;
+
+       return bio_list_get(&s->bios_queued_during_merge);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+                                          chunk_t old_chunk)
+{
+       struct dm_exception *e;
+
+       e = dm_lookup_exception(&s->complete, old_chunk);
+       if (!e) {
+               DMERR("Corruption detected: exception for block %llu is "
+                     "on disk but not in memory",
+                     (unsigned long long)old_chunk);
+               return -EINVAL;
+       }
+
+       /*
+        * If this is the only chunk using this exception, remove exception.
+        */
+       if (!dm_consecutive_chunk_count(e)) {
+               dm_remove_exception(e);
+               free_completed_exception(e);
+               return 0;
+       }
+
+       /*
+        * The chunk may be either at the beginning or the end of a
+        * group of consecutive chunks - never in the middle.  We are
+        * removing chunks in the opposite order to that in which they
+        * were added, so this should always be true.
+        * Decrement the consecutive chunk counter and adjust the
+        * starting point if necessary.
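+        *
+        * For example, a consecutive range [8..11] is dismantled as
+        * 11, then 10, then 9; removing the final chunk, 8, takes the
+        * !dm_consecutive_chunk_count() path above and frees the
+        * exception itself.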
+        */
+       if (old_chunk == e->old_chunk) {
+               e->old_chunk++;
+               e->new_chunk++;
+       } else if (old_chunk != e->old_chunk +
+                  dm_consecutive_chunk_count(e)) {
+               DMERR("Attempt to merge block %llu from the "
+                     "middle of a chunk range [%llu - %llu]",
+                     (unsigned long long)old_chunk,
+                     (unsigned long long)e->old_chunk,
+                     (unsigned long long)
+                     e->old_chunk + dm_consecutive_chunk_count(e));
+               return -EINVAL;
+       }
+
+       dm_consecutive_chunk_count_dec(e);
+
+       return 0;
+}
+
+static void flush_bios(struct bio *bio);
+
+static int remove_single_exception_chunk(struct dm_snapshot *s)
+{
+       struct bio *b = NULL;
+       int r;
+       chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+       down_write(&s->lock);
+
+       /*
+        * Process chunks (and associated exceptions) in reverse order
+        * so that dm_consecutive_chunk_count_dec() accounting works.
+        */
+       do {
+               r = __remove_single_exception_chunk(s, old_chunk);
+               if (r)
+                       goto out;
+       } while (old_chunk-- > s->first_merging_chunk);
+
+       b = __release_queued_bios_after_merge(s);
+
+out:
+       up_write(&s->lock);
+       if (b)
+               flush_bios(b);
+
+       return r;
+}
+
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+                              sector_t sector, unsigned size);
+
+static void merge_callback(int read_err, unsigned long write_err,
+                          void *context);
+
+static uint64_t read_pending_exceptions_done_count(void)
+{
+       uint64_t pending_exceptions_done;
+
+       spin_lock(&_pending_exceptions_done_spinlock);
+       pending_exceptions_done = _pending_exceptions_done_count;
+       spin_unlock(&_pending_exceptions_done_spinlock);
+
+       return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+       spin_lock(&_pending_exceptions_done_spinlock);
+       _pending_exceptions_done_count++;
+       spin_unlock(&_pending_exceptions_done_spinlock);
+
+       wake_up_all(&_pending_exceptions_done);
+}
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+       int r;
+       chunk_t old_chunk, new_chunk;
+       struct dm_io_region src, dest;
+       uint64_t previous_count;
+
+       BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+       if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+               goto shut;
+
+       /*
+        * valid flag never changes during merge, so no lock required.
+        */
+       if (!s->valid) {
+               DMERR("Snapshot is invalid: can't merge");
+               goto shut;
+       }
+
+       r = s->store->type->prepare_merge(s->store, &old_chunk, &new_chunk);
+       if (r <= 0) {
+               if (r < 0)
+                       DMERR("Read error in exception store: "
+                             "shutting down merge");
+               goto shut;
+       }
+
+       /* TODO: use larger I/O size once we verify that kcopyd handles it */
+
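+       /*
+        * The origin need not end on a chunk boundary: cap the copy
+        * at the device size.
+        */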
+       dest.bdev = s->origin->bdev;
+       dest.sector = chunk_to_sector(s->store, old_chunk);
+       dest.count = min((sector_t)s->store->chunk_size,
+                        get_dev_size(dest.bdev) - dest.sector);
+
+       src.bdev = s->cow->bdev;
+       src.sector = chunk_to_sector(s->store, new_chunk);
+       src.count = dest.count;
+
+       /*
+        * Reallocate any exceptions needed in other snapshots, then
+        * wait for the pending exceptions to complete.
+        * Each time any pending exception (globally on the system)
+        * completes, we are woken and repeat the process to find out
+        * if we can proceed.  While this may not seem a particularly
+        * efficient algorithm, it is not expected to have any
+        * significant impact on performance.
+        */
+       previous_count = read_pending_exceptions_done_count();
+       while (origin_write_extent(s, dest.sector, s->store->chunk_size)) {
+               wait_event(_pending_exceptions_done,
+                          (read_pending_exceptions_done_count() !=
+                           previous_count));
+               /* Retry after the wait, until all exceptions are done. */
+               previous_count = read_pending_exceptions_done_count();
+       }
+
+       down_write(&s->lock);
+       s->first_merging_chunk = old_chunk;
+       s->num_merging_chunks = 1;
+       up_write(&s->lock);
+
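+       /* Wait for any I/O still tracked on this chunk to complete. */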
+       __check_for_conflicting_io(s, old_chunk);
+
+       dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+       return;
+
+shut:
+       merge_shutdown(s);
+}
+
+static void error_bios(struct bio *bio);
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+       struct dm_snapshot *s = context;
+       struct bio *b = NULL;
+
+       if (read_err || write_err) {
+               if (read_err)
+                       DMERR("Read error: shutting down merge.");
+               else
+                       DMERR("Write error: shutting down merge.");
+               goto shut;
+       }
+
+       if (s->store->type->commit_merge(s->store,
+                                        s->num_merging_chunks) < 0) {
+               DMERR("Write error in exception store: shutting down merge");
+               goto shut;
+       }
+
+       if (remove_single_exception_chunk(s) < 0)
+               goto shut;
+
+       snapshot_merge_next_chunks(s);
+
+       return;
+
+shut:
+       down_write(&s->lock);
+       b = __release_queued_bios_after_merge(s);
+       up_write(&s->lock);
+       error_bios(b);
+
+       merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+       if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+               snapshot_merge_next_chunks(s);
+}
+
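+/*
+ * Action function for wait_on_bit(): just sleep until the bit owner
+ * wakes us with wake_up_bit().
+ */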
+static int wait_schedule(void *ptr)
+{
+       schedule();
+
+       return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+       set_bit(SHUTDOWN_MERGE, &s->state_bits);
+       wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+                   TASK_UNINTERRUPTIBLE);
+       clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
+
 /*
  * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
  */
@@ -791,6 +1078,10 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
+       s->state_bits = 0;
+       s->first_merging_chunk = 0;
+       s->num_merging_chunks = 0;
+       bio_list_init(&s->bios_queued_during_merge);
 
        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
@@ -963,6 +1254,9 @@ static void snapshot_dtr(struct dm_target *ti)
        }
        up_read(&_origins_lock);
 
+       if (dm_target_is_snapshot_merge(ti))
+               stop_merge(s);
+
        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);
@@ -1124,6 +1418,8 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
        origin_bios = bio_list_get(&pe->origin_bios);
        free_pending_exception(pe);
 
+       increment_pending_exceptions_done_count();
+
        up_write(&s->lock);
 
        /* Submit any pending write bios */
@@ -1344,6 +1640,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
  * For each chunk, if there is an existing exception, it is used to
  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
  * which in turn might generate exceptions in other snapshots.
+ * If merging is currently taking place on the chunk in question, the
+ * I/O is deferred by adding it to s->bios_queued_during_merge.
  */
 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
                              union map_info *map_context)
@@ -1364,7 +1662,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
 
        chunk = sector_to_chunk(s->store, bio->bi_sector);
 
-       down_read(&s->lock);
+       down_write(&s->lock);
 
        /* Full snapshots are not usable */
        if (!s->valid) {
@@ -1375,19 +1673,33 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
+               /* Queue writes overlapping with chunks being merged */
+               if (bio_rw(bio) == WRITE &&
+                   chunk >= s->first_merging_chunk &&
+                   chunk < (s->first_merging_chunk +
+                            s->num_merging_chunks)) {
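+                       /*
+                        * The merge of this chunk has not been
+                        * committed yet; flush_bios() will release
+                        * the bio to the origin (set up below) once
+                        * it has.
+                        */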
+                       bio->bi_bdev = s->origin->bdev;
+                       bio_list_add(&s->bios_queued_during_merge, bio);
+                       r = DM_MAPIO_SUBMITTED;
+                       goto out_unlock;
+               }
+
                remap_exception(s, e, bio, chunk);
+
+               if (bio_rw(bio) == WRITE)
+                       map_context->ptr = track_chunk(s, chunk);
                goto out_unlock;
        }
 
        bio->bi_bdev = s->origin->bdev;
 
        if (bio_rw(bio) == WRITE) {
-               up_read(&s->lock);
+               up_write(&s->lock);
                return do_origin(s->origin, bio);
        }
 
 out_unlock:
-       up_read(&s->lock);
+       up_write(&s->lock);
 
        return r;
 }
@@ -1404,6 +1716,13 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
        return 0;
 }
 
+static void snapshot_merge_presuspend(struct dm_target *ti)
+{
+       struct dm_snapshot *s = ti->private;
+
+       stop_merge(s);
+}
+
 static void snapshot_postsuspend(struct dm_target *ti)
 {
        struct dm_snapshot *s = ti->private;
@@ -1464,6 +1783,34 @@ static void snapshot_resume(struct dm_target *ti)
        up_write(&s->lock);
 }
 
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+       sector_t min_chunksize;
+
+       down_read(&_origins_lock);
+       min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+       up_read(&_origins_lock);
+
+       return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+       struct dm_snapshot *s = ti->private;
+
+       /*
+        * Handover exceptions from existing snapshot.
+        */
+       snapshot_resume(ti);
+
+       /*
+        * snapshot-merge acts as an origin, so set ti->split_io
+        */
+       ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+       start_merge(s);
+}
+
 static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
 {
@@ -1663,6 +2010,41 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
        return r;
 }
 
+/*
+ * Trigger exceptions in all non-merging snapshots.
+ *
+ * The chunk size of the merging snapshot may be larger than the chunk
+ * size of some other snapshot so we may need to reallocate multiple
+ * chunks in other snapshots.
+ *
+ * We scan all the overlapping exceptions in the other snapshots.
+ * Returns 1 if anything was reallocated and must be waited for,
+ * otherwise returns 0.
+ *
+ * size must be a multiple of merging_snap's chunk_size.
+ */
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+                              sector_t sector, unsigned size)
+{
+       int must_wait = 0;
+       sector_t n;
+       struct origin *o;
+
+       /*
+        * The origin's __minimum_chunk_size() got stored in split_io
+        * by snapshot_merge_resume().
+        */
+       down_read(&_origins_lock);
+       o = __lookup_origin(merging_snap->origin->bdev);
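+       /*
+        * Passing a NULL bio triggers the exception copies in each
+        * overlapping snapshot without submitting any data of our own.
+        */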
+       for (n = 0; n < size; n += merging_snap->ti->split_io)
+               if (__origin_write(&o->snapshots, sector + n, NULL) ==
+                   DM_MAPIO_SUBMITTED)
+                       must_wait = 1;
+       up_read(&_origins_lock);
+
+       return must_wait;
+}
+
 /*
  * Origin: maps a linear range of a device, with hooks for snapshotting.
  */
@@ -1722,11 +2104,7 @@ static void origin_resume(struct dm_target *ti)
 {
        struct dm_dev *dev = ti->private;
 
-       down_read(&_origins_lock);
-
-       ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
-
-       up_read(&_origins_lock);
+       ti->split_io = get_origin_minimum_chunksize(dev->bdev);
 }
 
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1790,9 +2168,10 @@ static struct target_type merge_target = {
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
+       .presuspend = snapshot_merge_presuspend,
        .postsuspend = snapshot_postsuspend,
        .preresume  = snapshot_preresume,
-       .resume  = snapshot_resume,
+       .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
 };