/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

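/*
 * ti->type->name points at the name string the target type was registered
 * with, so a pointer comparison against the static string above is enough
 * to recognise the snapshot-merge target.
 */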
#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))

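/*
 * Hash table of exceptions, indexed by chunk number.  The lowest
 * hash_shift bits of the chunk are folded away (see exception_hash()),
 * so runs of consecutive chunks hash to the same bucket.
 */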
struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

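/*
 * Per-bio tracking of chunks with I/O in flight, kept in
 * tracked_chunk_hash so that exception commits and merges can wait for
 * conflicting I/O to drain (see __check_for_conflicting_io()).
 */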
struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

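/* Hash an origin by the low eight bits of its dev_t. */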
static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

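/*
 * Completed exceptions are allocated with GFP_NOIO; if that fails under
 * memory pressure, retry with GFP_ATOMIC so the emergency reserves can
 * be used.
 */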
static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

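/*
 * Pending exceptions come from a mempool, so a GFP_NOIO allocation may
 * block but will not fail.
 */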
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

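/*
 * Insert a completed exception, extending an adjacent entry instead when
 * both the old and new chunks immediately precede or follow it.
 */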
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

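/* Clear RUNNING_MERGE and wake anyone waiting on it in stop_merge(). */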
static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

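/*
 * Copy the next run of contiguous chunks back to the origin.  The copy
 * finishes in merge_callback(), which commits the merge and calls back
 * into this function for the following chunks.
 */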
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

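/* Begin merging unless a merge is already running. */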
static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

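/* Action for wait_on_bit(): just reschedule until the bit is cleared. */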
static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_bios = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_bios = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_bios = num_flush_bios;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }

        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
        if (r)
                goto bad_read_metadata;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

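/*
 * Hand the exception table and store over from snap_src to snap_dest by
 * swapping them, then invalidate the source so it takes no further I/O.
 */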
static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios: remap each through do_origin() and
 * resubmit those that come back DM_MAPIO_REMAPPED.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

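/*
 * A copy (or full-chunk write) has finished: move the exception from the
 * pending table to the completed table and release any bios that were
 * waiting on it.
 */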
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

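/*
 * Redirect a bio to its remapped chunk on the COW device, preserving the
 * bio's offset within the chunk.
 */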
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}

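/*
 * Map I/O submitted to a snapshot: remapped chunks are redirected to the
 * COW device, reads of unremapped chunks go to the origin, and writes to
 * unremapped chunks trigger (or join) a pending exception and are held
 * until the chunk has been copied.
 */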
1573 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1574 {
1575         struct dm_exception *e;
1576         struct dm_snapshot *s = ti->private;
1577         int r = DM_MAPIO_REMAPPED;
1578         chunk_t chunk;
1579         struct dm_snap_pending_exception *pe = NULL;
1580
1581         init_tracked_chunk(bio);
1582
1583         if (bio->bi_rw & REQ_FLUSH) {
1584                 bio->bi_bdev = s->cow->bdev;
1585                 return DM_MAPIO_REMAPPED;
1586         }
1587
1588         chunk = sector_to_chunk(s->store, bio->bi_sector);
1589
1590         /* Full snapshots are not usable */
1591         /* To get here the table must be live so s->active is always set. */
1592         if (!s->valid)
1593                 return -EIO;
1594
1595         /* FIXME: should only take write lock if we need
1596          * to copy an exception */
1597         down_write(&s->lock);
1598
1599         if (!s->valid) {
1600                 r = -EIO;
1601                 goto out_unlock;
1602         }
1603
1604         /* If the block is already remapped - use that, else remap it */
1605         e = dm_lookup_exception(&s->complete, chunk);
1606         if (e) {
1607                 remap_exception(s, e, bio, chunk);
1608                 goto out_unlock;
1609         }
1610
1611         /*
1612          * Write to snapshot - higher level takes care of RW/RO
1613          * flags so we should only get this if we are
1614          * writeable.
1615          */
1616         if (bio_rw(bio) == WRITE) {
1617                 pe = __lookup_pending_exception(s, chunk);
1618                 if (!pe) {
1619                         up_write(&s->lock);
1620                         pe = alloc_pending_exception(s);
1621                         down_write(&s->lock);
1622
1623                         if (!s->valid) {
1624                                 free_pending_exception(pe);
1625                                 r = -EIO;
1626                                 goto out_unlock;
1627                         }
1628
1629                         e = dm_lookup_exception(&s->complete, chunk);
1630                         if (e) {
1631                                 free_pending_exception(pe);
1632                                 remap_exception(s, e, bio, chunk);
1633                                 goto out_unlock;
1634                         }
1635
1636                         pe = __find_pending_exception(s, pe, chunk);
1637                         if (!pe) {
1638                                 __invalidate_snapshot(s, -ENOMEM);
1639                                 r = -EIO;
1640                                 goto out_unlock;
1641                         }
1642                 }
1643
1644                 remap_exception(s, &pe->e, bio, chunk);
1645
1646                 r = DM_MAPIO_SUBMITTED;
1647
1648                 if (!pe->started &&
1649                     bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1650                         pe->started = 1;
1651                         up_write(&s->lock);
1652                         start_full_bio(pe, bio);
1653                         goto out;
1654                 }
1655
1656                 bio_list_add(&pe->snapshot_bios, bio);
1657
1658                 if (!pe->started) {
1659                         /* this is protected by snap->lock */
1660                         pe->started = 1;
1661                         up_write(&s->lock);
1662                         start_copy(pe);
1663                         goto out;
1664                 }
1665         } else {
1666                 bio->bi_bdev = s->origin->bdev;
1667                 track_chunk(s, bio, chunk);
1668         }
1669
1670 out_unlock:
1671         up_write(&s->lock);
1672 out:
1673         return r;
1674 }
1675
1676 /*
1677  * A snapshot-merge target behaves like a combination of a snapshot
1678  * target and a snapshot-origin target.  It only generates new
1679  * exceptions in other snapshots and not in the one that is being
1680  * merged.
1681  *
1682  * For each chunk, if there is an existing exception, it is used to
1683  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1684  * which in turn might generate exceptions in other snapshots.
1685  * If merging is currently taking place on the chunk in question, the
1686  * I/O is deferred by adding it to s->bios_queued_during_merge.
1687  */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

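	/*
	 * The core clones a flush for each of the target's flush
	 * destinations; dm_bio_get_target_bio_nr() tells them apart, so
	 * bio nr 0 is sent to the origin and any other to the cow device.
	 */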
	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

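/*
 * Completion hook: if this bio was tracked via track_chunk(), remove it
 * from the tracked-chunk hash so waiters on that chunk can make progress.
 */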
static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

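/*
 * Exception handover: when another snapshot shares our cow device,
 * refuse to resume until the handover source has been suspended, and
 * never resume the source itself while a handover is pending.
 */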
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now that we have the correct chunk size, re-register */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Hand over exceptions from the existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 * accordingly.
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

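/*
 * Typical STATUSTYPE_INFO output, as emitted below (numbers are
 * illustrative only): "16/1048576 16", i.e.
 * <sectors_allocated>/<total_sectors> <metadata_sectors>.
 */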
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry(snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
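			/*
			 * As in snapshot_map(): the allocation may block,
			 * so drop the lock around it and revalidate the
			 * snapshot and its exception tables afterwards.
			 */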
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
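/*
 * Illustrative numbers: if the smallest chunk size among the origin's
 * snapshots is 8 sectors (so ti->max_io_len == 8) and size is 24
 * sectors, the loop below calls __origin_write() for sector, sector + 8
 * and sector + 16, each of which may kick off a copy in some snapshot.
 */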
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
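/*
 * For example (device name and size are illustrative only):
 *
 *   dmsetup create origin --table "0 2097152 snapshot-origin /dev/sdb1"
 */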
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_bios = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;

	bio->bi_bdev = dev->bdev;

	if (bio->bi_rw & REQ_FLUSH)
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}
}

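/*
 * Honour the underlying device's bio-merging constraints by forwarding
 * merge_bvec queries to its queue.
 */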
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_dev *dev = ti->private;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 8, 1},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge   = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 11, 1},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 2, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

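/*
 * Module init: register the three targets and create the caches they
 * share; the error path unwinds in the reverse order of setup.
 */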
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");