1/*
2 * dm-snapshot.c
3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/blkdev.h>
10#include <linux/device-mapper.h>
11#include <linux/delay.h>
12#include <linux/fs.h>
13#include <linux/init.h>
14#include <linux/kdev_t.h>
15#include <linux/list.h>
16#include <linux/mempool.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/log2.h>
21#include <linux/dm-kcopyd.h>
22
23#include "dm.h"
24
25#include "dm-exception-store.h"
26
27#define DM_MSG_PREFIX "snapshots"
28
29static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
30
31#define dm_target_is_snapshot_merge(ti) \
32	((ti)->type->name == dm_snapshot_merge_target_name)
33
/*
 * The size of the mempool used to allocate pending exceptions.
 */
#define MIN_IOS 256
38
39#define DM_TRACKED_CHUNK_HASH_SIZE	16
40#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
41					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
42
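/*
 * In-core exception table: a chained hash keyed on the old (origin)
 * chunk number.  One table holds completed exceptions, another holds
 * exceptions that are still being copied (pending).
 */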
43struct dm_exception_table {
44	uint32_t hash_mask;
45	unsigned hash_shift;
46	struct list_head *table;
47};
48
49struct dm_snapshot {
50	struct rw_semaphore lock;
51
52	struct dm_dev *origin;
53	struct dm_dev *cow;
54
55	struct dm_target *ti;
56
57	/* List of snapshots per Origin */
58	struct list_head list;
59
60	/*
61	 * You can't use a snapshot if this is 0 (e.g. if full).
62	 * A snapshot-merge target never clears this.
63	 */
64	int valid;
65
66	/* Origin writes don't trigger exceptions until this is set */
67	int active;
68
69	atomic_t pending_exceptions_count;
70
71	/* Protected by "lock" */
72	sector_t exception_start_sequence;
73
74	/* Protected by kcopyd single-threaded callback */
75	sector_t exception_complete_sequence;
76
77	/*
78	 * A list of pending exceptions that completed out of order.
79	 * Protected by kcopyd single-threaded callback.
80	 */
81	struct list_head out_of_order_list;
82
83	mempool_t *pending_pool;
84
85	struct dm_exception_table pending;
86	struct dm_exception_table complete;
87
88	/*
89	 * pe_lock protects all pending_exception operations and access
90	 * as well as the snapshot_bios list.
91	 */
92	spinlock_t pe_lock;
93
94	/* Chunks with outstanding reads */
95	spinlock_t tracked_chunk_lock;
96	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
97
98	/* The on disk metadata handler */
99	struct dm_exception_store *store;
100
101	struct dm_kcopyd_client *kcopyd_client;
102
103	/* Wait for events based on state_bits */
104	unsigned long state_bits;
105
106	/* Range of chunks currently being merged. */
107	chunk_t first_merging_chunk;
108	int num_merging_chunks;
109
110	/*
111	 * The merge operation failed if this flag is set.
112	 * Failure modes are handled as follows:
113	 * - I/O error reading the header
114	 *   	=> don't load the target; abort.
115	 * - Header does not have "valid" flag set
116	 *   	=> use the origin; forget about the snapshot.
117	 * - I/O error when reading exceptions
118	 *   	=> don't load the target; abort.
119	 *         (We can't use the intermediate origin state.)
120	 * - I/O error while merging
121	 *	=> stop merging; set merge_failed; process I/O normally.
122	 */
123	int merge_failed;
124
125	/*
126	 * Incoming bios that overlap with chunks being merged must wait
127	 * for them to be committed.
128	 */
129	struct bio_list bios_queued_during_merge;
130};
131
132/*
133 * state_bits:
134 *   RUNNING_MERGE  - Merge operation is in progress.
135 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
136 *                    cleared afterwards.
137 */
138#define RUNNING_MERGE          0
139#define SHUTDOWN_MERGE         1
140
141DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
142		"A percentage of time allocated for copy on write");
143
144struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
145{
146	return s->origin;
147}
148EXPORT_SYMBOL(dm_snap_origin);
149
150struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
151{
152	return s->cow;
153}
154EXPORT_SYMBOL(dm_snap_cow);
155
156static sector_t chunk_to_sector(struct dm_exception_store *store,
157				chunk_t chunk)
158{
159	return chunk << store->chunk_shift;
160}
161
162static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
163{
164	/*
165	 * There is only ever one instance of a particular block
166	 * device so we can compare pointers safely.
167	 */
168	return lhs == rhs;
169}
170
171struct dm_snap_pending_exception {
172	struct dm_exception e;
173
174	/*
175	 * Origin buffers waiting for this to complete are held
176	 * in a bio list
177	 */
178	struct bio_list origin_bios;
179	struct bio_list snapshot_bios;
180
181	/* Pointer back to snapshot context */
182	struct dm_snapshot *snap;
183
184	/*
185	 * 1 indicates the exception has already been sent to
186	 * kcopyd.
187	 */
188	int started;
189
	/* There was a copying error. */
191	int copy_error;
192
	/* A sequence number, used for in-order completion. */
194	sector_t exception_sequence;
195
196	struct list_head out_of_order_entry;
197
198	/*
199	 * For writing a complete chunk, bypassing the copy.
200	 */
201	struct bio *full_bio;
202	bio_end_io_t *full_bio_end_io;
203	void *full_bio_private;
204};
205
/*
 * Slab caches used to allocate completed and pending exceptions.
 * (The hash table mapping origin volumes to lists of snapshots, and the
 * lock protecting it, are _origins and _origins_lock below.)
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
212
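/*
 * Chunks with I/O still in flight are tracked here (one entry per bio,
 * hashed by chunk) so that a chunk is not remapped or merged while such
 * I/O is outstanding - see __check_for_conflicting_io().
 */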
213struct dm_snap_tracked_chunk {
214	struct hlist_node node;
215	chunk_t chunk;
216};
217
218static void init_tracked_chunk(struct bio *bio)
219{
220	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
221	INIT_HLIST_NODE(&c->node);
222}
223
224static bool is_bio_tracked(struct bio *bio)
225{
226	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
227	return !hlist_unhashed(&c->node);
228}
229
230static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
231{
232	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
233
234	c->chunk = chunk;
235
236	spin_lock_irq(&s->tracked_chunk_lock);
237	hlist_add_head(&c->node,
238		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
239	spin_unlock_irq(&s->tracked_chunk_lock);
240}
241
242static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
243{
244	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
245	unsigned long flags;
246
247	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
248	hlist_del(&c->node);
249	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
250}
251
252static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
253{
254	struct dm_snap_tracked_chunk *c;
255	int found = 0;
256
257	spin_lock_irq(&s->tracked_chunk_lock);
258
259	hlist_for_each_entry(c,
260	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
261		if (c->chunk == chunk) {
262			found = 1;
263			break;
264		}
265	}
266
267	spin_unlock_irq(&s->tracked_chunk_lock);
268
269	return found;
270}
271
/*
 * Conflicting I/O on the same chunk is extremely improbable here, so
 * msleep(1) is sufficient and there is no need for a wait queue.
 */
276static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
277{
278	while (__chunk_is_tracked(s, chunk))
279		msleep(1);
280}
281
282/*
283 * One of these per registered origin, held in the snapshot_origins hash
284 */
285struct origin {
286	/* The origin device */
287	struct block_device *bdev;
288
289	struct list_head hash_list;
290
291	/* List of snapshots for this origin */
292	struct list_head snapshots;
293};
294
295/*
296 * This structure is allocated for each origin target
297 */
298struct dm_origin {
299	struct dm_dev *dev;
300	struct dm_target *ti;
301	unsigned split_boundary;
302	struct list_head hash_list;
303};
304
305/*
306 * Size of the hash table for origin volumes. If we make this
307 * the size of the minors list then it should be nearly perfect
308 */
309#define ORIGIN_HASH_SIZE 256
310#define ORIGIN_MASK      0xFF
311static struct list_head *_origins;
312static struct list_head *_dm_origins;
313static struct rw_semaphore _origins_lock;
314
315static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
316static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
317static uint64_t _pending_exceptions_done_count;
318
319static int init_origin_hash(void)
320{
321	int i;
322
323	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
324			   GFP_KERNEL);
325	if (!_origins) {
326		DMERR("unable to allocate memory for _origins");
327		return -ENOMEM;
328	}
329	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
330		INIT_LIST_HEAD(_origins + i);
331
332	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
333			      GFP_KERNEL);
334	if (!_dm_origins) {
335		DMERR("unable to allocate memory for _dm_origins");
336		kfree(_origins);
337		return -ENOMEM;
338	}
339	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
340		INIT_LIST_HEAD(_dm_origins + i);
341
342	init_rwsem(&_origins_lock);
343
344	return 0;
345}
346
347static void exit_origin_hash(void)
348{
349	kfree(_origins);
350	kfree(_dm_origins);
351}
352
353static unsigned origin_hash(struct block_device *bdev)
354{
355	return bdev->bd_dev & ORIGIN_MASK;
356}
357
358static struct origin *__lookup_origin(struct block_device *origin)
359{
360	struct list_head *ol;
361	struct origin *o;
362
363	ol = &_origins[origin_hash(origin)];
364	list_for_each_entry (o, ol, hash_list)
365		if (bdev_equal(o->bdev, origin))
366			return o;
367
368	return NULL;
369}
370
371static void __insert_origin(struct origin *o)
372{
373	struct list_head *sl = &_origins[origin_hash(o->bdev)];
374	list_add_tail(&o->hash_list, sl);
375}
376
377static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
378{
379	struct list_head *ol;
380	struct dm_origin *o;
381
382	ol = &_dm_origins[origin_hash(origin)];
383	list_for_each_entry (o, ol, hash_list)
384		if (bdev_equal(o->dev->bdev, origin))
385			return o;
386
387	return NULL;
388}
389
390static void __insert_dm_origin(struct dm_origin *o)
391{
392	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
393	list_add_tail(&o->hash_list, sl);
394}
395
396static void __remove_dm_origin(struct dm_origin *o)
397{
398	list_del(&o->hash_list);
399}
400
401/*
402 * _origins_lock must be held when calling this function.
403 * Returns number of snapshots registered using the supplied cow device, plus:
404 * snap_src - a snapshot suitable for use as a source of exception handover
405 * snap_dest - a snapshot capable of receiving exception handover.
406 * snap_merge - an existing snapshot-merge target linked to the same origin.
407 *   There can be at most one snapshot-merge target. The parameter is optional.
408 *
409 * Possible return values and states of snap_src and snap_dest.
410 *   0: NULL, NULL  - first new snapshot
411 *   1: snap_src, NULL - normal snapshot
412 *   2: snap_src, snap_dest  - waiting for handover
413 *   2: snap_src, NULL - handed over, waiting for old to be deleted
414 *   1: NULL, snap_dest - source got destroyed without handover
415 */
416static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
417					struct dm_snapshot **snap_src,
418					struct dm_snapshot **snap_dest,
419					struct dm_snapshot **snap_merge)
420{
421	struct dm_snapshot *s;
422	struct origin *o;
423	int count = 0;
424	int active;
425
426	o = __lookup_origin(snap->origin->bdev);
427	if (!o)
428		goto out;
429
430	list_for_each_entry(s, &o->snapshots, list) {
431		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
432			*snap_merge = s;
433		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
434			continue;
435
436		down_read(&s->lock);
437		active = s->active;
438		up_read(&s->lock);
439
440		if (active) {
441			if (snap_src)
442				*snap_src = s;
443		} else if (snap_dest)
444			*snap_dest = s;
445
446		count++;
447	}
448
449out:
450	return count;
451}
452
453/*
454 * On success, returns 1 if this snapshot is a handover destination,
455 * otherwise returns 0.
456 */
457static int __validate_exception_handover(struct dm_snapshot *snap)
458{
459	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
460	struct dm_snapshot *snap_merge = NULL;
461
462	/* Does snapshot need exceptions handed over to it? */
463	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
464					  &snap_merge) == 2) ||
465	    snap_dest) {
466		snap->ti->error = "Snapshot cow pairing for exception "
467				  "table handover failed";
468		return -EINVAL;
469	}
470
471	/*
472	 * If no snap_src was found, snap cannot become a handover
473	 * destination.
474	 */
475	if (!snap_src)
476		return 0;
477
478	/*
479	 * Non-snapshot-merge handover?
480	 */
481	if (!dm_target_is_snapshot_merge(snap->ti))
482		return 1;
483
484	/*
485	 * Do not allow more than one merging snapshot.
486	 */
487	if (snap_merge) {
488		snap->ti->error = "A snapshot is already merging.";
489		return -EINVAL;
490	}
491
492	if (!snap_src->store->type->prepare_merge ||
493	    !snap_src->store->type->commit_merge) {
494		snap->ti->error = "Snapshot exception store does not "
495				  "support snapshot-merge.";
496		return -EINVAL;
497	}
498
499	return 1;
500}
501
502static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
503{
504	struct dm_snapshot *l;
505
	/* Keep the list sorted by chunk size, largest first, smallest last */
507	list_for_each_entry(l, &o->snapshots, list)
508		if (l->store->chunk_size < s->store->chunk_size)
509			break;
510	list_add_tail(&s->list, &l->list);
511}
512
513/*
514 * Make a note of the snapshot and its origin so we can look it
515 * up when the origin has a write on it.
516 *
517 * Also validate snapshot exception store handovers.
518 * On success, returns 1 if this registration is a handover destination,
519 * otherwise returns 0.
520 */
521static int register_snapshot(struct dm_snapshot *snap)
522{
523	struct origin *o, *new_o = NULL;
524	struct block_device *bdev = snap->origin->bdev;
525	int r = 0;
526
527	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
528	if (!new_o)
529		return -ENOMEM;
530
531	down_write(&_origins_lock);
532
533	r = __validate_exception_handover(snap);
534	if (r < 0) {
535		kfree(new_o);
536		goto out;
537	}
538
539	o = __lookup_origin(bdev);
540	if (o)
541		kfree(new_o);
542	else {
543		/* New origin */
544		o = new_o;
545
546		/* Initialise the struct */
547		INIT_LIST_HEAD(&o->snapshots);
548		o->bdev = bdev;
549
550		__insert_origin(o);
551	}
552
553	__insert_snapshot(o, snap);
554
555out:
556	up_write(&_origins_lock);
557
558	return r;
559}
560
561/*
562 * Move snapshot to correct place in list according to chunk size.
563 */
564static void reregister_snapshot(struct dm_snapshot *s)
565{
566	struct block_device *bdev = s->origin->bdev;
567
568	down_write(&_origins_lock);
569
570	list_del(&s->list);
571	__insert_snapshot(__lookup_origin(bdev), s);
572
573	up_write(&_origins_lock);
574}
575
576static void unregister_snapshot(struct dm_snapshot *s)
577{
578	struct origin *o;
579
580	down_write(&_origins_lock);
581	o = __lookup_origin(s->origin->bdev);
582
583	list_del(&s->list);
584	if (o && list_empty(&o->snapshots)) {
585		list_del(&o->hash_list);
586		kfree(o);
587	}
588
589	up_write(&_origins_lock);
590}
591
592/*
593 * Implementation of the exception hash tables.
594 * The lowest hash_shift bits of the chunk number are ignored, allowing
595 * some consecutive chunks to be grouped together.
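 *
 * For example (illustrative), with hash_shift equal to
 * DM_CHUNK_CONSECUTIVE_BITS, chunks that differ only in their lowest
 * bits hash to the same bucket, so dm_insert_exception() can extend an
 * existing run instead of adding a new entry.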
596 */
597static int dm_exception_table_init(struct dm_exception_table *et,
598				   uint32_t size, unsigned hash_shift)
599{
600	unsigned int i;
601
602	et->hash_shift = hash_shift;
603	et->hash_mask = size - 1;
604	et->table = dm_vcalloc(size, sizeof(struct list_head));
605	if (!et->table)
606		return -ENOMEM;
607
608	for (i = 0; i < size; i++)
609		INIT_LIST_HEAD(et->table + i);
610
611	return 0;
612}
613
614static void dm_exception_table_exit(struct dm_exception_table *et,
615				    struct kmem_cache *mem)
616{
617	struct list_head *slot;
618	struct dm_exception *ex, *next;
619	int i, size;
620
621	size = et->hash_mask + 1;
622	for (i = 0; i < size; i++) {
623		slot = et->table + i;
624
625		list_for_each_entry_safe (ex, next, slot, hash_list)
626			kmem_cache_free(mem, ex);
627	}
628
629	vfree(et->table);
630}
631
632static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
633{
634	return (chunk >> et->hash_shift) & et->hash_mask;
635}
636
637static void dm_remove_exception(struct dm_exception *e)
638{
639	list_del(&e->hash_list);
640}
641
/*
 * Return the exception data for a chunk, or NULL if the chunk has not
 * been remapped.
 */
646static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
647						chunk_t chunk)
648{
649	struct list_head *slot;
650	struct dm_exception *e;
651
652	slot = &et->table[exception_hash(et, chunk)];
653	list_for_each_entry (e, slot, hash_list)
654		if (chunk >= e->old_chunk &&
655		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
656			return e;
657
658	return NULL;
659}
660
661static struct dm_exception *alloc_completed_exception(gfp_t gfp)
662{
663	struct dm_exception *e;
664
665	e = kmem_cache_alloc(exception_cache, gfp);
666	if (!e && gfp == GFP_NOIO)
667		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
668
669	return e;
670}
671
672static void free_completed_exception(struct dm_exception *e)
673{
674	kmem_cache_free(exception_cache, e);
675}
676
677static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
678{
679	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
680							     GFP_NOIO);
681
682	atomic_inc(&s->pending_exceptions_count);
683	pe->snap = s;
684
685	return pe;
686}
687
688static void free_pending_exception(struct dm_snap_pending_exception *pe)
689{
690	struct dm_snapshot *s = pe->snap;
691
692	mempool_free(pe, s->pending_pool);
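	/*
	 * Make the mempool_free() above visible before the count drops:
	 * snapshot_dtr() waits for pending_exceptions_count to reach zero
	 * and then destroys the pool.
	 */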
693	smp_mb__before_atomic();
694	atomic_dec(&s->pending_exceptions_count);
695}
696
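/*
 * Insert a completed exception, extending an existing run of
 * consecutive chunks where possible.  For example (illustrative): if an
 * exception already maps old chunks 10-12 to new chunks 20-22
 * (consecutive count 2) and new_e maps old chunk 13 to new chunk 23,
 * the existing exception's count is incremented and new_e is freed.
 */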
697static void dm_insert_exception(struct dm_exception_table *eh,
698				struct dm_exception *new_e)
699{
700	struct list_head *l;
701	struct dm_exception *e = NULL;
702
703	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
704
705	/* Add immediately if this table doesn't support consecutive chunks */
706	if (!eh->hash_shift)
707		goto out;
708
709	/* List is ordered by old_chunk */
710	list_for_each_entry_reverse(e, l, hash_list) {
711		/* Insert after an existing chunk? */
712		if (new_e->old_chunk == (e->old_chunk +
713					 dm_consecutive_chunk_count(e) + 1) &&
714		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
715					 dm_consecutive_chunk_count(e) + 1)) {
716			dm_consecutive_chunk_count_inc(e);
717			free_completed_exception(new_e);
718			return;
719		}
720
721		/* Insert before an existing chunk? */
722		if (new_e->old_chunk == (e->old_chunk - 1) &&
723		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
724			dm_consecutive_chunk_count_inc(e);
725			e->old_chunk--;
726			e->new_chunk--;
727			free_completed_exception(new_e);
728			return;
729		}
730
731		if (new_e->old_chunk > e->old_chunk)
732			break;
733	}
734
735out:
736	list_add(&new_e->hash_list, e ? &e->hash_list : l);
737}
738
739/*
740 * Callback used by the exception stores to load exceptions when
741 * initialising.
742 */
743static int dm_add_exception(void *context, chunk_t old, chunk_t new)
744{
745	struct dm_snapshot *s = context;
746	struct dm_exception *e;
747
748	e = alloc_completed_exception(GFP_KERNEL);
749	if (!e)
750		return -ENOMEM;
751
752	e->old_chunk = old;
753
754	/* Consecutive_count is implicitly initialised to zero */
755	e->new_chunk = new;
756
757	dm_insert_exception(&s->complete, e);
758
759	return 0;
760}
761
/*
 * Return the minimum chunk size of all snapshots that have the specified
 * origin, or zero if the origin has no snapshots.
 */
766static uint32_t __minimum_chunk_size(struct origin *o)
767{
768	struct dm_snapshot *snap;
769	unsigned chunk_size = 0;
770
771	if (o)
772		list_for_each_entry(snap, &o->snapshots, list)
773			chunk_size = min_not_zero(chunk_size,
774						  snap->store->chunk_size);
775
776	return (uint32_t) chunk_size;
777}
778
/*
 * Hard-coded heuristic: cap the exception hash table at 2MB of bucket
 * list heads.
 */
782static int calc_max_buckets(void)
783{
784	/* use a fixed size of 2MB */
785	unsigned long mem = 2 * 1024 * 1024;
786	mem /= sizeof(struct list_head);
787
788	return mem;
789}
790
791/*
792 * Allocate room for a suitable hash table.
793 */
794static int init_hash_tables(struct dm_snapshot *s)
795{
796	sector_t hash_size, cow_dev_size, max_buckets;
797
	/*
	 * Calculate the number of buckets based on the size of the COW
	 * volume, capped at calc_max_buckets().
	 */
802	cow_dev_size = get_dev_size(s->cow->bdev);
803	max_buckets = calc_max_buckets();
804
805	hash_size = cow_dev_size >> s->store->chunk_shift;
806	hash_size = min(hash_size, max_buckets);
807
808	if (hash_size < 64)
809		hash_size = 64;
810	hash_size = rounddown_pow_of_two(hash_size);
811	if (dm_exception_table_init(&s->complete, hash_size,
812				    DM_CHUNK_CONSECUTIVE_BITS))
813		return -ENOMEM;
814
	/*
	 * Allocate the hash table for in-flight (pending) exceptions.
	 * Make this smaller than the table for completed exceptions.
	 */
819	hash_size >>= 3;
820	if (hash_size < 64)
821		hash_size = 64;
822
823	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
824		dm_exception_table_exit(&s->complete, exception_cache);
825		return -ENOMEM;
826	}
827
828	return 0;
829}
830
831static void merge_shutdown(struct dm_snapshot *s)
832{
833	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
834	smp_mb__after_atomic();
835	wake_up_bit(&s->state_bits, RUNNING_MERGE);
836}
837
838static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
839{
840	s->first_merging_chunk = 0;
841	s->num_merging_chunks = 0;
842
843	return bio_list_get(&s->bios_queued_during_merge);
844}
845
846/*
847 * Remove one chunk from the index of completed exceptions.
848 */
849static int __remove_single_exception_chunk(struct dm_snapshot *s,
850					   chunk_t old_chunk)
851{
852	struct dm_exception *e;
853
854	e = dm_lookup_exception(&s->complete, old_chunk);
855	if (!e) {
856		DMERR("Corruption detected: exception for block %llu is "
857		      "on disk but not in memory",
858		      (unsigned long long)old_chunk);
859		return -EINVAL;
860	}
861
862	/*
863	 * If this is the only chunk using this exception, remove exception.
864	 */
865	if (!dm_consecutive_chunk_count(e)) {
866		dm_remove_exception(e);
867		free_completed_exception(e);
868		return 0;
869	}
870
871	/*
872	 * The chunk may be either at the beginning or the end of a
873	 * group of consecutive chunks - never in the middle.  We are
874	 * removing chunks in the opposite order to that in which they
875	 * were added, so this should always be true.
876	 * Decrement the consecutive chunk counter and adjust the
877	 * starting point if necessary.
878	 */
879	if (old_chunk == e->old_chunk) {
880		e->old_chunk++;
881		e->new_chunk++;
882	} else if (old_chunk != e->old_chunk +
883		   dm_consecutive_chunk_count(e)) {
884		DMERR("Attempt to merge block %llu from the "
885		      "middle of a chunk range [%llu - %llu]",
886		      (unsigned long long)old_chunk,
887		      (unsigned long long)e->old_chunk,
888		      (unsigned long long)
889		      e->old_chunk + dm_consecutive_chunk_count(e));
890		return -EINVAL;
891	}
892
893	dm_consecutive_chunk_count_dec(e);
894
895	return 0;
896}
897
898static void flush_bios(struct bio *bio);
899
900static int remove_single_exception_chunk(struct dm_snapshot *s)
901{
902	struct bio *b = NULL;
903	int r;
904	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
905
906	down_write(&s->lock);
907
908	/*
909	 * Process chunks (and associated exceptions) in reverse order
910	 * so that dm_consecutive_chunk_count_dec() accounting works.
911	 */
912	do {
913		r = __remove_single_exception_chunk(s, old_chunk);
914		if (r)
915			goto out;
916	} while (old_chunk-- > s->first_merging_chunk);
917
918	b = __release_queued_bios_after_merge(s);
919
920out:
921	up_write(&s->lock);
922	if (b)
923		flush_bios(b);
924
925	return r;
926}
927
928static int origin_write_extent(struct dm_snapshot *merging_snap,
929			       sector_t sector, unsigned chunk_size);
930
931static void merge_callback(int read_err, unsigned long write_err,
932			   void *context);
933
934static uint64_t read_pending_exceptions_done_count(void)
935{
936	uint64_t pending_exceptions_done;
937
938	spin_lock(&_pending_exceptions_done_spinlock);
939	pending_exceptions_done = _pending_exceptions_done_count;
940	spin_unlock(&_pending_exceptions_done_spinlock);
941
942	return pending_exceptions_done;
943}
944
945static void increment_pending_exceptions_done_count(void)
946{
947	spin_lock(&_pending_exceptions_done_spinlock);
948	_pending_exceptions_done_count++;
949	spin_unlock(&_pending_exceptions_done_spinlock);
950
951	wake_up_all(&_pending_exceptions_done);
952}
953
954static void snapshot_merge_next_chunks(struct dm_snapshot *s)
955{
956	int i, linear_chunks;
957	chunk_t old_chunk, new_chunk;
958	struct dm_io_region src, dest;
959	sector_t io_size;
960	uint64_t previous_count;
961
962	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
963	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
964		goto shut;
965
966	/*
967	 * valid flag never changes during merge, so no lock required.
968	 */
969	if (!s->valid) {
970		DMERR("Snapshot is invalid: can't merge");
971		goto shut;
972	}
973
974	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
975						      &new_chunk);
976	if (linear_chunks <= 0) {
977		if (linear_chunks < 0) {
978			DMERR("Read error in exception store: "
979			      "shutting down merge");
980			down_write(&s->lock);
981			s->merge_failed = 1;
982			up_write(&s->lock);
983		}
984		goto shut;
985	}
986
987	/* Adjust old_chunk and new_chunk to reflect start of linear region */
988	old_chunk = old_chunk + 1 - linear_chunks;
989	new_chunk = new_chunk + 1 - linear_chunks;
990
991	/*
992	 * Use one (potentially large) I/O to copy all 'linear_chunks'
993	 * from the exception store to the origin
994	 */
995	io_size = linear_chunks * s->store->chunk_size;
996
997	dest.bdev = s->origin->bdev;
998	dest.sector = chunk_to_sector(s->store, old_chunk);
999	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
1000
1001	src.bdev = s->cow->bdev;
1002	src.sector = chunk_to_sector(s->store, new_chunk);
1003	src.count = dest.count;
1004
1005	/*
1006	 * Reallocate any exceptions needed in other snapshots then
1007	 * wait for the pending exceptions to complete.
1008	 * Each time any pending exception (globally on the system)
1009	 * completes we are woken and repeat the process to find out
1010	 * if we can proceed.  While this may not seem a particularly
1011	 * efficient algorithm, it is not expected to have any
1012	 * significant impact on performance.
1013	 */
1014	previous_count = read_pending_exceptions_done_count();
1015	while (origin_write_extent(s, dest.sector, io_size)) {
1016		wait_event(_pending_exceptions_done,
1017			   (read_pending_exceptions_done_count() !=
1018			    previous_count));
1019		/* Retry after the wait, until all exceptions are done. */
1020		previous_count = read_pending_exceptions_done_count();
1021	}
1022
1023	down_write(&s->lock);
1024	s->first_merging_chunk = old_chunk;
1025	s->num_merging_chunks = linear_chunks;
1026	up_write(&s->lock);
1027
1028	/* Wait until writes to all 'linear_chunks' drain */
1029	for (i = 0; i < linear_chunks; i++)
1030		__check_for_conflicting_io(s, old_chunk + i);
1031
1032	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
1033	return;
1034
1035shut:
1036	merge_shutdown(s);
1037}
1038
1039static void error_bios(struct bio *bio);
1040
1041static void merge_callback(int read_err, unsigned long write_err, void *context)
1042{
1043	struct dm_snapshot *s = context;
1044	struct bio *b = NULL;
1045
1046	if (read_err || write_err) {
1047		if (read_err)
1048			DMERR("Read error: shutting down merge.");
1049		else
1050			DMERR("Write error: shutting down merge.");
1051		goto shut;
1052	}
1053
1054	if (s->store->type->commit_merge(s->store,
1055					 s->num_merging_chunks) < 0) {
1056		DMERR("Write error in exception store: shutting down merge");
1057		goto shut;
1058	}
1059
1060	if (remove_single_exception_chunk(s) < 0)
1061		goto shut;
1062
1063	snapshot_merge_next_chunks(s);
1064
1065	return;
1066
1067shut:
1068	down_write(&s->lock);
1069	s->merge_failed = 1;
1070	b = __release_queued_bios_after_merge(s);
1071	up_write(&s->lock);
1072	error_bios(b);
1073
1074	merge_shutdown(s);
1075}
1076
1077static void start_merge(struct dm_snapshot *s)
1078{
1079	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
1080		snapshot_merge_next_chunks(s);
1081}
1082
/*
 * Signal the merging process to stop and wait until it has shut down.
 */
1086static void stop_merge(struct dm_snapshot *s)
1087{
1088	set_bit(SHUTDOWN_MERGE, &s->state_bits);
1089	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
1090	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
1091}
1092
1093/*
1094 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
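 *
 * An illustrative table line (device names and sizes are examples only):
 *   0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16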
1095 */
1096static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1097{
1098	struct dm_snapshot *s;
1099	int i;
1100	int r = -EINVAL;
1101	char *origin_path, *cow_path;
1102	unsigned args_used, num_flush_bios = 1;
1103	fmode_t origin_mode = FMODE_READ;
1104
1105	if (argc != 4) {
1106		ti->error = "requires exactly 4 arguments";
1107		r = -EINVAL;
1108		goto bad;
1109	}
1110
1111	if (dm_target_is_snapshot_merge(ti)) {
1112		num_flush_bios = 2;
1113		origin_mode = FMODE_WRITE;
1114	}
1115
1116	s = kmalloc(sizeof(*s), GFP_KERNEL);
1117	if (!s) {
1118		ti->error = "Cannot allocate private snapshot structure";
1119		r = -ENOMEM;
1120		goto bad;
1121	}
1122
1123	origin_path = argv[0];
1124	argv++;
1125	argc--;
1126
1127	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
1128	if (r) {
1129		ti->error = "Cannot get origin device";
1130		goto bad_origin;
1131	}
1132
1133	cow_path = argv[0];
1134	argv++;
1135	argc--;
1136
1137	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1138	if (r) {
1139		ti->error = "Cannot get COW device";
1140		goto bad_cow;
1141	}
1142
1143	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1144	if (r) {
1145		ti->error = "Couldn't create exception store";
1146		r = -EINVAL;
1147		goto bad_store;
1148	}
1149
1150	argv += args_used;
1151	argc -= args_used;
1152
1153	s->ti = ti;
1154	s->valid = 1;
1155	s->active = 0;
1156	atomic_set(&s->pending_exceptions_count, 0);
1157	s->exception_start_sequence = 0;
1158	s->exception_complete_sequence = 0;
1159	INIT_LIST_HEAD(&s->out_of_order_list);
1160	init_rwsem(&s->lock);
1161	INIT_LIST_HEAD(&s->list);
1162	spin_lock_init(&s->pe_lock);
1163	s->state_bits = 0;
1164	s->merge_failed = 0;
1165	s->first_merging_chunk = 0;
1166	s->num_merging_chunks = 0;
1167	bio_list_init(&s->bios_queued_during_merge);
1168
1169	/* Allocate hash table for COW data */
1170	if (init_hash_tables(s)) {
1171		ti->error = "Unable to allocate hash table space";
1172		r = -ENOMEM;
1173		goto bad_hash_tables;
1174	}
1175
1176	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1177	if (IS_ERR(s->kcopyd_client)) {
1178		r = PTR_ERR(s->kcopyd_client);
1179		ti->error = "Could not create kcopyd client";
1180		goto bad_kcopyd;
1181	}
1182
1183	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
1184	if (!s->pending_pool) {
1185		ti->error = "Could not allocate mempool for pending exceptions";
1186		r = -ENOMEM;
1187		goto bad_pending_pool;
1188	}
1189
1190	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1191		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
1192
1193	spin_lock_init(&s->tracked_chunk_lock);
1194
1195	ti->private = s;
1196	ti->num_flush_bios = num_flush_bios;
1197	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
1198
1199	/* Add snapshot to the list of snapshots for this origin */
1200	/* Exceptions aren't triggered till snapshot_resume() is called */
1201	r = register_snapshot(s);
1202	if (r == -ENOMEM) {
1203		ti->error = "Snapshot origin struct allocation failed";
1204		goto bad_load_and_register;
1205	} else if (r < 0) {
1206		/* invalid handover, register_snapshot has set ti->error */
1207		goto bad_load_and_register;
1208	}
1209
1210	/*
1211	 * Metadata must only be loaded into one table at once, so skip this
1212	 * if metadata will be handed over during resume.
1213	 * Chunk size will be set during the handover - set it to zero to
1214	 * ensure it's ignored.
1215	 */
1216	if (r > 0) {
1217		s->store->chunk_size = 0;
1218		return 0;
1219	}
1220
1221	r = s->store->type->read_metadata(s->store, dm_add_exception,
1222					  (void *)s);
1223	if (r < 0) {
1224		ti->error = "Failed to read snapshot metadata";
1225		goto bad_read_metadata;
1226	} else if (r > 0) {
1227		s->valid = 0;
1228		DMWARN("Snapshot is marked invalid.");
1229	}
1230
1231	if (!s->store->chunk_size) {
1232		ti->error = "Chunk size not set";
1233		goto bad_read_metadata;
1234	}
1235
1236	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
1237	if (r)
1238		goto bad_read_metadata;
1239
1240	return 0;
1241
1242bad_read_metadata:
1243	unregister_snapshot(s);
1244
1245bad_load_and_register:
1246	mempool_destroy(s->pending_pool);
1247
1248bad_pending_pool:
1249	dm_kcopyd_client_destroy(s->kcopyd_client);
1250
1251bad_kcopyd:
1252	dm_exception_table_exit(&s->pending, pending_cache);
1253	dm_exception_table_exit(&s->complete, exception_cache);
1254
1255bad_hash_tables:
1256	dm_exception_store_destroy(s->store);
1257
1258bad_store:
1259	dm_put_device(ti, s->cow);
1260
1261bad_cow:
1262	dm_put_device(ti, s->origin);
1263
1264bad_origin:
1265	kfree(s);
1266
1267bad:
1268	return r;
1269}
1270
1271static void __free_exceptions(struct dm_snapshot *s)
1272{
1273	dm_kcopyd_client_destroy(s->kcopyd_client);
1274	s->kcopyd_client = NULL;
1275
1276	dm_exception_table_exit(&s->pending, pending_cache);
1277	dm_exception_table_exit(&s->complete, exception_cache);
1278}
1279
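/*
 * Swap the completed exception table and exception store from snap_src
 * into snap_dest, then invalidate snap_src so it receives no further
 * I/O.  Called with both snapshots' locks held.
 */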
1280static void __handover_exceptions(struct dm_snapshot *snap_src,
1281				  struct dm_snapshot *snap_dest)
1282{
1283	union {
1284		struct dm_exception_table table_swap;
1285		struct dm_exception_store *store_swap;
1286	} u;
1287
1288	/*
1289	 * Swap all snapshot context information between the two instances.
1290	 */
1291	u.table_swap = snap_dest->complete;
1292	snap_dest->complete = snap_src->complete;
1293	snap_src->complete = u.table_swap;
1294
1295	u.store_swap = snap_dest->store;
1296	snap_dest->store = snap_src->store;
1297	snap_src->store = u.store_swap;
1298
1299	snap_dest->store->snap = snap_dest;
1300	snap_src->store->snap = snap_src;
1301
1302	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
1303	snap_dest->valid = snap_src->valid;
1304
1305	/*
1306	 * Set source invalid to ensure it receives no further I/O.
1307	 */
1308	snap_src->valid = 0;
1309}
1310
1311static void snapshot_dtr(struct dm_target *ti)
1312{
1313#ifdef CONFIG_DM_DEBUG
1314	int i;
1315#endif
1316	struct dm_snapshot *s = ti->private;
1317	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1318
1319	down_read(&_origins_lock);
1320	/* Check whether exception handover must be cancelled */
1321	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1322	if (snap_src && snap_dest && (s == snap_src)) {
1323		down_write(&snap_dest->lock);
1324		snap_dest->valid = 0;
1325		up_write(&snap_dest->lock);
1326		DMERR("Cancelling snapshot handover.");
1327	}
1328	up_read(&_origins_lock);
1329
1330	if (dm_target_is_snapshot_merge(ti))
1331		stop_merge(s);
1332
1333	/* Prevent further origin writes from using this snapshot. */
1334	/* After this returns there can be no new kcopyd jobs. */
1335	unregister_snapshot(s);
1336
1337	while (atomic_read(&s->pending_exceptions_count))
1338		msleep(1);
1339	/*
1340	 * Ensure instructions in mempool_destroy aren't reordered
1341	 * before atomic_read.
1342	 */
1343	smp_mb();
1344
1345#ifdef CONFIG_DM_DEBUG
1346	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
1347		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
1348#endif
1349
1350	__free_exceptions(s);
1351
1352	mempool_destroy(s->pending_pool);
1353
1354	dm_exception_store_destroy(s->store);
1355
1356	dm_put_device(ti, s->cow);
1357
1358	dm_put_device(ti, s->origin);
1359
1360	kfree(s);
1361}
1362
1363/*
1364 * Flush a list of buffers.
1365 */
1366static void flush_bios(struct bio *bio)
1367{
1368	struct bio *n;
1369
1370	while (bio) {
1371		n = bio->bi_next;
1372		bio->bi_next = NULL;
1373		generic_make_request(bio);
1374		bio = n;
1375	}
1376}
1377
1378static int do_origin(struct dm_dev *origin, struct bio *bio);
1379
/*
 * Retry a list of origin bios: each one is passed back through
 * do_origin() and resubmitted if no further exceptions are needed.
 */
1383static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1384{
1385	struct bio *n;
1386	int r;
1387
1388	while (bio) {
1389		n = bio->bi_next;
1390		bio->bi_next = NULL;
1391		r = do_origin(s->origin, bio);
1392		if (r == DM_MAPIO_REMAPPED)
1393			generic_make_request(bio);
1394		bio = n;
1395	}
1396}
1397
1398/*
1399 * Error a list of buffers.
1400 */
1401static void error_bios(struct bio *bio)
1402{
1403	struct bio *n;
1404
1405	while (bio) {
1406		n = bio->bi_next;
1407		bio->bi_next = NULL;
1408		bio_io_error(bio);
1409		bio = n;
1410	}
1411}
1412
1413static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1414{
1415	if (!s->valid)
1416		return;
1417
1418	if (err == -EIO)
1419		DMERR("Invalidating snapshot: Error reading/writing.");
1420	else if (err == -ENOMEM)
1421		DMERR("Invalidating snapshot: Unable to allocate exception.");
1422
1423	if (s->store->type->drop_snapshot)
1424		s->store->type->drop_snapshot(s->store);
1425
1426	s->valid = 0;
1427
1428	dm_table_event(s->ti->table);
1429}
1430
1431static void pending_complete(void *context, int success)
1432{
1433	struct dm_snap_pending_exception *pe = context;
1434	struct dm_exception *e;
1435	struct dm_snapshot *s = pe->snap;
1436	struct bio *origin_bios = NULL;
1437	struct bio *snapshot_bios = NULL;
1438	struct bio *full_bio = NULL;
1439	int error = 0;
1440
1441	if (!success) {
1442		/* Read/write error - snapshot is unusable */
1443		down_write(&s->lock);
1444		__invalidate_snapshot(s, -EIO);
1445		error = 1;
1446		goto out;
1447	}
1448
1449	e = alloc_completed_exception(GFP_NOIO);
1450	if (!e) {
1451		down_write(&s->lock);
1452		__invalidate_snapshot(s, -ENOMEM);
1453		error = 1;
1454		goto out;
1455	}
1456	*e = pe->e;
1457
1458	down_write(&s->lock);
1459	if (!s->valid) {
1460		free_completed_exception(e);
1461		error = 1;
1462		goto out;
1463	}
1464
1465	/* Check for conflicting reads */
1466	__check_for_conflicting_io(s, pe->e.old_chunk);
1467
1468	/*
1469	 * Add a proper exception, and remove the
1470	 * in-flight exception from the list.
1471	 */
1472	dm_insert_exception(&s->complete, e);
1473
1474out:
1475	dm_remove_exception(&pe->e);
1476	snapshot_bios = bio_list_get(&pe->snapshot_bios);
1477	origin_bios = bio_list_get(&pe->origin_bios);
1478	full_bio = pe->full_bio;
1479	if (full_bio) {
1480		full_bio->bi_end_io = pe->full_bio_end_io;
1481		full_bio->bi_private = pe->full_bio_private;
1482		atomic_inc(&full_bio->bi_remaining);
1483	}
1484	increment_pending_exceptions_done_count();
1485
1486	up_write(&s->lock);
1487
1488	/* Submit any pending write bios */
1489	if (error) {
1490		if (full_bio)
1491			bio_io_error(full_bio);
1492		error_bios(snapshot_bios);
1493	} else {
1494		if (full_bio)
1495			bio_endio(full_bio, 0);
1496		flush_bios(snapshot_bios);
1497	}
1498
1499	retry_origin_bios(s, origin_bios);
1500
1501	free_pending_exception(pe);
1502}
1503
1504static void complete_exception(struct dm_snap_pending_exception *pe)
1505{
1506	struct dm_snapshot *s = pe->snap;
1507
1508	/* Update the metadata if we are persistent */
1509	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1510					 pending_complete, pe);
1511}
1512
1513/*
1514 * Called when the copy I/O has finished.  kcopyd actually runs
1515 * this code so don't block.
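 *
 * Exceptions must be committed in allocation order (exception_sequence);
 * completions that arrive early are parked on s->out_of_order_list until
 * their turn comes.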
1516 */
1517static void copy_callback(int read_err, unsigned long write_err, void *context)
1518{
1519	struct dm_snap_pending_exception *pe = context;
1520	struct dm_snapshot *s = pe->snap;
1521
1522	pe->copy_error = read_err || write_err;
1523
1524	if (pe->exception_sequence == s->exception_complete_sequence) {
1525		s->exception_complete_sequence++;
1526		complete_exception(pe);
1527
1528		while (!list_empty(&s->out_of_order_list)) {
1529			pe = list_entry(s->out_of_order_list.next,
1530					struct dm_snap_pending_exception, out_of_order_entry);
1531			if (pe->exception_sequence != s->exception_complete_sequence)
1532				break;
1533			s->exception_complete_sequence++;
1534			list_del(&pe->out_of_order_entry);
1535			complete_exception(pe);
1536		}
1537	} else {
1538		struct list_head *lh;
1539		struct dm_snap_pending_exception *pe2;
1540
1541		list_for_each_prev(lh, &s->out_of_order_list) {
1542			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
1543			if (pe2->exception_sequence < pe->exception_sequence)
1544				break;
1545		}
1546		list_add(&pe->out_of_order_entry, lh);
1547	}
1548}
1549
1550/*
1551 * Dispatches the copy operation to kcopyd.
1552 */
1553static void start_copy(struct dm_snap_pending_exception *pe)
1554{
1555	struct dm_snapshot *s = pe->snap;
1556	struct dm_io_region src, dest;
1557	struct block_device *bdev = s->origin->bdev;
1558	sector_t dev_size;
1559
1560	dev_size = get_dev_size(bdev);
1561
1562	src.bdev = bdev;
1563	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1564	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
1565
1566	dest.bdev = s->cow->bdev;
1567	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
1568	dest.count = src.count;
1569
1570	/* Hand over to kcopyd */
1571	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
1572}
1573
1574static void full_bio_end_io(struct bio *bio, int error)
1575{
1576	void *callback_data = bio->bi_private;
1577
1578	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
1579}
1580
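/*
 * The incoming write covers a whole chunk, so it is sent straight to the
 * COW device instead of being copied by kcopyd; full_bio_end_io() above
 * feeds the result back through kcopyd's callback machinery.
 */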
1581static void start_full_bio(struct dm_snap_pending_exception *pe,
1582			   struct bio *bio)
1583{
1584	struct dm_snapshot *s = pe->snap;
1585	void *callback_data;
1586
1587	pe->full_bio = bio;
1588	pe->full_bio_end_io = bio->bi_end_io;
1589	pe->full_bio_private = bio->bi_private;
1590
1591	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
1592						   copy_callback, pe);
1593
1594	bio->bi_end_io = full_bio_end_io;
1595	bio->bi_private = callback_data;
1596
1597	generic_make_request(bio);
1598}
1599
1600static struct dm_snap_pending_exception *
1601__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1602{
1603	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1604
1605	if (!e)
1606		return NULL;
1607
1608	return container_of(e, struct dm_snap_pending_exception, e);
1609}
1610
1611/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk: if so, the caller's pre-allocated pe is freed and the
 * existing one is returned; otherwise the supplied pe is initialised
 * and inserted into the pending table.
1615 *
1616 * NOTE: a write lock must be held on snap->lock before calling
1617 * this.
1618 */
1619static struct dm_snap_pending_exception *
1620__find_pending_exception(struct dm_snapshot *s,
1621			 struct dm_snap_pending_exception *pe, chunk_t chunk)
1622{
1623	struct dm_snap_pending_exception *pe2;
1624
1625	pe2 = __lookup_pending_exception(s, chunk);
1626	if (pe2) {
1627		free_pending_exception(pe);
1628		return pe2;
1629	}
1630
1631	pe->e.old_chunk = chunk;
1632	bio_list_init(&pe->origin_bios);
1633	bio_list_init(&pe->snapshot_bios);
1634	pe->started = 0;
1635	pe->full_bio = NULL;
1636
1637	if (s->store->type->prepare_exception(s->store, &pe->e)) {
1638		free_pending_exception(pe);
1639		return NULL;
1640	}
1641
1642	pe->exception_sequence = s->exception_start_sequence++;
1643
1644	dm_insert_exception(&s->pending, &pe->e);
1645
1646	return pe;
1647}
1648
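/*
 * Redirect a bio to the COW device, preserving its offset within the
 * chunk and allowing for runs of consecutive chunks mapped by a single
 * exception.
 */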
1649static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1650			    struct bio *bio, chunk_t chunk)
1651{
1652	bio->bi_bdev = s->cow->bdev;
1653	bio->bi_iter.bi_sector =
1654		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1655				(chunk - e->old_chunk)) +
1656		(bio->bi_iter.bi_sector & s->store->chunk_mask);
1657}
1658
1659static int snapshot_map(struct dm_target *ti, struct bio *bio)
1660{
1661	struct dm_exception *e;
1662	struct dm_snapshot *s = ti->private;
1663	int r = DM_MAPIO_REMAPPED;
1664	chunk_t chunk;
1665	struct dm_snap_pending_exception *pe = NULL;
1666
1667	init_tracked_chunk(bio);
1668
1669	if (bio->bi_rw & REQ_FLUSH) {
1670		bio->bi_bdev = s->cow->bdev;
1671		return DM_MAPIO_REMAPPED;
1672	}
1673
1674	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1675
1676	/* Full snapshots are not usable */
1677	/* To get here the table must be live so s->active is always set. */
1678	if (!s->valid)
1679		return -EIO;
1680
1681	/* FIXME: should only take write lock if we need
1682	 * to copy an exception */
1683	down_write(&s->lock);
1684
1685	if (!s->valid) {
1686		r = -EIO;
1687		goto out_unlock;
1688	}
1689
1690	/* If the block is already remapped - use that, else remap it */
1691	e = dm_lookup_exception(&s->complete, chunk);
1692	if (e) {
1693		remap_exception(s, e, bio, chunk);
1694		goto out_unlock;
1695	}
1696
1697	/*
1698	 * Write to snapshot - higher level takes care of RW/RO
1699	 * flags so we should only get this if we are
1700	 * writeable.
1701	 */
1702	if (bio_rw(bio) == WRITE) {
1703		pe = __lookup_pending_exception(s, chunk);
1704		if (!pe) {
1705			up_write(&s->lock);
1706			pe = alloc_pending_exception(s);
1707			down_write(&s->lock);
1708
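			/*
			 * The lock was dropped to allocate, so re-check
			 * that the snapshot is still valid and that the
			 * chunk was not remapped in the meantime.
			 */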
1709			if (!s->valid) {
1710				free_pending_exception(pe);
1711				r = -EIO;
1712				goto out_unlock;
1713			}
1714
1715			e = dm_lookup_exception(&s->complete, chunk);
1716			if (e) {
1717				free_pending_exception(pe);
1718				remap_exception(s, e, bio, chunk);
1719				goto out_unlock;
1720			}
1721
1722			pe = __find_pending_exception(s, pe, chunk);
1723			if (!pe) {
1724				__invalidate_snapshot(s, -ENOMEM);
1725				r = -EIO;
1726				goto out_unlock;
1727			}
1728		}
1729
1730		remap_exception(s, &pe->e, bio, chunk);
1731
1732		r = DM_MAPIO_SUBMITTED;
1733
1734		if (!pe->started &&
1735		    bio->bi_iter.bi_size ==
1736		    (s->store->chunk_size << SECTOR_SHIFT)) {
1737			pe->started = 1;
1738			up_write(&s->lock);
1739			start_full_bio(pe, bio);
1740			goto out;
1741		}
1742
1743		bio_list_add(&pe->snapshot_bios, bio);
1744
1745		if (!pe->started) {
1746			/* this is protected by snap->lock */
1747			pe->started = 1;
1748			up_write(&s->lock);
1749			start_copy(pe);
1750			goto out;
1751		}
1752	} else {
1753		bio->bi_bdev = s->origin->bdev;
1754		track_chunk(s, bio, chunk);
1755	}
1756
1757out_unlock:
1758	up_write(&s->lock);
1759out:
1760	return r;
1761}
1762
1763/*
1764 * A snapshot-merge target behaves like a combination of a snapshot
1765 * target and a snapshot-origin target.  It only generates new
1766 * exceptions in other snapshots and not in the one that is being
1767 * merged.
1768 *
1769 * For each chunk, if there is an existing exception, it is used to
1770 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1771 * which in turn might generate exceptions in other snapshots.
1772 * If merging is currently taking place on the chunk in question, the
1773 * I/O is deferred by adding it to s->bios_queued_during_merge.
1774 */
1775static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1776{
1777	struct dm_exception *e;
1778	struct dm_snapshot *s = ti->private;
1779	int r = DM_MAPIO_REMAPPED;
1780	chunk_t chunk;
1781
1782	init_tracked_chunk(bio);
1783
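	/*
	 * snapshot-merge uses two flush bios (see snapshot_ctr): target bio
	 * number 0 flushes the origin, bio number 1 flushes the COW device.
	 */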
1784	if (bio->bi_rw & REQ_FLUSH) {
1785		if (!dm_bio_get_target_bio_nr(bio))
1786			bio->bi_bdev = s->origin->bdev;
1787		else
1788			bio->bi_bdev = s->cow->bdev;
1789		return DM_MAPIO_REMAPPED;
1790	}
1791
1792	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1793
1794	down_write(&s->lock);
1795
1796	/* Full merging snapshots are redirected to the origin */
1797	if (!s->valid)
1798		goto redirect_to_origin;
1799
1800	/* If the block is already remapped - use that */
1801	e = dm_lookup_exception(&s->complete, chunk);
1802	if (e) {
1803		/* Queue writes overlapping with chunks being merged */
1804		if (bio_rw(bio) == WRITE &&
1805		    chunk >= s->first_merging_chunk &&
1806		    chunk < (s->first_merging_chunk +
1807			     s->num_merging_chunks)) {
1808			bio->bi_bdev = s->origin->bdev;
1809			bio_list_add(&s->bios_queued_during_merge, bio);
1810			r = DM_MAPIO_SUBMITTED;
1811			goto out_unlock;
1812		}
1813
1814		remap_exception(s, e, bio, chunk);
1815
1816		if (bio_rw(bio) == WRITE)
1817			track_chunk(s, bio, chunk);
1818		goto out_unlock;
1819	}
1820
1821redirect_to_origin:
1822	bio->bi_bdev = s->origin->bdev;
1823
1824	if (bio_rw(bio) == WRITE) {
1825		up_write(&s->lock);
1826		return do_origin(s->origin, bio);
1827	}
1828
1829out_unlock:
1830	up_write(&s->lock);
1831
1832	return r;
1833}
1834
1835static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
1836{
1837	struct dm_snapshot *s = ti->private;
1838
1839	if (is_bio_tracked(bio))
1840		stop_tracking_chunk(s, bio);
1841
1842	return 0;
1843}
1844
1845static void snapshot_merge_presuspend(struct dm_target *ti)
1846{
1847	struct dm_snapshot *s = ti->private;
1848
1849	stop_merge(s);
1850}
1851
1852static int snapshot_preresume(struct dm_target *ti)
1853{
1854	int r = 0;
1855	struct dm_snapshot *s = ti->private;
1856	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1857
1858	down_read(&_origins_lock);
1859	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1860	if (snap_src && snap_dest) {
1861		down_read(&snap_src->lock);
1862		if (s == snap_src) {
1863			DMERR("Unable to resume snapshot source until "
1864			      "handover completes.");
1865			r = -EINVAL;
1866		} else if (!dm_suspended(snap_src->ti)) {
1867			DMERR("Unable to perform snapshot handover until "
1868			      "source is suspended.");
1869			r = -EINVAL;
1870		}
1871		up_read(&snap_src->lock);
1872	}
1873	up_read(&_origins_lock);
1874
1875	return r;
1876}
1877
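/*
 * Complete any pending exception handover from an old snapshot that
 * shares this snapshot's origin and COW device.  The origin (or an
 * actively merging snapshot of it) is suspended around the handover so
 * that no new exceptions are triggered while the tables are swapped.
 */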
1878static void snapshot_resume(struct dm_target *ti)
1879{
1880	struct dm_snapshot *s = ti->private;
1881	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1882	struct dm_origin *o;
1883	struct mapped_device *origin_md = NULL;
1884	bool must_restart_merging = false;
1885
1886	down_read(&_origins_lock);
1887
1888	o = __lookup_dm_origin(s->origin->bdev);
1889	if (o)
1890		origin_md = dm_table_get_md(o->ti->table);
1891	if (!origin_md) {
1892		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1893		if (snap_merging)
1894			origin_md = dm_table_get_md(snap_merging->ti->table);
1895	}
1896	if (origin_md == dm_table_get_md(ti->table))
1897		origin_md = NULL;
1898	if (origin_md) {
1899		if (dm_hold(origin_md))
1900			origin_md = NULL;
1901	}
1902
1903	up_read(&_origins_lock);
1904
1905	if (origin_md) {
1906		dm_internal_suspend_fast(origin_md);
1907		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1908			must_restart_merging = true;
1909			stop_merge(snap_merging);
1910		}
1911	}
1912
1913	down_read(&_origins_lock);
1914
1915	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1916	if (snap_src && snap_dest) {
1917		down_write(&snap_src->lock);
1918		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1919		__handover_exceptions(snap_src, snap_dest);
1920		up_write(&snap_dest->lock);
1921		up_write(&snap_src->lock);
1922	}
1923
1924	up_read(&_origins_lock);
1925
1926	if (origin_md) {
1927		if (must_restart_merging)
1928			start_merge(snap_merging);
1929		dm_internal_resume_fast(origin_md);
1930		dm_put(origin_md);
1931	}
1932
	/* Now that we have the correct chunk size, re-register the snapshot */
1934	reregister_snapshot(s);
1935
1936	down_write(&s->lock);
1937	s->active = 1;
1938	up_write(&s->lock);
1939}
1940
1941static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
1942{
1943	uint32_t min_chunksize;
1944
1945	down_read(&_origins_lock);
1946	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1947	up_read(&_origins_lock);
1948
1949	return min_chunksize;
1950}
1951
1952static void snapshot_merge_resume(struct dm_target *ti)
1953{
1954	struct dm_snapshot *s = ti->private;
1955
1956	/*
1957	 * Handover exceptions from existing snapshot.
1958	 */
1959	snapshot_resume(ti);
1960
1961	/*
1962	 * snapshot-merge acts as an origin, so set ti->max_io_len
1963	 */
1964	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
1965
1966	start_merge(s);
1967}
1968
1969static void snapshot_status(struct dm_target *ti, status_type_t type,
1970			    unsigned status_flags, char *result, unsigned maxlen)
1971{
1972	unsigned sz = 0;
1973	struct dm_snapshot *snap = ti->private;
1974
1975	switch (type) {
1976	case STATUSTYPE_INFO:
1977
1978		down_write(&snap->lock);
1979
1980		if (!snap->valid)
1981			DMEMIT("Invalid");
1982		else if (snap->merge_failed)
1983			DMEMIT("Merge failed");
1984		else {
1985			if (snap->store->type->usage) {
1986				sector_t total_sectors, sectors_allocated,
1987					 metadata_sectors;
1988				snap->store->type->usage(snap->store,
1989							 &total_sectors,
1990							 &sectors_allocated,
1991							 &metadata_sectors);
1992				DMEMIT("%llu/%llu %llu",
1993				       (unsigned long long)sectors_allocated,
1994				       (unsigned long long)total_sectors,
1995				       (unsigned long long)metadata_sectors);
1996			}
1997			else
1998				DMEMIT("Unknown");
1999		}
2000
2001		up_write(&snap->lock);
2002
2003		break;
2004
2005	case STATUSTYPE_TABLE:
2006		/*
2007		 * kdevname returns a static pointer so we need
2008		 * to make private copies if the output is to
2009		 * make sense.
2010		 */
2011		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
2012		snap->store->type->status(snap->store, type, result + sz,
2013					  maxlen - sz);
2014		break;
2015	}
2016}
2017
2018static int snapshot_iterate_devices(struct dm_target *ti,
2019				    iterate_devices_callout_fn fn, void *data)
2020{
2021	struct dm_snapshot *snap = ti->private;
2022	int r;
2023
2024	r = fn(ti, snap->origin, 0, ti->len, data);
2025
2026	if (!r)
2027		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2028
2029	return r;
2030}
2031
2032
2033/*-----------------------------------------------------------------
2034 * Origin methods
2035 *---------------------------------------------------------------*/
2036
2037/*
2038 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
2039 * supplied bio was ignored.  The caller may submit it immediately.
2040 * (No remapping actually occurs as the origin is always a direct linear
2041 * map.)
2042 *
2043 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2044 * and any supplied bio is added to a list to be submitted once all
2045 * the necessary exceptions exist.
2046 */
2047static int __origin_write(struct list_head *snapshots, sector_t sector,
2048			  struct bio *bio)
2049{
2050	int r = DM_MAPIO_REMAPPED;
2051	struct dm_snapshot *snap;
2052	struct dm_exception *e;
2053	struct dm_snap_pending_exception *pe;
2054	struct dm_snap_pending_exception *pe_to_start_now = NULL;
2055	struct dm_snap_pending_exception *pe_to_start_last = NULL;
2056	chunk_t chunk;
2057
2058	/* Do all the snapshots on this origin */
2059	list_for_each_entry (snap, snapshots, list) {
2060		/*
2061		 * Don't make new exceptions in a merging snapshot
2062		 * because it has effectively been deleted
2063		 */
2064		if (dm_target_is_snapshot_merge(snap->ti))
2065			continue;
2066
2067		down_write(&snap->lock);
2068
2069		/* Only deal with valid and active snapshots */
2070		if (!snap->valid || !snap->active)
2071			goto next_snapshot;
2072
2073		/* Nothing to do if writing beyond end of snapshot */
2074		if (sector >= dm_table_get_size(snap->ti->table))
2075			goto next_snapshot;
2076
2077		/*
2078		 * Remember, different snapshots can have
2079		 * different chunk sizes.
2080		 */
2081		chunk = sector_to_chunk(snap->store, sector);
2082
2083		/*
2084		 * Check exception table to see if block
2085		 * is already remapped in this snapshot
2086		 * and trigger an exception if not.
2087		 */
2088		e = dm_lookup_exception(&snap->complete, chunk);
2089		if (e)
2090			goto next_snapshot;
2091
2092		pe = __lookup_pending_exception(snap, chunk);
2093		if (!pe) {
2094			up_write(&snap->lock);
2095			pe = alloc_pending_exception(snap);
2096			down_write(&snap->lock);
2097
			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

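		/*
		 * No bio is queued against this exception, so there is
		 * nothing to gain by delaying it: start the copy as soon
		 * as the lock is dropped below.
		 */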
		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Start the copy for the exception the bio is queued against last,
	 * so that the other exceptions get a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
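
/*
 * In do_origin() above, _origins_lock is held for read across the lookup
 * and __origin_write() so that snapshots cannot be registered or
 * unregistered against this origin while exceptions are being triggered.
 */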

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * snapshot_merge_resume() stored the origin's __minimum_chunk_size()
	 * in ti->max_io_len, which is the step size used in the loop below.
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
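	/*
	 * The merging snapshot registered itself against this origin (via
	 * register_snapshot() in snapshot_ctr()), so the lookup is not
	 * expected to return NULL here.
	 */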
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is a 'struct dm_origin' recording the
 * underlying device, the owning target and the bio split boundary.
 */
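
/*
 * Illustrative construction example (the device name and sizes below are
 * made up):
 *
 *   dmsetup create base-origin --table \
 *     "0 `blockdev --getsz /dev/vg/base` snapshot-origin /dev/vg/base"
 */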
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

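	/*
	 * o->split_boundary is the smallest chunk size of the snapshots on
	 * this origin (chunk sizes are validated to be powers of two), so
	 * the mask below yields the offset within the current chunk and
	 * available_sectors is the room left before the next boundary.
	 * Truncating the bio to that keeps each write within a single chunk
	 * of every snapshot.
	 */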
	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

/*
 * Set the origin's split_boundary to the minimum of all the snapshots'
 * chunk sizes, and register the origin in the hash.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

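/*
 * Restrict bios built against the origin to whatever the underlying
 * device's own merge_bvec_fn (if it has one) will accept.
 */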
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_origin *o = ti->private;
	struct request_queue *q = bdev_get_queue(o->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = o->dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.merge	 = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 13, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 3, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
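
/*
 * The snapshot and snapshot-merge targets share their constructor,
 * destructor, end_io and status hooks; the merge target differs only in
 * its map function and in the presuspend/resume handlers that stop and
 * start the background merge.
 */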

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}
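
/*
 * The error labels above unwind in the reverse order of initialisation,
 * so a failure at any step releases only what had already been set up.
 */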

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");
