/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

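/*
 * The target type registers its name as the string above, so comparing
 * the name pointers is enough to identify a snapshot-merge target.
 */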
#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was a copying error. */
	int copy_error;

	/* A sequence number, used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
	void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

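/*
 * Per-bio data used to record which chunk a bio is in flight to, so that
 * a copy-out or merge of the same chunk can wait in
 * __check_for_conflicting_io() until the conflicting I/O drains.
 */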
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory for _origins");
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	_dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			      GFP_KERNEL);
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
	kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
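/*
 * Because consecutive chunks then share a hash bucket,
 * dm_insert_exception() can find an adjacent entry in the same list and
 * coalesce a run of chunks into one exception.
 */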
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

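/*
 * Insert a completed exception, merging it with an adjacent entry when
 * both its old and new chunks are consecutive with that entry: e.g. old
 * chunks 5..7 remapped to new chunks 20..22 are stored as one exception
 * (5 -> 20) with a consecutive count of 2.
 */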
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard-coded magic: cap the hash table at 2MB worth of bucket list heads.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

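/*
 * Mark the merge as no longer running and wake anyone waiting in
 * stop_merge() on the RUNNING_MERGE bit.
 */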
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

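/*
 * Copy the next run of linear chunks back from the exception store to the
 * origin.  kcopyd invokes merge_callback() on completion, which commits
 * the merge and re-enters this function for the next run.
 */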
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
 */
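/*
 * For example (device names and sizes are illustrative only), a 1GiB
 * persistent snapshot with 8KiB (16-sector) chunks might be created with:
 *
 *   dmsetup create snap --table \
 *       "0 2097152 snapshot /dev/vg/base /dev/vg/base-cow P 16"
 */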
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	dev_t origin_dev, cow_dev;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
	origin_dev = s->origin->bdev->bd_dev;

	cow_path = argv[0];
	argv++;
	argc--;

	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->snapshot_overflowed = 0;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	INIT_LIST_HEAD(&s->out_of_order_list);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at a time, so skip
	 * this if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
	}
	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);

	free_pending_exception(pe);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

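	/*
	 * Exceptions must be committed to the store in the order their
	 * sequence numbers were allocated.  Complete this one now if it is
	 * next, along with any queued successors; otherwise park it, in
	 * order, on out_of_order_list.
	 */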
	if (pe->exception_sequence == s->exception_complete_sequence) {
		s->exception_complete_sequence++;
		complete_exception(pe);

		while (!list_empty(&s->out_of_order_list)) {
			pe = list_entry(s->out_of_order_list.next,
					struct dm_snap_pending_exception, out_of_order_entry);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			s->exception_complete_sequence++;
			list_del(&pe->out_of_order_entry);
			complete_exception(pe);
		}
	} else {
		struct list_head *lh;
		struct dm_snap_pending_exception *pe2;

		list_for_each_prev(lh, &s->out_of_order_list) {
			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
			if (pe2->exception_sequence < pe->exception_sequence)
				break;
		}
		list_add(&pe->out_of_order_entry, lh);
	}
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
}

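/*
 * A write covering a whole chunk makes the copy redundant: submit the bio
 * straight to the COW device and hook its completion so the exception is
 * committed through the usual kcopyd callback path.
 */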
static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

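/*
 * Redirect a bio to the chunk's copy on the COW device, preserving the
 * offset within the chunk.
 */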
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

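/*
 * Map I/O submitted to the snapshot device.  Reads of unremapped chunks
 * fall through to the origin; writes first trigger (or join) a pending
 * exception so the original data is preserved before being overwritten.
 */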
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid || s->snapshot_overflowed) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				if (s->store->userspace_supports_overflow) {
					s->snapshot_overflowed = 1;
					DMERR("Snapshot overflowed: Unable to allocate exception.");
				} else
					__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

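/*
 * Exception handover requires the source to be suspended, so refuse to
 * resume a source that has not handed over yet, or a destination whose
 * source is not yet suspended.
 */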
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

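/*
 * Complete any pending exception handover with the origin quiesced via a
 * fast internal suspend (pausing an in-progress merge if one is running),
 * then mark this snapshot active so origin writes trigger exceptions.
 */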
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

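/*
 * For a healthy snapshot, STATUSTYPE_INFO reports
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>";
 * e.g. "dmsetup status" might show "16/2097152 8" (values illustrative).
 */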
1991static void snapshot_status(struct dm_target *ti, status_type_t type,
1992			    unsigned status_flags, char *result, unsigned maxlen)
1993{
1994	unsigned sz = 0;
1995	struct dm_snapshot *snap = ti->private;
1996
1997	switch (type) {
1998	case STATUSTYPE_INFO:
1999
2000		down_write(&snap->lock);
2001
2002		if (!snap->valid)
2003			DMEMIT("Invalid");
2004		else if (snap->merge_failed)
2005			DMEMIT("Merge failed");
2006		else if (snap->snapshot_overflowed)
2007			DMEMIT("Overflow");
2008		else {
2009			if (snap->store->type->usage) {
2010				sector_t total_sectors, sectors_allocated,
2011					 metadata_sectors;
2012				snap->store->type->usage(snap->store,
2013							 &total_sectors,
2014							 &sectors_allocated,
2015							 &metadata_sectors);
2016				DMEMIT("%llu/%llu %llu",
2017				       (unsigned long long)sectors_allocated,
2018				       (unsigned long long)total_sectors,
2019				       (unsigned long long)metadata_sectors);
2020			}
2021			else
2022				DMEMIT("Unknown");
2023		}
2024
2025		up_write(&snap->lock);
2026
2027		break;
2028
2029	case STATUSTYPE_TABLE:
2030		/*
2031		 * kdevname returns a static pointer so we need
2032		 * to make private copies if the output is to
2033		 * make sense.
2034		 */
2035		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
2036		snap->store->type->status(snap->store, type, result + sz,
2037					  maxlen - sz);
2038		break;
2039	}
2040}
2041
2042static int snapshot_iterate_devices(struct dm_target *ti,
2043				    iterate_devices_callout_fn fn, void *data)
2044{
2045	struct dm_snapshot *snap = ti->private;
2046	int r;
2047
2048	r = fn(ti, snap->origin, 0, ti->len, data);
2049
2050	if (!r)
2051		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2052
2053	return r;
2054}
2055
2056
2057/*-----------------------------------------------------------------
2058 * Origin methods
2059 *---------------------------------------------------------------*/
2060
2061/*
2062 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
2063 * supplied bio was ignored.  The caller may submit it immediately.
2064 * (No remapping actually occurs as the origin is always a direct linear
2065 * map.)
2066 *
2067 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2068 * and any supplied bio is added to a list to be submitted once all
2069 * the necessary exceptions exist.
2070 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry(snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);
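			/*
			 * The lock was dropped for the allocation, so the
			 * snapshot state and exception tables must be
			 * re-checked before the new pending exception is
			 * used.
			 */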

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot, so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() was stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
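	/*
	 * The merging snapshot is itself registered against this origin,
	 * so the lookup is not expected to fail here.
	 */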
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
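/*
 * Example (illustrative device name and size):
 *   dmsetup create base --table "0 <origin_sectors> snapshot-origin /dev/vg/base"
 */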
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

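	/*
	 * split_boundary is the minimum chunk size of the snapshots
	 * attached to this origin; chunk sizes are powers of two, so the
	 * mask below yields the distance to the next chunk boundary.
	 */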
	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only writes reach this point, so tell the snapshots */
	return do_origin(o->dev, bio);
}

/*
 * Set the origin's split_boundary to the minimum of all the snapshots'
 * chunk sizes, and register this origin target.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

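/*
 * Table line formats for the three targets (see
 * Documentation/device-mapper/snapshot.txt; values below are
 * illustrative):
 *
 *   snapshot-origin <origin>
 *   snapshot <origin> <COW device> <persistent?> <chunksize>
 *   snapshot-merge <origin> <COW device> <persistent?> <chunksize>
 *
 * e.g. "0 16384 snapshot /dev/vg/base /dev/vg/cow P 8"
 */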
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 15, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 4, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

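	/* Error paths unwind in reverse order of the setup above. */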
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");