Lines Matching refs:bio
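(Context: this listing is the identifier hit set for "bio" in the device-mapper snapshot target, drivers/md/dm-snap.c. The mix of bio->bi_rw & REQ_FLUSH with bio->bi_error dates the tree to roughly Linux 4.3-4.7. Only matching lines appear, so the sketches interleaved below reconstruct elided context; they are hedged readings of the mainline source of that era, not verbatim quotes.)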
208 struct bio *full_bio;
225 static void init_tracked_chunk(struct bio *bio) in init_tracked_chunk() argument
227 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in init_tracked_chunk()
231 static bool is_bio_tracked(struct bio *bio) in is_bio_tracked() argument
233 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in is_bio_tracked()
237 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
239 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in track_chunk()
249 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
251 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in stop_tracking_chunk()
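All four tracking helpers share one pattern: the per-bio payload reserved by the target (dm_per_bio_data()) carries an hlist node plus a chunk number, so in-flight bios touching a given chunk can be looked up later. A hedged reconstruction of the elided bodies (the lock and hash-table field names follow a reading of the mainline source):

    static void init_tracked_chunk(struct bio *bio)
    {
        struct dm_snap_tracked_chunk *c =
            dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        INIT_HLIST_NODE(&c->node);          /* unhashed == "not tracked" */
    }

    static bool is_bio_tracked(struct bio *bio)
    {
        struct dm_snap_tracked_chunk *c =
            dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        return !hlist_unhashed(&c->node);
    }

    static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
    {
        struct dm_snap_tracked_chunk *c =
            dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
    }

    static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
    {
        struct dm_snap_tracked_chunk *c =
            dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
    }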
845 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge()
905 static void flush_bios(struct bio *bio);
909 struct bio *b = NULL; in remove_single_exception_chunk()
1046 static void error_bios(struct bio *bio);
1051 struct bio *b = NULL; in merge_callback()
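These two forward declarations exist so the merge path can hand a detached bio chain to the right consumer: remove_single_exception_chunk() flushes the chain on success, merge_callback() errors it on failure. The helper at line 845 just resets the merge window and detaches the queue (hedged reconstruction):

    static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
    {
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        /* bio_list_get() empties the list and returns its head; the bios
         * stay chained through bi_next, which is exactly the shape
         * flush_bios() and error_bios() below expect. */
        return bio_list_get(&s->bios_queued_during_merge);
    }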
1385 static void flush_bios(struct bio *bio) in flush_bios() argument
1387 struct bio *n; in flush_bios()
1389 while (bio) { in flush_bios()
1390 n = bio->bi_next; in flush_bios()
1391 bio->bi_next = NULL; in flush_bios()
1392 generic_make_request(bio); in flush_bios()
1393 bio = n; in flush_bios()
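Note the chain convention here: flush_bios() takes a raw bi_next-linked list rather than a struct bio_list, so callers detach with bio_list_get() and hand over the head, e.g. (the list name is hypothetical):

    flush_bios(bio_list_get(&queued_bios));    /* resubmit everything queued */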
1397 static int do_origin(struct dm_dev *origin, struct bio *bio);
1402 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) in retry_origin_bios() argument
1404 struct bio *n; in retry_origin_bios()
1407 while (bio) { in retry_origin_bios()
1408 n = bio->bi_next; in retry_origin_bios()
1409 bio->bi_next = NULL; in retry_origin_bios()
1410 r = do_origin(s->origin, bio); in retry_origin_bios()
1412 generic_make_request(bio); in retry_origin_bios()
1413 bio = n; in retry_origin_bios()
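The match filter skips line 1411 between do_origin() and generic_make_request(); in the mainline source that gap is a check on do_origin()'s verdict, so only bios the snapshot machinery did not swallow are resubmitted (hedged reconstruction of the loop body):

    n = bio->bi_next;
    bio->bi_next = NULL;
    r = do_origin(s->origin, bio);
    if (r == DM_MAPIO_REMAPPED)
        generic_make_request(bio);
    bio = n;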
1420 static void error_bios(struct bio *bio) in error_bios() argument
1422 struct bio *n; in error_bios()
1424 while (bio) { in error_bios()
1425 n = bio->bi_next; in error_bios()
1426 bio->bi_next = NULL; in error_bios()
1427 bio_io_error(bio); in error_bios()
1428 bio = n; in error_bios()
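error_bios() walks the same chain shape but fails each bio instead. In trees of this vintage (bi_error exists, predating blk_status_t), bio_io_error() amounts to:

    static inline void bio_io_error(struct bio *bio)
    {
        bio->bi_error = -EIO;
        bio_endio(bio);
    }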
1455 struct bio *origin_bios = NULL; in pending_complete()
1456 struct bio *snapshot_bios = NULL; in pending_complete()
1457 struct bio *full_bio = NULL; in pending_complete()
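The three NULL-initialized chains above are filled under s->lock and dispatched only after it drops. A hedged sketch of the tail of pending_complete(), reconstructed around the helpers already shown:

    snapshot_bios = bio_list_get(&pe->snapshot_bios);
    origin_bios = bio_list_get(&pe->origin_bios);
    full_bio = pe->full_bio;
    if (full_bio) {
        /* undo the end_io hijack installed by start_full_bio() */
        full_bio->bi_end_io = pe->full_bio_end_io;
        full_bio->bi_private = pe->full_bio_private;
    }

    up_write(&s->lock);

    if (error) {
        if (full_bio)
            bio_io_error(full_bio);
        error_bios(snapshot_bios);
    } else {
        if (full_bio)
            bio_endio(full_bio);
        flush_bios(snapshot_bios);
    }

    retry_origin_bios(s, origin_bios);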
1592 static void full_bio_end_io(struct bio *bio) in full_bio_end_io() argument
1594 void *callback_data = bio->bi_private; in full_bio_end_io()
1596 dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0); in full_bio_end_io()
1600 struct bio *bio) in start_full_bio() argument
1605 pe->full_bio = bio; in start_full_bio()
1606 pe->full_bio_end_io = bio->bi_end_io; in start_full_bio()
1607 pe->full_bio_private = bio->bi_private; in start_full_bio()
1612 bio->bi_end_io = full_bio_end_io; in start_full_bio()
1613 bio->bi_private = callback_data; in start_full_bio()
1615 generic_make_request(bio); in start_full_bio()
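The elided middle of start_full_bio() is what makes full_bio_end_io() above work: the bio's original bi_end_io/bi_private pair is parked in the pending exception, and kcopyd is asked to fabricate a callback handle so this chunk-sized write completes through the same path as an ordinary kcopyd copy. Hedged reconstruction (dm_kcopyd_prepare_callback() and copy_callback come from a reading of the same file):

    static void start_full_bio(struct dm_snap_pending_exception *pe,
                               struct bio *bio)
    {
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
    }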
1668 struct bio *bio, chunk_t chunk) in remap_exception() argument
1670 bio->bi_bdev = s->cow->bdev; in remap_exception()
1671 bio->bi_iter.bi_sector = in remap_exception()
1674 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception()
1677 static int snapshot_map(struct dm_target *ti, struct bio *bio) in snapshot_map() argument
1685 init_tracked_chunk(bio); in snapshot_map()
1687 if (bio->bi_rw & REQ_FLUSH) { in snapshot_map()
1688 bio->bi_bdev = s->cow->bdev; in snapshot_map()
1692 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map()
1703 if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) { in snapshot_map()
1711 remap_exception(s, e, bio, chunk); in snapshot_map()
1720 if (bio_rw(bio) == WRITE) { in snapshot_map()
1736 remap_exception(s, e, bio, chunk); in snapshot_map()
1752 remap_exception(s, &pe->e, bio, chunk); in snapshot_map()
1757 bio->bi_iter.bi_size == in snapshot_map()
1761 start_full_bio(pe, bio); in snapshot_map()
1765 bio_list_add(&pe->snapshot_bios, bio); in snapshot_map()
1775 bio->bi_bdev = s->origin->bdev; in snapshot_map()
1776 track_chunk(s, bio, chunk); in snapshot_map()
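Pulling the snapshot_map() hits together, the map callback has four outcomes. A condensed, hedged skeleton (locking, the allocation-retry dance, and overflow handling are deliberately elided; dm_lookup_exception() follows the mainline naming):

    static int snapshot_map_sketch(struct dm_target *ti, struct bio *bio)
    {
        struct dm_snapshot *s = ti->private;
        struct dm_exception *e;
        chunk_t chunk;

        init_tracked_chunk(bio);

        if (bio->bi_rw & REQ_FLUSH) {
            bio->bi_bdev = s->cow->bdev;        /* flushes go to the COW device */
            return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
            remap_exception(s, e, bio, chunk);  /* chunk already copied */
            return DM_MAPIO_REMAPPED;
        }

        if (bio_rw(bio) == WRITE) {
            /* First write to this chunk: allocate a pending exception and
             * remap onto it; if the bio covers the chunk exactly (the
             * bi_size test at line 1757), start_full_bio() pushes it down
             * as the copy itself, otherwise the bio waits on
             * pe->snapshot_bios until the kcopyd copy completes. */
            return DM_MAPIO_SUBMITTED;
        }

        bio->bi_bdev = s->origin->bdev;         /* read-through to the origin */
        track_chunk(s, bio, chunk);
        return DM_MAPIO_REMAPPED;
    }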
1797 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) in snapshot_merge_map() argument
1804 init_tracked_chunk(bio); in snapshot_merge_map()
1806 if (bio->bi_rw & REQ_FLUSH) { in snapshot_merge_map()
1807 if (!dm_bio_get_target_bio_nr(bio)) in snapshot_merge_map()
1808 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1810 bio->bi_bdev = s->cow->bdev; in snapshot_merge_map()
1814 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map()
1826 if (bio_rw(bio) == WRITE && in snapshot_merge_map()
1830 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1831 bio_list_add(&s->bios_queued_during_merge, bio); in snapshot_merge_map()
1836 remap_exception(s, e, bio, chunk); in snapshot_merge_map()
1838 if (bio_rw(bio) == WRITE) in snapshot_merge_map()
1839 track_chunk(s, bio, chunk); in snapshot_merge_map()
1844 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1846 if (bio_rw(bio) == WRITE) { in snapshot_merge_map()
1848 return do_origin(s->origin, bio); in snapshot_merge_map()
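The elided lines 1827-1829 carry the condition that makes the queueing at line 1831 safe: only writes landing inside the chunk window currently being merged back are deferred (hedged reconstruction):

    if (bio_rw(bio) == WRITE &&
        chunk >= s->first_merging_chunk &&
        chunk < (s->first_merging_chunk + s->num_merging_chunks)) {
        bio->bi_bdev = s->origin->bdev;
        bio_list_add(&s->bios_queued_during_merge, bio);
        r = DM_MAPIO_SUBMITTED;
        goto out_unlock;
    }

Once the merge of that window finishes, __release_queued_bios_after_merge() (line 845) releases these bios.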
1857 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error) in snapshot_end_io() argument
1861 if (is_bio_tracked(bio)) in snapshot_end_io()
1862 stop_tracking_chunk(s, bio); in snapshot_end_io()
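snapshot_end_io() is the other half of the tracking pact: whatever track_chunk() hashed in a map path gets unhashed on completion. The payoff is the lookup the merge path uses to wait out in-flight I/O on a chunk before folding it back (hedged reconstruction of __chunk_is_tracked() from the same file):

    static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
    {
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
            if (c->chunk == chunk) {
                found = 1;
                break;
            }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
    }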
2072 struct bio *bio) in __origin_write() argument
2147 if (bio) { in __origin_write()
2148 bio_list_add(&pe->origin_bios, bio); in __origin_write()
2149 bio = NULL; in __origin_write()
2184 static int do_origin(struct dm_dev *origin, struct bio *bio) in do_origin() argument
2192 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); in do_origin()
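do_origin() itself is thin; the work is finding every snapshot stacked on this origin and running the exception logic in __origin_write(). Hedged reconstruction (the _origins_lock rwsem and __lookup_origin() follow the mainline naming):

    static int do_origin(struct dm_dev *origin, struct bio *bio)
    {
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
            r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
        up_read(&_origins_lock);

        return r;
    }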
2285 static int origin_map(struct dm_target *ti, struct bio *bio) in origin_map() argument
2290 bio->bi_bdev = o->dev->bdev; in origin_map()
2292 if (unlikely(bio->bi_rw & REQ_FLUSH)) in origin_map()
2295 if (bio_rw(bio) != WRITE) in origin_map()
2299 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); in origin_map()
2301 if (bio_sectors(bio) > available_sectors) in origin_map()
2302 dm_accept_partial_bio(bio, available_sectors); in origin_map()
2305 return do_origin(o->dev, bio); in origin_map()
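The arithmetic at lines 2299-2302 relies on o->split_boundary being a power of two (per a reading of the resume path, it is the smallest chunk size among the snapshots of this origin). A self-contained illustration (the helper name is hypothetical):

    /* Sectors left before the next chunk boundary. */
    static unsigned sectors_to_boundary(unsigned split_boundary, sector_t sector)
    {
        return split_boundary - ((unsigned)sector & (split_boundary - 1));
    }

    /* e.g. split_boundary = 16, bi_sector = 21:
     *   21 & 15 = 5 sectors into the chunk, 16 - 5 = 11 sectors available,
     * so a 32-sector write is trimmed to 11 by dm_accept_partial_bio() and
     * the dm core resubmits the remainder as a fresh bio. */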