Lines matching refs: sector (drivers/md/raid5.c)

138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)  in r5_next_bio()  argument
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
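
The test on line 141 is the whole of the bio-chaining scheme: bios queued against one stripe are linked through bi_next, and the walk stops once a bio ends past the stripe's STRIPE_SECTORS window. A minimal standalone model, with sector_t, STRIPE_SECTORS, and struct bio reduced to stand-ins for the kernel types:

    /* Standalone model of r5_next_bio(): walk the per-device bio chain
     * as long as the current bio still ends inside the stripe window.
     * sector_t, STRIPE_SECTORS and struct bio are simplified stand-ins. */
    #include <stddef.h>

    typedef unsigned long long sector_t;
    #define STRIPE_SECTORS 8        /* one 4KiB stripe page = 8 x 512B sectors */

    struct bio {
        sector_t bi_sector;         /* start of this bio, in 512B sectors */
        unsigned int bi_sectors;    /* length of this bio, in sectors */
        struct bio *bi_next;        /* next bio overlapping the same stripe */
    };

    static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
    {
        /* If this bio ends inside [sector, sector + STRIPE_SECTORS),
         * a later bio may still overlap the stripe; otherwise stop. */
        if (bio->bi_sector + bio->bi_sectors < sector + STRIPE_SECTORS)
            return bio->bi_next;
        return NULL;
    }
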
447 (unsigned long long)sh->sector); in remove_hash()
454 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
457 (unsigned long long)sh->sector); in insert_hash()
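
insert_hash() and remove_hash() key the stripe cache on sh->sector. A sketch of the bucket computation, assuming a power-of-two table indexed by the stripe number (the kernel's stripe_hash() macro shifts by STRIPE_SHIFT and masks; the NR_HASH and STRIPE_SHIFT values here are illustrative):

    /* Hypothetical standalone model of the stripe-cache bucket index:
     * drop the in-stripe bits, then mask into a power-of-two table. */
    typedef unsigned long long sector_t;
    #define NR_HASH      256        /* table size, power of two (assumed) */
    #define STRIPE_SHIFT 3          /* log2(STRIPE_SECTORS) for 4KiB stripes */
    #define HASH_MASK    (NR_HASH - 1)

    static inline unsigned stripe_hash_idx(sector_t sector)
    {
        return (unsigned)((sector >> STRIPE_SHIFT) & HASH_MASK);
    }
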
519 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
530 (unsigned long long)sector); in init_stripe()
535 sh->sector = sector; in init_stripe()
536 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
545 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
561 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
566 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); in __find_stripe()
567 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
568 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
570 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); in __find_stripe()
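
Lines 567-568 are the cache lookup itself: scan one bucket and match on both sector and generation, so stripes laid out under an older geometry (mid-reshape) are never confused with new ones. A sketch with a plain singly linked bucket standing in for the kernel's hlist:

    /* Sketch of __find_stripe(): scan the bucket for an exact
     * (sector, generation) match; a miss means the caller must
     * allocate or recycle a stripe_head. Simplified from hlist. */
    #include <stddef.h>

    typedef unsigned long long sector_t;

    struct stripe_head {
        sector_t sector;
        short generation;           /* layout generation, bumped by reshape */
        struct stripe_head *hash_next;
    };

    static struct stripe_head *find_stripe(struct stripe_head *bucket,
                                           sector_t sector, short generation)
    {
        struct stripe_head *sh;

        for (sh = bucket; sh; sh = sh->hash_next)
            if (sh->sector == sector && sh->generation == generation)
                return sh;
        return NULL;    /* not in cache */
    }
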
657 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
661 int hash = stripe_hash_locks_hash(sector); in raid5_get_active_stripe()
663 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); in raid5_get_active_stripe()
671 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
696 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
769 tmp_sec = sh->sector; in stripe_add_to_batch_list()
772 head_sector = sh->sector - STRIPE_SECTORS; in stripe_add_to_batch_list()
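
Batching (lines 769-772) links a full-stripe write to the stripe immediately before it, but only when such a head can exist inside the same chunk. A sketch of that guard, assuming a power-of-two chunk_sectors where the kernel uses sector_div():

    typedef unsigned long long sector_t;
    #define STRIPE_SECTORS 8

    /* Sketch of the guard around lines 769-772: only look for a "head"
     * stripe if sh->sector is not the first stripe of its chunk; the
     * candidate head is then exactly one stripe back. */
    static sector_t batch_head_sector(sector_t stripe_sector,
                                      unsigned int chunk_sectors)
    {
        if ((stripe_sector & (chunk_sectors - 1)) == 0)
            return (sector_t)-1;    /* first stripe in chunk: no head */
        return stripe_sector - STRIPE_SECTORS;
    }
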
962 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
1007 __func__, (unsigned long long)sh->sector, in ops_run_io()
1013 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1016 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1040 sh->dev[i].sector); in ops_run_io()
1059 __func__, (unsigned long long)sh->sector, in ops_run_io()
1065 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1068 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1086 sh->dev[i].sector); in ops_run_io()
1093 bi->bi_rw, i, (unsigned long long)sh->sector); in ops_run_io()
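
Lines 1013-1016 and 1065-1068 do the final address translation: the array-relative stripe sector plus the member device's data offset gives the LBA actually submitted to the disk, with new_data_offset used while a reshape is in flight. A sketch with a stand-in struct in place of struct md_rdev:

    typedef unsigned long long sector_t;

    /* Stand-in for the two md_rdev fields used at lines 1013-1016. */
    struct rdev_stub {
        sector_t data_offset;       /* start of data area on this member */
        sector_t new_data_offset;   /* data area after in-progress reshape */
    };

    static sector_t stripe_to_disk_sector(sector_t stripe_sector,
                                          const struct rdev_stub *rdev,
                                          int use_new_offset)
    {
        return stripe_sector +
               (use_new_offset ? rdev->new_data_offset : rdev->data_offset);
    }
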
1109 sector_t sector, struct dma_async_tx_descriptor *tx, in async_copy_data() argument
1119 if (bio->bi_iter.bi_sector >= sector) in async_copy_data()
1120 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
1122 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
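
The two branches at lines 1119-1122 compute a signed byte offset between the bio's start and the stripe-device sector, so the copy loop knows whether to skip the front of the stripe page or the front of the bio. The same arithmetic as a standalone helper (512 bytes per sector):

    typedef unsigned long long sector_t;

    /* Model of lines 1119-1122: byte offset of the bio's data within
     * the stripe page; negative when the bio starts before the stripe. */
    static int page_offset_of(sector_t bio_sector, sector_t dev_sector)
    {
        if (bio_sector >= dev_sector)
            return (int)(bio_sector - dev_sector) * 512;
        return (int)(dev_sector - bio_sector) * -512;
    }
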
1177 (unsigned long long)sh->sector); in ops_complete_biofill()
1195 dev->sector + STRIPE_SECTORS) { in ops_complete_biofill()
1196 rbi2 = r5_next_bio(rbi, dev->sector); in ops_complete_biofill()
1219 (unsigned long long)sh->sector); in ops_run_biofill()
1230 dev->sector + STRIPE_SECTORS) { in ops_run_biofill()
1232 dev->sector, tx, sh); in ops_run_biofill()
1233 rbi = r5_next_bio(rbi, dev->sector); in ops_run_biofill()
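
Lines 1230-1233 show the canonical per-device walk: visit every queued bio that overlaps this device's STRIPE_SECTORS window, copy the overlap, and advance with r5_next_bio(). A sketch appended to the struct bio model from the first sketch above, with the copy reduced to a comment:

    /* Sketch of the biofill walk at lines 1230-1233. Reuses struct bio,
     * r5_next_bio() and STRIPE_SECTORS from the first sketch. */
    static void drain_dev_bios(struct bio *rbi, sector_t dev_sector)
    {
        while (rbi && rbi->bi_sector < dev_sector + STRIPE_SECTORS) {
            /* copy_overlap(rbi, dev_sector);  -- stands in for async_copy_data() */
            rbi = r5_next_bio(rbi, dev_sector);
        }
    }
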
1261 (unsigned long long)sh->sector); in ops_complete_compute()
1309 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1393 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1444 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1472 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1540 (unsigned long long)sh->sector); in ops_complete_prexor()
1557 (unsigned long long)sh->sector); in ops_run_prexor5()
1582 (unsigned long long)sh->sector); in ops_run_prexor6()
1601 (unsigned long long)sh->sector); in ops_run_biodrain()
1623 dev->sector + STRIPE_SECTORS) { in ops_run_biodrain()
1632 dev->sector, tx, sh); in ops_run_biodrain()
1639 wbi = r5_next_bio(wbi, dev->sector); in ops_run_biodrain()
1666 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1716 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1798 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1851 (unsigned long long)sh->sector); in ops_complete_check()
1871 (unsigned long long)sh->sector); in ops_run_check_p()
1900 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2301 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2318 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2320 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2399 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2438 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2448 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2460 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2501 dev->sector = raid5_compute_blocknr(sh, i, previous); in raid5_build_block()
2741 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
2847 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
2942 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
2960 (unsigned long long)sh->sector); in add_stripe_bio()
2999 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio() local
3001 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3002 bi && bi->bi_iter.bi_sector <= sector; in add_stripe_bio()
3003 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3004 if (bio_end_sector(bi) >= sector) in add_stripe_bio()
3005 sector = bio_end_sector(bi); in add_stripe_bio()
3007 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3014 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3031 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
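
Lines 2999-3007 decide whether the queued writes cover the device's whole stripe window; if so, R5_OVERWRITE is set and the old block never needs to be read. A sketch of that coverage scan, again on the struct bio model from the first sketch:

    /* Sketch of the overwrite test at lines 2999-3007: walk the sorted,
     * possibly overlapping write bios and extend `covered` while each bio
     * starts at or before it; full coverage lets raid5 skip reading the
     * old data block. */
    static int dev_fully_overwritten(struct bio *bi, sector_t dev_sector)
    {
        sector_t covered = dev_sector;

        for (; bi && bi->bi_sector <= covered;
             bi = r5_next_bio(bi, dev_sector)) {
            if (bi->bi_sector + bi->bi_sectors >= covered)
                covered = bi->bi_sector + bi->bi_sectors;
        }
        return covered >= dev_sector + STRIPE_SECTORS;
    }
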
3093 sh->sector, in handle_failed_stripe()
3114 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3138 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3165 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3167 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3176 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3220 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3227 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3247 && (rdev->recovery_offset <= sh->sector in want_replace()
3248 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
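
want_replace() at lines 3247-3248 asks whether this stripe still has to be copied to a replacement device. A sketch of the predicate; in the kernel the fields live on struct md_rdev and struct mddev, collapsed into one stand-in struct here:

    typedef unsigned long long sector_t;

    /* Stand-in struct; not struct md_rdev. The stripe still needs a
     * replacement write if either the replacement's own rebuild or the
     * array-wide resync has not yet passed sh->sector. */
    struct repl_stub {
        sector_t recovery_offset;   /* replacement rebuilt up to here */
        sector_t recovery_cp;       /* array-wide resync checkpoint */
    };

    static int want_replace_here(const struct repl_stub *r,
                                 sector_t stripe_sector)
    {
        return r->recovery_offset <= stripe_sector ||
               r->recovery_cp <= stripe_sector;
    }
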
3337 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3371 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3400 (unsigned long long)sh->sector, in fetch_block()
3482 dev->sector + STRIPE_SECTORS) { in handle_stripe_clean_event()
3483 wbi2 = r5_next_bio(wbi, dev->sector); in handle_stripe_clean_event()
3490 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3571 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3579 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3605 (unsigned long long)sh->sector, rmw, rcw); in handle_stripe_dirtying()
3612 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3663 (unsigned long long)sh->sector, in handle_stripe_dirtying()
3770 (unsigned long long) sh->sector); in handle_parity_checks5()
3934 (unsigned long long) sh->sector); in handle_parity_checks6()
4074 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
4075 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4089 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4116 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
4185 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
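
analyse_stripe() treats a recovering member as usable for a given stripe only if its rebuild has passed the stripe's end (lines 4074 and 4116); the real code additionally rejects the range when is_badblock() reports an overlapping bad block. The distilled predicate:

    typedef unsigned long long sector_t;
    #define STRIPE_SECTORS 8

    /* Per-stripe in-sync test from lines 4074-4075/4116: recovery must
     * have covered the whole [sector, sector + STRIPE_SECTORS) window. */
    static int rdev_in_sync_for_stripe(sector_t recovery_offset,
                                       sector_t stripe_sector)
    {
        return stripe_sector + STRIPE_SECTORS <= recovery_offset;
    }
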
4331 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4526 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4555 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4590 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
4597 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4606 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4696 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); in in_chunk_boundary() local
4702 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
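
in_chunk_boundary() (lines 4696-4702) gates the aligned-read fast path: a bio may bypass the stripe cache only if it does not cross a chunk boundary. A standalone model, valid for the power-of-two chunk sizes the mask trick assumes:

    typedef unsigned long long sector_t;

    /* Model of in_chunk_boundary(): offset within the chunk plus the
     * bio length must not spill into the next chunk. */
    static int fits_in_chunk(sector_t sector, unsigned int bio_sectors,
                             unsigned int chunk_sectors)
    {
        return ((sector & (chunk_sectors - 1)) + bio_sectors) <= chunk_sectors;
    }
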
4871 sector_t sector = raid_bio->bi_iter.bi_sector; in chunk_aligned_read() local
4873 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); in chunk_aligned_read()
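
Line 4873 is the companion computation: how many sectors remain in the current chunk from this sector. If the bio is longer than that, chunk_aligned_read() splits it at the boundary before submitting. As a helper:

    typedef unsigned long long sector_t;

    /* Sectors left in the chunk containing `sector`; chunk_sects is
     * assumed to be a power of two, as in the mask on line 4873. */
    static unsigned int sectors_left_in_chunk(sector_t sector,
                                              unsigned int chunk_sects)
    {
        return chunk_sects - (unsigned int)(sector & (chunk_sects - 1));
    }
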
5122 sh->sector, in make_discard_request()
5689 sector_t sector, logical_sector, last_sector; in retry_aligned_read() local
5696 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
5702 sector += STRIPE_SECTORS, in retry_aligned_read()
5709 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
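
When an aligned read fails, retry_aligned_read() (lines 5696-5709) re-drives it through the stripe cache one STRIPE_SECTORS step at a time. A sketch of the loop shape, with the per-stripe work (raid5_get_active_stripe(), add_stripe_bio(), handle_stripe()) reduced to a comment:

    typedef unsigned long long sector_t;
    #define STRIPE_SECTORS 8

    /* Loop shape of retry_aligned_read(): walk the bio's range stripe
     * by stripe; each iteration services one stripe through the cache. */
    static void retry_by_stripe(sector_t sector, sector_t last_sector)
    {
        for (; sector < last_sector; sector += STRIPE_SECTORS) {
            /* sh = get_active_stripe(sector); add bio; handle stripe. */
        }
    }
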