chunk_sectors 48 block/blk-settings.c lim->chunk_sectors = 0;
chunk_sectors 215 block/blk-settings.c void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
chunk_sectors 217 block/blk-settings.c BUG_ON(!is_power_of_2(chunk_sectors));
chunk_sectors 218 block/blk-settings.c q->limits.chunk_sectors = chunk_sectors;
chunk_sectors 613 block/blk-settings.c if (b->chunk_sectors)
chunk_sectors 614 block/blk-settings.c t->chunk_sectors = min_not_zero(t->chunk_sectors,
chunk_sectors 615 block/blk-settings.c b->chunk_sectors);
chunk_sectors 150 block/blk-sysfs.c return queue_var_show(q->limits.chunk_sectors, page);
chunk_sectors 26 drivers/char/ps3flash.c u64 chunk_sectors;
chunk_sectors 38 drivers/char/ps3flash.c start_sector, priv->chunk_sectors,
chunk_sectors 118 drivers/char/ps3flash.c sector = *pos / dev->bounce_size * priv->chunk_sectors;
chunk_sectors 151 drivers/char/ps3flash.c sector += priv->chunk_sectors;
chunk_sectors 187 drivers/char/ps3flash.c sector = *pos / dev->bounce_size * priv->chunk_sectors;
chunk_sectors 226 drivers/char/ps3flash.c sector += priv->chunk_sectors;
chunk_sectors 376 drivers/char/ps3flash.c priv->chunk_sectors = dev->bounce_size / dev->blk_size;
chunk_sectors 713 drivers/md/dm-raid.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 726 drivers/md/dm-raid.c mddev->chunk_sectors = mddev->new_chunk_sectors;
chunk_sectors 975 drivers/md/dm-raid.c if (region_size < rs->md.chunk_sectors) {
chunk_sectors 1154 drivers/md/dm-raid.c rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
chunk_sectors 1484 drivers/md/dm-raid.c if (rs->md.chunk_sectors)
chunk_sectors 1485 drivers/md/dm-raid.c max_io_len = rs->md.chunk_sectors;
chunk_sectors 1534 drivers/md/dm-raid.c uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
chunk_sectors 1908 drivers/md/dm-raid.c mddev->new_chunk_sectors != mddev->chunk_sectors ||
chunk_sectors 2136 drivers/md/dm-raid.c sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
chunk_sectors 2248 drivers/md/dm-raid.c mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
chunk_sectors 2303 drivers/md/dm-raid.c if (mddev->chunk_sectors != mddev->new_chunk_sectors)
chunk_sectors 2305 drivers/md/dm-raid.c mddev->chunk_sectors, mddev->new_chunk_sectors);
chunk_sectors 3741 drivers/md/dm-raid.c unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
chunk_sectors 3752 drivers/md/dm-raid.c limits->max_discard_sectors = rs->md.chunk_sectors;
chunk_sectors 1535 drivers/md/dm-table.c zone_sectors = ti_limits.chunk_sectors;
chunk_sectors 1593 drivers/md/dm-table.c zone_sectors = limits->chunk_sectors;
chunk_sectors 175 drivers/md/dm-unstripe.c limits->chunk_sectors = uc->chunk_size;
chunk_sectors 898 drivers/md/dm-zoned-target.c unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
chunk_sectors 908 drivers/md/dm-zoned-target.c limits->max_discard_sectors = chunk_sectors;
chunk_sectors 909 drivers/md/dm-zoned-target.c limits->max_hw_discard_sectors = chunk_sectors;
chunk_sectors 910 drivers/md/dm-zoned-target.c limits->max_write_zeroes_sectors = chunk_sectors;
chunk_sectors 913 drivers/md/dm-zoned-target.c limits->chunk_sectors = chunk_sectors;
chunk_sectors 914 drivers/md/dm-zoned-target.c limits->max_sectors = chunk_sectors;
chunk_sectors 111 drivers/md/md-linear.c if (mddev->chunk_sectors) {
chunk_sectors 113 drivers/md/md-linear.c sector_div(sectors, mddev->chunk_sectors);
chunk_sectors 114 drivers/md/md-linear.c rdev->sectors = sectors * mddev->chunk_sectors;
chunk_sectors 306 drivers/md/md-linear.c seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
chunk_sectors 1229 drivers/md/md.c mddev->chunk_sectors = sb->chunk_size >> 9;
chunk_sectors 1258 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 1408 drivers/md/md.c sb->chunk_size = mddev->chunk_sectors << 9;
chunk_sectors 1742 drivers/md/md.c mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
chunk_sectors 1800 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 1927 drivers/md/md.c sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
chunk_sectors 2541 drivers/md/md.c (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
chunk_sectors 3882 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 3905 drivers/md/md.c mddev->chunk_sectors = mddev->new_chunk_sectors;
chunk_sectors 4097 drivers/md/md.c mddev->chunk_sectors != mddev->new_chunk_sectors)
chunk_sectors 4100 drivers/md/md.c mddev->chunk_sectors << 9);
chunk_sectors 4101 drivers/md/md.c return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
chunk_sectors 4126 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 4131 drivers/md/md.c mddev->chunk_sectors = n >> 9;
chunk_sectors 4968 drivers/md/md.c chunk = mddev->chunk_sectors;
chunk_sectors 5095 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 5972 drivers/md/md.c mddev->chunk_sectors = 0;
chunk_sectors 6414 drivers/md/md.c info.chunk_size = mddev->chunk_sectors << 9;
chunk_sectors 6956 drivers/md/md.c mddev->chunk_sectors = info->chunk_size >> 9;
chunk_sectors 6977 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 7105 drivers/md/md.c mddev->chunk_sectors != info->chunk_size >> 9 ||
chunk_sectors 305 drivers/md/md.h int chunk_sectors;
chunk_sectors 103 drivers/md/raid0.c sector_div(sectors, mddev->chunk_sectors);
chunk_sectors 104 drivers/md/raid0.c rdev1->sectors = sectors * mddev->chunk_sectors;
chunk_sectors 165 drivers/md/raid0.c if ((mddev->chunk_sectors << 9) % blksize) {
chunk_sectors 168 drivers/md/raid0.c mddev->chunk_sectors << 9, blksize);
chunk_sectors 333 drivers/md/raid0.c unsigned int chunk_sects = mddev->chunk_sectors;
chunk_sectors 369 drivers/md/raid0.c ~(sector_t)(mddev->chunk_sectors-1));
chunk_sectors 381 drivers/md/raid0.c if (mddev->chunk_sectors == 0) {
chunk_sectors 400 drivers/md/raid0.c blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
chunk_sectors 401 drivers/md/raid0.c blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
chunk_sectors 402 drivers/md/raid0.c blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
chunk_sectors 405 drivers/md/raid0.c blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
chunk_sectors 407 drivers/md/raid0.c (mddev->chunk_sectors << 9) * mddev->raid_disks);
chunk_sectors 439 drivers/md/raid0.c (mddev->chunk_sectors << 9) / PAGE_SIZE;
chunk_sectors 508 drivers/md/raid0.c stripe_size = zone->nb_dev * mddev->chunk_sectors;
chunk_sectors 516 drivers/md/raid0.c mddev->chunk_sectors;
chunk_sectors 518 drivers/md/raid0.c mddev->chunk_sectors) +
chunk_sectors 519 drivers/md/raid0.c first_stripe_index * mddev->chunk_sectors;
chunk_sectors 521 drivers/md/raid0.c mddev->chunk_sectors;
chunk_sectors 523 drivers/md/raid0.c mddev->chunk_sectors) +
chunk_sectors 524 drivers/md/raid0.c last_stripe_index * mddev->chunk_sectors;
chunk_sectors 533 drivers/md/raid0.c mddev->chunk_sectors;
chunk_sectors 535 drivers/md/raid0.c dev_start = first_stripe_index * mddev->chunk_sectors;
chunk_sectors 540 drivers/md/raid0.c dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
chunk_sectors 542 drivers/md/raid0.c dev_end = last_stripe_index * mddev->chunk_sectors;
chunk_sectors 589 drivers/md/raid0.c chunk_sects = mddev->chunk_sectors;
chunk_sectors 642 drivers/md/raid0.c seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
chunk_sectors 671 drivers/md/raid0.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 713 drivers/md/raid0.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 757 drivers/md/raid0.c mddev->chunk_sectors = chunksect;
chunk_sectors 3251 drivers/md/raid1.c if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
chunk_sectors 3254 drivers/md/raid1.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 1560 drivers/md/raid10.c seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
chunk_sectors 2858 drivers/md/raid10.c window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
chunk_sectors 3582 drivers/md/raid10.c chunk = mddev->chunk_sectors;
chunk_sectors 3762 drivers/md/raid10.c chunk_size = mddev->chunk_sectors << 9;
chunk_sectors 3765 drivers/md/raid10.c mddev->chunk_sectors);
chunk_sectors 3890 drivers/md/raid10.c ((mddev->chunk_sectors << 9) / PAGE_SIZE);
chunk_sectors 4026 drivers/md/raid10.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 4743 drivers/md/raid10.c ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
chunk_sectors 4904 drivers/md/raid10.c mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
chunk_sectors 200 drivers/md/raid5-cache.c offset = sector_div(sect, conf->chunk_sectors);
chunk_sectors 367 drivers/md/raid5-cache.c conf->chunk_sectors >> STRIPE_SHIFT))
chunk_sectors 328 drivers/md/raid5-ppl.c (data_sector >> ilog2(conf->chunk_sectors) ==
chunk_sectors 329 drivers/md/raid5-ppl.c data_sector_last >> ilog2(conf->chunk_sectors)) &&
chunk_sectors 824 drivers/md/raid5-ppl.c if ((pp_size >> 9) < conf->chunk_sectors) {
chunk_sectors 833 drivers/md/raid5-ppl.c (data_disks - 1) * conf->chunk_sectors +
chunk_sectors 837 drivers/md/raid5-ppl.c strip_sectors = conf->chunk_sectors;
chunk_sectors 871 drivers/md/raid5-ppl.c (disk * conf->chunk_sectors);
chunk_sectors 749 drivers/md/raid5.c if (!sector_div(tmp_sec, conf->chunk_sectors))
chunk_sectors 2726 drivers/md/raid5.c : conf->chunk_sectors;
chunk_sectors 2922 drivers/md/raid5.c : conf->chunk_sectors;
chunk_sectors 3261 drivers/md/raid5.c if (first + conf->chunk_sectors * (count - 1) != last)
chunk_sectors 3336 drivers/md/raid5.c previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
chunk_sectors 5121 drivers/md/raid5.c unsigned int chunk_sectors;
chunk_sectors 5126 drivers/md/raid5.c chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
chunk_sectors 5127 drivers/md/raid5.c return chunk_sectors >=
chunk_sectors 5128 drivers/md/raid5.c ((sector & (chunk_sectors - 1)) + bio_sectors);
chunk_sectors 5298 drivers/md/raid5.c unsigned chunk_sects = mddev->chunk_sectors;
chunk_sectors 5506 drivers/md/raid5.c stripe_sectors = conf->chunk_sectors *
chunk_sectors 5512 drivers/md/raid5.c logical_sector *= conf->chunk_sectors;
chunk_sectors 5513 drivers/md/raid5.c last_sector *= conf->chunk_sectors;
chunk_sectors 5807 drivers/md/raid5.c reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
chunk_sectors 6742 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1);
chunk_sectors 6766 drivers/md/raid5.c max(conf->chunk_sectors,
chunk_sectors 6839 drivers/md/raid5.c conf->scribble_sectors = max(conf->chunk_sectors,
chunk_sectors 7017 drivers/md/raid5.c conf->chunk_sectors = mddev->new_chunk_sectors;
chunk_sectors 7063 drivers/md/raid5.c conf->prev_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 7066 drivers/md/raid5.c conf->prev_chunk_sectors = conf->chunk_sectors;
chunk_sectors 7073 drivers/md/raid5.c ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
chunk_sectors 7212 drivers/md/raid5.c int chunk_sectors;
chunk_sectors 7235 drivers/md/raid5.c chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
chunk_sectors 7237 drivers/md/raid5.c if (sector_div(here_new, chunk_sectors * new_data_disks)) {
chunk_sectors 7242 drivers/md/raid5.c reshape_offset = here_new * chunk_sectors;
chunk_sectors 7245 drivers/md/raid5.c sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
chunk_sectors 7256 drivers/md/raid5.c if (abs(min_offset_diff) >= mddev->chunk_sectors &&
chunk_sectors 7265 drivers/md/raid5.c ? (here_new * chunk_sectors + min_offset_diff <=
chunk_sectors 7266 drivers/md/raid5.c here_old * chunk_sectors)
chunk_sectors 7267 drivers/md/raid5.c : (here_new * chunk_sectors >=
chunk_sectors 7268 drivers/md/raid5.c here_old * chunk_sectors + (-min_offset_diff))) {
chunk_sectors 7279 drivers/md/raid5.c BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
chunk_sectors 7377 drivers/md/raid5.c mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
chunk_sectors 7432 drivers/md/raid5.c ((mddev->chunk_sectors << 9) / PAGE_SIZE);
chunk_sectors 7436 drivers/md/raid5.c chunk_size = mddev->chunk_sectors << 9;
chunk_sectors 7517 drivers/md/raid5.c conf->chunk_sectors / 2, mddev->layout);
chunk_sectors 7773 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1);
chunk_sectors 7805 drivers/md/raid5.c if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
chunk_sectors 7811 drivers/md/raid5.c ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
chunk_sectors 7826 drivers/md/raid5.c mddev->new_chunk_sectors == mddev->chunk_sectors)
chunk_sectors 7846 drivers/md/raid5.c if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
chunk_sectors 7852 drivers/md/raid5.c mddev->chunk_sectors)
chunk_sectors 7906 drivers/md/raid5.c conf->prev_chunk_sectors = conf->chunk_sectors;
chunk_sectors 7907 drivers/md/raid5.c conf->chunk_sectors = mddev->new_chunk_sectors;
chunk_sectors 7982 drivers/md/raid5.c conf->chunk_sectors = conf->prev_chunk_sectors;
chunk_sectors 8028 drivers/md/raid5.c int stripe = data_disks * ((conf->chunk_sectors << 9)
chunk_sectors 8062 drivers/md/raid5.c mddev->chunk_sectors = conf->chunk_sectors;
chunk_sectors 8118 drivers/md/raid5.c mddev->new_chunk_sectors = mddev->chunk_sectors;
chunk_sectors 8223 drivers/md/raid5.c conf->chunk_sectors = new_chunk ;
chunk_sectors 8224 drivers/md/raid5.c mddev->chunk_sectors = new_chunk;
chunk_sectors 571 drivers/md/raid5.h int chunk_sectors;
chunk_sectors 327 include/linux/blkdev.h unsigned int chunk_sectors;
chunk_sectors 708 include/linux/blkdev.h return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
chunk_sectors 722 include/linux/blkdev.h return sector >> ilog2(q->limits.chunk_sectors);
chunk_sectors 1016 include/linux/blkdev.h if (!q->limits.chunk_sectors)
chunk_sectors 1019 include/linux/blkdev.h return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
chunk_sectors 1020 include/linux/blkdev.h (offset & (q->limits.chunk_sectors - 1))));
chunk_sectors 1031 include/linux/blkdev.h if (!q->limits.chunk_sectors ||
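
The recurring pattern across these hits is power-of-two chunk arithmetic: blk_queue_chunk_sectors() rejects non-power-of-two values (block/blk-settings.c:217), which lets every user find the position inside a chunk with offset & (chunk_sectors - 1) (include/linux/blkdev.h:1019-1020, drivers/md/raid5.c:5127-5128) and round a size down to a whole number of chunks with & ~(chunk_sectors - 1) (drivers/md/raid5.c:6742, 7377). Below is a minimal userspace sketch of that arithmetic; the helper names and the sector_t typedef are local to this example, not kernel API.

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel's sector_t */

/* Sectors remaining before the next chunk boundary: the computation
 * blk_max_size_offset() makes at include/linux/blkdev.h:1019-1020.
 * Valid only because chunk_sectors is a power of two. */
static unsigned int chunk_left(sector_t offset, unsigned int chunk_sectors)
{
	return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
}

/* The raid5-style in-chunk test (drivers/md/raid5.c:5127-5128): does a
 * bio of bio_sectors starting at sector stay inside one chunk? */
static int fits_in_one_chunk(sector_t sector, unsigned int bio_sectors,
			     unsigned int chunk_sectors)
{
	return chunk_sectors >=
	       ((unsigned int)(sector & (chunk_sectors - 1)) + bio_sectors);
}

/* Round a size down to a whole number of chunks, as raid5 does at
 * drivers/md/raid5.c:6742 and 7377. */
static sector_t round_down_to_chunk(sector_t sectors, unsigned int chunk_sectors)
{
	return sectors & ~(sector_t)(chunk_sectors - 1);
}

int main(void)
{
	unsigned int chunk = 128;		/* 128 sectors = 64 KiB chunks */

	assert((chunk & (chunk - 1)) == 0);	/* must be a power of two */
	printf("room before boundary at sector 100: %u\n", chunk_left(100, chunk));
	printf("8-sector bio at sector 100 fits: %d\n",
	       fits_in_one_chunk(100, 8, chunk));
	printf("64-sector bio at sector 100 fits: %d\n",
	       fits_in_one_chunk(100, 64, chunk));
	printf("1000 sectors rounded down to chunks: %llu\n",
	       round_down_to_chunk(1000, chunk));
	return 0;
}

The seq_printf() hits (md-linear.c:306, raid0.c:642, raid10.c:1560, raid5.c:7517) divide chunk_sectors by 2 for display because a sector is 512 bytes, so chunk_sectors / 2 is the chunk size in KiB; the << 9 and >> 9 shifts elsewhere convert between sectors and bytes for the same reason.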