sectors 89 arch/m68k/emu/nfblock.c geo->sectors = 16; sectors 54 arch/powerpc/include/asm/ps3stor.h u64 start_sector, u64 sectors, sectors 1421 arch/um/drivers/ubd_kern.c geo->sectors = 32; sectors 53 block/badblocks.c int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sectors 60 block/badblocks.c sector_t target = s + sectors; sectors 68 block/badblocks.c sectors = target - s; sectors 163 block/badblocks.c int badblocks_set(struct badblocks *bb, sector_t s, int sectors, sectors 177 block/badblocks.c sector_t next = s + sectors; sectors 182 block/badblocks.c sectors = next - s; sectors 213 block/badblocks.c if (s == a && s + sectors >= e) sectors 219 block/badblocks.c if (e < s + sectors) sectors 220 block/badblocks.c e = s + sectors; sectors 232 block/badblocks.c sectors = e - s; sectors 235 block/badblocks.c if (sectors && hi < bb->count) { sectors 243 block/badblocks.c if (a <= s + sectors) { sectors 245 block/badblocks.c if (e <= s + sectors) { sectors 247 block/badblocks.c e = s + sectors; sectors 260 block/badblocks.c sectors = e - s; sectors 265 block/badblocks.c if (sectors == 0 && hi < bb->count) { sectors 283 block/badblocks.c while (sectors) { sectors 292 block/badblocks.c int this_sectors = sectors; sectors 301 block/badblocks.c sectors -= this_sectors; sectors 331 block/badblocks.c int badblocks_clear(struct badblocks *bb, sector_t s, int sectors) sectors 335 block/badblocks.c sector_t target = s + sectors; sectors 348 block/badblocks.c sectors = target - s; sectors 1772 block/bio.c unsigned long sectors, struct hd_struct *part) sectors 1780 block/bio.c part_stat_add(part, sectors[sgrp], sectors); sectors 1890 block/bio.c struct bio *bio_split(struct bio *bio, int sectors, sectors 1895 block/bio.c BUG_ON(sectors <= 0); sectors 1896 block/bio.c BUG_ON(sectors >= bio_sectors(bio)); sectors 1902 block/bio.c split->bi_iter.bi_size = sectors << 9; sectors 1317 block/blk-core.c part_stat_add(part, sectors[sgrp], bytes >> 9); sectors 146 block/blk-merge.c unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); sectors 147 block/blk-merge.c unsigned max_sectors = sectors; sectors 157 block/blk-merge.c return sectors & (lbs - 1); sectors 195 block/blk-merge.c unsigned *sectors, unsigned max_segs, sectors 198 block/blk-merge.c unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9; sectors 215 block/blk-merge.c *sectors += total_len >> 9; sectors 247 block/blk-merge.c unsigned nsegs = 0, sectors = 0; sectors 260 block/blk-merge.c sectors + (bv.bv_len >> 9) <= max_sectors && sectors 263 block/blk-merge.c sectors += bv.bv_len >> 9; sectors 264 block/blk-merge.c } else if (bvec_split_segs(q, &bv, &nsegs, §ors, max_segs, sectors 277 block/blk-merge.c return bio_split(bio, sectors, GFP_NOIO, bs); sectors 48 block/blk-mq.c int ddir, sectors, bucket; sectors 51 block/blk-mq.c sectors = blk_rq_stats_sectors(rq); sectors 53 block/blk-mq.c bucket = ddir + 2 * ilog2(sectors); sectors 354 block/blk-throttle.c #define request_bucket_index(sectors) \ sectors 355 block/blk-throttle.c clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1) sectors 295 block/bounce.c int sectors = 0; sectors 300 block/bounce.c sectors += from.bv_len >> 9; sectors 307 block/bounce.c if (!passthrough && sectors < bio_sectors(*bio_orig)) { sectors 308 block/bounce.c bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split); sectors 47 block/compat_ioctl.c unsigned char sectors; sectors 1393 block/genhd.c part_stat_read(hd, sectors[STAT_READ]), sectors 1397 block/genhd.c part_stat_read(hd, sectors[STAT_WRITE]), sectors 1404 block/genhd.c part_stat_read(hd, sectors[STAT_DISCARD]), sectors 134 block/partition-generic.c (unsigned long long)part_stat_read(p, sectors[STAT_READ]), sectors 138 block/partition-generic.c (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), sectors 145 block/partition-generic.c (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), sectors 41 block/partitions/ibm.c return cyl * geo->heads * geo->sectors + sectors 42 block/partitions/ibm.c head * geo->sectors; sectors 59 block/partitions/ibm.c return cyl * geo->heads * geo->sectors + sectors 60 block/partitions/ibm.c head * geo->sectors + sectors 177 block/partitions/ibm.c offset + geo->sectors; sectors 222 block/partitions/ibm.c * geo->sectors * secperblk; sectors 87 drivers/ata/libata-core.c u16 heads, u16 sectors); sectors 712 drivers/ata/libata-core.c block = (cyl * dev->heads + head) * dev->sectors + sect - 1; sectors 823 drivers/ata/libata-core.c track = (u32)block / dev->sectors; sectors 826 drivers/ata/libata-core.c sect = (u32)block % dev->sectors + 1; sectors 1180 drivers/ata/libata-core.c u64 sectors = 0; sectors 1182 drivers/ata/libata-core.c sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; sectors 1183 drivers/ata/libata-core.c sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; sectors 1184 drivers/ata/libata-core.c sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; sectors 1185 drivers/ata/libata-core.c sectors |= (tf->lbah & 0xff) << 16; sectors 1186 drivers/ata/libata-core.c sectors |= (tf->lbam & 0xff) << 8; sectors 1187 drivers/ata/libata-core.c sectors |= (tf->lbal & 0xff); sectors 1189 drivers/ata/libata-core.c return sectors; sectors 1194 drivers/ata/libata-core.c u64 sectors = 0; sectors 1196 drivers/ata/libata-core.c sectors |= (tf->device & 0x0f) << 24; sectors 1197 drivers/ata/libata-core.c sectors |= (tf->lbah & 0xff) << 16; sectors 1198 drivers/ata/libata-core.c sectors |= (tf->lbam & 0xff) << 8; sectors 1199 drivers/ata/libata-core.c sectors |= (tf->lbal & 0xff); sectors 1201 drivers/ata/libata-core.c return sectors; sectors 1329 drivers/ata/libata-core.c u64 sectors = ata_id_n_sectors(dev->id); sectors 1360 drivers/ata/libata-core.c if (native_sectors <= sectors || !unlock_hpa) { sectors 1361 drivers/ata/libata-core.c if (!print_info || native_sectors == sectors) sectors 1364 drivers/ata/libata-core.c if (native_sectors > sectors) sectors 1367 drivers/ata/libata-core.c (unsigned long long)sectors, sectors 1369 drivers/ata/libata-core.c else if (native_sectors < sectors) sectors 1373 drivers/ata/libata-core.c (unsigned long long)sectors); sectors 1383 drivers/ata/libata-core.c (unsigned long long)sectors, sectors 1402 drivers/ata/libata-core.c (unsigned long long)sectors, sectors 2043 drivers/ata/libata-core.c u8 page, void *buf, unsigned int sectors) sectors 2073 drivers/ata/libata-core.c tf.nsect = sectors; sectors 2074 drivers/ata/libata-core.c tf.hob_nsect = sectors >> 8; sectors 2078 drivers/ata/libata-core.c buf, sectors * ATA_SECT_SIZE, 0); sectors 2514 drivers/ata/libata-core.c dev->sectors = 0; sectors 2598 drivers/ata/libata-core.c dev->sectors = id[6]; sectors 2604 drivers/ata/libata-core.c dev->sectors = id[56]; sectors 2616 drivers/ata/libata-core.c dev->heads, dev->sectors); sectors 4892 drivers/ata/libata-core.c u16 heads, u16 sectors) sectors 4898 drivers/ata/libata-core.c if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) sectors 4908 drivers/ata/libata-core.c tf.nsect = sectors; sectors 1750 drivers/ata/libata-scsi.c track = (u32)block / dev->sectors; sectors 1753 drivers/ata/libata-scsi.c sect = (u32)block % dev->sectors + 1; sectors 86 drivers/ata/libata.h u8 page, void *buf, unsigned int sectors); sectors 1530 drivers/block/amiflop.c geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult; sectors 296 drivers/block/aoe/aoeblk.c geo->sectors = d->geo.sectors; sectors 948 drivers/block/aoe/aoecmd.c d->geo.sectors = 63; sectors 958 drivers/block/aoe/aoecmd.c d->geo.sectors = get_unaligned_le16(&id[56 << 1]); sectors 1343 drivers/block/drbd/drbd_int.h extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); sectors 2804 drivers/block/drbd/drbd_receiver.c curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - sectors 1680 drivers/block/drbd/drbd_worker.c device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors); sectors 2637 drivers/block/floppy.c unsigned int sectors; sectors 2639 drivers/block/floppy.c sectors = fsector_t + blk_rq_sectors(current_req); sectors 2640 drivers/block/floppy.c if (sectors > ssize && sectors < ssize + ssize) sectors 3383 drivers/block/floppy.c geo->sectors = g->sect; sectors 486 drivers/block/mtip32xx/mtip32xx.c dma_addr_t buffer_dma, unsigned int sectors); sectors 1256 drivers/block/mtip32xx/mtip32xx.c dma_addr_t buffer_dma, unsigned int sectors) sectors 1264 drivers/block/mtip32xx/mtip32xx.c fis.sect_count = sectors & 0xFF; sectors 1265 drivers/block/mtip32xx/mtip32xx.c fis.sect_cnt_ex = (sectors >> 8) & 0xFF; sectors 1270 drivers/block/mtip32xx/mtip32xx.c memset(buffer, 0, sectors * ATA_SECT_SIZE); sectors 1276 drivers/block/mtip32xx/mtip32xx.c sectors * ATA_SECT_SIZE, sectors 1381 drivers/block/mtip32xx/mtip32xx.c static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) sectors 1390 drivers/block/mtip32xx/mtip32xx.c *sectors = total; sectors 1404 drivers/block/mtip32xx/mtip32xx.c sector_t sectors; sectors 1426 drivers/block/mtip32xx/mtip32xx.c if (mtip_hw_get_capacity(port->dd, §ors)) sectors 1429 drivers/block/mtip32xx/mtip32xx.c (u64)sectors, sectors 1430 drivers/block/mtip32xx/mtip32xx.c ((u64)sectors) * ATA_SECT_SIZE >> 20); sectors 3337 drivers/block/mtip32xx/mtip32xx.c geo->sectors = 56; sectors 3338 drivers/block/mtip32xx/mtip32xx.c sector_div(capacity, (geo->heads * geo->sectors)); sectors 229 drivers/block/paride/pd.c int sectors; sectors 350 drivers/block/paride/pd.c s = (block % disk->sectors) + 1; sectors 351 drivers/block/paride/pd.c h = (block /= disk->sectors) % disk->heads; sectors 638 drivers/block/paride/pd.c pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0, sectors 720 drivers/block/paride/pd.c disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12)); sectors 726 drivers/block/paride/pd.c disk->capacity = disk->sectors * disk->heads * disk->cylinders; sectors 742 drivers/block/paride/pd.c disk->cylinders, disk->heads, disk->sectors, sectors 812 drivers/block/paride/pd.c geo->sectors = PD_LOG_SECTS; sectors 813 drivers/block/paride/pd.c geo->cylinders = disk->capacity / (geo->heads * geo->sectors); sectors 816 drivers/block/paride/pd.c geo->sectors = disk->sectors; sectors 361 drivers/block/paride/pf.c geo->sectors = PF_FD_SPT; sectors 365 drivers/block/paride/pf.c geo->sectors = PF_HD_SPT; sectors 116 drivers/block/ps3disk.c u64 start_sector, sectors; sectors 132 drivers/block/ps3disk.c sectors = blk_rq_sectors(req) * priv->blocking_factor; sectors 134 drivers/block/ps3disk.c __func__, __LINE__, op, sectors, start_sector); sectors 140 drivers/block/ps3disk.c start_sector, sectors, 0, sectors 144 drivers/block/ps3disk.c start_sector, sectors, 0, sectors 82 drivers/block/rsxx/dev.c geo->sectors = 16; sectors 83 drivers/block/rsxx/dev.c do_div(blocks, (geo->heads * geo->sectors)); sectors 87 drivers/block/rsxx/dev.c geo->sectors = 0; sectors 3101 drivers/block/skd_main.c geo->sectors = 255; sectors 130 drivers/block/sunvdc.c geo->sectors = 0x3f; sectors 131 drivers/block/sunvdc.c sector_div(cylinders, geo->heads * geo->sectors); sectors 133 drivers/block/sunvdc.c if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) sectors 724 drivers/block/swim.c geo->sectors = g->sect; sectors 435 drivers/block/sx8.c geo->sectors = (u8) port->dev_geom_sect; sectors 774 drivers/block/umem.c geo->sectors = 32; sectors 775 drivers/block/umem.c geo->cylinders = size / (geo->heads * geo->sectors); sectors 464 drivers/block/virtio_blk.c geometry.sectors, &geo->sectors); sectors 468 drivers/block/virtio_blk.c geo->sectors = 1 << 5; sectors 484 drivers/block/xen-blkfront.c hg->sectors = 0x3f; sectors 485 drivers/block/xen-blkfront.c sector_div(cylinders, hg->heads * hg->sectors); sectors 487 drivers/block/xen-blkfront.c if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) sectors 2343 drivers/block/xen-blkfront.c unsigned long long sectors; sectors 2357 drivers/block/xen-blkfront.c "sectors", "%Lu", §ors); sectors 2361 drivers/block/xen-blkfront.c sectors); sectors 2362 drivers/block/xen-blkfront.c set_capacity(info->gd, sectors); sectors 2386 drivers/block/xen-blkfront.c "sectors", "%llu", §ors, sectors 2416 drivers/block/xen-blkfront.c err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size, sectors 958 drivers/block/xsysace.c geo->sectors = cf_id[ATA_ID_SECTORS]; sectors 71 drivers/dax/super.c sector_t sectors) sectors 95 drivers/dax/super.c last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512; sectors 557 drivers/firmware/edd.c static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info); sectors 323 drivers/ide/ide-floppy.c u8 heads, sectors; sectors 346 drivers/ide/ide-floppy.c sectors = buf[8 + 5]; sectors 348 drivers/ide/ide-floppy.c capacity = cyls * heads * sectors * sector_size; sectors 354 drivers/ide/ide-floppy.c sectors, transfer_rate / 8, sector_size, rpm); sectors 359 drivers/ide/ide-floppy.c drive->bios_sect = sectors; sectors 282 drivers/ide/ide-gd.c geo->sectors = drive->bios_sect; sectors 259 drivers/ide/pdc202xx_old.c #define DECLARE_PDC2026X_DEV(udma, sectors) \ sectors 270 drivers/ide/pdc202xx_old.c .max_sectors = sectors, \ sectors 86 drivers/md/bcache/alloc.c void bch_rescale_priorities(struct cache_set *c, int sectors) sectors 94 drivers/md/bcache/alloc.c atomic_sub(sectors, &c->rescale); sectors 615 drivers/md/bcache/alloc.c unsigned int sectors, sectors 663 drivers/md/bcache/alloc.c sectors = min(sectors, b->sectors_free); sectors 665 drivers/md/bcache/alloc.c SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors); sectors 666 drivers/md/bcache/alloc.c SET_KEY_SIZE(k, sectors); sectors 677 drivers/md/bcache/alloc.c b->sectors_free -= sectors; sectors 680 drivers/md/bcache/alloc.c SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); sectors 682 drivers/md/bcache/alloc.c atomic_long_add(sectors, sectors 277 drivers/md/bcache/bcache.h struct bio *bio, unsigned int sectors); sectors 962 drivers/md/bcache/bcache.h void bch_rescale_priorities(struct cache_set *c, int sectors); sectors 976 drivers/md/bcache/bcache.h unsigned int sectors, unsigned int write_point, sectors 316 drivers/md/bcache/extents.c int sectors) sectors 320 drivers/md/bcache/extents.c offset, -sectors); sectors 762 drivers/md/bcache/journal.c unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) * sectors 801 drivers/md/bcache/journal.c atomic_long_add(sectors, &ca->meta_sectors_written); sectors 806 drivers/md/bcache/journal.c bio->bi_iter.bi_size = sectors << 9; sectors 817 drivers/md/bcache/journal.c SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors); sectors 865 drivers/md/bcache/journal.c size_t sectors; sectors 876 drivers/md/bcache/journal.c sectors = __set_blocks(w->data, w->data->keys + nkeys, sectors 879 drivers/md/bcache/journal.c if (sectors <= min_t(size_t, sectors 129 drivers/md/bcache/request.c unsigned int sectors = min(bio_sectors(bio), sectors 135 drivers/md/bcache/request.c bio->bi_iter.bi_sector += sectors; sectors 136 drivers/md/bcache/request.c bio->bi_iter.bi_size -= sectors << 9; sectors 141 drivers/md/bcache/request.c sectors)); sectors 379 drivers/md/bcache/request.c unsigned int sectors, congested; sectors 451 drivers/md/bcache/request.c sectors = max(task->sequential_io, sectors 455 drivers/md/bcache/request.c sectors >= dc->sequential_cutoff >> 9) { sectors 460 drivers/md/bcache/request.c if (congested && sectors >= congested) { sectors 537 drivers/md/bcache/request.c unsigned int sectors = KEY_INODE(k) == s->iop.inode sectors 541 drivers/md/bcache/request.c int ret = s->d->cache_miss(b, s, bio, sectors); sectors 547 drivers/md/bcache/request.c BUG_ON(bio_sectors <= sectors); sectors 889 drivers/md/bcache/request.c struct bio *bio, unsigned int sectors) sectors 899 drivers/md/bcache/request.c miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); sectors 910 drivers/md/bcache/request.c s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); sectors 922 drivers/md/bcache/request.c miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); sectors 1290 drivers/md/bcache/request.c struct bio *bio, unsigned int sectors) sectors 1292 drivers/md/bcache/request.c unsigned int bytes = min(sectors, bio_sectors(bio)) << 9; sectors 229 drivers/md/bcache/stats.c int sectors) sectors 231 drivers/md/bcache/stats.c atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); sectors 232 drivers/md/bcache/stats.c atomic_add(sectors, &c->accounting.collector.sectors_bypassed); sectors 63 drivers/md/bcache/stats.h int sectors); sectors 414 drivers/md/bcache/super.c u1[i].sectors = 0; sectors 811 drivers/md/bcache/super.c sector_t sectors) sectors 822 drivers/md/bcache/super.c d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); sectors 853 drivers/md/bcache/super.c set_capacity(d->disk, sectors); sectors 896 drivers/md/bcache/super.c uint64_t sectors = 0; sectors 900 drivers/md/bcache/super.c sectors += bdev_sectors(dc->bdev); sectors 902 drivers/md/bcache/super.c c->cached_dev_sectors = sectors; sectors 1450 drivers/md/bcache/super.c if (bcache_device_init(d, block_bytes(c), u->sectors)) sectors 1504 drivers/md/bcache/super.c u->sectors = size >> 9; sectors 531 drivers/md/bcache/sysfs.c sysfs_hprint(size, u->sectors << 9); sectors 560 drivers/md/bcache/sysfs.c u->sectors = v >> 9; sectors 562 drivers/md/bcache/sysfs.c set_capacity(d->disk, u->sectors); sectors 232 drivers/md/bcache/writeback.c unsigned int sectors) sectors 238 drivers/md/bcache/writeback.c return bch_next_delay(&dc->writeback_rate, sectors); sectors 993 drivers/md/dm-ioctl.c geometry.sectors = indata[2]; sectors 138 drivers/md/dm-log-writes.c sector_t sectors) sectors 140 drivers/md/dm-log-writes.c return sectors >> (lc->sectorshift - SECTOR_SHIFT); sectors 144 drivers/md/dm-log-writes.c sector_t sectors) sectors 146 drivers/md/dm-log-writes.c return sectors << (lc->sectorshift - SECTOR_SHIFT); sectors 689 drivers/md/dm-raid.c rdev->sectors = mddev->dev_sectors; sectors 1261 drivers/md/dm-raid.c jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode)); sectors 1262 drivers/md/dm-raid.c if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) { sectors 1592 drivers/md/dm-raid.c rdev->bdev && rdev->sectors) sectors 1593 drivers/md/dm-raid.c return rdev->sectors; sectors 1661 drivers/md/dm-raid.c rdev->sectors = dev_sectors; sectors 2003 drivers/md/dm-raid.c __le64 sectors; /* Used device size in sectors */ sectors 2165 drivers/md/dm-raid.c sb->sectors = cpu_to_le64(rdev->sectors); sectors 2488 drivers/md/dm-raid.c rdev->sectors = le64_to_cpu(sb->sectors); sectors 2906 drivers/md/dm-raid.c rdev->sectors = mddev->dev_sectors; sectors 2950 drivers/md/dm-raid.c rdev->sectors += reshape_sectors; sectors 26 drivers/md/dm-stats.c unsigned long long sectors[2]; sectors 552 drivers/md/dm-stats.c p->sectors[idx] += len; sectors 680 drivers/md/dm-stats.c shared->tmp.sectors[READ] = 0; sectors 681 drivers/md/dm-stats.c shared->tmp.sectors[WRITE] = 0; sectors 698 drivers/md/dm-stats.c shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); sectors 699 drivers/md/dm-stats.c shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); sectors 731 drivers/md/dm-stats.c p->sectors[READ] -= shared->tmp.sectors[READ]; sectors 732 drivers/md/dm-stats.c p->sectors[WRITE] -= shared->tmp.sectors[WRITE]; sectors 847 drivers/md/dm-stats.c shared->tmp.sectors[READ], sectors 851 drivers/md/dm-stats.c shared->tmp.sectors[WRITE], sectors 873 drivers/md/dm.c sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; sectors 1661 drivers/md/dm.c sectors[op_stat_group(bio_op(bio))], ci.sector_count); sectors 1396 drivers/md/md-bitmap.c int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) sectors 1412 drivers/md/md-bitmap.c while (sectors) { sectors 1451 drivers/md/md-bitmap.c if (sectors > blocks) sectors 1452 drivers/md/md-bitmap.c sectors -= blocks; sectors 1454 drivers/md/md-bitmap.c sectors = 0; sectors 1461 drivers/md/md-bitmap.c unsigned long sectors, int success, int behind) sectors 1473 drivers/md/md-bitmap.c while (sectors) { sectors 1505 drivers/md/md-bitmap.c if (sectors > blocks) sectors 1506 drivers/md/md-bitmap.c sectors -= blocks; sectors 1508 drivers/md/md-bitmap.c sectors = 0; sectors 2387 drivers/md/md-bitmap.c unsigned long sectors; sectors 2390 drivers/md/md-bitmap.c rv = kstrtoul(buf, 10, §ors); sectors 2394 drivers/md/md-bitmap.c if (sectors == 0) sectors 2398 drivers/md/md-bitmap.c sectors < (mddev->bitmap->storage.bytes + 511) >> 9) sectors 2404 drivers/md/md-bitmap.c mddev->bitmap_info.space = sectors; sectors 255 drivers/md/md-bitmap.h unsigned long sectors, int behind); sectors 257 drivers/md/md-bitmap.h unsigned long sectors, int success, int behind); sectors 283 drivers/md/md-faulty.c static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 288 drivers/md/md-faulty.c if (sectors == 0) sectors 291 drivers/md/md-faulty.c return sectors; sectors 72 drivers/md/md-linear.c static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 78 drivers/md/md-linear.c WARN_ONCE(sectors || raid_disks, sectors 102 drivers/md/md-linear.c sector_t sectors; sectors 112 drivers/md/md-linear.c sectors = rdev->sectors; sectors 113 drivers/md/md-linear.c sector_div(sectors, mddev->chunk_sectors); sectors 114 drivers/md/md-linear.c rdev->sectors = sectors * mddev->chunk_sectors; sectors 120 drivers/md/md-linear.c conf->array_sectors += rdev->sectors; sectors 140 drivers/md/md-linear.c conf->disks[0].end_sector = conf->disks[0].rdev->sectors; sectors 145 drivers/md/md-linear.c conf->disks[i].rdev->sectors; sectors 252 drivers/md/md-linear.c start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; sectors 298 drivers/md/md-linear.c (unsigned long long)tmp_dev->rdev->sectors, sectors 357 drivers/md/md-multipath.c static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 359 drivers/md/md-multipath.c WARN_ONCE(sectors || raid_disks, sectors 377 drivers/md/md.c unsigned int sectors; sectors 401 drivers/md/md.c sectors = bio_sectors(bio); sectors 409 drivers/md/md.c part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors); sectors 837 drivers/md/md.c rdev->sectors = 0; sectors 1193 drivers/md/md.c rdev->sectors = rdev->sb_start; sectors 1198 drivers/md/md.c if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) sectors 1199 drivers/md/md.c rdev->sectors = (sector_t)(2ULL << 32) - 2; sectors 1201 drivers/md/md.c if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) sectors 1545 drivers/md/md.c sector_t sectors; sectors 1645 drivers/md/md.c int sectors = le16_to_cpu(sb->bblog_size); sectors 1646 drivers/md/md.c if (sectors > (PAGE_SIZE / 512)) sectors 1652 drivers/md/md.c if (!sync_page_io(rdev, bb_sector, sectors << 9, sectors 1657 drivers/md/md.c for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { sectors 1717 drivers/md/md.c sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); sectors 1718 drivers/md/md.c sectors -= rdev->data_offset; sectors 1720 drivers/md/md.c sectors = rdev->sb_start; sectors 1721 drivers/md/md.c if (sectors < le64_to_cpu(sb->data_size)) sectors 1723 drivers/md/md.c rdev->sectors = le64_to_cpu(sb->data_size); sectors 1940 drivers/md/md.c sb->data_size = cpu_to_le64(rdev->sectors); sectors 2091 drivers/md/md.c max_sectors = rdev->sectors + sb_start - rdev->sb_start; sectors 2295 drivers/md/md.c rdev->sectors && sectors 2296 drivers/md/md.c (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { sectors 2305 drivers/md/md.c mddev->dev_sectors = rdev->sectors; sectors 3142 drivers/md/md.c if (rdev->sectors && rdev->mddev->external) sectors 3178 drivers/md/md.c + mddev->dev_sectors > rdev->sectors) sectors 3215 drivers/md/md.c return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); sectors 3228 drivers/md/md.c static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) sectors 3243 drivers/md/md.c *sectors = new; sectors 3251 drivers/md/md.c sector_t oldsectors = rdev->sectors; sectors 3252 drivers/md/md.c sector_t sectors; sectors 3256 drivers/md/md.c if (strict_blocks_to_sectors(buf, §ors) < 0) sectors 3262 drivers/md/md.c sectors = super_types[my_mddev->major_version]. sectors 3263 drivers/md/md.c rdev_size_change(rdev, sectors); sectors 3264 drivers/md/md.c if (!sectors) sectors 3266 drivers/md/md.c } else if (!sectors) sectors 3267 drivers/md/md.c sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - sectors 3273 drivers/md/md.c if (sectors < my_mddev->dev_sectors) sectors 3276 drivers/md/md.c rdev->sectors = sectors; sectors 3277 drivers/md/md.c if (sectors > oldsectors && my_mddev->external) { sectors 3295 drivers/md/md.c overlaps(rdev->data_offset, rdev->sectors, sectors 3297 drivers/md/md.c rdev2->sectors)) { sectors 3314 drivers/md/md.c rdev->sectors = oldsectors; sectors 4540 drivers/md/md.c sector_t sectors; sectors 4541 drivers/md/md.c int err = strict_blocks_to_sectors(buf, §ors); sectors 4549 drivers/md/md.c err = update_size(mddev, sectors); sectors 4554 drivers/md/md.c mddev->dev_sectors > sectors) sectors 4555 drivers/md/md.c mddev->dev_sectors = sectors; sectors 5162 drivers/md/md.c sector_t sectors; sectors 5177 drivers/md/md.c sectors = mddev->pers->size(mddev, 0, 0); sectors 5179 drivers/md/md.c sectors = mddev->array_sectors; sectors 5183 drivers/md/md.c if (strict_blocks_to_sectors(buf, §ors) < 0) sectors 5185 drivers/md/md.c else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) sectors 5192 drivers/md/md.c mddev->array_sectors = sectors; sectors 6689 drivers/md/md.c rdev->sectors = rdev->sb_start; sectors 6773 drivers/md/md.c rdev->sectors = rdev->sb_start; sectors 7021 drivers/md/md.c sector_t avail = rdev->sectors; sectors 7245 drivers/md/md.c geo->sectors = 4; sectors 8004 drivers/md/md.c sector_t sectors; sectors 8036 drivers/md/md.c sectors = 0; sectors 8054 drivers/md/md.c sectors += rdev->sectors; sectors 8065 drivers/md/md.c (unsigned long long)sectors / 2); sectors 8227 drivers/md/md.c curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - sectors 8613 drivers/md/md.c sector_t sectors; sectors 8654 drivers/md/md.c sectors = mddev->pers->sync_request(mddev, j, &skipped); sectors 8655 drivers/md/md.c if (sectors == 0) { sectors 8661 drivers/md/md.c io_sectors += sectors; sectors 8662 drivers/md/md.c atomic_add(sectors, &mddev->recovery_active); sectors 8668 drivers/md/md.c j += sectors; sectors 9223 drivers/md/md.c rdev->sectors += rdev->data_offset - rdev->new_data_offset; sectors 9225 drivers/md/md.c rdev->sectors -= rdev->new_data_offset - rdev->data_offset; sectors 9234 drivers/md/md.c int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sectors 9243 drivers/md/md.c rv = badblocks_set(&rdev->badblocks, s, sectors, 0); sectors 9259 drivers/md/md.c int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sectors 9267 drivers/md/md.c rv = badblocks_clear(&rdev->badblocks, s, sectors); sectors 41 drivers/md/md.h sector_t sectors; /* Device size (in 512bytes sectors) */ sectors 210 drivers/md/md.h static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, sectors 215 drivers/md/md.h sectors, sectors 223 drivers/md/md.h extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sectors 225 drivers/md/md.h extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sectors 571 drivers/md/md.h int (*resize) (struct mddev *mddev, sector_t sectors); sectors 572 drivers/md/md.h sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); sectors 83 drivers/md/raid0.c sector_t curr_zone_end, sectors; sectors 102 drivers/md/raid0.c sectors = rdev1->sectors; sectors 103 drivers/md/raid0.c sector_div(sectors, mddev->chunk_sectors); sectors 104 drivers/md/raid0.c rdev1->sectors = sectors * mddev->chunk_sectors; sectors 114 drivers/md/raid0.c (unsigned long long)rdev1->sectors, sectors 116 drivers/md/raid0.c (unsigned long long)rdev2->sectors); sectors 122 drivers/md/raid0.c if (rdev2->sectors == rdev1->sectors) { sectors 228 drivers/md/raid0.c if (!smallest || (rdev1->sectors < smallest->sectors)) sectors 238 drivers/md/raid0.c zone->zone_end = smallest->sectors * cnt; sectors 251 drivers/md/raid0.c zone->dev_start = smallest->sectors; sectors 257 drivers/md/raid0.c if (rdev->sectors <= zone->dev_start) { sectors 269 drivers/md/raid0.c if (!smallest || rdev->sectors < smallest->sectors) { sectors 273 drivers/md/raid0.c (unsigned long long)rdev->sectors); sectors 278 drivers/md/raid0.c sectors = (smallest->sectors - zone->dev_start) * c; sectors 281 drivers/md/raid0.c zone->nb_dev, (unsigned long long)sectors); sectors 283 drivers/md/raid0.c curr_zone_end += sectors; sectors 288 drivers/md/raid0.c (unsigned long long)smallest->sectors); sectors 359 drivers/md/raid0.c static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 364 drivers/md/raid0.c WARN_ONCE(sectors || raid_disks, sectors 368 drivers/md/raid0.c array_sectors += (rdev->sectors & sectors 576 drivers/md/raid0.c unsigned sectors; sectors 591 drivers/md/raid0.c sectors = chunk_sects - sectors 599 drivers/md/raid0.c if (sectors < bio_sectors(bio)) { sectors 600 drivers/md/raid0.c struct bio *split = bio_split(bio, sectors, GFP_NOIO, sectors 665 drivers/md/raid0.c rdev->sectors = mddev->dev_sectors; sectors 319 drivers/md/raid1.c r1_bio->sector + (r1_bio->sectors); sectors 402 drivers/md/raid1.c r1_bio->sectors, sectors 494 drivers/md/raid1.c if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, sectors 504 drivers/md/raid1.c sector_t hi = r1_bio->sector + r1_bio->sectors; sectors 545 drivers/md/raid1.c sector_t sectors) sectors 549 drivers/md/raid1.c WARN_ON(sectors == 0); sectors 557 drivers/md/raid1.c if (len > sectors) sectors 558 drivers/md/raid1.c len = sectors; sectors 580 drivers/md/raid1.c int sectors; sectors 598 drivers/md/raid1.c sectors = r1_bio->sectors; sectors 609 drivers/md/raid1.c if ((conf->mddev->recovery_cp < this_sector + sectors) || sectors 612 drivers/md/raid1.c this_sector + sectors))) sectors 630 drivers/md/raid1.c rdev->recovery_offset < this_sector + sectors) sectors 636 drivers/md/raid1.c if (is_badblock(rdev, this_sector, sectors, sectors 643 drivers/md/raid1.c best_good_sectors = sectors; sectors 652 drivers/md/raid1.c if (is_badblock(rdev, this_sector, sectors, sectors 663 drivers/md/raid1.c if (choose_first && sectors > bad_sectors) sectors 664 drivers/md/raid1.c sectors = bad_sectors; sectors 665 drivers/md/raid1.c if (best_good_sectors > sectors) sectors 666 drivers/md/raid1.c best_good_sectors = sectors; sectors 679 drivers/md/raid1.c if ((sectors > best_good_sectors) && (best_disk >= 0)) sectors 681 drivers/md/raid1.c best_good_sectors = sectors; sectors 759 drivers/md/raid1.c sectors = best_good_sectors; sectors 764 drivers/md/raid1.c conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; sectors 767 drivers/md/raid1.c *max_sectors = sectors; sectors 1193 drivers/md/raid1.c r1_bio->sectors = bio_sectors(bio); sectors 1255 drivers/md/raid1.c r1_bio->sectors = max_read_sectors; sectors 1300 drivers/md/raid1.c r1_bio->sectors = max_sectors; sectors 1365 drivers/md/raid1.c r1_bio->sectors = max_write_sectors; sectors 1388 drivers/md/raid1.c max_sectors = r1_bio->sectors; sectors 1471 drivers/md/raid1.c r1_bio->sectors = max_sectors; sectors 1496 drivers/md/raid1.c md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, sectors 1512 drivers/md/raid1.c sector_t hi = r1_bio->sector + r1_bio->sectors; sectors 1568 drivers/md/raid1.c sector_t sectors; sectors 1581 drivers/md/raid1.c sectors = align_to_barrier_unit_end( sectors 1585 drivers/md/raid1.c raid1_read_request(mddev, bio, sectors, NULL); sectors 1589 drivers/md/raid1.c raid1_write_request(mddev, bio, sectors); sectors 1898 drivers/md/raid1.c long sectors_to_go = r1_bio->sectors; sectors 1912 drivers/md/raid1.c int s = r1_bio->sectors; sectors 1941 drivers/md/raid1.c } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, sectors 1945 drivers/md/raid1.c r1_bio->sectors, sectors 1954 drivers/md/raid1.c int sectors, struct page *page, int rw) sectors 1956 drivers/md/raid1.c if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) sectors 1967 drivers/md/raid1.c if (!rdev_set_badblocks(rdev, sector, sectors, 0)) sectors 1990 drivers/md/raid1.c int sectors = r1_bio->sectors; sectors 2006 drivers/md/raid1.c while(sectors) { sectors 2007 drivers/md/raid1.c int s = sectors; sectors 2055 drivers/md/raid1.c md_done_sync(mddev, r1_bio->sectors, 0); sectors 2060 drivers/md/raid1.c sectors -= s; sectors 2095 drivers/md/raid1.c sectors -= s; sectors 2120 drivers/md/raid1.c vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); sectors 2139 drivers/md/raid1.c md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); sectors 2178 drivers/md/raid1.c atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); sectors 2245 drivers/md/raid1.c sector_t sect, int sectors) sectors 2248 drivers/md/raid1.c while(sectors) { sectors 2249 drivers/md/raid1.c int s = sectors; sectors 2335 drivers/md/raid1.c sectors -= s; sectors 2359 drivers/md/raid1.c int sectors; sectors 2360 drivers/md/raid1.c int sect_to_write = r1_bio->sectors; sectors 2369 drivers/md/raid1.c sectors = ((sector + block_sectors) sectors 2375 drivers/md/raid1.c if (sectors > sect_to_write) sectors 2376 drivers/md/raid1.c sectors = sect_to_write; sectors 2390 drivers/md/raid1.c wbio->bi_iter.bi_size = r1_bio->sectors << 9; sectors 2392 drivers/md/raid1.c bio_trim(wbio, sector - r1_bio->sector, sectors); sectors 2399 drivers/md/raid1.c sectors, 0) sectors 2403 drivers/md/raid1.c sect_to_write -= sectors; sectors 2404 drivers/md/raid1.c sector += sectors; sectors 2405 drivers/md/raid1.c sectors = block_sectors; sectors 2413 drivers/md/raid1.c int s = r1_bio->sectors; sectors 2443 drivers/md/raid1.c r1_bio->sectors, 0); sectors 2504 drivers/md/raid1.c r1_bio->sector, r1_bio->sectors); sectors 2518 drivers/md/raid1.c raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); sectors 2892 drivers/md/raid1.c r1_bio->sectors = nr_sectors; sectors 2930 drivers/md/raid1.c static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 2932 drivers/md/raid1.c if (sectors) sectors 2933 drivers/md/raid1.c return sectors; sectors 3196 drivers/md/raid1.c static int raid1_resize(struct mddev *mddev, sector_t sectors) sectors 3205 drivers/md/raid1.c sector_t newsize = raid1_size(mddev, sectors, 0); sectors 3215 drivers/md/raid1.c if (sectors > mddev->dev_sectors && sectors 3220 drivers/md/raid1.c mddev->dev_sectors = sectors; sectors 3221 drivers/md/raid1.c mddev->resync_max_sectors = sectors; sectors 159 drivers/md/raid1.h int sectors; sectors 318 drivers/md/raid10.c r10_bio->devs[slot].addr + (r10_bio->sectors); sectors 406 drivers/md/raid10.c r10_bio->sectors, sectors 512 drivers/md/raid10.c r10_bio->sectors, sectors 709 drivers/md/raid10.c int sectors = r10_bio->sectors; sectors 736 drivers/md/raid10.c && (this_sector + sectors >= conf->next_resync)) || sectors 739 drivers/md/raid10.c this_sector + sectors))) sectors 754 drivers/md/raid10.c r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) sectors 760 drivers/md/raid10.c r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) sectors 764 drivers/md/raid10.c if (is_badblock(rdev, dev_sector, sectors, sectors 775 drivers/md/raid10.c if (!do_balance && sectors > bad_sectors) sectors 776 drivers/md/raid10.c sectors = bad_sectors; sectors 777 drivers/md/raid10.c if (best_good_sectors > sectors) sectors 778 drivers/md/raid10.c best_good_sectors = sectors; sectors 793 drivers/md/raid10.c best_good_sectors = sectors; sectors 1118 drivers/md/raid10.c struct bio *bio, sector_t sectors) sectors 1123 drivers/md/raid10.c bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { sectors 1129 drivers/md/raid10.c sectors); sectors 1176 drivers/md/raid10.c regular_request_wait(mddev, conf, bio, r10_bio->sectors); sectors 1201 drivers/md/raid10.c r10_bio->sectors = max_sectors; sectors 1302 drivers/md/raid10.c sector_t sectors; sectors 1321 drivers/md/raid10.c sectors = r10_bio->sectors; sectors 1322 drivers/md/raid10.c regular_request_wait(mddev, conf, bio, sectors); sectors 1326 drivers/md/raid10.c bio->bi_iter.bi_sector + sectors > conf->reshape_progress) sectors 1327 drivers/md/raid10.c : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && sectors 1362 drivers/md/raid10.c max_sectors = r10_bio->sectors; sectors 1474 drivers/md/raid10.c if (max_sectors < r10_bio->sectors) sectors 1475 drivers/md/raid10.c r10_bio->sectors = max_sectors; sectors 1477 drivers/md/raid10.c if (r10_bio->sectors < bio_sectors(bio)) { sectors 1478 drivers/md/raid10.c struct bio *split = bio_split(bio, r10_bio->sectors, sectors 1489 drivers/md/raid10.c md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); sectors 1500 drivers/md/raid10.c static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) sectors 1508 drivers/md/raid10.c r10_bio->sectors = sectors; sectors 1526 drivers/md/raid10.c int sectors = bio_sectors(bio); sectors 1540 drivers/md/raid10.c sectors > chunk_sects sectors 1544 drivers/md/raid10.c sectors = chunk_sects - sectors 1547 drivers/md/raid10.c __make_request(mddev, bio, sectors); sectors 1893 drivers/md/raid10.c atomic_add(r10_bio->sectors, sectors 1933 drivers/md/raid10.c sector_t s = r10_bio->sectors; sectors 1983 drivers/md/raid10.c r10_bio->sectors, sectors 2028 drivers/md/raid10.c fbio->bi_iter.bi_size = r10_bio->sectors << 9; sectors 2032 drivers/md/raid10.c vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); sectors 2054 drivers/md/raid10.c int sectors = r10_bio->sectors; sectors 2057 drivers/md/raid10.c if (sectors < (len / 512)) sectors 2058 drivers/md/raid10.c len = sectors * 512; sectors 2063 drivers/md/raid10.c sectors -= len/512; sectors 2067 drivers/md/raid10.c atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); sectors 2126 drivers/md/raid10.c md_done_sync(mddev, r10_bio->sectors, 1); sectors 2154 drivers/md/raid10.c int sectors = r10_bio->sectors; sectors 2160 drivers/md/raid10.c while (sectors) { sectors 2161 drivers/md/raid10.c int s = sectors; sectors 2218 drivers/md/raid10.c sectors -= s; sectors 2299 drivers/md/raid10.c int sectors, struct page *page, int rw) sectors 2304 drivers/md/raid10.c if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) sectors 2307 drivers/md/raid10.c if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false)) sectors 2317 drivers/md/raid10.c if (!rdev_set_badblocks(rdev, sector, sectors, 0)) sectors 2333 drivers/md/raid10.c int sectors = r10_bio->sectors; sectors 2364 drivers/md/raid10.c while(sectors) { sectors 2365 drivers/md/raid10.c int s = sectors; sectors 2510 drivers/md/raid10.c sectors -= s; sectors 2534 drivers/md/raid10.c int sectors; sectors 2535 drivers/md/raid10.c int sect_to_write = r10_bio->sectors; sectors 2544 drivers/md/raid10.c sectors = ((r10_bio->sector + block_sectors) sectors 2551 drivers/md/raid10.c if (sectors > sect_to_write) sectors 2552 drivers/md/raid10.c sectors = sect_to_write; sectors 2555 drivers/md/raid10.c bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); sectors 2565 drivers/md/raid10.c sectors, 0) sectors 2569 drivers/md/raid10.c sect_to_write -= sectors; sectors 2570 drivers/md/raid10.c sector += sectors; sectors 2571 drivers/md/raid10.c sectors = block_sectors; sectors 2633 drivers/md/raid10.c r10_bio->sectors, 0); sectors 2638 drivers/md/raid10.c r10_bio->sectors, 0)) sectors 2650 drivers/md/raid10.c r10_bio->sectors, 0); sectors 2655 drivers/md/raid10.c r10_bio->sectors, 0)) sectors 2670 drivers/md/raid10.c r10_bio->sectors, 0); sectors 2687 drivers/md/raid10.c r10_bio->sectors, 0); sectors 3332 drivers/md/raid10.c r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; sectors 3440 drivers/md/raid10.c r10_bio->sectors = nr_sectors; sectors 3494 drivers/md/raid10.c r10_bio->sectors = nr_sectors; sectors 3525 drivers/md/raid10.c raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 3533 drivers/md/raid10.c if (!sectors) sectors 3534 drivers/md/raid10.c sectors = conf->dev_sectors; sectors 3536 drivers/md/raid10.c size = sectors >> conf->geo.chunk_shift; sectors 3965 drivers/md/raid10.c static int raid10_resize(struct mddev *mddev, sector_t sectors) sectors 3989 drivers/md/raid10.c size = raid10_size(mddev, sectors, 0); sectors 3999 drivers/md/raid10.c if (sectors > mddev->dev_sectors && sectors 4004 drivers/md/raid10.c calc_sectors(conf, sectors); sectors 4038 drivers/md/raid10.c rdev->sectors = size; sectors 4546 drivers/md/raid10.c r10_bio->sectors = last - sector_nr + 1; sectors 4651 drivers/md/raid10.c r10_bio->sectors = nr_sectors; sectors 4654 drivers/md/raid10.c md_sync_acct_bio(read_bio, r10_bio->sectors); sectors 4691 drivers/md/raid10.c md_done_sync(mddev, r10_bio->sectors, 0); sectors 4717 drivers/md/raid10.c md_sync_acct_bio(b, r10_bio->sectors); sectors 4768 drivers/md/raid10.c int sectors = r10_bio->sectors; sectors 4787 drivers/md/raid10.c while (sectors) { sectors 4788 drivers/md/raid10.c int s = sectors; sectors 4832 drivers/md/raid10.c sectors -= s; sectors 4870 drivers/md/raid10.c md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); sectors 125 drivers/md/raid10.h int sectors; sectors 2969 drivers/md/raid5-cache.c if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) sectors 3011 drivers/md/raid5-cache.c log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); sectors 1290 drivers/md/raid5-ppl.c rdev->data_offset + rdev->sectors > rdev->ppl.sector)) { sectors 5299 drivers/md/raid5.c unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); sectors 5301 drivers/md/raid5.c if (sectors < bio_sectors(raid_bio)) { sectors 5303 drivers/md/raid5.c split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); sectors 5751 drivers/md/raid5.c static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); sectors 6732 drivers/md/raid5.c raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) sectors 6736 drivers/md/raid5.c if (!sectors) sectors 6737 drivers/md/raid5.c sectors = mddev->dev_sectors; sectors 6742 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1); sectors 6743 drivers/md/raid5.c sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); sectors 6744 drivers/md/raid5.c return sectors * (raid_disks - conf->max_degraded); sectors 7759 drivers/md/raid5.c static int raid5_resize(struct mddev *mddev, sector_t sectors) sectors 7773 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1); sectors 7774 drivers/md/raid5.c newsize = raid5_size(mddev, sectors, mddev->raid_disks); sectors 7779 drivers/md/raid5.c int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0); sectors 7784 drivers/md/raid5.c if (sectors > mddev->dev_sectors && sectors 7789 drivers/md/raid5.c mddev->dev_sectors = sectors; sectors 7790 drivers/md/raid5.c mddev->resync_max_sectors = sectors; sectors 8104 drivers/md/raid5.c sector_t sectors; sectors 8113 drivers/md/raid5.c sectors = raid0_conf->strip_zone[0].zone_end; sectors 8114 drivers/md/raid5.c sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); sectors 8115 drivers/md/raid5.c mddev->dev_sectors = sectors; sectors 1749 drivers/memstick/core/ms_block.c msb->geometry.sectors = chs_table[i].sec; sectors 236 drivers/memstick/core/mspro_block.c geo->sectors = msb->sectors_per_track; sectors 2089 drivers/message/fusion/mptscsih.c int sectors; sectors 2094 drivers/message/fusion/mptscsih.c sectors = 32; sectors 2096 drivers/message/fusion/mptscsih.c dummy = heads * sectors; sectors 2106 drivers/message/fusion/mptscsih.c sectors = 63; sectors 2107 drivers/message/fusion/mptscsih.c dummy = heads * sectors; sectors 2114 drivers/message/fusion/mptscsih.c geom[1] = sectors; sectors 339 drivers/mmc/core/block.c geo->sectors = 16; sectors 2379 drivers/mmc/core/block.c size = card->ext_csd.sectors; sectors 401 drivers/mmc/core/mmc.c card->ext_csd.sectors = sectors 408 drivers/mmc/core/mmc.c if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) sectors 99 drivers/mmc/core/mmc_test.c unsigned int sectors; sectors 533 drivers/mmc/core/mmc_test.c unsigned int count, unsigned int sectors, struct timespec64 ts, sectors 546 drivers/mmc/core/mmc_test.c tr->sectors = sectors; sectors 560 drivers/mmc/core/mmc_test.c unsigned int rate, iops, sectors = bytes >> 9; sectors 570 drivers/mmc/core/mmc_test.c mmc_hostname(test->card->host), sectors, sectors >> 1, sectors 571 drivers/mmc/core/mmc_test.c (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec, sectors 575 drivers/mmc/core/mmc_test.c mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); sectors 585 drivers/mmc/core/mmc_test.c unsigned int rate, iops, sectors = bytes >> 9; sectors 597 drivers/mmc/core/mmc_test.c mmc_hostname(test->card->host), count, sectors, count, sectors 598 drivers/mmc/core/mmc_test.c sectors >> 1, (sectors & 1 ? ".5" : ""), sectors 603 drivers/mmc/core/mmc_test.c mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); sectors 612 drivers/mmc/core/mmc_test.c return card->ext_csd.sectors; sectors 967 drivers/mmc/core/mmc_test.c int sectors; sectors 973 drivers/mmc/core/mmc_test.c sectors = (blocks * blksz + 511) / 512; sectors 974 drivers/mmc/core/mmc_test.c if ((sectors * 512) == (blocks * blksz)) sectors 975 drivers/mmc/core/mmc_test.c sectors++; sectors 977 drivers/mmc/core/mmc_test.c if ((sectors * 512) > BUFFER_SIZE) sectors 980 drivers/mmc/core/mmc_test.c memset(test->buffer, 0, sectors * 512); sectors 982 drivers/mmc/core/mmc_test.c for (i = 0; i < sectors; i++) { sectors 995 drivers/mmc/core/mmc_test.c for (; i < sectors * 512; i++) { sectors 3056 drivers/mmc/core/mmc_test.c tr->count, tr->sectors, sectors 2813 drivers/mtd/chips/cfi_cmdset_0002.c int sectors; sectors 2838 drivers/mtd/chips/cfi_cmdset_0002.c sectors = 0; sectors 2851 drivers/mtd/chips/cfi_cmdset_0002.c sect[sectors].chip = &cfi->chips[chipnum]; sectors 2852 drivers/mtd/chips/cfi_cmdset_0002.c sect[sectors].adr = adr; sectors 2853 drivers/mtd/chips/cfi_cmdset_0002.c sect[sectors].locked = do_ppb_xxlock( sectors 2875 drivers/mtd/chips/cfi_cmdset_0002.c sectors++; sectors 2876 drivers/mtd/chips/cfi_cmdset_0002.c if (sectors >= max_sectors) { sectors 2896 drivers/mtd/chips/cfi_cmdset_0002.c for (i = 0; i < sectors; i++) { sectors 951 drivers/mtd/ftl.c geo->sectors = 8; sectors 78 drivers/mtd/inftlcore.c inftl->sectors = inftl->mbd.size / temp; sectors 80 drivers/mtd/inftlcore.c inftl->sectors++; sectors 81 drivers/mtd/inftlcore.c temp = inftl->cylinders * inftl->sectors; sectors 86 drivers/mtd/inftlcore.c temp = inftl->heads * inftl->sectors; sectors 91 drivers/mtd/inftlcore.c if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) { sectors 100 drivers/mtd/inftlcore.c inftl->cylinders, inftl->heads , inftl->sectors, sectors 102 drivers/mtd/inftlcore.c (long)inftl->sectors ); sectors 921 drivers/mtd/inftlcore.c geo->sectors = inftl->sectors; sectors 499 drivers/mtd/inftlmount.c s->EraseSize, s->heads, s->sectors, s->cylinders, sectors 278 drivers/mtd/nand/onenand/onenand_base.c static int onenand_buffer_address(int dataram1, int sectors, int count) sectors 283 drivers/mtd/nand/onenand/onenand_base.c bsa = sectors & ONENAND_BSA_MASK; sectors 481 drivers/mtd/nand/onenand/onenand_base.c int sectors = 0, count = 0; sectors 503 drivers/mtd/nand/onenand/onenand_base.c value = onenand_page_address(page, sectors); sectors 507 drivers/mtd/nand/onenand/onenand_base.c value = onenand_buffer_address(dataram, sectors, count); sectors 2734 drivers/mtd/nand/onenand/onenand_base.c int sectors = 4, count = 4; sectors 2746 drivers/mtd/nand/onenand/onenand_base.c value = onenand_page_address(page, sectors); sectors 2751 drivers/mtd/nand/onenand/onenand_base.c value = onenand_buffer_address(dataram, sectors, count); sectors 987 drivers/mtd/nand/raw/brcmnand/brcmnand.c int sectors = cfg->page_size / (512 << cfg->sector_size_1k); sectors 989 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (section >= sectors) sectors 1005 drivers/mtd/nand/raw/brcmnand/brcmnand.c int sectors = cfg->page_size / (512 << cfg->sector_size_1k); sectors 1007 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (section >= sectors * 2) sectors 1045 drivers/mtd/nand/raw/brcmnand/brcmnand.c int sectors = cfg->page_size / (512 << cfg->sector_size_1k); sectors 1047 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (section >= sectors) sectors 1063 drivers/mtd/nand/raw/brcmnand/brcmnand.c int sectors = cfg->page_size / (512 << cfg->sector_size_1k); sectors 1065 drivers/mtd/nand/raw/brcmnand/brcmnand.c if (section >= sectors) sectors 1122 drivers/mtd/nand/raw/brcmnand/brcmnand.c int sectors = p->page_size / (512 << p->sector_size_1k); sectors 1128 drivers/mtd/nand/raw/brcmnand/brcmnand.c ecc->bytes = 3 * sectors; sectors 62 drivers/mtd/nand/raw/mtk_ecc.c u32 sectors; sectors 142 drivers/mtd/nand/raw/mtk_ecc.c if (dec & ecc->sectors) { sectors 148 drivers/mtd/nand/raw/mtk_ecc.c ecc->sectors = 0; sectors 205 drivers/mtd/nand/raw/mtk_ecc.c if (config->sectors) sectors 206 drivers/mtd/nand/raw/mtk_ecc.c ecc->sectors = 1 << (config->sectors - 1); sectors 213 drivers/mtd/nand/raw/mtk_ecc.c int sectors) sectors 221 drivers/mtd/nand/raw/mtk_ecc.c for (i = 0; i < sectors; i++) { sectors 32 drivers/mtd/nand/raw/mtk_ecc.h u32 sectors; sectors 700 drivers/mtd/nand/raw/mtk_nand.c u32 sectors) sectors 709 drivers/mtd/nand/raw/mtk_nand.c for (i = 0; i < sectors; i++) { sectors 885 drivers/mtd/nand/raw/mtk_nand.c u32 sectors) sectors 896 drivers/mtd/nand/raw/mtk_nand.c memset(buf, 0xff, sectors * chip->ecc.size); sectors 897 drivers/mtd/nand/raw/mtk_nand.c for (i = 0; i < sectors; i++) sectors 902 drivers/mtd/nand/raw/mtk_nand.c mtk_ecc_get_stats(nfc->ecc, &stats, sectors); sectors 916 drivers/mtd/nand/raw/mtk_nand.c u32 column, sectors, start, end, reg; sectors 926 drivers/mtd/nand/raw/mtk_nand.c sectors = end - start; sectors 929 drivers/mtd/nand/raw/mtk_nand.c len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); sectors 949 drivers/mtd/nand/raw/mtk_nand.c nfc->ecc_cfg.sectors = sectors; sectors 966 drivers/mtd/nand/raw/mtk_nand.c nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON); sectors 980 drivers/mtd/nand/raw/mtk_nand.c ADDRCNTR_SEC(reg) >= sectors, 10, sectors 988 drivers/mtd/nand/raw/mtk_nand.c mtk_nfc_update_ecc_stats(mtd, buf, start, sectors); sectors 989 drivers/mtd/nand/raw/mtk_nand.c mtk_nfc_read_fdm(chip, start, sectors); sectors 71 drivers/mtd/nftlcore.c nftl->sectors = nftl->mbd.size / temp; sectors 73 drivers/mtd/nftlcore.c nftl->sectors++; sectors 74 drivers/mtd/nftlcore.c temp = nftl->cylinders * nftl->sectors; sectors 79 drivers/mtd/nftlcore.c temp = nftl->heads * nftl->sectors; sectors 84 drivers/mtd/nftlcore.c if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) { sectors 93 drivers/mtd/nftlcore.c nftl->cylinders, nftl->heads , nftl->sectors, sectors 95 drivers/mtd/nftlcore.c (long)nftl->sectors ); sectors 772 drivers/mtd/nftlcore.c geo->sectors = nftl->sectors; sectors 716 drivers/mtd/rfd_ftl.c geo->sectors = SECTORS_PER_TRACK; sectors 639 drivers/mtd/sm_ftl.c ftl->sectors = chs_table[i].sec; sectors 647 drivers/mtd/sm_ftl.c ftl->sectors = 63; sectors 1112 drivers/mtd/sm_ftl.c geo->sectors = ftl->sectors; sectors 55 drivers/mtd/sm_ftl.h int sectors; sectors 23 drivers/mtd/ssfdc.c unsigned char sectors; sectors 318 drivers/mtd/ssfdc.c ssfdc->sectors = 32; sectors 319 drivers/mtd/ssfdc.c get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); sectors 321 drivers/mtd/ssfdc.c ((long)ssfdc->sectors * (long)ssfdc->heads)); sectors 324 drivers/mtd/ssfdc.c ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, sectors 326 drivers/mtd/ssfdc.c (long)ssfdc->sectors); sectors 329 drivers/mtd/ssfdc.c (long)ssfdc->sectors; sectors 411 drivers/mtd/ssfdc.c ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); sectors 414 drivers/mtd/ssfdc.c geo->sectors = ssfdc->sectors; sectors 280 drivers/mtd/ubi/block.c geo->sectors = get_capacity(bdev->bd_disk); sectors 1748 drivers/net/ethernet/intel/i40e/i40e_ethtool.c u16 i, sectors; sectors 1796 drivers/net/ethernet/intel/i40e/i40e_ethtool.c sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; sectors 1797 drivers/net/ethernet/intel/i40e/i40e_ethtool.c sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; sectors 1800 drivers/net/ethernet/intel/i40e/i40e_ethtool.c for (i = 0; i < sectors; i++) { sectors 1801 drivers/net/ethernet/intel/i40e/i40e_ethtool.c if (i == (sectors - 1)) { sectors 1506 drivers/nvdimm/btt.c geo->sectors = 1 << 5; sectors 1639 drivers/nvme/host/core.c geo->sectors = 1 << 5; sectors 83 drivers/nvme/target/admin-cmd.c sectors[READ]), 1000); sectors 86 drivers/nvme/target/admin-cmd.c sectors[WRITE]), 1000); sectors 115 drivers/nvme/target/admin-cmd.c part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000); sectors 118 drivers/nvme/target/admin-cmd.c part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); sectors 265 drivers/ps3/ps3stor_lib.c u64 start_sector, u64 sectors, int write) sectors 272 drivers/ps3/ps3stor_lib.c __func__, __LINE__, op, sectors, start_sector); sectors 276 drivers/ps3/ps3stor_lib.c start_sector, sectors, 0, lpar, sectors 279 drivers/ps3/ps3stor_lib.c start_sector, sectors, 0, lpar, sectors 769 drivers/s390/block/dasd.c long sectors, sectors 784 drivers/s390/block/dasd.c data->dasd_io_sects += sectors; sectors 800 drivers/s390/block/dasd.c data->dasd_read_sects += sectors; sectors 818 drivers/s390/block/dasd.c unsigned long tottimeps, sectors; sectors 830 drivers/s390/block/dasd.c sectors = blk_rq_sectors(req); sectors 833 drivers/s390/block/dasd.c !sectors) sectors 840 drivers/s390/block/dasd.c tottimeps = tottime / sectors; sectors 842 drivers/s390/block/dasd.c dasd_profile_counter(sectors, sectors_ind); sectors 847 drivers/s390/block/dasd.c dasd_profile_counter(irqtime / sectors, irqtimeps_ind); sectors 861 drivers/s390/block/dasd.c sectors, sectors_ind, tottime_ind, sectors 879 drivers/s390/block/dasd.c sectors, sectors_ind, tottime_ind, sectors 897 drivers/s390/block/dasd.c sectors, sectors_ind, tottime_ind, sectors 481 drivers/s390/block/dasd_diag.c geo->sectors = 128 >> block->s2b_shift; sectors 2392 drivers/s390/block/dasd_eckd.c geo->sectors = recs_per_track(&private->rdc_data, sectors 4611 drivers/s390/block/dasd_eckd.c sector_t first_trk, last_trk, sectors; sectors 4717 drivers/s390/block/dasd_eckd.c for (sectors = 0; sectors < start_padding_sectors; sectors += 8) sectors 4740 drivers/s390/block/dasd_eckd.c for (sectors = 0; sectors < end_padding_sectors; sectors += 8) sectors 221 drivers/s390/block/dasd_fba.c geo->sectors = 128 >> block->s2b_shift; sectors 245 drivers/s390/block/xpram.c geo->sectors = 16; sectors 1700 drivers/scsi/3w-9xxx.c int heads, sectors, cylinders; sectors 1707 drivers/scsi/3w-9xxx.c sectors = 63; sectors 1708 drivers/scsi/3w-9xxx.c cylinders = sector_div(capacity, heads * sectors); sectors 1711 drivers/scsi/3w-9xxx.c sectors = 32; sectors 1712 drivers/scsi/3w-9xxx.c cylinders = sector_div(capacity, heads * sectors); sectors 1716 drivers/scsi/3w-9xxx.c geom[1] = sectors; sectors 1410 drivers/scsi/3w-sas.c int heads, sectors; sectors 1417 drivers/scsi/3w-sas.c sectors = 63; sectors 1420 drivers/scsi/3w-sas.c sectors = 32; sectors 1424 drivers/scsi/3w-sas.c geom[1] = sectors; sectors 1425 drivers/scsi/3w-sas.c geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ sectors 1346 drivers/scsi/3w-xxxx.c int heads, sectors, cylinders; sectors 1353 drivers/scsi/3w-xxxx.c sectors = 32; sectors 1354 drivers/scsi/3w-xxxx.c cylinders = sector_div(capacity, heads * sectors); sectors 1358 drivers/scsi/3w-xxxx.c sectors = 63; sectors 1359 drivers/scsi/3w-xxxx.c cylinders = sector_div(capacity, heads * sectors); sectors 1362 drivers/scsi/3w-xxxx.c dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders); sectors 1364 drivers/scsi/3w-xxxx.c geom[1] = sectors; sectors 3395 drivers/scsi/BusLogic.c diskparam->sectors = 63; sectors 3398 drivers/scsi/BusLogic.c diskparam->sectors = 32; sectors 3402 drivers/scsi/BusLogic.c diskparam->sectors = 32; sectors 3404 drivers/scsi/BusLogic.c diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); sectors 3424 drivers/scsi/BusLogic.c diskparam->sectors = 32; sectors 3428 drivers/scsi/BusLogic.c diskparam->sectors = 32; sectors 3432 drivers/scsi/BusLogic.c diskparam->sectors = 63; sectors 3441 drivers/scsi/BusLogic.c diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); sectors 3442 drivers/scsi/BusLogic.c if (part_no < 4 && part_end_sector == diskparam->sectors) { sectors 3444 drivers/scsi/BusLogic.c blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors); sectors 3447 drivers/scsi/BusLogic.c blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors); sectors 1087 drivers/scsi/BusLogic.h int sectors; sectors 435 drivers/scsi/aacraid/aacraid.h int sectors; sectors 310 drivers/scsi/aacraid/linit.c param->sectors = 63; sectors 313 drivers/scsi/aacraid/linit.c param->sectors = 32; sectors 317 drivers/scsi/aacraid/linit.c param->sectors = 32; sectors 320 drivers/scsi/aacraid/linit.c param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); sectors 344 drivers/scsi/aacraid/linit.c param->sectors = 32; sectors 348 drivers/scsi/aacraid/linit.c param->sectors = 32; sectors 352 drivers/scsi/aacraid/linit.c param->sectors = 63; sectors 363 drivers/scsi/aacraid/linit.c param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); sectors 364 drivers/scsi/aacraid/linit.c if (num < 4 && end_sec == param->sectors) { sectors 367 drivers/scsi/aacraid/linit.c param->heads, param->sectors, num)); sectors 372 drivers/scsi/aacraid/linit.c param->heads, param->sectors)); sectors 728 drivers/scsi/aic7xxx/aic79xx_osm.c int sectors; sectors 745 drivers/scsi/aic7xxx/aic79xx_osm.c sectors = 32; sectors 746 drivers/scsi/aic7xxx/aic79xx_osm.c cylinders = aic_sector_div(capacity, heads, sectors); sectors 754 drivers/scsi/aic7xxx/aic79xx_osm.c sectors = 63; sectors 755 drivers/scsi/aic7xxx/aic79xx_osm.c cylinders = aic_sector_div(capacity, heads, sectors); sectors 758 drivers/scsi/aic7xxx/aic79xx_osm.c geom[1] = sectors; sectors 700 drivers/scsi/aic7xxx/aic7xxx_osm.c int sectors; sectors 719 drivers/scsi/aic7xxx/aic7xxx_osm.c sectors = 32; sectors 720 drivers/scsi/aic7xxx/aic7xxx_osm.c cylinders = aic_sector_div(capacity, heads, sectors); sectors 730 drivers/scsi/aic7xxx/aic7xxx_osm.c sectors = 63; sectors 731 drivers/scsi/aic7xxx/aic7xxx_osm.c cylinders = aic_sector_div(capacity, heads, sectors); sectors 734 drivers/scsi/aic7xxx/aic7xxx_osm.c geom[1] = sectors; sectors 137 drivers/scsi/aic7xxx/aiclib.h aic_sector_div(sector_t capacity, int heads, int sectors) sectors 140 drivers/scsi/aic7xxx/aiclib.h sector_div(capacity, (heads * sectors)); sectors 356 drivers/scsi/arcmsr/arcmsr_hba.c int ret, heads, sectors, cylinders, total_capacity; sectors 368 drivers/scsi/arcmsr/arcmsr_hba.c sectors = 32; sectors 369 drivers/scsi/arcmsr/arcmsr_hba.c cylinders = total_capacity / (heads * sectors); sectors 372 drivers/scsi/arcmsr/arcmsr_hba.c sectors = 63; sectors 373 drivers/scsi/arcmsr/arcmsr_hba.c cylinders = total_capacity / (heads * sectors); sectors 376 drivers/scsi/arcmsr/arcmsr_hba.c geom[1] = sectors; sectors 1640 drivers/scsi/atp870u.c int heads, sectors, cylinders; sectors 1643 drivers/scsi/atp870u.c sectors = 32; sectors 1644 drivers/scsi/atp870u.c cylinders = (unsigned long)capacity / (heads * sectors); sectors 1647 drivers/scsi/atp870u.c sectors = 63; sectors 1648 drivers/scsi/atp870u.c cylinders = (unsigned long)capacity / (heads * sectors); sectors 1651 drivers/scsi/atp870u.c ip[1] = sectors; sectors 1063 drivers/scsi/dc395x.c int heads, sectors, cylinders; sectors 1070 drivers/scsi/dc395x.c sectors = 32; sectors 1071 drivers/scsi/dc395x.c cylinders = size / (heads * sectors); sectors 1075 drivers/scsi/dc395x.c sectors = 63; sectors 1076 drivers/scsi/dc395x.c cylinders = size / (heads * sectors); sectors 1079 drivers/scsi/dc395x.c geom[1] = sectors; sectors 91 drivers/scsi/dpt/dpti_ioctl.h unsigned sectors; /* sectors for drives on cntlr. */ sectors 84 drivers/scsi/dpt/sys_info.h uCHAR sectors; /* Up to 63 */ sectors 485 drivers/scsi/dpt_i2o.c int sectors=-1; sectors 493 drivers/scsi/dpt_i2o.c sectors = 2; sectors 498 drivers/scsi/dpt_i2o.c sectors = 32; sectors 503 drivers/scsi/dpt_i2o.c sectors = 63; sectors 508 drivers/scsi/dpt_i2o.c sectors = 63; sectors 513 drivers/scsi/dpt_i2o.c sectors = 63; sectors 515 drivers/scsi/dpt_i2o.c cylinders = sector_div(capacity, heads * sectors); sectors 520 drivers/scsi/dpt_i2o.c sectors = 63; sectors 525 drivers/scsi/dpt_i2o.c geom[1] = sectors; sectors 212 drivers/scsi/hptiop.h __le16 sectors; sectors 632 drivers/scsi/initio.c host->targets[i].sectors = *heads++; sectors 2675 drivers/scsi/initio.c info_array[1] = tc->sectors; sectors 2676 drivers/scsi/initio.c info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors; sectors 471 drivers/scsi/initio.h u8 sectors; sectors 4748 drivers/scsi/ipr.c int heads, sectors; sectors 4752 drivers/scsi/ipr.c sectors = 32; sectors 4759 drivers/scsi/ipr.c parm[1] = sectors; sectors 1145 drivers/scsi/ips.c int sectors; sectors 1163 drivers/scsi/ips.c sectors = IPS_NORM_SECTORS; sectors 1166 drivers/scsi/ips.c sectors = IPS_COMP_SECTORS; sectors 1169 drivers/scsi/ips.c cylinders = (unsigned long) capacity / (heads * sectors); sectors 1172 drivers/scsi/ips.c heads, sectors, cylinders); sectors 1175 drivers/scsi/ips.c geom[1] = sectors; sectors 4091 drivers/scsi/ips.c uint16_t sectors; sectors 4100 drivers/scsi/ips.c sectors = IPS_NORM_SECTORS; sectors 4103 drivers/scsi/ips.c sectors = IPS_COMP_SECTORS; sectors 4108 drivers/scsi/ips.c 1) / (heads * sectors); sectors 4124 drivers/scsi/ips.c mdata.pdata.pg3.SectorsPerTrack = cpu_to_be16(sectors); sectors 2800 drivers/scsi/megaraid.c int sectors; sectors 2810 drivers/scsi/megaraid.c sectors = 32; sectors 2811 drivers/scsi/megaraid.c cylinders = (ulong)capacity / (heads * sectors); sectors 2819 drivers/scsi/megaraid.c sectors = 63; sectors 2820 drivers/scsi/megaraid.c cylinders = (ulong)capacity / (heads * sectors); sectors 2825 drivers/scsi/megaraid.c geom[1] = sectors; sectors 2845 drivers/scsi/megaraid.c sectors = 32; sectors 2846 drivers/scsi/megaraid.c cylinders = (ulong)capacity / (heads * sectors); sectors 2851 drivers/scsi/megaraid.c sectors = 63; sectors 2852 drivers/scsi/megaraid.c cylinders = (ulong)capacity / (heads * sectors); sectors 2857 drivers/scsi/megaraid.c geom[1] = sectors; sectors 3077 drivers/scsi/megaraid/megaraid_sas_base.c int sectors; sectors 3083 drivers/scsi/megaraid/megaraid_sas_base.c sectors = 32; sectors 3085 drivers/scsi/megaraid/megaraid_sas_base.c tmp = heads * sectors; sectors 3096 drivers/scsi/megaraid/megaraid_sas_base.c sectors = 63; sectors 3097 drivers/scsi/megaraid/megaraid_sas_base.c tmp = heads*sectors; sectors 3103 drivers/scsi/megaraid/megaraid_sas_base.c geom[1] = sectors; sectors 2486 drivers/scsi/mpt3sas/mpt3sas_scsih.c int sectors; sectors 2491 drivers/scsi/mpt3sas/mpt3sas_scsih.c sectors = 32; sectors 2493 drivers/scsi/mpt3sas/mpt3sas_scsih.c dummy = heads * sectors; sectors 2503 drivers/scsi/mpt3sas/mpt3sas_scsih.c sectors = 63; sectors 2504 drivers/scsi/mpt3sas/mpt3sas_scsih.c dummy = heads * sectors; sectors 2511 drivers/scsi/mpt3sas/mpt3sas_scsih.c params[1] = sectors; sectors 2150 drivers/scsi/mvumi.c int heads, sectors; sectors 2155 drivers/scsi/mvumi.c sectors = 32; sectors 2156 drivers/scsi/mvumi.c tmp = heads * sectors; sectors 2162 drivers/scsi/mvumi.c sectors = 63; sectors 2163 drivers/scsi/mvumi.c tmp = heads * sectors; sectors 2168 drivers/scsi/mvumi.c geom[1] = sectors; sectors 161 drivers/scsi/ps3rom.c u32 sectors) sectors 166 drivers/scsi/ps3rom.c __func__, __LINE__, sectors, start_sector); sectors 170 drivers/scsi/ps3rom.c sectors, 0, dev->bounce_lpar, &dev->tag); sectors 182 drivers/scsi/ps3rom.c u32 sectors) sectors 187 drivers/scsi/ps3rom.c __func__, __LINE__, sectors, start_sector); sectors 193 drivers/scsi/ps3rom.c sectors, 0, dev->bounce_lpar, &dev->tag); sectors 1037 drivers/scsi/qla1280.c int heads, sectors, cylinders; sectors 1040 drivers/scsi/qla1280.c sectors = 32; sectors 1041 drivers/scsi/qla1280.c cylinders = (unsigned long)capacity / (heads * sectors); sectors 1044 drivers/scsi/qla1280.c sectors = 63; sectors 1045 drivers/scsi/qla1280.c cylinders = (unsigned long)capacity / (heads * sectors); sectors 1051 drivers/scsi/qla1280.c geom[1] = sectors; sectors 2590 drivers/scsi/scsi_debug.c unsigned int sectors, bool read) sectors 2598 drivers/scsi/scsi_debug.c resid = sectors * sizeof(*dif_storep); sectors 2633 drivers/scsi/scsi_debug.c unsigned int sectors, u32 ei_lba) sectors 2639 drivers/scsi/scsi_debug.c for (i = 0; i < sectors; i++, ei_lba++) { sectors 2655 drivers/scsi/scsi_debug.c dif_copy_prot(SCpnt, start_sec, sectors, true); sectors 2816 drivers/scsi/scsi_debug.c unsigned int sectors, u32 ei_lba) sectors 2875 drivers/scsi/scsi_debug.c dif_copy_prot(SCpnt, start_sec, sectors, false); sectors 235 drivers/scsi/scsicam.c unsigned long heads, sectors, cylinders, temp; sectors 238 drivers/scsi/scsicam.c sectors = 62L; /* Maximize sectors per track */ sectors 240 drivers/scsi/scsicam.c temp = cylinders * sectors; /* Compute divisor for heads */ sectors 245 drivers/scsi/scsicam.c sectors = capacity / temp; /* Compute value for sectors per sectors 248 drivers/scsi/scsicam.c sectors++; /* Else, increment number of sectors */ sectors 249 drivers/scsi/scsicam.c temp = heads * sectors; /* Compute divisor for cylinders */ sectors 257 drivers/scsi/scsicam.c *secs = (unsigned int) sectors; sectors 1443 drivers/scsi/sd.c geo->sectors = diskinfo[1]; sectors 1461 drivers/scsi/stex.c int heads = 255, sectors = 63; sectors 1465 drivers/scsi/stex.c sectors = 32; sectors 1468 drivers/scsi/stex.c sector_div(capacity, heads * sectors); sectors 1471 drivers/scsi/stex.c geom[1] = sectors; sectors 466 drivers/target/target_core_alua.c u64 segment_size, segment_mult, sectors, lba; sectors 475 drivers/target/target_core_alua.c sectors = cmd->data_length / dev->dev_attrib.block_size; sectors 478 drivers/target/target_core_alua.c while (lba < cmd->t_task_lba + sectors) { sectors 622 drivers/target/target_core_file.c u32 sectors = cmd->data_length >> sectors 625 drivers/target/target_core_file.c rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, sectors 633 drivers/target/target_core_file.c u32 sectors = cmd->data_length >> sectors 636 drivers/target/target_core_file.c rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, sectors 453 drivers/target/target_core_iblock.c sector_t sectors = target_to_linux_sector(dev, sectors 490 drivers/target/target_core_iblock.c while (sectors) { sectors 505 drivers/target/target_core_iblock.c sectors -= sg->length >> SECTOR_SHIFT; sectors 385 drivers/target/target_core_rd.c u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size; sectors 404 drivers/target/target_core_rd.c rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, sectors 407 drivers/target/target_core_rd.c rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, sectors 411 drivers/target/target_core_rd.c sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset); sectors 216 drivers/target/target_core_sbc.c static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) sectors 218 drivers/target/target_core_sbc.c return cmd->se_dev->dev_attrib.block_size * sectors; sectors 286 drivers/target/target_core_sbc.c unsigned int sectors = sbc_get_write_same_sectors(cmd); sectors 295 drivers/target/target_core_sbc.c if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { sectors 297 drivers/target/target_core_sbc.c sectors, cmd->se_dev->dev_attrib.max_write_same_len); sectors 303 drivers/target/target_core_sbc.c if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || sectors 304 drivers/target/target_core_sbc.c ((cmd->t_task_lba + sectors) > end_lba)) { sectors 306 drivers/target/target_core_sbc.c (unsigned long long)end_lba, cmd->t_task_lba, sectors); sectors 334 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); sectors 690 drivers/target/target_core_sbc.c u32 sectors, bool is_write) sectors 748 drivers/target/target_core_sbc.c cmd->prot_length = dev->prot_length * sectors; sectors 757 drivers/target/target_core_sbc.c cmd->data_length = sectors * dev->dev_attrib.block_size; sectors 796 drivers/target/target_core_sbc.c u32 sectors = 0; sectors 803 drivers/target/target_core_sbc.c sectors = transport_get_sectors_6(cdb); sectors 809 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 815 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sectors 823 drivers/target/target_core_sbc.c sectors = transport_get_sectors_12(cdb); sectors 829 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sectors 837 drivers/target/target_core_sbc.c sectors = transport_get_sectors_16(cdb); sectors 843 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, false); sectors 851 drivers/target/target_core_sbc.c sectors = transport_get_sectors_6(cdb); sectors 858 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 864 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sectors 872 drivers/target/target_core_sbc.c sectors = transport_get_sectors_12(cdb); sectors 878 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sectors 887 drivers/target/target_core_sbc.c sectors = transport_get_sectors_16(cdb); sectors 893 drivers/target/target_core_sbc.c ret = sbc_check_prot(dev, cmd, cdb, sectors, true); sectors 904 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 923 drivers/target/target_core_sbc.c sectors = transport_get_sectors_32(cdb); sectors 942 drivers/target/target_core_sbc.c sectors = transport_get_sectors_32(cdb); sectors 943 drivers/target/target_core_sbc.c if (!sectors) { sectors 971 drivers/target/target_core_sbc.c sectors = cdb[13]; sectors 975 drivers/target/target_core_sbc.c if (sectors > 1) { sectors 977 drivers/target/target_core_sbc.c " than 1\n", sectors); sectors 987 drivers/target/target_core_sbc.c size = 2 * sbc_get_size(cmd, sectors); sectors 989 drivers/target/target_core_sbc.c cmd->t_task_nolb = sectors; sectors 1016 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 1019 drivers/target/target_core_sbc.c sectors = transport_get_sectors_16(cdb); sectors 1042 drivers/target/target_core_sbc.c sectors = transport_get_sectors_16(cdb); sectors 1043 drivers/target/target_core_sbc.c if (!sectors) { sectors 1056 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 1057 drivers/target/target_core_sbc.c if (!sectors) { sectors 1077 drivers/target/target_core_sbc.c sectors = transport_get_sectors_10(cdb); sectors 1080 drivers/target/target_core_sbc.c sectors = transport_get_sectors_16(cdb); sectors 1115 drivers/target/target_core_sbc.c if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || sectors 1116 drivers/target/target_core_sbc.c ((cmd->t_task_lba + sectors) > end_lba)) { sectors 1119 drivers/target/target_core_sbc.c end_lba, cmd->t_task_lba, sectors); sectors 1124 drivers/target/target_core_sbc.c size = sbc_get_size(cmd, sectors); sectors 1330 drivers/target/target_core_sbc.c void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, sectors 1342 drivers/target/target_core_sbc.c left = sectors * dev->prot_length; sectors 1376 drivers/target/target_core_sbc.c sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, sectors 1389 drivers/target/target_core_sbc.c for (; psg && sector < start + sectors; psg = sg_next(psg)) { sectors 1394 drivers/target/target_core_sbc.c sector < start + sectors; sectors 1272 drivers/target/target_core_transport.c u32 sectors = (mtl / dev->dev_attrib.block_size); sectors 1273 drivers/target/target_core_transport.c cmd->prot_length = dev->prot_length * sectors; sectors 1964 drivers/target/target_core_transport.c u32 sectors; sectors 1979 drivers/target/target_core_transport.c sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); sectors 1981 drivers/target/target_core_transport.c sectors, 0, cmd->t_prot_sg, 0); sectors 2241 drivers/target/target_core_transport.c u32 sectors = cmd->data_length >> sectors 2245 drivers/target/target_core_transport.c sectors, 0, cmd->t_prot_sg, sectors 907 drivers/usb/storage/alauda.c unsigned int sectors) sectors 928 drivers/usb/storage/alauda.c len = min(sectors, blocksize) * (pagesize + 64); sectors 942 drivers/usb/storage/alauda.c while (sectors > 0) { sectors 958 drivers/usb/storage/alauda.c pages = min(sectors, blocksize - page); sectors 991 drivers/usb/storage/alauda.c sectors -= pages; sectors 1002 drivers/usb/storage/alauda.c unsigned int sectors) sectors 1020 drivers/usb/storage/alauda.c len = min(sectors, blocksize) * pagesize; sectors 1044 drivers/usb/storage/alauda.c while (sectors > 0) { sectors 1046 drivers/usb/storage/alauda.c unsigned int pages = min(sectors, blocksize - page); sectors 1068 drivers/usb/storage/alauda.c sectors -= pages; sectors 60 drivers/usb/storage/datafab.c unsigned long sectors; /* total sector count */ sectors 138 drivers/usb/storage/datafab.c u32 sectors) sectors 153 drivers/usb/storage/datafab.c if (sectors > 0x0FFFFFFF) sectors 162 drivers/usb/storage/datafab.c totallen = sectors * info->ssize; sectors 221 drivers/usb/storage/datafab.c u32 sectors) sectors 237 drivers/usb/storage/datafab.c if (sectors > 0x0FFFFFFF) sectors 246 drivers/usb/storage/datafab.c totallen = sectors * info->ssize; sectors 421 drivers/usb/storage/datafab.c info->sectors = ((u32)(reply[117]) << 24) | sectors 583 drivers/usb/storage/datafab.c info->sectors, info->ssize); sectors 587 drivers/usb/storage/datafab.c ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); sectors 96 drivers/usb/storage/jumpshot.c unsigned long sectors; /* total sector count */ sectors 156 drivers/usb/storage/jumpshot.c u32 sectors) sectors 174 drivers/usb/storage/jumpshot.c totallen = sectors * info->ssize; sectors 233 drivers/usb/storage/jumpshot.c u32 sectors) sectors 251 drivers/usb/storage/jumpshot.c totallen = sectors * info->ssize; sectors 354 drivers/usb/storage/jumpshot.c info->sectors = ((u32)(reply[117]) << 24) | sectors 513 drivers/usb/storage/jumpshot.c info->sectors, info->ssize); sectors 517 drivers/usb/storage/jumpshot.c ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); sectors 734 drivers/usb/storage/sddr09.c unsigned int sectors) { sectors 755 drivers/usb/storage/sddr09.c len = min(sectors, (unsigned int) info->blocksize) * info->pagesize; sectors 767 drivers/usb/storage/sddr09.c while (sectors > 0) { sectors 770 drivers/usb/storage/sddr09.c pages = min(sectors, info->blocksize - page); sectors 817 drivers/usb/storage/sddr09.c sectors -= pages; sectors 960 drivers/usb/storage/sddr09.c unsigned int sectors) { sectors 1000 drivers/usb/storage/sddr09.c len = min(sectors, (unsigned int) info->blocksize) * info->pagesize; sectors 1011 drivers/usb/storage/sddr09.c while (sectors > 0) { sectors 1015 drivers/usb/storage/sddr09.c pages = min(sectors, info->blocksize - page); sectors 1037 drivers/usb/storage/sddr09.c sectors -= pages; sectors 190 drivers/usb/storage/sddr55.c unsigned short sectors) { sectors 209 drivers/usb/storage/sddr55.c len = min((unsigned int) sectors, (unsigned int) info->blocksize >> sectors 217 drivers/usb/storage/sddr55.c while (sectors>0) { sectors 227 drivers/usb/storage/sddr55.c pages = min((unsigned int) sectors << info->smallpageshift, sectors 295 drivers/usb/storage/sddr55.c sectors -= pages >> info->smallpageshift; sectors 309 drivers/usb/storage/sddr55.c unsigned short sectors) { sectors 336 drivers/usb/storage/sddr55.c len = min((unsigned int) sectors, (unsigned int) info->blocksize >> sectors 344 drivers/usb/storage/sddr55.c while (sectors > 0) { sectors 354 drivers/usb/storage/sddr55.c pages = min((unsigned int) sectors << info->smallpageshift, sectors 503 drivers/usb/storage/sddr55.c sectors -= pages >> info->smallpageshift; sectors 135 drivers/usb/storage/shuttle_usbat.c unsigned long sectors; /* total sector count */ sectors 1092 drivers/usb/storage/shuttle_usbat.c info->sectors = ((u32)(reply[117]) << 24) | sectors 1110 drivers/usb/storage/shuttle_usbat.c u32 sectors) sectors 1143 drivers/usb/storage/shuttle_usbat.c totallen = sectors * info->ssize; sectors 1201 drivers/usb/storage/shuttle_usbat.c u32 sectors) sectors 1234 drivers/usb/storage/shuttle_usbat.c totallen = sectors * info->ssize; sectors 1709 drivers/usb/storage/shuttle_usbat.c info->sectors, info->ssize); sectors 1716 drivers/usb/storage/shuttle_usbat.c ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1); sectors 3653 fs/ext4/super.c part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]); sectors 5049 fs/ext4/super.c sectors[STAT_WRITE]) - sectors 63 fs/ext4/sysfs.c sectors[STAT_WRITE]) - sectors 76 fs/ext4/sysfs.c sectors[STAT_WRITE]) - sectors 1414 fs/f2fs/f2fs.h (((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) - \ sectors 1847 fs/f2fs/f2fs.h blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; sectors 1857 fs/f2fs/f2fs.h if (unlikely(inode->i_blocks < sectors)) { sectors 1861 fs/f2fs/f2fs.h (unsigned long long)sectors); sectors 118 fs/f2fs/segment.h #define SECTOR_TO_BLOCK(sectors) \ sectors 119 fs/f2fs/segment.h ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK) sectors 3431 fs/f2fs/super.c sectors[STAT_WRITE]); sectors 1443 fs/fat/inode.c if (get_unaligned_le16(&b->sectors)) sectors 1468 fs/fat/inode.c bpb->fat_sectors = get_unaligned_le16(&b->sectors); sectors 76 fs/hfsplus/btree.c u64 sectors, int file_id) sectors 100 fs/hfsplus/btree.c if (sectors < 0x200000) { sectors 101 fs/hfsplus/btree.c clump_size = sectors << 2; /* 0.8 % */ sectors 106 fs/hfsplus/btree.c for (i = 0, sectors = sectors >> 22; sectors 107 fs/hfsplus/btree.c sectors && (i < CLUMP_ENTRIES - 1); sectors 108 fs/hfsplus/btree.c ++i, sectors = sectors >> 1) { sectors 394 fs/hfsplus/hfsplus_fs.h u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors, sectors 43 fs/ntfs/layout.h le16 sectors; /* zero */ sectors 609 fs/ntfs/super.c le16_to_cpu(b->bpb.sectors) || sectors 44 include/linux/badblocks.h int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sectors 46 include/linux/badblocks.h int badblocks_set(struct badblocks *bb, sector_t s, int sectors, sectors 48 include/linux/badblocks.h int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); sectors 359 include/linux/bio.h extern struct bio *bio_split(struct bio *bio, int sectors, sectors 372 include/linux/bio.h static inline struct bio *bio_next_split(struct bio *bio, int sectors, sectors 375 include/linux/bio.h if (sectors >= bio_sectors(bio)) sectors 378 include/linux/bio.h return bio_split(bio, sectors, gfp, bs); sectors 456 include/linux/bio.h unsigned long sectors, struct hd_struct *part); sectors 363 include/linux/blkdev.h extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sectors 1593 include/linux/blkdev.h unsigned int sectors) sectors 1595 include/linux/blkdev.h return sectors >> (bi->interval_exp - 9); sectors 1599 include/linux/blkdev.h unsigned int sectors) sectors 1601 include/linux/blkdev.h return bio_integrity_intervals(bi, sectors) * bi->tuple_size; sectors 1678 include/linux/blkdev.h unsigned int sectors) sectors 1684 include/linux/blkdev.h unsigned int sectors) sectors 123 include/linux/dax.h sector_t sectors); sectors 126 include/linux/dax.h sector_t sectors) sectors 129 include/linux/dax.h sectors); sectors 158 include/linux/dax.h sector_t sectors) sectors 88 include/linux/genhd.h unsigned long sectors[NR_STAT_GROUPS]; sectors 708 include/linux/libata.h u16 sectors; /* Number of sectors per track */ sectors 68 include/linux/mmc/card.h unsigned int sectors; sectors 35 include/linux/mtd/inftl.h unsigned char sectors; sectors 27 include/linux/mtd/nftl.h unsigned char sectors; sectors 432 include/trace/events/bcache.h __field(unsigned, sectors ) sectors 440 include/trace/events/bcache.h __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]); sectors 444 include/trace/events/bcache.h __entry->sectors, MAJOR(__entry->dev), sectors 329 include/uapi/linux/bcache.h __u64 sectors; sectors 326 include/uapi/linux/hdreg.h unsigned char sectors; sectors 406 include/uapi/linux/hdreg.h unsigned short sectors; /* Obsolete, "physical" sectors per track */ sectors 117 include/uapi/linux/msdos_fs.h __u8 sectors[2]; /* number of sectors */ sectors 69 include/uapi/linux/virtio_blk.h __u8 sectors;