sector_t 68 arch/m68k/emu/nfblock.c sector_t sec = bio->bi_iter.bi_sector; sector_t 138 arch/m68k/emu/nfblock.c set_capacity(dev->disk, (sector_t)blocks * (bsize / 512)); sector_t 109 arch/xtensa/platforms/iss/simdisk.c sector_t sector = bio->bi_iter.bi_sector; sector_t 53 block/badblocks.c int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t 54 block/badblocks.c sector_t *first_bad, int *bad_sectors) sector_t 60 block/badblocks.c sector_t target = s + sectors; sector_t 88 block/badblocks.c sector_t a = BB_OFFSET(p[mid]); sector_t 163 block/badblocks.c int badblocks_set(struct badblocks *bb, sector_t s, int sectors, sector_t 177 block/badblocks.c sector_t next = s + sectors; sector_t 193 block/badblocks.c sector_t a = BB_OFFSET(p[mid]); sector_t 207 block/badblocks.c sector_t a = BB_OFFSET(p[lo]); sector_t 208 block/badblocks.c sector_t e = a + BB_LEN(p[lo]); sector_t 239 block/badblocks.c sector_t a = BB_OFFSET(p[hi]); sector_t 240 block/badblocks.c sector_t e = a + BB_LEN(p[hi]); sector_t 268 block/badblocks.c sector_t a = BB_OFFSET(p[hi]); sector_t 331 block/badblocks.c int badblocks_clear(struct badblocks *bb, sector_t s, int sectors) sector_t 335 block/badblocks.c sector_t target = s + sectors; sector_t 359 block/badblocks.c sector_t a = BB_OFFSET(p[mid]); sector_t 375 block/badblocks.c sector_t a = BB_OFFSET(p[lo]); sector_t 376 block/badblocks.c sector_t end = a + BB_LEN(p[lo]); sector_t 401 block/badblocks.c sector_t start = BB_OFFSET(p[lo]); sector_t 446 block/badblocks.c sector_t start = BB_OFFSET(p[i]); sector_t 484 block/badblocks.c sector_t s = BB_OFFSET(p[i]); sector_t 228 block/bfq-iosched.c #define BFQQ_SEEK_THR (sector_t)(8 * 100) sector_t 229 block/bfq-iosched.c #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) sector_t 235 block/bfq-iosched.c #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) sector_t 442 block/bfq-iosched.c sector_t last) sector_t 444 block/bfq-iosched.c sector_t s1, s2, d1 = 0, d2 = 0; sector_t 552 block/bfq-iosched.c sector_t sector, struct rb_node **ret_parent, sector_t 2128 block/bfq-iosched.c static sector_t get_sdist(sector_t last_pos, struct request *rq) sector_t 2395 block/bfq-iosched.c static sector_t bfq_io_struct_pos(void *io_struct, bool request) sector_t 2404 block/bfq-iosched.c sector_t sector) sector_t 2412 block/bfq-iosched.c sector_t sector) sector_t 2454 block/bfq-iosched.c sector_t sector) sector_t 299 block/bfq-iosched.h sector_t last_request_pos; sector_t 549 block/bfq-iosched.h sector_t last_position; sector_t 552 block/bfq-iosched.h sector_t in_serv_last_pos; sector_t 115 block/blk-core.c rq->__sector = (sector_t) -1; sector_t 741 block/blk-core.c static void handle_bad_sector(struct bio *bio, sector_t maxsector) sector_t 821 block/blk-core.c static inline int bio_check_eod(struct bio *bio, sector_t maxsector) sector_t 434 block/blk-flush.c sector_t *error_sector) sector_t 455 block/blk-iocost.c sector_t cursor; /* to detect randio */ sector_t 1815 block/blk-iocost.c sector_t bio_end = bio_end_sector(bio); sector_t 25 block/blk-lib.c int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t 26 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, int flags, sector_t 32 block/blk-lib.c sector_t bs_mask; sector_t 58 block/blk-lib.c sector_t req_sects = min_t(sector_t, nr_sects, sector_t 97 block/blk-lib.c int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t 98 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) sector_t 131 block/blk-lib.c static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t 132 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, struct page *page, sector_t 138 block/blk-lib.c sector_t bs_mask; sector_t 192 block/blk-lib.c int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t 193 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, sector_t 213 block/blk-lib.c sector_t sector, sector_t nr_sects, gfp_t gfp_mask, sector_t 261 block/blk-lib.c static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) sector_t 263 block/blk-lib.c sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); sector_t 265 block/blk-lib.c return min(pages, (sector_t)BIO_MAX_PAGES); sector_t 269 block/blk-lib.c sector_t sector, sector_t nr_sects, gfp_t gfp_mask, sector_t 291 block/blk-lib.c sz = min((sector_t) PAGE_SIZE, nr_sects << 9); sector_t 324 block/blk-lib.c int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t 325 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, sector_t 329 block/blk-lib.c sector_t bs_mask; sector_t 358 block/blk-lib.c int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t 359 block/blk-lib.c sector_t nr_sects, gfp_t gfp_mask, unsigned flags) sector_t 362 block/blk-lib.c sector_t bs_mask; sector_t 68 block/blk-merge.c sector_t tmp; sector_t 440 block/blk-mq.c rq->__sector = (sector_t) -1; sector_t 498 block/blk-settings.c sector_t start) sector_t 633 block/blk-settings.c sector_t start) sector_t 654 block/blk-settings.c sector_t offset) sector_t 2227 block/blk-throttle.c static void throtl_track_latency(struct throtl_data *td, sector_t size, sector_t 23 block/blk-zoned.c static inline sector_t blk_zone_start(struct request_queue *q, sector_t 24 block/blk-zoned.c sector_t sector) sector_t 26 block/blk-zoned.c sector_t zone_mask = blk_queue_zone_sectors(q) - 1; sector_t 74 block/blk-zoned.c sector_t nr_sectors) sector_t 76 block/blk-zoned.c sector_t zone_sectors = blk_queue_zone_sectors(q); sector_t 106 block/blk-zoned.c sector_t offset = get_start_sect(bdev); sector_t 122 block/blk-zoned.c static int blk_report_zones(struct gendisk *disk, sector_t sector, sector_t 127 block/blk-zoned.c sector_t capacity = get_capacity(disk); sector_t 163 block/blk-zoned.c int blkdev_report_zones(struct block_device *bdev, sector_t sector, sector_t 225 block/blk-zoned.c sector_t nr_sectors) sector_t 254 block/blk-zoned.c sector_t sector, sector_t nr_sectors, sector_t 258 block/blk-zoned.c sector_t zone_sectors; sector_t 259 block/blk-zoned.c sector_t end_sector = sector + nr_sectors; sector_t 461 block/blk-zoned.c sector_t sector = 0; sector_t 23 block/cmdline-parser.c new_subpart->size = (sector_t)(~0ULL); sector_t 26 block/cmdline-parser.c new_subpart->size = (sector_t)memparse(partdef, &partdef); sector_t 27 block/cmdline-parser.c if (new_subpart->size < (sector_t)PAGE_SIZE) { sector_t 36 block/cmdline-parser.c new_subpart->from = (sector_t)memparse(partdef, &partdef); sector_t 38 block/cmdline-parser.c new_subpart->from = (sector_t)(~0ULL); sector_t 226 block/cmdline-parser.c int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, sector_t 231 block/cmdline-parser.c sector_t from = 0; sector_t 236 block/cmdline-parser.c if (subpart->from == (sector_t)(~0ULL)) sector_t 230 block/elevator.c struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) sector_t 284 block/elevator.c struct request *elv_rb_find(struct rb_root *root, sector_t sector) sector_t 259 block/genhd.c static inline int sector_in_part(struct hd_struct *part, sector_t sector) sector_t 280 block/genhd.c struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) sector_t 42 block/ioctl.c if (sizeof(sector_t) == sizeof(long) && sector_t 105 block/ioctl.c if (sizeof(sector_t) == sizeof(long) && sector_t 146 block/ioctl.c part_nr_sects_write(part, (sector_t)length); sector_t 443 block/mq-deadline.c sector_t sector = bio_end_sector(bio); sector_t 310 block/partition-generic.c sector_t start, sector_t len, int flags, sector_t 464 block/partition-generic.c sector_t from, sector_t size) sector_t 575 block/partition-generic.c sector_t size, from; sector_t 662 block/partition-generic.c unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) sector_t 514 block/partitions/acorn.c sector_t start = 0; sector_t 530 block/partitions/acorn.c sector_t next; sector_t 542 block/partitions/acorn.c sector_t size; sector_t 14 block/partitions/check.h sector_t from; sector_t 15 block/partitions/check.h sector_t size; sector_t 32 block/partitions/check.h sector_t n, Sector *p) sector_t 42 block/partitions/check.h put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) sector_t 61 block/partitions/cmdline.c static bool has_overlaps(sector_t from, sector_t size, sector_t 62 block/partitions/cmdline.c sector_t from2, sector_t size2) sector_t 64 block/partitions/cmdline.c sector_t end = from + size; sector_t 65 block/partitions/cmdline.c sector_t end2 = from2 + size2; sector_t 126 block/partitions/cmdline.c sector_t disk_size; sector_t 174 block/partitions/efi.c static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors) sector_t 242 block/partitions/efi.c sector_t n = lba * (bdev_logical_block_size(bdev) / 512); sector_t 590 block/partitions/efi.c sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9; sector_t 31 block/partitions/ibm.c static sector_t cchh2blk(struct vtoc_cchh *ptr, struct hd_geometry *geo) sector_t 33 block/partitions/ibm.c sector_t cyl; sector_t 49 block/partitions/ibm.c static sector_t cchhb2blk(struct vtoc_cchhb *ptr, struct hd_geometry *geo) sector_t 51 block/partitions/ibm.c sector_t cyl; sector_t 68 block/partitions/ibm.c sector_t *labelsect, sector_t 75 block/partitions/ibm.c sector_t testsect[3]; sector_t 139 block/partitions/ibm.c sector_t blk; sector_t 200 block/partitions/ibm.c sector_t labelsect, sector_t 247 block/partitions/ibm.c sector_t labelsect) sector_t 299 block/partitions/ibm.c sector_t labelsect; sector_t 38 block/partitions/msdos.c static inline sector_t nr_sects(struct partition *p) sector_t 40 block/partitions/msdos.c return (sector_t)get_unaligned_le32(&p->nr_sects); sector_t 43 block/partitions/msdos.c static inline sector_t start_sect(struct partition *p) sector_t 45 block/partitions/msdos.c return (sector_t)get_unaligned_le32(&p->start_sect); sector_t 122 block/partitions/msdos.c sector_t first_sector, sector_t first_size, sector_t 128 block/partitions/msdos.c sector_t this_sector, this_size; sector_t 129 block/partitions/msdos.c sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; sector_t 164 block/partitions/msdos.c sector_t offs, size, next; sector_t 217 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 274 block/partitions/msdos.c sector_t offset, sector_t size, int origin, char *flavour, sector_t 296 block/partitions/msdos.c sector_t bsd_start, bsd_size; sector_t 328 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 336 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 344 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 357 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 401 block/partitions/msdos.c sector_t offset, sector_t size, int origin) sector_t 440 block/partitions/msdos.c void (*parse)(struct parsed_partitions *, sector_t, sector_t, int); sector_t 454 block/partitions/msdos.c sector_t sector_size = bdev_logical_block_size(state->bdev) / 512; sector_t 534 block/partitions/msdos.c sector_t start = start_sect(p)*sector_size; sector_t 535 block/partitions/msdos.c sector_t size = nr_sects(p)*sector_size; sector_t 546 block/partitions/msdos.c sector_t n = 2; sector_t 492 drivers/ata/libata-scsi.c sector_t capacity, int geom[]) sector_t 176 drivers/block/aoe/aoe.h sector_t ssize; sector_t 146 drivers/block/aoe/aoecmd.c put_lba(struct aoe_atahdr *ah, sector_t lba) sector_t 56 drivers/block/brd.c static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) sector_t 87 drivers/block/brd.c static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) sector_t 173 drivers/block/brd.c static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) sector_t 193 drivers/block/brd.c sector_t sector, size_t n) sector_t 225 drivers/block/brd.c sector_t sector, size_t n) sector_t 260 drivers/block/brd.c sector_t sector) sector_t 289 drivers/block/brd.c sector_t sector; sector_t 314 drivers/block/brd.c static int brd_rw_page(struct block_device *bdev, sector_t sector, sector_t 98 drivers/block/cryptoloop.c int size, sector_t IV) sector_t 127 drivers/block/drbd/drbd_actlog.c sector_t sector, int op) sector_t 178 drivers/block/drbd/drbd_actlog.c sector_t sector, int op) sector_t 298 drivers/block/drbd/drbd_actlog.c static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device) sector_t 319 drivers/block/drbd/drbd_actlog.c sector_t sector; sector_t 846 drivers/block/drbd/drbd_actlog.c int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, sector_t 852 drivers/block/drbd/drbd_actlog.c sector_t esector, nr_sectors; sector_t 960 drivers/block/drbd/drbd_actlog.c int drbd_rs_begin_io(struct drbd_device *device, sector_t sector) sector_t 1012 drivers/block/drbd/drbd_actlog.c int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector) sector_t 1134 drivers/block/drbd/drbd_actlog.c void drbd_rs_complete_io(struct drbd_device *device, sector_t sector) sector_t 100 drivers/block/drbd/drbd_bitmap.c sector_t bm_dev_capacity; sector_t 451 drivers/block/drbd/drbd_bitmap.c sector_t drbd_bm_capacity(struct drbd_device *device) sector_t 632 drivers/block/drbd/drbd_bitmap.c int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits) sector_t 988 drivers/block/drbd/drbd_bitmap.c sector_t on_disk_sector = sector_t 990 drivers/block/drbd/drbd_bitmap.c on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); sector_t 598 drivers/block/drbd/drbd_int.h sector_t known_size; /* last known size of that backing device */ sector_t 842 drivers/block/drbd/drbd_int.h sector_t p_size; /* partner's disk size */ sector_t 907 drivers/block/drbd/drbd_int.h sector_t ov_start_sector; sector_t 908 drivers/block/drbd/drbd_int.h sector_t ov_stop_sector; sector_t 910 drivers/block/drbd/drbd_int.h sector_t ov_position; sector_t 912 drivers/block/drbd/drbd_int.h sector_t ov_last_oos_start; sector_t 914 drivers/block/drbd/drbd_int.h sector_t ov_last_oos_size; sector_t 1105 drivers/block/drbd/drbd_int.h sector_t sector, int blksize, u64 block_id); sector_t 1111 drivers/block/drbd/drbd_int.h sector_t sector, int size, u64 block_id); sector_t 1112 drivers/block/drbd/drbd_int.h extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector, sector_t 1115 drivers/block/drbd/drbd_int.h extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size); sector_t 1262 drivers/block/drbd/drbd_int.h #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9)) sector_t 1274 drivers/block/drbd/drbd_int.h #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9)) sector_t 1343 drivers/block/drbd/drbd_int.h extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); sector_t 1370 drivers/block/drbd/drbd_int.h extern sector_t drbd_bm_capacity(struct drbd_device *device); sector_t 1456 drivers/block/drbd/drbd_int.h extern int is_valid_ar_handle(struct drbd_request *, sector_t); sector_t 1466 drivers/block/drbd/drbd_int.h extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int); sector_t 1505 drivers/block/drbd/drbd_int.h struct drbd_backing_dev *bdev, sector_t sector, int op); sector_t 1506 drivers/block/drbd/drbd_int.h extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int); sector_t 1548 drivers/block/drbd/drbd_int.h sector_t start, unsigned int nr_sectors, int flags); sector_t 1554 drivers/block/drbd/drbd_int.h extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, sector_t 1561 drivers/block/drbd/drbd_int.h sector_t, unsigned int, sector_t 1602 drivers/block/drbd/drbd_int.h void drbd_set_my_capacity(struct drbd_device *device, sector_t size); sector_t 1638 drivers/block/drbd/drbd_int.h extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector); sector_t 1639 drivers/block/drbd/drbd_int.h extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector); sector_t 1640 drivers/block/drbd/drbd_int.h extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector); sector_t 1644 drivers/block/drbd/drbd_int.h sector_t sector, int size); sector_t 1648 drivers/block/drbd/drbd_int.h extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, sector_t 1829 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) sector_t 1845 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) sector_t 1858 drivers/block/drbd/drbd_int.h static inline sector_t drbd_get_capacity(struct block_device *bdev) sector_t 1872 drivers/block/drbd/drbd_int.h static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) sector_t 1874 drivers/block/drbd/drbd_int.h sector_t s; sector_t 1880 drivers/block/drbd/drbd_int.h ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX, sector_t 1885 drivers/block/drbd/drbd_int.h s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX, sector_t 1888 drivers/block/drbd/drbd_int.h s = min_t(sector_t, s, sector_t 1893 drivers/block/drbd/drbd_int.h s = min_t(sector_t, DRBD_MAX_SECTORS, sector_t 1903 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) sector_t 10 drivers/block/drbd/drbd_interval.c sector_t interval_end(struct rb_node *node) sector_t 19 drivers/block/drbd/drbd_interval.c struct drbd_interval, rb, sector_t, end, NODE_END); sector_t 28 drivers/block/drbd/drbd_interval.c sector_t this_end = this->sector + (this->size >> 9); sector_t 68 drivers/block/drbd/drbd_interval.c drbd_contains_interval(struct rb_root *root, sector_t sector, sector_t 112 drivers/block/drbd/drbd_interval.c drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) sector_t 116 drivers/block/drbd/drbd_interval.c sector_t end = sector + (size >> 9); sector_t 142 drivers/block/drbd/drbd_interval.c drbd_next_overlap(struct drbd_interval *i, sector_t sector, unsigned int size) sector_t 144 drivers/block/drbd/drbd_interval.c sector_t end = sector + (size >> 9); sector_t 10 drivers/block/drbd/drbd_interval.h sector_t sector; /* start sector of the interval */ sector_t 12 drivers/block/drbd/drbd_interval.h sector_t end; /* highest interval end in subtree */ sector_t 30 drivers/block/drbd/drbd_interval.h extern bool drbd_contains_interval(struct rb_root *, sector_t, sector_t 33 drivers/block/drbd/drbd_interval.h extern struct drbd_interval *drbd_find_overlap(struct rb_root *, sector_t, sector_t 35 drivers/block/drbd/drbd_interval.h extern struct drbd_interval *drbd_next_overlap(struct drbd_interval *, sector_t, sector_t 945 drivers/block/drbd/drbd_main.c sector_t d_size, u_size; sector_t 1398 drivers/block/drbd/drbd_main.c sector_t sector, int blksize, u64 block_id) sector_t 1423 drivers/block/drbd/drbd_main.c sector_t sector, int size, u64 block_id) sector_t 1438 drivers/block/drbd/drbd_main.c int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size, sector_t 1456 drivers/block/drbd/drbd_main.c int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size) sector_t 2031 drivers/block/drbd/drbd_main.c static void _drbd_set_my_capacity(struct drbd_device *device, sector_t size) sector_t 2038 drivers/block/drbd/drbd_main.c void drbd_set_my_capacity(struct drbd_device *device, sector_t size) sector_t 3102 drivers/block/drbd/drbd_main.c sector_t sector; sector_t 3219 drivers/block/drbd/drbd_main.c sector_t capacity = drbd_get_capacity(bdev->md_bdev); sector_t 829 drivers/block/drbd/drbd_nl.c sector_t md_size_sect = 0; sector_t 940 drivers/block/drbd/drbd_nl.c sector_t u_size, size; sector_t 1091 drivers/block/drbd/drbd_nl.c sector_t sector_t 1093 drivers/block/drbd/drbd_nl.c sector_t u_size, int assume_peer_has_space) sector_t 1095 drivers/block/drbd/drbd_nl.c sector_t p_size = device->p_size; /* partner's disk size. */ sector_t 1096 drivers/block/drbd/drbd_nl.c sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */ sector_t 1097 drivers/block/drbd/drbd_nl.c sector_t m_size; /* my size */ sector_t 1098 drivers/block/drbd/drbd_nl.c sector_t size = 0; sector_t 1108 drivers/block/drbd/drbd_nl.c size = min_t(sector_t, p_size, m_size); sector_t 1799 drivers/block/drbd/drbd_nl.c sector_t max_possible_sectors; sector_t 1800 drivers/block/drbd/drbd_nl.c sector_t min_md_device_sectors; sector_t 2847 drivers/block/drbd/drbd_nl.c sector_t u_size; sector_t 2894 drivers/block/drbd/drbd_nl.c if (u_size != (sector_t)rs.resize_size) { sector_t 2931 drivers/block/drbd/drbd_nl.c new_disk_conf->disk_size = (sector_t)rs.resize_size; sector_t 358 drivers/block/drbd/drbd_receiver.c drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, sector_t 1511 drivers/block/drbd/drbd_receiver.c int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags) sector_t 1515 drivers/block/drbd/drbd_receiver.c sector_t tmp, nr; sector_t 1611 drivers/block/drbd/drbd_receiver.c sector_t s = peer_req->i.sector; sector_t 1612 drivers/block/drbd/drbd_receiver.c sector_t nr = peer_req->i.size >> 9; sector_t 1644 drivers/block/drbd/drbd_receiver.c sector_t sector = peer_req->i.sector; sector_t 1858 drivers/block/drbd/drbd_receiver.c read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, sector_t 1862 drivers/block/drbd/drbd_receiver.c const sector_t capacity = drbd_get_capacity(device->this_bdev); sector_t 2009 drivers/block/drbd/drbd_receiver.c sector_t sector, int data_size) sector_t 2066 drivers/block/drbd/drbd_receiver.c sector_t sector = peer_req->i.sector; sector_t 2085 drivers/block/drbd/drbd_receiver.c static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, sector_t 2127 drivers/block/drbd/drbd_receiver.c sector_t sector, bool missing_ok, const char *func) sector_t 2147 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 2181 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 2213 drivers/block/drbd/drbd_receiver.c sector_t sector, int size) sector_t 2240 drivers/block/drbd/drbd_receiver.c sector_t sector = peer_req->i.sector; sector_t 2336 drivers/block/drbd/drbd_receiver.c static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) sector_t 2451 drivers/block/drbd/drbd_receiver.c static void fail_postponed_requests(struct drbd_device *device, sector_t sector, sector_t 2481 drivers/block/drbd/drbd_receiver.c sector_t sector = peer_req->i.sector; sector_t 2590 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 2766 drivers/block/drbd/drbd_receiver.c bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, sector_t 2839 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 2840 drivers/block/drbd/drbd_receiver.c sector_t capacity; sector_t 2962 drivers/block/drbd/drbd_receiver.c if (device->ov_start_sector == ~(sector_t)0 && sector_t 4092 drivers/block/drbd/drbd_receiver.c const char *s, sector_t a, sector_t b) sector_t 4094 drivers/block/drbd/drbd_receiver.c sector_t d; sector_t 4110 drivers/block/drbd/drbd_receiver.c sector_t p_size, p_usize, p_csize, my_usize; sector_t 4111 drivers/block/drbd/drbd_receiver.c sector_t new_size, cur_size; sector_t 4216 drivers/block/drbd/drbd_receiver.c sector_t new_size = p_csize ?: p_usize ?: p_size; sector_t 4997 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 5690 drivers/block/drbd/drbd_receiver.c sector_t sector = be64_to_cpu(p->sector); sector_t 5716 drivers/block/drbd/drbd_receiver.c validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector, sector_t 5742 drivers/block/drbd/drbd_receiver.c sector_t sector = be64_to_cpu(p->sector); sector_t 5788 drivers/block/drbd/drbd_receiver.c sector_t sector = be64_to_cpu(p->sector); sector_t 5824 drivers/block/drbd/drbd_receiver.c sector_t sector = be64_to_cpu(p->sector); sector_t 5845 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 5907 drivers/block/drbd/drbd_receiver.c sector_t sector; sector_t 22 drivers/block/drbd/drbd_req.c static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size); sector_t 899 drivers/block/drbd/drbd_req.c static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) sector_t 902 drivers/block/drbd/drbd_req.c sector_t esector, nr_sectors; sector_t 919 drivers/block/drbd/drbd_req.c static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, sector_t 964 drivers/block/drbd/drbd_req.c sector_t sector = req->i.sector; sector_t 1237 drivers/block/drbd/drbd_state.c device->ov_start_sector = ~(sector_t)0; sector_t 360 drivers/block/drbd/drbd_worker.c sector_t sector = peer_req->i.sector; sector_t 391 drivers/block/drbd/drbd_worker.c static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) sector_t 592 drivers/block/drbd/drbd_worker.c sector_t sector; sector_t 593 drivers/block/drbd/drbd_worker.c const sector_t capacity = drbd_get_capacity(device->this_bdev); sector_t 770 drivers/block/drbd/drbd_worker.c sector_t sector; sector_t 771 drivers/block/drbd/drbd_worker.c const sector_t capacity = drbd_get_capacity(device->this_bdev); sector_t 1236 drivers/block/drbd/drbd_worker.c sector_t sector = peer_req->i.sector; sector_t 1277 drivers/block/drbd/drbd_worker.c void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) sector_t 1295 drivers/block/drbd/drbd_worker.c sector_t sector = peer_req->i.sector; sector_t 503 drivers/block/floppy.c static sector_t floppy_sizes[256]; sector_t 95 drivers/block/loop.c int size, sector_t real_block) sector_t 233 drivers/block/loop.c sector_t x = (sector_t)size; sector_t 253 drivers/block/loop.c int size, sector_t rblock) sector_t 1004 drivers/block/loop.c if ((loff_t)(sector_t)size != size) sector_t 38 drivers/block/loop.h int size, sector_t real_block); sector_t 83 drivers/block/loop.h int size, sector_t real_block); sector_t 1381 drivers/block/mtip32xx/mtip32xx.c static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) sector_t 1404 drivers/block/mtip32xx/mtip32xx.c sector_t sectors; sector_t 3325 drivers/block/mtip32xx/mtip32xx.c sector_t capacity; sector_t 3579 drivers/block/mtip32xx/mtip32xx.c sector_t capacity; sector_t 49 drivers/block/null_blk.h sector_t zone_size_sects; sector_t 94 drivers/block/null_blk.h int null_zone_report(struct gendisk *disk, sector_t sector, sector_t 97 drivers/block/null_blk.h enum req_opf op, sector_t sector, sector_t 98 drivers/block/null_blk.h sector_t nr_sectors); sector_t 106 drivers/block/null_blk.h static inline int null_zone_report(struct gendisk *disk, sector_t sector, sector_t 113 drivers/block/null_blk.h enum req_opf op, sector_t sector, sector_t 114 drivers/block/null_blk.h sector_t nr_sectors) sector_t 689 drivers/block/null_blk_main.c static void null_free_sector(struct nullb *nullb, sector_t sector, sector_t 762 drivers/block/null_blk_main.c sector_t sector, bool for_write, bool is_cache) sector_t 783 drivers/block/null_blk_main.c sector_t sector, bool for_write, bool ignore_cache) sector_t 795 drivers/block/null_blk_main.c sector_t sector, bool ignore_cache) sector_t 932 drivers/block/null_blk_main.c unsigned int off, sector_t sector, size_t n, bool is_fua) sector_t 969 drivers/block/null_blk_main.c unsigned int off, sector_t sector, size_t n) sector_t 1000 drivers/block/null_blk_main.c static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) sector_t 1037 drivers/block/null_blk_main.c unsigned int len, unsigned int off, bool is_write, sector_t sector, sector_t 1059 drivers/block/null_blk_main.c sector_t sector; sector_t 1093 drivers/block/null_blk_main.c sector_t sector; sector_t 1159 drivers/block/null_blk_main.c sector_t sector, sector_t 1160 drivers/block/null_blk_main.c sector_t nr_sectors) sector_t 1163 drivers/block/null_blk_main.c sector_t first_bad; sector_t 1212 drivers/block/null_blk_main.c static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, sector_t 1213 drivers/block/null_blk_main.c sector_t nr_sectors, enum req_opf op) sector_t 1286 drivers/block/null_blk_main.c sector_t sector = bio->bi_iter.bi_sector; sector_t 1287 drivers/block/null_blk_main.c sector_t nr_sectors = bio_sectors(bio); sector_t 1329 drivers/block/null_blk_main.c sector_t nr_sectors = blk_rq_sectors(bd->rq); sector_t 1330 drivers/block/null_blk_main.c sector_t sector = blk_rq_pos(bd->rq); sector_t 1531 drivers/block/null_blk_main.c sector_t size; sector_t 1536 drivers/block/null_blk_main.c size = (sector_t)nullb->dev->size * 1024 * 1024ULL; sector_t 8 drivers/block/null_blk_zoned.c static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) sector_t 15 drivers/block/null_blk_zoned.c sector_t dev_size = (sector_t)dev->size * 1024 * 1024; sector_t 16 drivers/block/null_blk_zoned.c sector_t sector = 0; sector_t 73 drivers/block/null_blk_zoned.c int null_zone_report(struct gendisk *disk, sector_t sector, sector_t 91 drivers/block/null_blk_zoned.c static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, sector_t 125 drivers/block/null_blk_zoned.c static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector) sector_t 155 drivers/block/null_blk_zoned.c sector_t sector, sector_t nr_sectors) sector_t 356 drivers/block/paride/pf.c sector_t capacity = get_capacity(pf->disk); sector_t 111 drivers/block/pktcdvd.c static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) sector_t 113 drivers/block/pktcdvd.c return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); sector_t 642 drivers/block/pktcdvd.c static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) sector_t 680 drivers/block/pktcdvd.c sector_t s = node->bio->bi_iter.bi_sector; sector_t 1104 drivers/block/pktcdvd.c sector_t new_sector; sector_t 1166 drivers/block/pktcdvd.c sector_t zone = 0; /* Suppress gcc warning */ sector_t 2350 drivers/block/pktcdvd.c sector_t zone; sector_t 2469 drivers/block/pktcdvd.c sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); sector_t 2470 drivers/block/pktcdvd.c sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); sector_t 5053 drivers/block/rbd.c sector_t size; sector_t 5062 drivers/block/rbd.c size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; sector_t 126 drivers/block/sunvdc.c sector_t nsect = get_capacity(disk); sector_t 127 drivers/block/sunvdc.c sector_t cylinders = nsect; sector_t 133 drivers/block/sunvdc.c if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) sector_t 529 drivers/block/virtio_blk.c if ((sector_t)capacity != capacity) { sector_t 532 drivers/block/virtio_blk.c capacity = (sector_t)-1; sector_t 227 drivers/block/xen-blkback/common.h sector_t size; sector_t 480 drivers/block/xen-blkfront.c sector_t nsect = get_capacity(bd->bd_disk); sector_t 481 drivers/block/xen-blkfront.c sector_t cylinders = nsect; sector_t 487 drivers/block/xen-blkfront.c if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) sector_t 160 drivers/block/zram/zram_drv.c sector_t start, unsigned int size) sector_t 1625 drivers/block/zram/zram_drv.c static int zram_rw_page(struct block_device *bdev, sector_t sector, sector_t 46 drivers/dax/super.c int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, sector_t 70 drivers/dax/super.c struct block_device *bdev, int blocksize, sector_t start, sector_t 71 drivers/dax/super.c sector_t sectors) sector_t 78 drivers/dax/super.c sector_t last_page; sector_t 319 drivers/dax/super.c int blocksize, sector_t start, sector_t len) sector_t 876 drivers/ide/ide-cd.c sector_t block) sector_t 1422 drivers/ide/ide-cd.c static sector_t ide_cdrom_capacity(ide_drive_t *drive) sector_t 83 drivers/ide/ide-disk.c sector_t block) sector_t 183 drivers/ide/ide-disk.c sector_t block) sector_t 230 drivers/ide/ide-floppy.c struct request *rq, sector_t block) sector_t 60 drivers/ide/ide-gd.c sector_t ide_gd_capacity(ide_drive_t *drive) sector_t 161 drivers/ide/ide-gd.c struct request *rq, sector_t sector) sector_t 41 drivers/ide/ide-gd.h sector_t ide_gd_capacity(ide_drive_t *); sector_t 567 drivers/ide/ide-tape.c struct request *rq, sector_t block) sector_t 407 drivers/infiniband/ulp/iser/iscsi_iser.c iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) sector_t 637 drivers/infiniband/ulp/iser/iscsi_iser.h enum iser_data_dir cmd_dir, sector_t *sector); sector_t 1071 drivers/infiniband/ulp/iser/iser_verbs.c enum iser_data_dir cmd_dir, sector_t *sector) sector_t 1091 drivers/infiniband/ulp/iser/iser_verbs.c sector_t sector_off = mr_status.sig_err.sig_err_offset; sector_t 1002 drivers/lightnvm/core.c static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba, sector_t 1064 drivers/lightnvm/core.c return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta); sector_t 1066 drivers/lightnvm/core.c return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta); sector_t 26 drivers/lightnvm/pblk-cache.c sector_t lba = pblk_get_lba(bio); sector_t 221 drivers/lightnvm/pblk-core.c static void pblk_invalidate_range(struct pblk *pblk, sector_t slba, sector_t 224 drivers/lightnvm/pblk-core.c sector_t lba; sector_t 467 drivers/lightnvm/pblk-core.c sector_t slba = pblk_get_lba(bio); sector_t 468 drivers/lightnvm/pblk-core.c sector_t nr_secs = pblk_get_secs(bio); sector_t 1947 drivers/lightnvm/pblk-core.c void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) sector_t 1967 drivers/lightnvm/pblk-core.c void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) sector_t 1979 drivers/lightnvm/pblk-core.c int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new, sector_t 2017 drivers/lightnvm/pblk-core.c void pblk_update_map_dev(struct pblk *pblk, sector_t lba, sector_t 2065 drivers/lightnvm/pblk-core.c sector_t blba, int nr_secs, bool *from_cache) sector_t 142 drivers/lightnvm/pblk-init.c sector_t i; sector_t 654 drivers/lightnvm/pblk-init.c sector_t provisioned; sector_t 1137 drivers/lightnvm/pblk-init.c static sector_t pblk_capacity(void *private) sector_t 644 drivers/lightnvm/pblk-rb.c int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, sector_t 29 drivers/lightnvm/pblk-read.c sector_t lba, struct ppa_addr ppa) sector_t 41 drivers/lightnvm/pblk-read.c struct bio *bio, sector_t blba, sector_t 56 drivers/lightnvm/pblk-read.c sector_t lba = blba + i; sector_t 110 drivers/lightnvm/pblk-read.c sector_t blba) sector_t 226 drivers/lightnvm/pblk-read.c sector_t lba, bool *from_cache) sector_t 268 drivers/lightnvm/pblk-read.c sector_t blba = pblk_get_lba(bio); sector_t 384 drivers/lightnvm/pblk-read.c struct pblk_line *line, sector_t lba, sector_t 617 drivers/lightnvm/pblk.h sector_t capacity; /* Device capacity when bad blocks are subtracted */ sector_t 741 drivers/lightnvm/pblk.h int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, sector_t 829 drivers/lightnvm/pblk.h void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa); sector_t 830 drivers/lightnvm/pblk.h void pblk_update_map_cache(struct pblk *pblk, sector_t lba, sector_t 832 drivers/lightnvm/pblk.h void pblk_update_map_dev(struct pblk *pblk, sector_t lba, sector_t 834 drivers/lightnvm/pblk.h int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa, sector_t 839 drivers/lightnvm/pblk.h sector_t blba, int nr_secs, bool *from_cache); sector_t 1081 drivers/lightnvm/pblk.h sector_t lba) sector_t 1098 drivers/lightnvm/pblk.h static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba, sector_t 1313 drivers/lightnvm/pblk.h static inline sector_t pblk_get_lba(struct bio *bio) sector_t 289 drivers/md/bcache/bcache.h sector_t last; sector_t 767 drivers/md/bcache/bcache.h static inline size_t sector_to_bucket(struct cache_set *c, sector_t s) sector_t 772 drivers/md/bcache/bcache.h static inline sector_t bucket_to_sector(struct cache_set *c, size_t b) sector_t 774 drivers/md/bcache/bcache.h return ((sector_t) b) << c->bucket_bits; sector_t 777 drivers/md/bcache/bcache.h static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) sector_t 46 drivers/md/bcache/journal.c sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); sector_t 907 drivers/md/bcache/request.c reada = min_t(sector_t, dc->readahead >> 9, sector_t 811 drivers/md/bcache/super.c sector_t sectors) sector_t 589 drivers/md/bcache/util.h static inline sector_t bdev_sectors(struct block_device *bdev) sector_t 106 drivers/md/dm-bufio.c sector_t start; sector_t 137 drivers/md/dm-bufio.c sector_t block; sector_t 248 drivers/md/dm-bufio.c static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block) sector_t 477 drivers/md/dm-bufio.c static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty) sector_t 554 drivers/md/dm-bufio.c static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, sector_t 592 drivers/md/dm-bufio.c static void use_bio(struct dm_buffer *b, int rw, sector_t sector, sector_t 637 drivers/md/dm-bufio.c sector_t sector; sector_t 950 drivers/md/dm-bufio.c static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block, sector_t 1039 drivers/md/dm-bufio.c static void *new_read(struct dm_bufio_client *c, sector_t block, sector_t 1078 drivers/md/dm-bufio.c void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, sector_t 1085 drivers/md/dm-bufio.c void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, sector_t 1094 drivers/md/dm-bufio.c void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, sector_t 1104 drivers/md/dm-bufio.c sector_t block, unsigned n_blocks) sector_t 1340 drivers/md/dm-bufio.c void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block) sector_t 1379 drivers/md/dm-bufio.c sector_t old_block; sector_t 1410 drivers/md/dm-bufio.c void dm_bufio_forget(struct dm_bufio_client *c, sector_t block) sector_t 1438 drivers/md/dm-bufio.c sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) sector_t 1440 drivers/md/dm-bufio.c sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT; sector_t 1449 drivers/md/dm-bufio.c sector_t dm_bufio_get_block_number(struct dm_buffer *b) sector_t 1769 drivers/md/dm-bufio.c void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) sector_t 123 drivers/md/dm-cache-metadata.c sector_t discard_block_size; sector_t 126 drivers/md/dm-cache-metadata.c sector_t data_block_size; sector_t 337 drivers/md/dm-cache-metadata.c sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; sector_t 743 drivers/md/dm-cache-metadata.c sector_t data_block_size, sector_t 804 drivers/md/dm-cache-metadata.c sector_t data_block_size, sector_t 836 drivers/md/dm-cache-metadata.c static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) sector_t 849 drivers/md/dm-cache-metadata.c sector_t data_block_size, sector_t 1090 drivers/md/dm-cache-metadata.c sector_t discard_block_size, sector_t 60 drivers/md/dm-cache-metadata.h sector_t data_block_size, sector_t 76 drivers/md/dm-cache-metadata.h sector_t discard_block_size, sector_t 79 drivers/md/dm-cache-metadata.h typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, sector_t 143 drivers/md/dm-cache-policy-internal.h sector_t origin_size, sector_t block_size); sector_t 800 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size; sector_t 802 drivers/md/dm-cache-policy-smq.c sector_t hotspot_block_size; sector_t 1294 drivers/md/dm-cache-policy-smq.c sector_t r = from_oblock(b); sector_t 1698 drivers/md/dm-cache-policy-smq.c static bool too_many_hotspot_blocks(sector_t origin_size, sector_t 1699 drivers/md/dm-cache-policy-smq.c sector_t hotspot_block_size, sector_t 1705 drivers/md/dm-cache-policy-smq.c static void calc_hotspot_params(sector_t origin_size, sector_t 1706 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size, sector_t 1708 drivers/md/dm-cache-policy-smq.c sector_t *hotspot_block_size, sector_t 1720 drivers/md/dm-cache-policy-smq.c sector_t origin_size, sector_t 1721 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size, sector_t 1830 drivers/md/dm-cache-policy-smq.c sector_t origin_size, sector_t 1831 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size) sector_t 1837 drivers/md/dm-cache-policy-smq.c sector_t origin_size, sector_t 1838 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size) sector_t 1844 drivers/md/dm-cache-policy-smq.c sector_t origin_size, sector_t 1845 drivers/md/dm-cache-policy-smq.c sector_t cache_block_size) sector_t 113 drivers/md/dm-cache-policy.c sector_t origin_size, sector_t 114 drivers/md/dm-cache-policy.c sector_t cache_block_size) sector_t 177 drivers/md/dm-cache-policy.h sector_t origin_size, sector_t 178 drivers/md/dm-cache-policy.h sector_t block_size); sector_t 48 drivers/md/dm-cache-target.c sector_t in_flight; sector_t 86 drivers/md/dm-cache-target.c static void iot_io_begin(struct io_tracker *iot, sector_t len) sector_t 95 drivers/md/dm-cache-target.c static void __iot_io_end(struct io_tracker *iot, sector_t len) sector_t 105 drivers/md/dm-cache-target.c static void iot_io_end(struct io_tracker *iot, sector_t len) sector_t 381 drivers/md/dm-cache-target.c sector_t sectors_per_block; sector_t 404 drivers/md/dm-cache-target.c sector_t origin_sectors; sector_t 417 drivers/md/dm-cache-target.c sector_t migration_threshold; sector_t 494 drivers/md/dm-cache-target.c sector_t len; sector_t 814 drivers/md/dm-cache-target.c sector_t bi_sector = bio->bi_iter.bi_sector; sector_t 815 drivers/md/dm-cache-target.c sector_t block = from_cblock(cblock); sector_t 873 drivers/md/dm-cache-target.c sector_t block_nr = bio->bi_iter.bi_sector; sector_t 1104 drivers/md/dm-cache-target.c sector_t sb = bio->bi_iter.bi_sector; sector_t 1105 drivers/md/dm-cache-target.c sector_t se = bio_end_sector(bio); sector_t 1672 drivers/md/dm-cache-target.c sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * sector_t 2038 drivers/md/dm-cache-target.c static sector_t get_dev_size(struct dm_dev *dev) sector_t 2080 drivers/md/dm-cache-target.c sector_t cache_sectors; sector_t 2083 drivers/md/dm-cache-target.c sector_t origin_sectors; sector_t 2122 drivers/md/dm-cache-target.c sector_t metadata_dev_size; sector_t 2414 drivers/md/dm-cache-target.c static bool too_many_discard_blocks(sector_t discard_block_size, sector_t 2415 drivers/md/dm-cache-target.c sector_t origin_size) sector_t 2422 drivers/md/dm-cache-target.c static sector_t calculate_discard_block_size(sector_t cache_block_size, sector_t 2423 drivers/md/dm-cache-target.c sector_t origin_size) sector_t 2425 drivers/md/dm-cache-target.c sector_t discard_block_size = cache_block_size; sector_t 2929 drivers/md/dm-cache-target.c sector_t b, e; sector_t 2957 drivers/md/dm-cache-target.c static int load_discard(void *context, sector_t discard_block_size, sector_t 2989 drivers/md/dm-cache-target.c sector_t size = get_dev_size(cache->cache_dev); sector_t 3476 drivers/md/dm-cache-target.c limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, sector_t 120 drivers/md/dm-clone-metadata.c sector_t target_size; sector_t 121 drivers/md/dm-clone-metadata.c sector_t region_size; sector_t 564 drivers/md/dm-clone-metadata.c sector_t target_size, sector_t 565 drivers/md/dm-clone-metadata.c sector_t region_size) sector_t 68 drivers/md/dm-clone-metadata.h sector_t target_size, sector_t 69 drivers/md/dm-clone-metadata.h sector_t region_size); sector_t 78 drivers/md/dm-clone-target.c sector_t region_size; sector_t 283 drivers/md/dm-clone-target.c static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr) sector_t 285 drivers/md/dm-clone-target.c return ((sector_t)region_nr << clone->region_shift); sector_t 456 drivers/md/dm-clone-target.c static void trim_bio(struct bio *bio, sector_t sector, unsigned int len) sector_t 797 drivers/md/dm-clone-target.c sector_t tail_size, region_size, total_size; sector_t 1529 drivers/md/dm-clone-target.c static sector_t get_dev_size(struct dm_dev *dev) sector_t 1698 drivers/md/dm-clone-target.c sector_t metadata_dev_size; sector_t 1719 drivers/md/dm-clone-target.c sector_t dest_dev_size; sector_t 1741 drivers/md/dm-clone-target.c sector_t source_dev_size; sector_t 1792 drivers/md/dm-clone-target.c sector_t nr_regions; sector_t 75 drivers/md/dm-crypt.c sector_t sector; sector_t 135 drivers/md/dm-crypt.c sector_t start; sector_t 895 drivers/md/dm-crypt.c sector_t sector) sector_t 1358 drivers/md/dm-crypt.c struct bio *bio, sector_t sector) sector_t 1568 drivers/md/dm-crypt.c sector_t sector; sector_t 1611 drivers/md/dm-crypt.c sector_t sector = io->sector; sector_t 2683 drivers/md/dm-crypt.c if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { sector_t 22 drivers/md/dm-delay.c sector_t start; sector_t 145 drivers/md/dm-delay.c if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { sector_t 19 drivers/md/dm-dust.c sector_t bb; sector_t 30 drivers/md/dm-dust.c sector_t start; sector_t 35 drivers/md/dm-dust.c static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk) sector_t 57 drivers/md/dm-dust.c sector_t value = new->bb; sector_t 152 drivers/md/dm-dust.c static int __dust_map_read(struct dust_device *dd, sector_t thisblock) sector_t 162 drivers/md/dm-dust.c static int dust_map_read(struct dust_device *dd, sector_t thisblock, sector_t 178 drivers/md/dm-dust.c static void __dust_map_write(struct dust_device *dd, sector_t thisblock) sector_t 194 drivers/md/dm-dust.c static int dust_map_write(struct dust_device *dd, sector_t thisblock, sector_t 286 drivers/md/dm-dust.c sector_t DUST_MAX_BLKSZ_SECTORS = 2097152; sector_t 287 drivers/md/dm-dust.c sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS); sector_t 316 drivers/md/dm-dust.c if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { sector_t 376 drivers/md/dm-dust.c sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT; sector_t 778 drivers/md/dm-era-target.c sector_t block_size, sector_t 1184 drivers/md/dm-era-target.c sector_t block_nr = bio->bi_iter.bi_sector; sector_t 1662 drivers/md/dm-era-target.c static sector_t get_dev_size(struct dm_dev *dev) sector_t 21 drivers/md/dm-exception-store.h typedef sector_t chunk_t; sector_t 107 drivers/md/dm-exception-store.h sector_t *total_sectors, sector_t *sectors_allocated, sector_t 108 drivers/md/dm-exception-store.h sector_t *metadata_sectors); sector_t 169 drivers/md/dm-exception-store.h static inline sector_t get_dev_size(struct block_device *bdev) sector_t 175 drivers/md/dm-exception-store.h sector_t sector) sector_t 28 drivers/md/dm-flakey.c sector_t start; sector_t 216 drivers/md/dm-flakey.c if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) { sector_t 271 drivers/md/dm-flakey.c static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) sector_t 463 drivers/md/dm-flakey.c static int flakey_report_zones(struct dm_target *ti, sector_t sector, sector_t 141 drivers/md/dm-integrity.c sector_t sector; sector_t 156 drivers/md/dm-integrity.c sector_t start; sector_t 186 drivers/md/dm-integrity.c sector_t provided_data_sectors; sector_t 194 drivers/md/dm-integrity.c sector_t data_device_sectors; sector_t 195 drivers/md/dm-integrity.c sector_t meta_device_sectors; sector_t 268 drivers/md/dm-integrity.c sector_t logical_sector; sector_t 269 drivers/md/dm-integrity.c sector_t n_sectors; sector_t 289 drivers/md/dm-integrity.c sector_t metadata_block; sector_t 394 drivers/md/dm-integrity.c static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, sector_t 395 drivers/md/dm-integrity.c sector_t *area, sector_t *offset) sector_t 413 drivers/md/dm-integrity.c static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, sector_t 414 drivers/md/dm-integrity.c sector_t offset, unsigned *metadata_offset) sector_t 439 drivers/md/dm-integrity.c static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) sector_t 441 drivers/md/dm-integrity.c sector_t result; sector_t 452 drivers/md/dm-integrity.c result += (sector_t)ic->initial_sectors + offset; sector_t 501 drivers/md/dm-integrity.c sector_t sector, sector_t n_sectors, int mode) sector_t 617 drivers/md/dm-integrity.c static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) sector_t 1054 drivers/md/dm-integrity.c unsigned n_sectors, sector_t target, io_notify_fn fn, void *data) sector_t 1184 drivers/md/dm-integrity.c node->sector = (sector_t)-1; sector_t 1187 drivers/md/dm-integrity.c static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) sector_t 1221 drivers/md/dm-integrity.c static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) sector_t 1225 drivers/md/dm-integrity.c *next_sector = (sector_t)-1; sector_t 1242 drivers/md/dm-integrity.c static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector) sector_t 1294 drivers/md/dm-integrity.c static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, sector_t 1458 drivers/md/dm-integrity.c static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, sector_t 1519 drivers/md/dm-integrity.c sector_t sector = dio->range.logical_sector; sector_t 1619 drivers/md/dm-integrity.c sector_t area, offset; sector_t 1700 drivers/md/dm-integrity.c sector_t logical_sector; sector_t 1836 drivers/md/dm-integrity.c sector_t area, offset; sector_t 1878 drivers/md/dm-integrity.c (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block); sector_t 1922 drivers/md/dm-integrity.c sector_t next_sector; sector_t 1960 drivers/md/dm-integrity.c sector_t next_sector; sector_t 2175 drivers/md/dm-integrity.c sector_t sec, area, offset; sector_t 2177 drivers/md/dm-integrity.c sector_t metadata_block; sector_t 2188 drivers/md/dm-integrity.c sec &= ~(sector_t)(ic->sectors_per_block - 1); sector_t 2195 drivers/md/dm-integrity.c sector_t sec2, area2, offset2; sector_t 2347 drivers/md/dm-integrity.c sector_t area, offset; sector_t 2348 drivers/md/dm-integrity.c sector_t metadata_block; sector_t 2350 drivers/md/dm-integrity.c sector_t logical_sector, n_sectors; sector_t 2375 drivers/md/dm-integrity.c range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); sector_t 2377 drivers/md/dm-integrity.c range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset); sector_t 3034 drivers/md/dm-integrity.c sector_t last_sector, last_area, last_offset; sector_t 3091 drivers/md/dm-integrity.c ic->provided_data_sectors |= (sector_t)1 << test_bit; sector_t 3100 drivers/md/dm-integrity.c ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); sector_t 3618 drivers/md/dm-integrity.c if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { sector_t 3734 drivers/md/dm-integrity.c journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS, sector_t 3939 drivers/md/dm-integrity.c while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) sector_t 3948 drivers/md/dm-integrity.c + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit; sector_t 4090 drivers/md/dm-integrity.c unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8); sector_t 219 drivers/md/dm-io.c dp->context_bi.bi_sector = (sector_t)bvec.bv_len; sector_t 305 drivers/md/dm-io.c sector_t remaining = where->count; sector_t 308 drivers/md/dm-io.c sector_t num_sectors; sector_t 356 drivers/md/dm-io.c num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); sector_t 365 drivers/md/dm-io.c num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); sector_t 1306 drivers/md/dm-ioctl.c (sector_t) spec->sector_start, sector_t 1307 drivers/md/dm-ioctl.c (sector_t) spec->length, sector_t 2103 drivers/md/dm-ioctl.c (sector_t) spec_array[i]->sector_start, sector_t 2104 drivers/md/dm-ioctl.c (sector_t) spec_array[i]->length, sector_t 379 drivers/md/dm-kcopyd.c sector_t progress; sector_t 380 drivers/md/dm-kcopyd.c sector_t write_offset; sector_t 693 drivers/md/dm-kcopyd.c sector_t progress = 0; sector_t 694 drivers/md/dm-kcopyd.c sector_t count = 0; sector_t 23 drivers/md/dm-linear.c sector_t start; sector_t 48 drivers/md/dm-linear.c if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) { sector_t 81 drivers/md/dm-linear.c static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) sector_t 139 drivers/md/dm-linear.c static int linear_report_zones(struct dm_target *ti, sector_t sector, sector_t 173 drivers/md/dm-linear.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 188 drivers/md/dm-linear.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 202 drivers/md/dm-linear.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 110 drivers/md/dm-log-writes.c sector_t next_sector; sector_t 111 drivers/md/dm-log-writes.c sector_t end_sector; sector_t 125 drivers/md/dm-log-writes.c sector_t sector; sector_t 126 drivers/md/dm-log-writes.c sector_t nr_sectors; sector_t 137 drivers/md/dm-log-writes.c static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc, sector_t 138 drivers/md/dm-log-writes.c sector_t sectors) sector_t 143 drivers/md/dm-log-writes.c static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc, sector_t 144 drivers/md/dm-log-writes.c sector_t sectors) sector_t 213 drivers/md/dm-log-writes.c sector_t sector) sector_t 265 drivers/md/dm-log-writes.c sector_t sector) sector_t 332 drivers/md/dm-log-writes.c struct pending_block *block, sector_t sector) sector_t 447 drivers/md/dm-log-writes.c static inline sector_t logdev_last_sector(struct log_writes_c *lc) sector_t 455 drivers/md/dm-log-writes.c sector_t sector = 0; sector_t 903 drivers/md/dm-log-writes.c static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes, sector_t 952 drivers/md/dm-log-writes.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 966 drivers/md/dm-log-writes.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 990 drivers/md/dm-log-writes.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 1125 drivers/md/dm-raid.c sector_t max_io_len; sector_t 1584 drivers/md/dm-raid.c static sector_t __rdev_sectors(struct raid_set *rs) sector_t 1602 drivers/md/dm-raid.c sector_t ds = ~0; sector_t 1624 drivers/md/dm-raid.c sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len; sector_t 1673 drivers/md/dm-raid.c static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) sector_t 1695 drivers/md/dm-raid.c static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) sector_t 2618 drivers/md/dm-raid.c sector_t data_offset = 0, new_data_offset = 0; sector_t 2730 drivers/md/dm-raid.c sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; sector_t 2829 drivers/md/dm-raid.c static sector_t _get_reshape_sectors(struct raid_set *rs) sector_t 2832 drivers/md/dm-raid.c sector_t reshape_sectors = 0; sector_t 2842 drivers/md/dm-raid.c return max(reshape_sectors, (sector_t) rs->data_offset); sector_t 2856 drivers/md/dm-raid.c sector_t reshape_sectors = _get_reshape_sectors(rs); sector_t 3011 drivers/md/dm-raid.c sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors; sector_t 3408 drivers/md/dm-raid.c static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, sector_t 3409 drivers/md/dm-raid.c sector_t resync_max_sectors) sector_t 3411 drivers/md/dm-raid.c sector_t r; sector_t 3514 drivers/md/dm-raid.c sector_t progress, resync_max_sectors, resync_mismatches; sector_t 50 drivers/md/dm-raid1.c sector_t offset; sector_t 336 drivers/md/dm-raid1.c sector_t region_size = dm_rh_get_region_size(ms->rh); sector_t 415 drivers/md/dm-raid1.c static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) sector_t 451 drivers/md/dm-raid1.c static sector_t map_sector(struct mirror *m, struct bio *bio) sector_t 944 drivers/md/dm-raid1.c offset != (sector_t)offset) { sector_t 90 drivers/md/dm-region-hash.c sector_t target_begin; sector_t 117 drivers/md/dm-region-hash.c static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) sector_t 122 drivers/md/dm-region-hash.c sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) sector_t 147 drivers/md/dm-region-hash.c sector_t dm_rh_get_region_size(struct dm_region_hash *rh) sector_t 166 drivers/md/dm-region-hash.c sector_t target_begin, unsigned max_recovery, sector_t 576 drivers/md/dm-snap-persistent.c sector_t *total_sectors, sector_t 577 drivers/md/dm-snap-persistent.c sector_t *sectors_allocated, sector_t 578 drivers/md/dm-snap-persistent.c sector_t *metadata_sectors) sector_t 680 drivers/md/dm-snap-persistent.c sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); sector_t 23 drivers/md/dm-snap-transient.c sector_t next_free; sector_t 43 drivers/md/dm-snap-transient.c sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); sector_t 64 drivers/md/dm-snap-transient.c sector_t *total_sectors, sector_t 65 drivers/md/dm-snap-transient.c sector_t *sectors_allocated, sector_t 66 drivers/md/dm-snap-transient.c sector_t *metadata_sectors) sector_t 80 drivers/md/dm-snap.c sector_t exception_start_sequence; sector_t 83 drivers/md/dm-snap.c sector_t exception_complete_sequence; sector_t 183 drivers/md/dm-snap.c static sector_t chunk_to_sector(struct dm_exception_store *store, sector_t 221 drivers/md/dm-snap.c sector_t exception_sequence; sector_t 879 drivers/md/dm-snap.c sector_t hash_size, cow_dev_size, max_buckets; sector_t 1012 drivers/md/dm-snap.c sector_t sector, unsigned chunk_size); sector_t 1042 drivers/md/dm-snap.c sector_t io_size; sector_t 1792 drivers/md/dm-snap.c sector_t dev_size; sector_t 1798 drivers/md/dm-snap.c src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); sector_t 2330 drivers/md/dm-snap.c sector_t total_sectors, sectors_allocated, sector_t 2419 drivers/md/dm-snap.c static int __origin_write(struct list_head *snapshots, sector_t sector, sector_t 2578 drivers/md/dm-snap.c sector_t sector, unsigned size) sector_t 2581 drivers/md/dm-snap.c sector_t n; sector_t 47 drivers/md/dm-stats.c sector_t start; sector_t 48 drivers/md/dm-stats.c sector_t end; sector_t 49 drivers/md/dm-stats.c sector_t step; sector_t 65 drivers/md/dm-stats.c sector_t last_sector; sector_t 201 drivers/md/dm-stats.c last->last_sector = (sector_t)ULLONG_MAX; sector_t 234 drivers/md/dm-stats.c static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, sector_t 235 drivers/md/dm-stats.c sector_t step, unsigned stat_flags, sector_t 245 drivers/md/dm-stats.c sector_t n_entries; sector_t 446 drivers/md/dm-stats.c sector_t len; sector_t 515 drivers/md/dm-stats.c int idx, sector_t len, sector_t 585 drivers/md/dm-stats.c sector_t bi_sector, sector_t end_sector, sector_t 589 drivers/md/dm-stats.c sector_t rel_sector, offset, todo, fragment_len; sector_t 623 drivers/md/dm-stats.c sector_t bi_sector, unsigned bi_sectors, bool end, sector_t 628 drivers/md/dm-stats.c sector_t end_sector; sector_t 806 drivers/md/dm-stats.c sector_t start, end, step; sector_t 982 drivers/md/dm-stats.c start != (sector_t)start || len != (sector_t)len) sector_t 999 drivers/md/dm-stats.c step != (sector_t)step || !step) sector_t 16 drivers/md/dm-stats.h sector_t last_sector; sector_t 34 drivers/md/dm-stats.h sector_t bi_sector, unsigned bi_sectors, bool end, sector_t 23 drivers/md/dm-stripe.c sector_t physical_start; sector_t 33 drivers/md/dm-stripe.c sector_t stripe_width; sector_t 101 drivers/md/dm-stripe.c sector_t width, tmp_len; sector_t 216 drivers/md/dm-stripe.c static void stripe_map_sector(struct stripe_c *sc, sector_t sector, sector_t 217 drivers/md/dm-stripe.c uint32_t *stripe, sector_t *result) sector_t 219 drivers/md/dm-stripe.c sector_t chunk = dm_target_offset(sc->ti, sector); sector_t 220 drivers/md/dm-stripe.c sector_t chunk_offset; sector_t 244 drivers/md/dm-stripe.c static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, sector_t 245 drivers/md/dm-stripe.c uint32_t target_stripe, sector_t *result) sector_t 258 drivers/md/dm-stripe.c *result = sector & ~(sector_t)(sc->chunk_size - 1); sector_t 267 drivers/md/dm-stripe.c sector_t begin, end; sector_t 320 drivers/md/dm-stripe.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 341 drivers/md/dm-stripe.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 360 drivers/md/dm-stripe.c sector_t dev_sector, sector = pgoff * PAGE_SECTORS; sector_t 32 drivers/md/dm-switch.c sector_t start; sector_t 79 drivers/md/dm-switch.c sector_t nr_regions = ti->len; sector_t 80 drivers/md/dm-switch.c sector_t nr_slots; sector_t 154 drivers/md/dm-switch.c static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset) sector_t 157 drivers/md/dm-switch.c sector_t p; sector_t 217 drivers/md/dm-switch.c if (kstrtoull(dm_shift_arg(as), 10, &start) || start != (sector_t)start) { sector_t 322 drivers/md/dm-switch.c sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); sector_t 29 drivers/md/dm-table.c #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) sector_t 39 drivers/md/dm-table.c sector_t *index[MAX_DEPTH]; sector_t 43 drivers/md/dm-table.c sector_t *highs; sector_t 97 drivers/md/dm-table.c static inline sector_t *get_node(struct dm_table *t, sector_t 107 drivers/md/dm-table.c static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) sector_t 113 drivers/md/dm-table.c return (sector_t) - 1; sector_t 125 drivers/md/dm-table.c sector_t *node; sector_t 161 drivers/md/dm-table.c sector_t *n_highs; sector_t 167 drivers/md/dm-table.c n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) + sector_t 168 drivers/md/dm-table.c sizeof(sector_t)); sector_t 280 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 285 drivers/md/dm-table.c sector_t dev_size = sector_t 468 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 709 drivers/md/dm-table.c sector_t start, sector_t len, char *params) sector_t 883 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 893 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 927 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1147 drivers/md/dm-table.c sector_t *indexes; sector_t 1155 drivers/md/dm-table.c indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); sector_t 1343 drivers/md/dm-table.c inline sector_t dm_table_get_size(struct dm_table *t) sector_t 1363 drivers/md/dm-table.c struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) sector_t 1366 drivers/md/dm-table.c sector_t *node; sector_t 1384 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1420 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1450 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1632 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1669 drivers/md/dm-table.c struct dm_dev *dev, sector_t start, sector_t 1670 drivers/md/dm-table.c sector_t len, void *data) sector_t 1700 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1708 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1733 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1747 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1774 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1801 drivers/md/dm-table.c sector_t start, sector_t len, void *data) sector_t 1834 drivers/md/dm-table.c struct dm_dev *dev, sector_t start, sector_t 1835 drivers/md/dm-table.c sector_t len, void *data) sector_t 1862 drivers/md/dm-table.c struct dm_dev *dev, sector_t start, sector_t 1863 drivers/md/dm-table.c sector_t len, void *data) sector_t 189 drivers/md/dm-thin-metadata.c sector_t data_block_size; sector_t 529 drivers/md/dm-thin-metadata.c sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; sector_t 893 drivers/md/dm-thin-metadata.c sector_t data_block_size, sector_t 45 drivers/md/dm-thin-metadata.h sector_t data_block_size, sector_t 342 drivers/md/dm-thin.c sector_t origin_size; sector_t 371 drivers/md/dm-thin.c static sector_t block_to_sectors(struct pool *pool, dm_block_t b) sector_t 400 drivers/md/dm-thin.c sector_t s = block_to_sectors(tc->pool, data_b); sector_t 401 drivers/md/dm-thin.c sector_t len = block_to_sectors(tc->pool, data_e - data_b); sector_t 683 drivers/md/dm-thin.c sector_t block_nr = bio->bi_iter.bi_sector; sector_t 700 drivers/md/dm-thin.c sector_t b = bio->bi_iter.bi_sector; sector_t 701 drivers/md/dm-thin.c sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT); sector_t 724 drivers/md/dm-thin.c sector_t bi_sector = bio->bi_iter.bi_sector; sector_t 1292 drivers/md/dm-thin.c sector_t begin, sector_t end) sector_t 1324 drivers/md/dm-thin.c sector_t len) sector_t 1424 drivers/md/dm-thin.c sector_t virt_block_begin = virt_block * pool->sectors_per_block; sector_t 1425 drivers/md/dm-thin.c sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; sector_t 2115 drivers/md/dm-thin.c sector_t bi_sector = bio->bi_iter.bi_sector; sector_t 2846 drivers/md/dm-thin.c static bool is_factor(sector_t block_size, uint32_t n) sector_t 3236 drivers/md/dm-thin.c static sector_t get_dev_size(struct block_device *bdev) sector_t 3243 drivers/md/dm-thin.c sector_t metadata_dev_size = get_dev_size(bdev); sector_t 3251 drivers/md/dm-thin.c static sector_t get_metadata_dev_size(struct block_device *bdev) sector_t 3253 drivers/md/dm-thin.c sector_t metadata_dev_size = get_dev_size(bdev); sector_t 3263 drivers/md/dm-thin.c sector_t metadata_dev_size = get_metadata_dev_size(bdev); sector_t 3475 drivers/md/dm-thin.c sector_t data_size = ti->len; sector_t 4065 drivers/md/dm-thin.c sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; sector_t 4471 drivers/md/dm-thin.c sector_t blocks; sector_t 13 drivers/md/dm-unstripe.c sector_t physical_start; sector_t 18 drivers/md/dm-unstripe.c sector_t unstripe_width; sector_t 19 drivers/md/dm-unstripe.c sector_t unstripe_offset; sector_t 41 drivers/md/dm-unstripe.c sector_t tmp_len; sector_t 81 drivers/md/dm-unstripe.c if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) { sector_t 116 drivers/md/dm-unstripe.c static sector_t map_to_core(struct dm_target *ti, struct bio *bio) sector_t 119 drivers/md/dm-unstripe.c sector_t sector = bio->bi_iter.bi_sector; sector_t 120 drivers/md/dm-unstripe.c sector_t tmp_sector = sector; sector_t 420 drivers/md/dm-verity-fec.c enum verity_block_type type, sector_t block, u8 *dest, sector_t 619 drivers/md/dm-verity-fec.c ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) sector_t 628 drivers/md/dm-verity-fec.c ((sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) >> sector_t 39 drivers/md/dm-verity-fec.h sector_t start; /* parity data start in blocks */ sector_t 40 drivers/md/dm-verity-fec.h sector_t blocks; /* number of blocks covered */ sector_t 41 drivers/md/dm-verity-fec.h sector_t rounds; /* number of interleaving rounds */ sector_t 42 drivers/md/dm-verity-fec.h sector_t hash_blocks; /* blocks covered after v->hash_start */ sector_t 71 drivers/md/dm-verity-fec.h enum verity_block_type type, sector_t block, sector_t 102 drivers/md/dm-verity-fec.h sector_t block, u8 *dest, sector_t 46 drivers/md/dm-verity-target.c sector_t block; sector_t 79 drivers/md/dm-verity-target.c static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) sector_t 90 drivers/md/dm-verity-target.c static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, sector_t 191 drivers/md/dm-verity-target.c static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level, sector_t 192 drivers/md/dm-verity-target.c sector_t *hash_block, unsigned *offset) sector_t 194 drivers/md/dm-verity-target.c sector_t position = verity_position_at_level(v, block, level); sector_t 272 drivers/md/dm-verity-target.c sector_t block, int level, bool skip_unverified, sector_t 279 drivers/md/dm-verity-target.c sector_t hash_block; sector_t 331 drivers/md/dm-verity-target.c sector_t block, u8 *digest, bool *is_zero) sector_t 477 drivers/md/dm-verity-target.c sector_t cur_block = io->block + b; sector_t 585 drivers/md/dm-verity-target.c sector_t hash_block_start; sector_t 586 drivers/md/dm-verity-target.c sector_t hash_block_end; sector_t 599 drivers/md/dm-verity-target.c hash_block_start &= ~(sector_t)(cluster - 1); sector_t 955 drivers/md/dm-verity-target.c sector_t hash_position; sector_t 1024 drivers/md/dm-verity-target.c (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) sector_t 1039 drivers/md/dm-verity-target.c (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) sector_t 1149 drivers/md/dm-verity-target.c sector_t s; sector_t 1151 drivers/md/dm-verity-target.c s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1) sector_t 44 drivers/md/dm-verity.h sector_t data_start; /* data offset in 512-byte sectors */ sector_t 45 drivers/md/dm-verity.h sector_t hash_start; /* hash start in blocks */ sector_t 46 drivers/md/dm-verity.h sector_t data_blocks; /* the number of data blocks */ sector_t 47 drivers/md/dm-verity.h sector_t hash_blocks; /* the number of hash blocks */ sector_t 62 drivers/md/dm-verity.h sector_t hash_level_block[DM_VERITY_MAX_LEVELS]; sector_t 76 drivers/md/dm-verity.h sector_t block; sector_t 129 drivers/md/dm-verity.h sector_t block, u8 *digest, bool *is_zero); sector_t 139 drivers/md/dm-writecache.c sector_t start_sector; sector_t 359 drivers/md/dm-writecache.c static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) sector_t 362 drivers/md/dm-writecache.c ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); sector_t 471 drivers/md/dm-writecache.c region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT); sector_t 472 drivers/md/dm-writecache.c region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT); sector_t 790 drivers/md/dm-writecache.c static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end) sector_t 881 drivers/md/dm-writecache.c static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) sector_t 1213 drivers/md/dm-writecache.c sector_t next_boundary = sector_t 103 drivers/md/dm-zoned-metadata.c sector_t no; sector_t 124 drivers/md/dm-zoned-metadata.c sector_t block; sector_t 135 drivers/md/dm-zoned-metadata.c sector_t zone_bitmap_size; sector_t 195 drivers/md/dm-zoned-metadata.c sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) sector_t 197 drivers/md/dm-zoned-metadata.c return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift; sector_t 200 drivers/md/dm-zoned-metadata.c sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) sector_t 202 drivers/md/dm-zoned-metadata.c return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift; sector_t 270 drivers/md/dm-zoned-metadata.c sector_t mblk_no) sector_t 349 drivers/md/dm-zoned-metadata.c sector_t mblk_no) sector_t 401 drivers/md/dm-zoned-metadata.c sector_t mblk_no) sector_t 404 drivers/md/dm-zoned-metadata.c sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; sector_t 537 drivers/md/dm-zoned-metadata.c sector_t mblk_no) sector_t 582 drivers/md/dm-zoned-metadata.c sector_t block = zmd->sb[set].block + mblk->no; sector_t 610 drivers/md/dm-zoned-metadata.c static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, sector_t 640 drivers/md/dm-zoned-metadata.c sector_t block = zmd->sb[set].block; sector_t 1165 drivers/md/dm-zoned-metadata.c sector_t sector = 0; sector_t 1171 drivers/md/dm-zoned-metadata.c max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT); sector_t 1172 drivers/md/dm-zoned-metadata.c zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks, sector_t 1962 drivers/md/dm-zoned-metadata.c sector_t chunk_block) sector_t 1964 drivers/md/dm-zoned-metadata.c sector_t bitmap_block = 1 + zmd->nr_map_blocks + sector_t 1965 drivers/md/dm-zoned-metadata.c (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) + sector_t 1978 drivers/md/dm-zoned-metadata.c sector_t chunk_block = 0; sector_t 2010 drivers/md/dm-zoned-metadata.c struct dm_zone *to_zone, sector_t chunk_block) sector_t 2037 drivers/md/dm-zoned-metadata.c sector_t chunk_block, unsigned int nr_blocks) sector_t 2118 drivers/md/dm-zoned-metadata.c sector_t chunk_block, unsigned int nr_blocks) sector_t 2166 drivers/md/dm-zoned-metadata.c sector_t chunk_block) sector_t 2192 drivers/md/dm-zoned-metadata.c sector_t chunk_block, unsigned int nr_blocks, sector_t 2235 drivers/md/dm-zoned-metadata.c sector_t chunk_block) sector_t 2255 drivers/md/dm-zoned-metadata.c sector_t *chunk_block) sector_t 2257 drivers/md/dm-zoned-metadata.c sector_t start_block = *chunk_block; sector_t 2306 drivers/md/dm-zoned-metadata.c sector_t chunk_block = 0; sector_t 2528 drivers/md/dm-zoned-metadata.c sector_t wp_block; sector_t 59 drivers/md/dm-zoned-reclaim.c sector_t block) sector_t 62 drivers/md/dm-zoned-reclaim.c sector_t wp_block = zone->wp_block; sector_t 121 drivers/md/dm-zoned-reclaim.c sector_t block = 0, end_block; sector_t 122 drivers/md/dm-zoned-reclaim.c sector_t nr_blocks; sector_t 123 drivers/md/dm-zoned-reclaim.c sector_t src_zone_block; sector_t 124 drivers/md/dm-zoned-reclaim.c sector_t dst_zone_block; sector_t 193 drivers/md/dm-zoned-reclaim.c sector_t chunk_block = dzone->wp_block; sector_t 373 drivers/md/dm-zoned-reclaim.c sector_t chunk_block = 0; sector_t 118 drivers/md/dm-zoned-target.c struct bio *bio, sector_t chunk_block, sector_t 150 drivers/md/dm-zoned-target.c sector_t chunk_block, unsigned int nr_blocks) sector_t 168 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio)); sector_t 170 drivers/md/dm-zoned-target.c sector_t end_block = chunk_block + nr_blocks; sector_t 241 drivers/md/dm-zoned-target.c sector_t chunk_block, sector_t 274 drivers/md/dm-zoned-target.c sector_t chunk_block, sector_t 311 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio)); sector_t 346 drivers/md/dm-zoned-target.c sector_t block = dmz_bio_block(bio); sector_t 348 drivers/md/dm-zoned-target.c sector_t chunk_block = dmz_chunk_block(dmz->dev, block); sector_t 623 drivers/md/dm-zoned-target.c sector_t sector = bio->bi_iter.bi_sector; sector_t 625 drivers/md/dm-zoned-target.c sector_t chunk_sector; sector_t 687 drivers/md/dm-zoned-target.c sector_t aligned_capacity; sector_t 716 drivers/md/dm-zoned-target.c ~((sector_t)blk_queue_zone_sectors(q) - 1); sector_t 802 drivers/md/dm-zoned-target.c ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift; sector_t 963 drivers/md/dm-zoned-target.c sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1); sector_t 42 drivers/md/dm-zoned.h #define dmz_blk2sect(b) ((sector_t)(b) << DMZ_BLOCK_SECTORS_SHIFT) sector_t 43 drivers/md/dm-zoned.h #define dmz_sect2blk(s) ((sector_t)(s) >> DMZ_BLOCK_SECTORS_SHIFT) sector_t 56 drivers/md/dm-zoned.h sector_t capacity; sector_t 62 drivers/md/dm-zoned.h sector_t zone_nr_sectors; sector_t 65 drivers/md/dm-zoned.h sector_t zone_nr_blocks; sector_t 66 drivers/md/dm-zoned.h sector_t zone_nr_blocks_shift; sector_t 180 drivers/md/dm-zoned.h sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone); sector_t 181 drivers/md/dm-zoned.h sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone); sector_t 232 drivers/md/dm-zoned.h sector_t chunk_block, unsigned int nr_blocks); sector_t 234 drivers/md/dm-zoned.h sector_t chunk_block, unsigned int nr_blocks); sector_t 236 drivers/md/dm-zoned.h sector_t chunk_block); sector_t 238 drivers/md/dm-zoned.h sector_t *chunk_block); sector_t 242 drivers/md/dm-zoned.h struct dm_zone *to_zone, sector_t chunk_block); sector_t 69 drivers/md/dm.c sector_t sector; sector_t 421 drivers/md/dm.c sector_t dm_get_size(struct mapped_device *md) sector_t 443 drivers/md/dm.c static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, sector_t 873 drivers/md/dm.c sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; sector_t 1018 drivers/md/dm.c static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) sector_t 1020 drivers/md/dm.c sector_t target_offset = dm_target_offset(ti, sector); sector_t 1025 drivers/md/dm.c static sector_t max_io_len(sector_t sector, struct dm_target *ti) sector_t 1027 drivers/md/dm.c sector_t len = max_io_len_target_boundary(sector, ti); sector_t 1028 drivers/md/dm.c sector_t offset, max_len; sector_t 1048 drivers/md/dm.c int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) sector_t 1064 drivers/md/dm.c sector_t sector, int *srcu_idx) sector_t 1085 drivers/md/dm.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 1109 drivers/md/dm.c int blocksize, sector_t start, sector_t len) sector_t 1131 drivers/md/dm.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 1155 drivers/md/dm.c sector_t sector = pgoff * PAGE_SECTORS; sector_t 1223 drivers/md/dm.c void dm_remap_zone_report(struct dm_target *ti, sector_t start, sector_t 1266 drivers/md/dm.c sector_t sector; sector_t 1312 drivers/md/dm.c static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) sector_t 1322 drivers/md/dm.c sector_t sector, unsigned len) sector_t 1446 drivers/md/dm.c sector_t sector, unsigned *len) sector_t 1500 drivers/md/dm.c len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); sector_t 1581 drivers/md/dm.c len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); sector_t 1726 drivers/md/dm.c len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); sector_t 2113 drivers/md/dm.c static void __set_size(struct mapped_device *md, sector_t size) sector_t 2131 drivers/md/dm.c sector_t size; sector_t 3085 drivers/md/dm.c sector_t start, sector_t len, void *data) sector_t 55 drivers/md/dm.h struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); sector_t 78 drivers/md/dm.h sector_t start, sector_t len, void *data); sector_t 183 drivers/md/dm.h sector_t dm_get_size(struct mapped_device *md); sector_t 156 drivers/md/md-bitmap.c sector_t target; sector_t 367 drivers/md/md-bitmap.c sector_t block; sector_t 610 drivers/md/md-bitmap.c sector_t bm_blocks = bitmap->mddev->resync_max_sectors; sector_t 927 drivers/md/md-bitmap.c static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) sector_t 956 drivers/md/md-bitmap.c static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) sector_t 984 drivers/md/md-bitmap.c static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) sector_t 1047 drivers/md/md-bitmap.c static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); sector_t 1059 drivers/md/md-bitmap.c static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) sector_t 1080 drivers/md/md-bitmap.c int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) sector_t 1083 drivers/md/md-bitmap.c (sector_t)i << bitmap->counts.chunkshift, sector_t 1161 drivers/md/md-bitmap.c int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift sector_t 1164 drivers/md/md-bitmap.c (sector_t)i << bitmap->counts.chunkshift, sector_t 1203 drivers/md/md-bitmap.c sector_t offset, int inc) sector_t 1205 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift; sector_t 1211 drivers/md/md-bitmap.c static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) sector_t 1213 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift; sector_t 1222 drivers/md/md-bitmap.c sector_t offset, sector_t *blocks, sector_t 1235 drivers/md/md-bitmap.c sector_t blocks; sector_t 1295 drivers/md/md-bitmap.c sector_t block = (sector_t)j << counts->chunkshift; sector_t 1355 drivers/md/md-bitmap.c sector_t offset, sector_t *blocks, sector_t 1364 drivers/md/md-bitmap.c sector_t chunk = offset >> bitmap->chunkshift; sector_t 1367 drivers/md/md-bitmap.c sector_t csize; sector_t 1374 drivers/md/md-bitmap.c csize = ((sector_t)1) << (bitmap->chunkshift + sector_t 1377 drivers/md/md-bitmap.c csize = ((sector_t)1) << bitmap->chunkshift; sector_t 1396 drivers/md/md-bitmap.c int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) sector_t 1413 drivers/md/md-bitmap.c sector_t blocks; sector_t 1460 drivers/md/md-bitmap.c void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, sector_t 1474 drivers/md/md-bitmap.c sector_t blocks; sector_t 1513 drivers/md/md-bitmap.c static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, sector_t 1541 drivers/md/md-bitmap.c int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, sector_t 1552 drivers/md/md-bitmap.c sector_t blocks1; sector_t 1565 drivers/md/md-bitmap.c void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) sector_t 1602 drivers/md/md-bitmap.c sector_t sector = 0; sector_t 1603 drivers/md/md-bitmap.c sector_t blocks; sector_t 1613 drivers/md/md-bitmap.c void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) sector_t 1615 drivers/md/md-bitmap.c sector_t s = 0; sector_t 1616 drivers/md/md-bitmap.c sector_t blocks; sector_t 1644 drivers/md/md-bitmap.c sector_t old_lo, sector_t old_hi, sector_t 1645 drivers/md/md-bitmap.c sector_t new_lo, sector_t new_hi) sector_t 1648 drivers/md/md-bitmap.c sector_t sector, blocks = 0; sector_t 1664 drivers/md/md-bitmap.c static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) sector_t 1671 drivers/md/md-bitmap.c sector_t secs; sector_t 1696 drivers/md/md-bitmap.c sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; sector_t 1815 drivers/md/md-bitmap.c sector_t blocks = mddev->resync_max_sectors; sector_t 1902 drivers/md/md-bitmap.c sector_t start = 0; sector_t 1903 drivers/md/md-bitmap.c sector_t sector = 0; sector_t 1922 drivers/md/md-bitmap.c sector_t blocks; sector_t 1982 drivers/md/md-bitmap.c sector_t *low, sector_t *high, bool clear_bits) sector_t 1985 drivers/md/md-bitmap.c sector_t block, lo = 0, hi = 0; sector_t 1997 drivers/md/md-bitmap.c block = (sector_t)j << counts->chunkshift; sector_t 2053 drivers/md/md-bitmap.c int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, sector_t 2069 drivers/md/md-bitmap.c sector_t block; sector_t 2070 drivers/md/md-bitmap.c sector_t old_blocks, new_blocks; sector_t 2199 drivers/md/md-bitmap.c sector_t end = block + new_blocks; sector_t 2200 drivers/md/md-bitmap.c sector_t start = block >> chunkshift; sector_t 254 drivers/md/md-bitmap.h int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, sector_t 256 drivers/md/md-bitmap.h void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, sector_t 258 drivers/md/md-bitmap.h int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded); sector_t 259 drivers/md/md-bitmap.h void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); sector_t 261 drivers/md/md-bitmap.h void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); sector_t 263 drivers/md/md-bitmap.h sector_t old_lo, sector_t old_hi, sector_t 264 drivers/md/md-bitmap.h sector_t new_lo, sector_t new_hi); sector_t 269 drivers/md/md-bitmap.h int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, sector_t 273 drivers/md/md-bitmap.h sector_t *lo, sector_t *hi, bool clear_bits); sector_t 74 drivers/md/md-cluster.c sector_t suspend_lo; sector_t 75 drivers/md/md-cluster.c sector_t suspend_hi; sector_t 90 drivers/md/md-cluster.c sector_t sync_low; sector_t 91 drivers/md/md-cluster.c sector_t sync_hi; sector_t 259 drivers/md/md-cluster.c sector_t lo, sector_t hi) sector_t 293 drivers/md/md-cluster.c sector_t lo, hi; sector_t 444 drivers/md/md-cluster.c int slot, sector_t lo, sector_t hi) sector_t 799 drivers/md/md-cluster.c sector_t lo, hi; sector_t 1103 drivers/md/md-cluster.c static int update_bitmap_size(struct mddev *mddev, sector_t size) sector_t 1118 drivers/md/md-cluster.c static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize) sector_t 1247 drivers/md/md-cluster.c static void update_size(struct mddev *mddev, sector_t old_dev_sectors) sector_t 1318 drivers/md/md-cluster.c static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi) sector_t 1328 drivers/md/md-cluster.c static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi) sector_t 1376 drivers/md/md-cluster.c sector_t lo, sector_t hi) sector_t 1527 drivers/md/md-cluster.c sector_t lo, hi; sector_t 16 drivers/md/md-cluster.h int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi); sector_t 17 drivers/md/md-cluster.h void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi); sector_t 23 drivers/md/md-cluster.h int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); sector_t 30 drivers/md/md-cluster.h int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize); sector_t 33 drivers/md/md-cluster.h void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors); sector_t 79 drivers/md/md-faulty.c sector_t faults[MaxFault]; sector_t 100 drivers/md/md-faulty.c static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir) sector_t 124 drivers/md/md-faulty.c static void add_sector(struct faulty_conf *conf, sector_t start, int mode) sector_t 283 drivers/md/md-faulty.c static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 24 drivers/md/md-linear.c static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) sector_t 72 drivers/md/md-linear.c static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 75 drivers/md/md-linear.c sector_t array_sectors; sector_t 102 drivers/md/md-linear.c sector_t sectors; sector_t 244 drivers/md/md-linear.c sector_t start_sector, end_sector, data_offset; sector_t 245 drivers/md/md-linear.c sector_t bio_sector = bio->bi_iter.bi_sector; sector_t 7 drivers/md/md-linear.h sector_t end_sector; sector_t 13 drivers/md/md-linear.h sector_t array_sectors; sector_t 357 drivers/md/md-multipath.c static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 816 drivers/md/md.c static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) sector_t 818 drivers/md/md.c sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; sector_t 870 drivers/md/md.c sector_t sector, int size, struct page *page) sector_t 916 drivers/md/md.c int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, sector_t 1077 drivers/md/md.c sector_t num_sectors); sector_t 1199 drivers/md/md.c rdev->sectors = (sector_t)(2ULL << 32) - 2; sector_t 1201 drivers/md/md.c if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) sector_t 1236 drivers/md/md.c mddev->dev_sectors = ((sector_t)sb->size) * 2; sector_t 1486 drivers/md/md.c super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) sector_t 1499 drivers/md/md.c num_sectors = (sector_t)(2ULL << 32) - 2; sector_t 1544 drivers/md/md.c sector_t sb_start; sector_t 1545 drivers/md/md.c sector_t sectors; sector_t 1562 drivers/md/md.c sb_start &= ~(sector_t)(4*2-1); sector_t 1642 drivers/md/md.c sector_t bb_sector; sector_t 2069 drivers/md/md.c super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) sector_t 2072 drivers/md/md.c sector_t max_sectors; sector_t 2088 drivers/md/md.c sector_t sb_start; sector_t 2090 drivers/md/md.c sb_start &= ~(sector_t)(4*2 - 1); sector_t 3218 drivers/md/md.c static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) sector_t 3228 drivers/md/md.c static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) sector_t 3231 drivers/md/md.c sector_t new; sector_t 3251 drivers/md/md.c sector_t oldsectors = rdev->sectors; sector_t 3252 drivers/md/md.c sector_t sectors; sector_t 3409 drivers/md/md.c if (sector != (sector_t)sector) sector_t 3575 drivers/md/md.c sector_t size; sector_t 4159 drivers/md/md.c if (n != (sector_t)n) sector_t 4531 drivers/md/md.c static int update_size(struct mddev *mddev, sector_t num_sectors); sector_t 4540 drivers/md/md.c sector_t sectors; sector_t 4970 drivers/md/md.c sector_t temp = max; sector_t 5003 drivers/md/md.c if (new != (sector_t)new) sector_t 5040 drivers/md/md.c if (new != (sector_t)new) sector_t 5082 drivers/md/md.c if (new != (sector_t)new) sector_t 5162 drivers/md/md.c sector_t sectors; sector_t 6940 drivers/md/md.c mddev->dev_sectors = 2 * (sector_t)info->size; sector_t 6985 drivers/md/md.c void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) sector_t 6996 drivers/md/md.c static int update_size(struct mddev *mddev, sector_t num_sectors) sector_t 7001 drivers/md/md.c sector_t old_dev_sectors = mddev->dev_sectors; sector_t 7021 drivers/md/md.c sector_t avail = rdev->sectors; sector_t 7140 drivers/md/md.c rv = update_size(mddev, (sector_t)info->size * 2); sector_t 7813 drivers/md/md.c sector_t max_sectors, resync, res; sector_t 7815 drivers/md/md.c sector_t rt, curr_mark_cnt, resync_mark_cnt; sector_t 7871 drivers/md/md.c if (sizeof(sector_t) > sizeof(unsigned long)) { sector_t 8004 drivers/md/md.c sector_t sectors; sector_t 8406 drivers/md/md.c sector_t max_sectors,j, io_sectors, recovery_done; sector_t 8409 drivers/md/md.c sector_t mark_cnt[SYNC_MARKS]; sector_t 8412 drivers/md/md.c sector_t last_check; sector_t 8613 drivers/md/md.c sector_t sectors; sector_t 9145 drivers/md/md.c sector_t old_dev_sectors = mddev->dev_sectors; sector_t 9234 drivers/md/md.c int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sector_t 9259 drivers/md/md.c int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sector_t 23 drivers/md/md.h #define MaxSector (~(sector_t)0) sector_t 41 drivers/md/md.h sector_t sectors; /* Device size (in 512bytes sectors) */ sector_t 56 drivers/md/md.h sector_t data_offset; /* start of data in array */ sector_t 57 drivers/md/md.h sector_t new_data_offset;/* only relevant while reshaping */ sector_t 58 drivers/md/md.h sector_t sb_start; /* offset of the super block (in 512byte sectors) */ sector_t 88 drivers/md/md.h sector_t recovery_offset;/* If this device has been partially sector_t 92 drivers/md/md.h sector_t journal_tail; /* If this device is a journal device, sector_t 131 drivers/md/md.h sector_t sector; /* First sector of the PPL space */ sector_t 210 drivers/md/md.h static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, sector_t 211 drivers/md/md.h sector_t *first_bad, int *bad_sectors) sector_t 223 drivers/md/md.h extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sector_t 225 drivers/md/md.h extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sector_t 269 drivers/md/md.h sector_t lo; sector_t 270 drivers/md/md.h sector_t hi; sector_t 311 drivers/md/md.h sector_t dev_sectors; /* used size of sector_t 313 drivers/md/md.h sector_t array_sectors; /* exported array size */ sector_t 331 drivers/md/md.h sector_t reshape_position; sector_t 346 drivers/md/md.h sector_t curr_resync; /* last block scheduled */ sector_t 353 drivers/md/md.h sector_t curr_resync_completed; sector_t 355 drivers/md/md.h sector_t resync_mark_cnt;/* blocks written at resync_mark */ sector_t 356 drivers/md/md.h sector_t curr_mark_cnt; /* blocks scheduled now */ sector_t 358 drivers/md/md.h sector_t resync_max_sectors; /* may be set by personality */ sector_t 365 drivers/md/md.h sector_t suspend_lo; sector_t 366 drivers/md/md.h sector_t suspend_hi; sector_t 407 drivers/md/md.h sector_t recovery_cp; sector_t 408 drivers/md/md.h sector_t resync_min; /* user requested sync sector_t 410 drivers/md/md.h sector_t resync_max; /* resync should pause sector_t 570 drivers/md/md.h sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped); sector_t 571 drivers/md/md.h int (*resize) (struct mddev *mddev, sector_t sectors); sector_t 572 drivers/md/md.h sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); sector_t 708 drivers/md/md.h sector_t sector, int size, struct page *page); sector_t 710 drivers/md/md.h extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, sector_t 717 drivers/md/md.h extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); sector_t 53 drivers/md/raid0.c sector_t zone_size = 0; sector_t 54 drivers/md/raid0.c sector_t zone_start = 0; sector_t 83 drivers/md/raid0.c sector_t curr_zone_end, sectors; sector_t 307 drivers/md/raid0.c sector_t *sectorp) sector_t 311 drivers/md/raid0.c sector_t sector = *sectorp; sector_t 327 drivers/md/raid0.c sector_t sector, sector_t *sector_offset) sector_t 330 drivers/md/raid0.c sector_t chunk; sector_t 359 drivers/md/raid0.c static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 361 drivers/md/raid0.c sector_t array_sectors = 0; sector_t 369 drivers/md/raid0.c ~(sector_t)(mddev->chunk_sectors-1)); sector_t 471 drivers/md/raid0.c sector_t sector = bio->bi_iter.bi_sector; sector_t 481 drivers/md/raid0.c sector_t start = bio->bi_iter.bi_sector; sector_t 482 drivers/md/raid0.c sector_t end; sector_t 484 drivers/md/raid0.c sector_t first_stripe_index, last_stripe_index; sector_t 485 drivers/md/raid0.c sector_t start_disk_offset; sector_t 487 drivers/md/raid0.c sector_t end_disk_offset; sector_t 527 drivers/md/raid0.c sector_t dev_start, dev_end; sector_t 572 drivers/md/raid0.c sector_t bio_sector; sector_t 573 drivers/md/raid0.c sector_t sector; sector_t 574 drivers/md/raid0.c sector_t orig_sector; sector_t 6 drivers/md/raid0.h sector_t zone_end; /* Start of the next zone (in sectors) */ sector_t 7 drivers/md/raid0.h sector_t dev_start; /* Zone offset in real dev (in sectors) */ sector_t 45 drivers/md/raid1.c static void allow_barrier(struct r1conf *conf, sector_t sector_nr); sector_t 46 drivers/md/raid1.c static void lower_barrier(struct r1conf *conf, sector_t sector_nr); sector_t 53 drivers/md/raid1.c static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) sector_t 82 drivers/md/raid1.c static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi) sector_t 243 drivers/md/raid1.c sector_t sect = r1_bio->sector; sector_t 476 drivers/md/raid1.c sector_t first_bad; sector_t 503 drivers/md/raid1.c sector_t lo = r1_bio->sector; sector_t 504 drivers/md/raid1.c sector_t hi = r1_bio->sector + r1_bio->sectors; sector_t 544 drivers/md/raid1.c static sector_t align_to_barrier_unit_end(sector_t start_sector, sector_t 545 drivers/md/raid1.c sector_t sectors) sector_t 547 drivers/md/raid1.c sector_t len; sector_t 579 drivers/md/raid1.c const sector_t this_sector = r1_bio->sector; sector_t 585 drivers/md/raid1.c sector_t best_dist; sector_t 618 drivers/md/raid1.c sector_t dist; sector_t 619 drivers/md/raid1.c sector_t first_bad; sector_t 669 drivers/md/raid1.c sector_t good_sectors = first_bad - this_sector; sector_t 881 drivers/md/raid1.c static int raise_barrier(struct r1conf *conf, sector_t sector_nr) sector_t 931 drivers/md/raid1.c static void lower_barrier(struct r1conf *conf, sector_t sector_nr) sector_t 1001 drivers/md/raid1.c static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) sector_t 1034 drivers/md/raid1.c static void wait_barrier(struct r1conf *conf, sector_t sector_nr) sector_t 1047 drivers/md/raid1.c static void allow_barrier(struct r1conf *conf, sector_t sector_nr) sector_t 1405 drivers/md/raid1.c sector_t first_bad; sector_t 1511 drivers/md/raid1.c sector_t lo = r1_bio->sector; sector_t 1512 drivers/md/raid1.c sector_t hi = r1_bio->sector + r1_bio->sectors; sector_t 1568 drivers/md/raid1.c sector_t sectors; sector_t 1896 drivers/md/raid1.c sector_t sync_blocks = 0; sector_t 1897 drivers/md/raid1.c sector_t s = r1_bio->sector; sector_t 1930 drivers/md/raid1.c sector_t first_bad; sector_t 1953 drivers/md/raid1.c static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, sector_t 1989 drivers/md/raid1.c sector_t sect = r1_bio->sector; sector_t 2245 drivers/md/raid1.c sector_t sect, int sectors) sector_t 2259 drivers/md/raid1.c sector_t first_bad; sector_t 2358 drivers/md/raid1.c sector_t sector; sector_t 2370 drivers/md/raid1.c & ~(sector_t)(block_sectors - 1)) sector_t 2631 drivers/md/raid1.c static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t 2637 drivers/md/raid1.c sector_t max_sector, nr_sectors; sector_t 2642 drivers/md/raid1.c sector_t sync_blocks; sector_t 2745 drivers/md/raid1.c sector_t first_bad = MaxSector; sector_t 2840 drivers/md/raid1.c sector_t rv; sector_t 2930 drivers/md/raid1.c static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 3196 drivers/md/raid1.c static int raid1_resize(struct mddev *mddev, sector_t sectors) sector_t 3205 drivers/md/raid1.c sector_t newsize = raid1_size(mddev, sectors, 0); sector_t 43 drivers/md/raid1.h sector_t head_position; sector_t 48 drivers/md/raid1.h sector_t next_seq_sect; sector_t 49 drivers/md/raid1.h sector_t seq_start; sector_t 139 drivers/md/raid1.h sector_t cluster_sync_low; sector_t 140 drivers/md/raid1.h sector_t cluster_sync_high; sector_t 158 drivers/md/raid1.h sector_t sector; sector_t 213 drivers/md/raid1.h static inline int sector_to_idx(sector_t sector) sector_t 71 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, sector_t 494 drivers/md/raid10.c sector_t first_bad; sector_t 564 drivers/md/raid10.c sector_t sector; sector_t 565 drivers/md/raid10.c sector_t chunk; sector_t 566 drivers/md/raid10.c sector_t stripe; sector_t 593 drivers/md/raid10.c sector_t s = sector; sector_t 639 drivers/md/raid10.c static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) sector_t 641 drivers/md/raid10.c sector_t offset, chunk, vchunk; sector_t 707 drivers/md/raid10.c const sector_t this_sector = r10_bio->sector; sector_t 711 drivers/md/raid10.c sector_t new_distance, best_dist; sector_t 743 drivers/md/raid10.c sector_t first_bad; sector_t 745 drivers/md/raid10.c sector_t dev_sector; sector_t 780 drivers/md/raid10.c sector_t good_sectors = sector_t 1053 drivers/md/raid10.c static sector_t choose_data_offset(struct r10bio *r10_bio, sector_t 1118 drivers/md/raid10.c struct bio *bio, sector_t sectors) sector_t 1302 drivers/md/raid10.c sector_t sectors; sector_t 1394 drivers/md/raid10.c sector_t first_bad; sector_t 1395 drivers/md/raid10.c sector_t dev_sector = r10_bio->devs[i].addr; sector_t 1524 drivers/md/raid10.c sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); sector_t 1933 drivers/md/raid10.c sector_t s = r10_bio->sectors; sector_t 1959 drivers/md/raid10.c sector_t first_bad; sector_t 2153 drivers/md/raid10.c sector_t sect = 0; sector_t 2163 drivers/md/raid10.c sector_t addr; sector_t 2298 drivers/md/raid10.c static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, sector_t 2301 drivers/md/raid10.c sector_t first_bad; sector_t 2375 drivers/md/raid10.c sector_t first_bad; sector_t 2533 drivers/md/raid10.c sector_t sector; sector_t 2545 drivers/md/raid10.c & ~(sector_t)(block_sectors - 1)) sector_t 2550 drivers/md/raid10.c sector_t wsector; sector_t 2838 drivers/md/raid10.c sector_t window_size; sector_t 2901 drivers/md/raid10.c static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t 2907 drivers/md/raid10.c sector_t max_sector, nr_sectors; sector_t 2910 drivers/md/raid10.c sector_t sync_blocks; sector_t 2911 drivers/md/raid10.c sector_t sectors_skipped = 0; sector_t 2913 drivers/md/raid10.c sector_t chunk_mask = conf->geo.chunk_mask; sector_t 2964 drivers/md/raid10.c sector_t sect = sector_t 3046 drivers/md/raid10.c sector_t sect; sector_t 3141 drivers/md/raid10.c sector_t from_addr, to_addr; sector_t 3144 drivers/md/raid10.c sector_t sector, first_bad; sector_t 3336 drivers/md/raid10.c sector_t first_bad, sector; sector_t 3455 drivers/md/raid10.c sector_t sect_va1, sect_va2; sector_t 3524 drivers/md/raid10.c static sector_t sector_t 3525 drivers/md/raid10.c raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 3527 drivers/md/raid10.c sector_t size; sector_t 3544 drivers/md/raid10.c static void calc_sectors(struct r10conf *conf, sector_t size) sector_t 3729 drivers/md/raid10.c sector_t size; sector_t 3730 drivers/md/raid10.c sector_t min_offset_diff = 0; sector_t 3965 drivers/md/raid10.c static int raid10_resize(struct mddev *mddev, sector_t sectors) sector_t 3980 drivers/md/raid10.c sector_t oldsize, size; sector_t 4010 drivers/md/raid10.c static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) sector_t 4188 drivers/md/raid10.c sector_t min_offset_diff = 0; sector_t 4244 drivers/md/raid10.c sector_t size = raid10_size(mddev, 0, 0); sector_t 4260 drivers/md/raid10.c sector_t oldsize, newsize; sector_t 4368 drivers/md/raid10.c static sector_t last_dev_address(sector_t s, struct geom *geo) sector_t 4383 drivers/md/raid10.c static sector_t first_dev_address(sector_t s, struct geom *geo) sector_t 4393 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, sector_t 4435 drivers/md/raid10.c sector_t next, safe, last; sector_t 4484 drivers/md/raid10.c sector_nr = last & ~(sector_t)(conf->geo.chunk_mask sector_t 4754 drivers/md/raid10.c sector_t lo, hi; sector_t 4799 drivers/md/raid10.c sector_t addr; sector_t 20 drivers/md/raid10.h sector_t head_position; sector_t 45 drivers/md/raid10.h sector_t stride; /* distance between far copies. sector_t 56 drivers/md/raid10.h sector_t chunk_mask; sector_t 62 drivers/md/raid10.h sector_t dev_sectors; /* temp copy of sector_t 64 drivers/md/raid10.h sector_t reshape_progress; sector_t 65 drivers/md/raid10.h sector_t reshape_safe; sector_t 67 drivers/md/raid10.h sector_t offset_diff; sector_t 86 drivers/md/raid10.h sector_t next_resync; sector_t 109 drivers/md/raid10.h sector_t cluster_sync_low; sector_t 110 drivers/md/raid10.h sector_t cluster_sync_high; sector_t 124 drivers/md/raid10.h sector_t sector; /* virtual sector number */ sector_t 154 drivers/md/raid10.h sector_t addr; sector_t 87 drivers/md/raid5-cache.c sector_t device_size; /* log device size, round to sector_t 89 drivers/md/raid5-cache.c sector_t max_free_space; /* reclaim run if free space is at sector_t 92 drivers/md/raid5-cache.c sector_t last_checkpoint; /* log tail. where recovery scan sector_t 96 drivers/md/raid5-cache.c sector_t log_start; /* log head. where new data appends */ sector_t 99 drivers/md/raid5-cache.c sector_t next_checkpoint; sector_t 195 drivers/md/raid5-cache.c static inline sector_t r5c_tree_index(struct r5conf *conf, sector_t 196 drivers/md/raid5-cache.c sector_t sect) sector_t 198 drivers/md/raid5-cache.c sector_t offset; sector_t 221 drivers/md/raid5-cache.c sector_t log_start; /* where the io_unit starts */ sector_t 222 drivers/md/raid5-cache.c sector_t log_end; /* where the io_unit ends */ sector_t 258 drivers/md/raid5-cache.c static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) sector_t 266 drivers/md/raid5-cache.c static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, sector_t 267 drivers/md/raid5-cache.c sector_t end) sector_t 275 drivers/md/raid5-cache.c static bool r5l_has_free_space(struct r5l_log *log, sector_t size) sector_t 277 drivers/md/raid5-cache.c sector_t used_size; sector_t 326 drivers/md/raid5-cache.c void r5l_wake_reclaim(struct r5l_log *log, sector_t space); sector_t 399 drivers/md/raid5-cache.c static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf) sector_t 421 drivers/md/raid5-cache.c sector_t free_space; sector_t 422 drivers/md/raid5-cache.c sector_t reclaim_space; sector_t 825 drivers/md/raid5-cache.c sector_t location, sector_t 864 drivers/md/raid5-cache.c static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) sector_t 1154 drivers/md/raid5-cache.c static sector_t r5c_calculate_new_cp(struct r5conf *conf) sector_t 1158 drivers/md/raid5-cache.c sector_t new_cp; sector_t 1177 drivers/md/raid5-cache.c static sector_t r5l_reclaimable_space(struct r5l_log *log) sector_t 1314 drivers/md/raid5-cache.c static void r5l_write_super(struct r5l_log *log, sector_t cp); sector_t 1316 drivers/md/raid5-cache.c sector_t end) sector_t 1502 drivers/md/raid5-cache.c sector_t reclaim_target = xchg(&log->reclaim_target, 0); sector_t 1503 drivers/md/raid5-cache.c sector_t reclaimable; sector_t 1504 drivers/md/raid5-cache.c sector_t next_checkpoint; sector_t 1563 drivers/md/raid5-cache.c void r5l_wake_reclaim(struct r5l_log *log, sector_t space) sector_t 1613 drivers/md/raid5-cache.c sector_t meta_total_blocks; /* total size of current meta and data */ sector_t 1614 drivers/md/raid5-cache.c sector_t pos; /* recovery position */ sector_t 1628 drivers/md/raid5-cache.c sector_t pool_offset; /* offset of first page in the pool */ sector_t 1681 drivers/md/raid5-cache.c sector_t offset) sector_t 1712 drivers/md/raid5-cache.c sector_t offset) sector_t 1770 drivers/md/raid5-cache.c sector_t pos, u64 seq) sector_t 1783 drivers/md/raid5-cache.c static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, sector_t 1816 drivers/md/raid5-cache.c sector_t log_offset) sector_t 1838 drivers/md/raid5-cache.c sector_t log_offset) sector_t 1931 drivers/md/raid5-cache.c sector_t stripe_sect, sector_t 1946 drivers/md/raid5-cache.c r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect) sector_t 1988 drivers/md/raid5-cache.c sector_t log_offset, __le32 log_checksum) sector_t 2011 drivers/md/raid5-cache.c sector_t mb_offset = sizeof(struct r5l_meta_block); sector_t 2012 drivers/md/raid5-cache.c sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); sector_t 2088 drivers/md/raid5-cache.c sector_t log_offset; sector_t 2089 drivers/md/raid5-cache.c sector_t stripe_sect; sector_t 2360 drivers/md/raid5-cache.c sector_t next_checkpoint = MaxSector; sector_t 2375 drivers/md/raid5-cache.c sector_t write_pos; sector_t 2458 drivers/md/raid5-cache.c sector_t pos; sector_t 2522 drivers/md/raid5-cache.c static void r5l_write_super(struct r5l_log *log, sector_t cp) sector_t 2644 drivers/md/raid5-cache.c sector_t tree_index; sector_t 2809 drivers/md/raid5-cache.c sector_t tree_index; sector_t 2943 drivers/md/raid5-cache.c bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect) sector_t 2946 drivers/md/raid5-cache.c sector_t tree_index; sector_t 2963 drivers/md/raid5-cache.c sector_t cp = log->rdev->journal_tail; sector_t 23 drivers/md/raid5-log.h extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space); sector_t 34 drivers/md/raid5-log.h extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect); sector_t 126 drivers/md/raid5-ppl.c sector_t next_io_sector; sector_t 272 drivers/md/raid5-ppl.c sector_t data_sector = 0; sector_t 796 drivers/md/raid5-ppl.c sector_t ppl_sector) sector_t 804 drivers/md/raid5-ppl.c sector_t r_sector_first; sector_t 805 drivers/md/raid5-ppl.c sector_t r_sector_last; sector_t 854 drivers/md/raid5-ppl.c sector_t parity_sector; sector_t 869 drivers/md/raid5-ppl.c sector_t sector; sector_t 870 drivers/md/raid5-ppl.c sector_t r_sector = r_sector_first + i + sector_t 971 drivers/md/raid5-ppl.c sector_t offset) sector_t 976 drivers/md/raid5-ppl.c sector_t ppl_sector = rdev->ppl.sector + offset + sector_t 990 drivers/md/raid5-ppl.c sector_t sector = ppl_sector; sector_t 1089 drivers/md/raid5-ppl.c sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0; sector_t 70 drivers/md/raid5.c static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) sector_t 76 drivers/md/raid5.c static inline int stripe_hash_locks_hash(sector_t sect) sector_t 485 drivers/md/raid5.c static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, sector_t 488 drivers/md/raid5.c static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) sector_t 530 drivers/md/raid5.c static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, sector_t 626 drivers/md/raid5.c raid5_get_active_stripe(struct r5conf *conf, sector_t sector, sector_t 742 drivers/md/raid5.c sector_t head_sector, tmp_sec; sector_t 855 drivers/md/raid5.c sector_t progress = conf->reshape_progress; sector_t 952 drivers/md/raid5.c static void defer_issue_bios(struct r5conf *conf, sector_t sector, sector_t 1058 drivers/md/raid5.c sector_t first_bad; sector_t 1229 drivers/md/raid5.c sector_t sector, struct dma_async_tx_descriptor *tx, sector_t 2468 drivers/md/raid5.c sector_t s; sector_t 2598 drivers/md/raid5.c sector_t first_bad; sector_t 2713 drivers/md/raid5.c sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, sector_t 2717 drivers/md/raid5.c sector_t stripe, stripe2; sector_t 2718 drivers/md/raid5.c sector_t chunk_number; sector_t 2722 drivers/md/raid5.c sector_t new_sector; sector_t 2911 drivers/md/raid5.c new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; sector_t 2915 drivers/md/raid5.c sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) sector_t 2920 drivers/md/raid5.c sector_t new_sector = sh->sector, check; sector_t 2925 drivers/md/raid5.c sector_t stripe; sector_t 2927 drivers/md/raid5.c sector_t chunk_number; sector_t 2929 drivers/md/raid5.c sector_t r_sector; sector_t 3243 drivers/md/raid5.c sector_t sector; sector_t 3244 drivers/md/raid5.c sector_t first = 0; sector_t 3245 drivers/md/raid5.c sector_t last = 0; sector_t 3277 drivers/md/raid5.c sector_t sector = sh->dev[dd_idx].sector; sector_t 3332 drivers/md/raid5.c static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, sector_t 3871 drivers/md/raid5.c sector_t recovery_cp = conf->mddev->recovery_cp; sector_t 4319 drivers/md/raid5.c sector_t bn = raid5_compute_blocknr(sh, i, 1); sector_t 4320 drivers/md/raid5.c sector_t s = raid5_compute_sector(conf, bn, 0, sector_t 4394 drivers/md/raid5.c sector_t first_bad; sector_t 5120 drivers/md/raid5.c sector_t sector = bio->bi_iter.bi_sector; sector_t 5210 drivers/md/raid5.c sector_t end_sector; sector_t 5255 drivers/md/raid5.c sector_t first_bad; sector_t 5297 drivers/md/raid5.c sector_t sector = raid_bio->bi_iter.bi_sector; sector_t 5493 drivers/md/raid5.c sector_t logical_sector, last_sector; sector_t 5501 drivers/md/raid5.c logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); sector_t 5581 drivers/md/raid5.c sector_t new_sector; sector_t 5582 drivers/md/raid5.c sector_t logical_sector, last_sector; sector_t 5626 drivers/md/raid5.c logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); sector_t 5751 drivers/md/raid5.c static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); sector_t 5753 drivers/md/raid5.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) sector_t 5767 drivers/md/raid5.c sector_t first_sector, last_sector; sector_t 5773 drivers/md/raid5.c sector_t writepos, readpos, safepos; sector_t 5774 drivers/md/raid5.c sector_t stripe_addr; sector_t 5777 drivers/md/raid5.c sector_t retn; sector_t 5832 drivers/md/raid5.c readpos -= min_t(sector_t, reshape_sectors, readpos); sector_t 5833 drivers/md/raid5.c safepos -= min_t(sector_t, reshape_sectors, safepos); sector_t 5843 drivers/md/raid5.c ~((sector_t)reshape_sectors - 1)) sector_t 5923 drivers/md/raid5.c sector_t s; sector_t 6022 drivers/md/raid5.c static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t 6027 drivers/md/raid5.c sector_t max_sector = mddev->dev_sectors; sector_t 6028 drivers/md/raid5.c sector_t sync_blocks; sector_t 6068 drivers/md/raid5.c sector_t rv = mddev->dev_sectors - sector_nr; sector_t 6130 drivers/md/raid5.c sector_t sector, logical_sector, last_sector; sector_t 6135 drivers/md/raid5.c ~((sector_t)STRIPE_SECTORS-1); sector_t 6731 drivers/md/raid5.c static sector_t sector_t 6732 drivers/md/raid5.c raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) sector_t 6742 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1); sector_t 6743 drivers/md/raid5.c sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); sector_t 7156 drivers/md/raid5.c sector_t reshape_offset = 0; sector_t 7209 drivers/md/raid5.c sector_t here_new, here_old; sector_t 7759 drivers/md/raid5.c static int raid5_resize(struct mddev *mddev, sector_t sectors) sector_t 7768 drivers/md/raid5.c sector_t newsize; sector_t 7773 drivers/md/raid5.c sectors &= ~((sector_t)conf->chunk_sectors - 1); sector_t 8104 drivers/md/raid5.c sector_t sectors; sector_t 205 drivers/md/raid5.h sector_t sector; /* sector of this row */ sector_t 234 drivers/md/raid5.h sector_t log_start; /* first meta block on the journal */ sector_t 257 drivers/md/raid5.h sector_t sector; /* sector of this page */ sector_t 493 drivers/md/raid5.h static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) sector_t 562 drivers/md/raid5.h sector_t sector; /* stripe sector */ sector_t 583 drivers/md/raid5.h sector_t reshape_progress; sector_t 587 drivers/md/raid5.h sector_t reshape_safe; sector_t 757 drivers/md/raid5.h extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous); sector_t 759 drivers/md/raid5.h extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, sector_t 763 drivers/md/raid5.h raid5_get_active_stripe(struct r5conf *conf, sector_t sector, sector_t 1871 drivers/memstick/core/ms_block.c sector_t lba; sector_t 2086 drivers/message/fusion/mptscsih.c sector_t capacity, int geom[]) sector_t 2090 drivers/message/fusion/mptscsih.c sector_t cylinders; sector_t 125 drivers/message/fusion/mptscsih.h extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]); sector_t 2248 drivers/mmc/core/block.c sector_t size, sector_t 2372 drivers/mmc/core/block.c sector_t size; sector_t 2385 drivers/mmc/core/block.c size = (typeof(sector_t))card->csd.capacity sector_t 2396 drivers/mmc/core/block.c sector_t size, sector_t 2506 drivers/mmc/core/block.c sector_t size, sector_t 358 drivers/mtd/ubi/block.c if ((sector_t)size != size) sector_t 165 drivers/nvdimm/badrange.c static void set_badblock(struct badblocks *bb, sector_t s, int num) sector_t 188 drivers/nvdimm/badrange.c sector_t start_sector, end_sector; sector_t 200 drivers/nvdimm/badrange.c sector_t s = start_sector; sector_t 118 drivers/nvdimm/blk.c unsigned int len, unsigned int off, int rw, sector_t sector) sector_t 1068 drivers/nvdimm/btt.c static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap, sector_t 1198 drivers/nvdimm/btt.c struct page *page, unsigned int off, sector_t sector, sector_t 1305 drivers/nvdimm/btt.c sector_t phys_sector = nsoff >> 9; sector_t 1311 drivers/nvdimm/btt.c sector_t sector, struct page *page, unsigned int off, sector_t 1427 drivers/nvdimm/btt.c unsigned int op, sector_t sector) sector_t 1486 drivers/nvdimm/btt.c static int btt_rw_page(struct block_device *bdev, sector_t sector, sector_t 180 drivers/nvdimm/bus.c sector_t sector; sector_t 257 drivers/nvdimm/claim.c sector_t sector = offset >> 9; sector_t 418 drivers/nvdimm/nd.h static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, sector_t 422 drivers/nvdimm/nd.h sector_t first_bad; sector_t 375 drivers/nvdimm/pfn_devs.c sector_t first_bad, meta_start; sector_t 75 drivers/nvdimm/pmem.c sector_t sector; sector_t 142 drivers/nvdimm/pmem.c sector_t sector) sector_t 221 drivers/nvdimm/pmem.c static int pmem_rw_page(struct block_device *bdev, sector_t sector, sector_t 1770 drivers/nvme/host/core.c sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9); sector_t 556 drivers/nvme/host/lightnvm.c sector_t slba, int nchks, sector_t 422 drivers/nvme/host/nvme.h static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) sector_t 150 drivers/nvme/target/io-cmd-bdev.c sector_t sector; sector_t 294 drivers/nvme/target/io-cmd-bdev.c sector_t sector; sector_t 295 drivers/nvme/target/io-cmd-bdev.c sector_t nr_sector; sector_t 300 drivers/nvme/target/io-cmd-bdev.c nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << sector_t 348 drivers/nvme/target/io-cmd-file.c len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << sector_t 510 drivers/s390/block/dasd_diag.c sector_t recid, first_rec, last_rec; sector_t 215 drivers/s390/block/dasd_eckd.c sector_t *track) sector_t 3086 drivers/s390/block/dasd_eckd.c sector_t first_trk; sector_t 3087 drivers/s390/block/dasd_eckd.c sector_t last_trk; sector_t 3088 drivers/s390/block/dasd_eckd.c sector_t curr_trk; sector_t 3157 drivers/s390/block/dasd_eckd.c sector_t first_trk, last_trk; sector_t 3158 drivers/s390/block/dasd_eckd.c sector_t first_blk, last_blk; sector_t 3168 drivers/s390/block/dasd_eckd.c sector_t curr_trk; sector_t 3169 drivers/s390/block/dasd_eckd.c sector_t end_blk; sector_t 3830 drivers/s390/block/dasd_eckd.c sector_t first_rec, sector_t 3831 drivers/s390/block/dasd_eckd.c sector_t last_rec, sector_t 3832 drivers/s390/block/dasd_eckd.c sector_t first_trk, sector_t 3833 drivers/s390/block/dasd_eckd.c sector_t last_trk, sector_t 3849 drivers/s390/block/dasd_eckd.c sector_t recid; sector_t 3952 drivers/s390/block/dasd_eckd.c sector_t trkid = recid; sector_t 4022 drivers/s390/block/dasd_eckd.c sector_t first_rec, sector_t 4023 drivers/s390/block/dasd_eckd.c sector_t last_rec, sector_t 4024 drivers/s390/block/dasd_eckd.c sector_t first_trk, sector_t 4025 drivers/s390/block/dasd_eckd.c sector_t last_trk, sector_t 4039 drivers/s390/block/dasd_eckd.c sector_t recid; sector_t 4045 drivers/s390/block/dasd_eckd.c sector_t trkid; sector_t 4117 drivers/s390/block/dasd_eckd.c (sector_t)count_to_trk_end); sector_t 4354 drivers/s390/block/dasd_eckd.c sector_t first_rec, sector_t 4355 drivers/s390/block/dasd_eckd.c sector_t last_rec, sector_t 4356 drivers/s390/block/dasd_eckd.c sector_t first_trk, sector_t 4357 drivers/s390/block/dasd_eckd.c sector_t last_trk, sector_t 4378 drivers/s390/block/dasd_eckd.c sector_t recid, trkid; sector_t 4460 drivers/s390/block/dasd_eckd.c (sector_t)count_to_trk_end); sector_t 4533 drivers/s390/block/dasd_eckd.c sector_t first_rec, last_rec; sector_t 4534 drivers/s390/block/dasd_eckd.c sector_t first_trk, last_trk; sector_t 4608 drivers/s390/block/dasd_eckd.c sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; sector_t 4611 drivers/s390/block/dasd_eckd.c sector_t first_trk, last_trk, sectors; sector_t 4767 drivers/s390/block/dasd_eckd.c sector_t recid; sector_t 280 drivers/s390/block/dasd_fba.c static int count_ccws(sector_t first_rec, sector_t last_rec, sector_t 283 drivers/s390/block/dasd_fba.c sector_t wz_stop = 0, d_stop = 0; sector_t 333 drivers/s390/block/dasd_fba.c sector_t wz_stop = 0, d_stop = 0; sector_t 334 drivers/s390/block/dasd_fba.c sector_t first_rec, last_rec; sector_t 451 drivers/s390/block/dasd_fba.c sector_t recid, first_rec, last_rec; sector_t 481 drivers/s390/block/dasd_int.h sector_t track; sector_t 1698 drivers/scsi/3w-9xxx.c static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) sector_t 1408 drivers/scsi/3w-sas.c static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) sector_t 1344 drivers/scsi/3w-xxxx.c sector_t capacity, int geom[]) sector_t 3385 drivers/scsi/BusLogic.c sector_t capacity, int *params) sector_t 1287 drivers/scsi/BusLogic.h static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *); sector_t 2623 drivers/scsi/aacraid/aacraid.h static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) sector_t 297 drivers/scsi/aacraid/linit.c sector_t capacity, int *geom) sector_t 7186 drivers/scsi/advansys.c sector_t capacity, int ip[]) sector_t 1224 drivers/scsi/aha152x.c sector_t capacity, int *info_array) sector_t 982 drivers/scsi/aha1542.c struct block_device *bdev, sector_t capacity, int geom[]) sector_t 506 drivers/scsi/aha1740.c sector_t capacity, int* ip) sector_t 724 drivers/scsi/aic7xxx/aic79xx_osm.c sector_t capacity, int geom[]) sector_t 696 drivers/scsi/aic7xxx/aic7xxx_osm.c sector_t capacity, int geom[]) sector_t 137 drivers/scsi/aic7xxx/aiclib.h aic_sector_div(sector_t capacity, int heads, int sectors) sector_t 112 drivers/scsi/arcmsr/arcmsr_hba.c struct block_device *bdev, sector_t capacity, int *info); sector_t 354 drivers/scsi/arcmsr/arcmsr_hba.c struct block_device *bdev, sector_t capacity, int *geom) sector_t 1638 drivers/scsi/atp870u.c sector_t capacity, int *ip) sector_t 1060 drivers/scsi/dc395x.c struct block_device *bdev, sector_t capacity, int *info) sector_t 482 drivers/scsi/dpt_i2o.c sector_t capacity, int geom[]) sector_t 36 drivers/scsi/dpti.h sector_t, int geom[]); sector_t 463 drivers/scsi/fdomain.c struct block_device *bdev, sector_t capacity, sector_t 3359 drivers/scsi/gdth.c static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip) sector_t 946 drivers/scsi/imm.c sector_t capacity, int ip[]) sector_t 2665 drivers/scsi/initio.c sector_t capacity, int *info_array) sector_t 4746 drivers/scsi/ipr.c sector_t capacity, int *parm) sector_t 4749 drivers/scsi/ipr.c sector_t cylinders; sector_t 1141 drivers/scsi/ips.c sector_t capacity, int geom[]) sector_t 402 drivers/scsi/ips.h sector_t capacity, int geom[]); sector_t 804 drivers/scsi/libiscsi.c sector_t sector; sector_t 878 drivers/scsi/libsas/sas_scsi_host.c sector_t capacity, int *hsc) sector_t 1072 drivers/scsi/lpfc/lpfc.h sector_t lpfc_injerr_lba; sector_t 1073 drivers/scsi/lpfc/lpfc.h #define LPFC_INJERR_LBA_OFF (sector_t)(-1) sector_t 2195 drivers/scsi/lpfc/lpfc_debugfs.c if (phba->lpfc_injerr_lba == (sector_t)(-1)) sector_t 2243 drivers/scsi/lpfc/lpfc_debugfs.c phba->lpfc_injerr_lba = (sector_t)tmp; sector_t 995 drivers/scsi/lpfc/lpfc_scsi.c sector_t lba; sector_t 2795 drivers/scsi/megaraid.c sector_t capacity, int geom[]) sector_t 958 drivers/scsi/megaraid.h sector_t, int []); sector_t 3074 drivers/scsi/megaraid/megaraid_sas_base.c sector_t capacity, int geom[]) sector_t 3078 drivers/scsi/megaraid/megaraid_sas_base.c sector_t cylinders; sector_t 2483 drivers/scsi/mpt3sas/mpt3sas_scsih.c sector_t capacity, int params[]) sector_t 2487 drivers/scsi/mpt3sas/mpt3sas_scsih.c sector_t cylinders; sector_t 259 drivers/scsi/mpt3sas/mpt3sas_warpdrive.c sector_t v_lba, p_lba, stripe_off, column, io_size; sector_t 2148 drivers/scsi/mvumi.c sector_t capacity, int geom[]) sector_t 2151 drivers/scsi/mvumi.c sector_t cylinders; sector_t 1778 drivers/scsi/myrb.c sector_t capacity, int geom[]) sector_t 595 drivers/scsi/pcmcia/sym53c500_cs.c sector_t capacity, int *info_array) sector_t 822 drivers/scsi/ppa.c sector_t capacity, int ip[]) sector_t 1035 drivers/scsi/qla1280.c sector_t capacity, int geom[]) sector_t 2172 drivers/scsi/qla2xxx/qla_isr.c sector_t lba_s = scsi_get_lba(cmd); sector_t 469 drivers/scsi/qlogicfas408.c sector_t capacity, int ip[]) sector_t 110 drivers/scsi/qlogicfas408.h sector_t capacity, int ip[]); sector_t 670 drivers/scsi/scsi_debug.c static sector_t sdebug_capacity; /* in sectors */ sector_t 737 drivers/scsi/scsi_debug.c static struct t10_pi_tuple *dif_store(sector_t sector) sector_t 1625 drivers/scsi/scsi_debug.c static sector_t get_sdebug_capacity(void) sector_t 1630 drivers/scsi/scsi_debug.c return (sector_t)sdebug_virtual_gb * sector_t 2563 drivers/scsi/scsi_debug.c sector_t sector, u32 ei_lba) sector_t 2589 drivers/scsi/scsi_debug.c static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, sector_t 2632 drivers/scsi/scsi_debug.c static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, sector_t 2637 drivers/scsi/scsi_debug.c sector_t sector; sector_t 2815 drivers/scsi/scsi_debug.c static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, sector_t 2821 drivers/scsi/scsi_debug.c sector_t sector = start_sec; sector_t 2887 drivers/scsi/scsi_debug.c static unsigned long lba_to_map_index(sector_t lba) sector_t 2895 drivers/scsi/scsi_debug.c static sector_t map_index_to_lba(unsigned long index) sector_t 2897 drivers/scsi/scsi_debug.c sector_t lba = index * sdebug_unmap_granularity; sector_t 2904 drivers/scsi/scsi_debug.c static unsigned int map_state(sector_t lba, unsigned int *num) sector_t 2906 drivers/scsi/scsi_debug.c sector_t end; sector_t 2919 drivers/scsi/scsi_debug.c end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); sector_t 2924 drivers/scsi/scsi_debug.c static void map_region(sector_t lba, unsigned int len) sector_t 2926 drivers/scsi/scsi_debug.c sector_t end = lba + len; sector_t 2938 drivers/scsi/scsi_debug.c static void unmap_region(sector_t lba, unsigned int len) sector_t 2940 drivers/scsi/scsi_debug.c sector_t end = lba + len; sector_t 68 drivers/scsi/scsicam.c int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) sector_t 1081 drivers/scsi/sd.c sector_t lba, unsigned int nr_blocks, sector_t 1103 drivers/scsi/sd.c sector_t lba, unsigned int nr_blocks, sector_t 1118 drivers/scsi/sd.c sector_t lba, unsigned int nr_blocks, sector_t 1133 drivers/scsi/sd.c sector_t lba, unsigned int nr_blocks, sector_t 1166 drivers/scsi/sd.c sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); sector_t 1167 drivers/scsi/sd.c sector_t threshold; sector_t 1428 drivers/scsi/sd.c sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); sector_t 2370 drivers/scsi/sd.c sector_t lba; sector_t 2469 drivers/scsi/sd.c sdkp->capacity = 1 + (sector_t) 0xffffffff; sector_t 2539 drivers/scsi/sd.c sector_t old_capacity) sector_t 3103 drivers/scsi/sd.c sector_t old_capacity = sdkp->capacity; sector_t 3177 drivers/scsi/sd.c (sector_t)BLK_DEF_MAX_SECTORS); sector_t 84 drivers/scsi/sd.h sector_t capacity; /* size in logical blocks */ sector_t 171 drivers/scsi/sd.h static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks) sector_t 176 drivers/scsi/sd.h static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) sector_t 181 drivers/scsi/sd.h static inline sector_t bytes_to_logical(struct scsi_device *sdev, unsigned int bytes) sector_t 186 drivers/scsi/sd.h static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) sector_t 215 drivers/scsi/sd.h extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, sector_t 67 drivers/scsi/sd_zbc.c unsigned int buflen, sector_t lba, sector_t 163 drivers/scsi/sd_zbc.c int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, sector_t 204 drivers/scsi/sd_zbc.c static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) sector_t 220 drivers/scsi/sd_zbc.c sector_t sector = blk_rq_pos(rq); sector_t 221 drivers/scsi/sd_zbc.c sector_t block = sectors_to_logical(sdkp->device, sector); sector_t 343 drivers/scsi/sd_zbc.c sector_t max_lba, block = 0; sector_t 1459 drivers/scsi/stex.c struct block_device *bdev, sector_t capacity, int geom[]) sector_t 1449 drivers/scsi/storvsc_drv.c sector_t capacity, int *info) sector_t 1451 drivers/scsi/storvsc_drv.c sector_t nsect = capacity; sector_t 1452 drivers/scsi/storvsc_drv.c sector_t cylinders = nsect; sector_t 1461 drivers/scsi/storvsc_drv.c if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect) sector_t 331 drivers/scsi/ufs/ufshcd.c sector_t lba = -1; sector_t 546 drivers/scsi/wd719x.c sector_t capacity, int geom[]) sector_t 90 drivers/staging/exfat/exfat.h ((((sector_t)((x) - 2)) << p_fs->sectors_per_clu_bits) + \ sector_t 534 drivers/staging/exfat/exfat.h sector_t sec; sector_t 640 drivers/staging/exfat/exfat.h sector_t sector; sector_t 771 drivers/staging/exfat/exfat.h u8 *FAT_getblk(struct super_block *sb, sector_t sec); sector_t 772 drivers/staging/exfat/exfat.h void FAT_modify(struct super_block *sb, sector_t sec); sector_t 775 drivers/staging/exfat/exfat.h u8 *buf_getblk(struct super_block *sb, sector_t sec); sector_t 776 drivers/staging/exfat/exfat.h void buf_modify(struct super_block *sb, sector_t sec); sector_t 777 drivers/staging/exfat/exfat.h void buf_lock(struct super_block *sb, sector_t sec); sector_t 778 drivers/staging/exfat/exfat.h void buf_unlock(struct super_block *sb, sector_t sec); sector_t 779 drivers/staging/exfat/exfat.h void buf_release(struct super_block *sb, sector_t sec); sector_t 870 drivers/staging/exfat/exfat.h sector_t *sector, s32 *offset); sector_t 871 drivers/staging/exfat/exfat.h struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector, sector_t 874 drivers/staging/exfat/exfat.h s32 entry, sector_t *sector); sector_t 953 drivers/staging/exfat/exfat.h int sector_read(struct super_block *sb, sector_t sec, sector_t 955 drivers/staging/exfat/exfat.h int sector_write(struct super_block *sb, sector_t sec, sector_t 957 drivers/staging/exfat/exfat.h int multi_sector_read(struct super_block *sb, sector_t sec, sector_t 959 drivers/staging/exfat/exfat.h int multi_sector_write(struct super_block *sb, sector_t sec, sector_t 964 drivers/staging/exfat/exfat.h int bdev_read(struct super_block *sb, sector_t secno, sector_t 966 drivers/staging/exfat/exfat.h int bdev_write(struct super_block *sb, sector_t secno, sector_t 33 drivers/staging/exfat/exfat_blkdev.c int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh, sector_t 68 drivers/staging/exfat/exfat_blkdev.c int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh, sector_t 18 drivers/staging/exfat/exfat_cache.c static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec) sector_t 70 drivers/staging/exfat/exfat_cache.c static struct buf_cache_t *FAT_cache_get(struct super_block *sb, sector_t sec) sector_t 200 drivers/staging/exfat/exfat_cache.c sector_t sec; sector_t 328 drivers/staging/exfat/exfat_cache.c sector_t sec; sector_t 444 drivers/staging/exfat/exfat_cache.c u8 *FAT_getblk(struct super_block *sb, sector_t sec) sector_t 479 drivers/staging/exfat/exfat_cache.c void FAT_modify(struct super_block *sb, sector_t sec) sector_t 532 drivers/staging/exfat/exfat_cache.c static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec) sector_t 551 drivers/staging/exfat/exfat_cache.c static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec) sector_t 564 drivers/staging/exfat/exfat_cache.c static u8 *__buf_getblk(struct super_block *sb, sector_t sec) sector_t 599 drivers/staging/exfat/exfat_cache.c u8 *buf_getblk(struct super_block *sb, sector_t sec) sector_t 610 drivers/staging/exfat/exfat_cache.c void buf_modify(struct super_block *sb, sector_t sec) sector_t 626 drivers/staging/exfat/exfat_cache.c void buf_lock(struct super_block *sb, sector_t sec) sector_t 642 drivers/staging/exfat/exfat_cache.c void buf_unlock(struct super_block *sb, sector_t sec) sector_t 658 drivers/staging/exfat/exfat_cache.c void buf_release(struct super_block *sb, sector_t sec) sector_t 141 drivers/staging/exfat/exfat_core.c sector_t s, n; sector_t 310 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 351 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 511 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 595 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 612 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 698 drivers/staging/exfat/exfat_core.c static s32 __load_upcase_table(struct super_block *sb, sector_t sector, sector_t 706 drivers/staging/exfat/exfat_core.c sector_t end_sector = num_sectors + sector; sector_t 847 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1242 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1259 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1292 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1343 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1490 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1508 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1526 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 1605 drivers/staging/exfat/exfat_core.c sector_t *sector, s32 *offset) sector_t 1636 drivers/staging/exfat/exfat_core.c struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector, sector_t 1650 drivers/staging/exfat/exfat_core.c s32 entry, sector_t *sector) sector_t 1653 drivers/staging/exfat/exfat_core.c sector_t sec; sector_t 1694 drivers/staging/exfat/exfat_core.c sector_t sec; sector_t 1855 drivers/staging/exfat/exfat_core.c sector_t sec, s32 off, u32 count) sector_t 1926 drivers/staging/exfat/exfat_core.c sector_t sec; sector_t 2058 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 3358 drivers/staging/exfat/exfat_core.c sector_t sector; sector_t 3388 drivers/staging/exfat/exfat_core.c sector_t sector_old, sector_new; sector_t 3494 drivers/staging/exfat/exfat_core.c sector_t sector_mov, sector_new; sector_t 3600 drivers/staging/exfat/exfat_core.c int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh, sector_t 3623 drivers/staging/exfat/exfat_core.c int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh, sector_t 3652 drivers/staging/exfat/exfat_core.c int multi_sector_read(struct super_block *sb, sector_t sec, sector_t 3675 drivers/staging/exfat/exfat_core.c int multi_sector_write(struct super_block *sb, sector_t sec, sector_t 691 drivers/staging/exfat/exfat_super.c sector_t LogSector; sector_t 817 drivers/staging/exfat/exfat_super.c sector_t LogSector, sector = 0; sector_t 1071 drivers/staging/exfat/exfat_super.c sector_t sector = 0; sector_t 1413 drivers/staging/exfat/exfat_super.c sector_t sector = 0; sector_t 1499 drivers/staging/exfat/exfat_super.c sector_t sector = 0; sector_t 1652 drivers/staging/exfat/exfat_super.c sector_t sector = 0; sector_t 1744 drivers/staging/exfat/exfat_super.c sector_t sector = 0; sector_t 1940 drivers/staging/exfat/exfat_super.c sector_t sector; sector_t 3071 drivers/staging/exfat/exfat_super.c static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys, sector_t 3080 drivers/staging/exfat/exfat_super.c sector_t last_block; sector_t 3130 drivers/staging/exfat/exfat_super.c static int exfat_get_block(struct inode *inode, sector_t iblock, sector_t 3137 drivers/staging/exfat/exfat_super.c sector_t phys; sector_t 3253 drivers/staging/exfat/exfat_super.c static sector_t _exfat_bmap(struct address_space *mapping, sector_t block) sector_t 3255 drivers/staging/exfat/exfat_super.c sector_t blocknr; sector_t 841 drivers/target/target_core_device.c sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) sector_t 442 drivers/target/target_core_file.c sector_t nolb = sbc_get_write_same_sectors(cmd); sector_t 494 drivers/target/target_core_file.c fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb, sector_t 499 drivers/target/target_core_file.c sector_t prot_length, prot; sector_t 510 drivers/target/target_core_file.c sector_t len = min_t(sector_t, bufsize, prot_length - prot); sector_t 524 drivers/target/target_core_file.c fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) sector_t 544 drivers/target/target_core_file.c fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) sector_t 815 drivers/target/target_core_file.c static sector_t fd_get_blocks(struct se_device *dev) sector_t 307 drivers/target/target_core_iblock.c iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op, sector_t 392 drivers/target/target_core_iblock.c iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) sector_t 452 drivers/target/target_core_iblock.c sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); sector_t 453 drivers/target/target_core_iblock.c sector_t sectors = target_to_linux_sector(dev, sector_t 685 drivers/target/target_core_iblock.c sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); sector_t 792 drivers/target/target_core_iblock.c static sector_t iblock_get_blocks(struct se_device *dev) sector_t 801 drivers/target/target_core_iblock.c static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) sector_t 1028 drivers/target/target_core_pscsi.c static sector_t pscsi_get_blocks(struct se_device *dev) sector_t 599 drivers/target/target_core_rd.c static sector_t rd_get_blocks(struct se_device *dev) sector_t 169 drivers/target/target_core_sbc.c sector_t sbc_get_write_same_sectors(struct se_cmd *cmd) sector_t 196 drivers/target/target_core_sbc.c sector_t nolb = sbc_get_write_same_sectors(cmd); sector_t 285 drivers/target/target_core_sbc.c sector_t end_lba = dev->transport->get_blocks(dev) + 1; sector_t 1143 drivers/target/target_core_sbc.c sector_t lba; sector_t 1227 drivers/target/target_core_sbc.c sector_t sector = cmd->t_task_lba; sector_t 1291 drivers/target/target_core_sbc.c __u16 crc, sector_t sector, unsigned int ei_lba) sector_t 1376 drivers/target/target_core_sbc.c sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, sector_t 1382 drivers/target/target_core_sbc.c sector_t sector = start; sector_t 2230 drivers/target/target_core_user.c static sector_t tcmu_get_blocks(struct se_device *dev) sector_t 605 drivers/target/target_core_xcopy.c sector_t src_lba, sector_t 667 drivers/target/target_core_xcopy.c sector_t dst_lba, sector_t 730 drivers/target/target_core_xcopy.c sector_t src_lba, dst_lba, end_lba; sector_t 33 drivers/target/target_core_xcopy.h sector_t src_lba; sector_t 34 drivers/target/target_core_xcopy.h sector_t dst_lba; sector_t 16 fs/adfs/inode.c adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, sector_t 69 fs/adfs/inode.c static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) sector_t 295 fs/affs/file.c affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) sector_t 304 fs/affs/file.c BUG_ON(block > (sector_t)0x7fffffffUL); sector_t 320 fs/affs/file.c map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block))); sector_t 431 fs/affs/file.c static sector_t _affs_bmap(struct address_space *mapping, sector_t block) sector_t 41 fs/befs/linuxvfs.c static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 43 fs/befs/linuxvfs.c static sector_t befs_bmap(struct address_space *mapping, sector_t block); sector_t 116 fs/befs/linuxvfs.c static sector_t sector_t 117 fs/befs/linuxvfs.c befs_bmap(struct address_space *mapping, sector_t block) sector_t 132 fs/befs/linuxvfs.c befs_get_block(struct inode *inode, sector_t block, sector_t 880 fs/befs/linuxvfs.c if (befs_sb->num_blocks > ~((sector_t)0)) { sector_t 64 fs/bfs/file.c static int bfs_get_block(struct inode *inode, sector_t block, sector_t 185 fs/bfs/file.c static sector_t bfs_bmap(struct address_space *mapping, sector_t block) sector_t 168 fs/block_dev.c blkdev_get_block(struct inode *inode, sector_t iblock, sector_t 700 fs/block_dev.c int bdev_read_page(struct block_device *bdev, sector_t sector, sector_t 738 fs/block_dev.c int bdev_write_page(struct block_device *bdev, sector_t sector, sector_t 2949 fs/btrfs/extent_io.c sector_t sector = offset >> 9; sector_t 7847 fs/btrfs/inode.c static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, sector_t 10845 fs/btrfs/inode.c sector_t *span) sector_t 10854 fs/btrfs/inode.c .lowest_ppage = (sector_t)-1ULL, sector_t 11059 fs/btrfs/inode.c sector_t *span) sector_t 194 fs/buffer.c __find_get_block_slow(struct block_device *bdev, sector_t block) sector_t 535 fs/buffer.c sector_t bblock, unsigned blocksize) sector_t 878 fs/buffer.c static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) sector_t 880 fs/buffer.c sector_t retval = ~((sector_t)0); sector_t 893 fs/buffer.c static sector_t sector_t 895 fs/buffer.c sector_t block, int size) sector_t 900 fs/buffer.c sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size); sector_t 929 fs/buffer.c grow_dev_page(struct block_device *bdev, sector_t block, sector_t 935 fs/buffer.c sector_t end_block; sector_t 957 fs/buffer.c (sector_t)index << sizebits, sector_t 977 fs/buffer.c end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, sector_t 993 fs/buffer.c grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp) sector_t 1022 fs/buffer.c __getblk_slow(struct block_device *bdev, sector_t block, sector_t 1255 fs/buffer.c lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) sector_t 1290 fs/buffer.c __find_get_block(struct block_device *bdev, sector_t block, unsigned size) sector_t 1315 fs/buffer.c __getblk_gfp(struct block_device *bdev, sector_t block, sector_t 1330 fs/buffer.c void __breadahead(struct block_device *bdev, sector_t block, unsigned size) sector_t 1340 fs/buffer.c void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size, sector_t 1364 fs/buffer.c __bread_gfp(struct block_device *bdev, sector_t block, sector_t 1574 fs/buffer.c void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len) sector_t 1685 fs/buffer.c sector_t block; sector_t 1686 fs/buffer.c sector_t last_block; sector_t 1709 fs/buffer.c block = (sector_t)page->index << (PAGE_SHIFT - bbits); sector_t 1887 fs/buffer.c iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, sector_t 1948 fs/buffer.c sector_t block; sector_t 1962 fs/buffer.c block = (sector_t)page->index << (PAGE_SHIFT - bbits); sector_t 2230 fs/buffer.c sector_t iblock, lblock; sector_t 2240 fs/buffer.c iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); sector_t 2559 fs/buffer.c sector_t block_in_file; sector_t 2599 fs/buffer.c block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); sector_t 2779 fs/buffer.c sector_t iblock; sector_t 2794 fs/buffer.c iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); sector_t 2857 fs/buffer.c sector_t iblock; sector_t 2872 fs/buffer.c iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); sector_t 2969 fs/buffer.c sector_t generic_block_bmap(struct address_space *mapping, sector_t block, sector_t 3007 fs/buffer.c sector_t maxsector; sector_t 79 fs/cachefiles/internal.h sector_t brun; /* when to stop culling */ sector_t 80 fs/cachefiles/internal.h sector_t bcull; /* when to start culling */ sector_t 81 fs/cachefiles/internal.h sector_t bstop; /* when to stop allocating */ sector_t 399 fs/cachefiles/rdwr.c sector_t block0, block; sector_t 731 fs/cachefiles/rdwr.c sector_t block0, block; sector_t 72 fs/crypto/bio.c sector_t pblk, unsigned int len) sector_t 684 fs/dax.c sector_t sector, size_t size, struct page *to, sector_t 986 fs/dax.c static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) sector_t 994 fs/dax.c const sector_t sector = dax_iomap_sector(iomap, pos); sector_t 1061 fs/dax.c struct dax_device *dax_dev, sector_t sector, sector_t 1065 fs/dax.c sector_t start_sector = sector + (offset >> 9); sector_t 1131 fs/dax.c const sector_t sector = dax_iomap_sector(iomap, pos); sector_t 1312 fs/dax.c sector_t sector = dax_iomap_sector(&iomap, pos); sector_t 80 fs/direct-io.c sector_t block_in_file; /* Current offset into the underlying sector_t 84 fs/direct-io.c sector_t final_block_in_request;/* doesn't change */ sector_t 90 fs/direct-io.c sector_t final_block_in_bio; /* current final block in bio + 1 */ sector_t 91 fs/direct-io.c sector_t next_block_for_io; /* next block to be put under IO, sector_t 102 fs/direct-io.c sector_t cur_page_block; /* Where it starts */ sector_t 430 fs/direct-io.c sector_t first_sector, int nr_vecs) sector_t 669 fs/direct-io.c sector_t fs_startblk; /* Into file, in filesystem-sized blocks */ sector_t 670 fs/direct-io.c sector_t fs_endblk; /* Into file, in filesystem-sized blocks */ sector_t 725 fs/direct-io.c sector_t start_sector, struct buffer_head *map_bh) sector_t 727 fs/direct-io.c sector_t sector; sector_t 847 fs/direct-io.c unsigned offset, unsigned len, sector_t blocknr, sector_t 525 fs/ecryptfs/mmap.c static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block) sector_t 137 fs/efs/efs.h extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 13 fs/efs/file.c int efs_get_block(struct inode *inode, sector_t iblock, sector_t 21 fs/efs/inode.c static sector_t _efs_bmap(struct address_space *mapping, sector_t block) sector_t 225 fs/erofs/data.c bio->bi_iter.bi_sector = (sector_t)blknr << sector_t 326 fs/erofs/data.c static int erofs_get_block(struct inode *inode, sector_t iblock, sector_t 344 fs/erofs/data.c static sector_t erofs_bmap(struct address_space *mapping, sector_t block) sector_t 1273 fs/erofs/zdata.c bio->bi_iter.bi_sector = (sector_t)(first_index + i) << sector_t 759 fs/ext2/ext2.h extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 624 fs/ext2/inode.c sector_t iblock, unsigned long maxblocks, sector_t 782 fs/ext2/inode.c int ext2_get_block(struct inode *inode, sector_t iblock, sector_t 933 fs/ext2/inode.c static sector_t ext2_bmap(struct address_space *mapping, sector_t block) sector_t 1496 fs/ext2/super.c sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb); sector_t 1541 fs/ext2/super.c sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb); sector_t 222 fs/ext4/ext4.h sector_t io_next_block; sector_t 2582 fs/ext4/ext4.h int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, sector_t 2584 fs/ext4/ext4.h int ext4_get_block(struct inode *inode, sector_t iblock, sector_t 2586 fs/ext4/ext4.h int ext4_dio_get_block(struct inode *inode, sector_t iblock, sector_t 2588 fs/ext4/ext4.h int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, sector_t 2651 fs/ext4/ext4.h extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); sector_t 2699 fs/ext4/ext4.h sector_t block, int op_flags); sector_t 656 fs/ext4/indirect.c int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock) sector_t 659 fs/ext4/indirect.c sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1); sector_t 788 fs/ext4/inode.c static int _ext4_get_block(struct inode *inode, sector_t iblock, sector_t 814 fs/ext4/inode.c int ext4_get_block(struct inode *inode, sector_t iblock, sector_t 826 fs/ext4/inode.c int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, sector_t 843 fs/ext4/inode.c static int ext4_get_block_trans(struct inode *inode, sector_t iblock, sector_t 870 fs/ext4/inode.c int ext4_dio_get_block(struct inode *inode, sector_t iblock, sector_t 887 fs/ext4/inode.c sector_t iblock, struct buffer_head *bh_result, int create) sector_t 926 fs/ext4/inode.c sector_t iblock, struct buffer_head *bh_result, int create) sector_t 947 fs/ext4/inode.c static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock, sector_t 1178 fs/ext4/inode.c sector_t block; sector_t 1195 fs/ext4/inode.c block = (sector_t)page->index << (PAGE_SHIFT - bbits); sector_t 1825 fs/ext4/inode.c static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, sector_t 1831 fs/ext4/inode.c sector_t invalid_block = ~((sector_t) 0xffff); sector_t 1951 fs/ext4/inode.c int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, sector_t 2375 fs/ext4/inode.c sector_t pblock; sector_t 3269 fs/ext4/inode.c static sector_t ext4_bmap(struct address_space *mapping, sector_t block) sector_t 2791 fs/ext4/mballoc.c (sector_t)discard_block << (sb->s_blocksize_bits - 9), sector_t 2792 fs/ext4/mballoc.c (sector_t)count << (sb->s_blocksize_bits - 9), sector_t 170 fs/ext4/move_extent.c sector_t block; sector_t 185 fs/ext4/move_extent.c block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits); sector_t 425 fs/ext4/move_extent.c EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset), sector_t 292 fs/ext4/page-io.c sector_t bi_sector = bio->bi_iter.bi_sector; sector_t 230 fs/ext4/readpage.c sector_t last_block_in_bio = 0; sector_t 236 fs/ext4/readpage.c sector_t block_in_file; sector_t 237 fs/ext4/readpage.c sector_t last_block; sector_t 238 fs/ext4/readpage.c sector_t last_block_in_file; sector_t 239 fs/ext4/readpage.c sector_t blocks[MAX_BUF_PER_PAGE]; sector_t 268 fs/ext4/readpage.c block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); sector_t 1109 fs/ext4/resize.c static void update_backups(struct super_block *sb, sector_t blk_off, char *data, sector_t 1550 fs/ext4/resize.c sector_t old_gdb = 0; sector_t 1796 fs/ext4/resize.c if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { sector_t 151 fs/ext4/super.c ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags) sector_t 129 fs/ext4/xattr.c sector_t block_nr, sector_t 1394 fs/f2fs/data.c static int __get_data_block(struct inode *inode, sector_t iblock, sector_t 1417 fs/f2fs/data.c static int get_data_block(struct inode *inode, sector_t iblock, sector_t 1426 fs/f2fs/data.c static int get_data_block_dio_write(struct inode *inode, sector_t iblock, sector_t 1435 fs/f2fs/data.c static int get_data_block_dio(struct inode *inode, sector_t iblock, sector_t 1444 fs/f2fs/data.c static int get_data_block_bmap(struct inode *inode, sector_t iblock, sector_t 1456 fs/f2fs/data.c static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) sector_t 1461 fs/f2fs/data.c static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) sector_t 1540 fs/f2fs/data.c sector_t start_blk, last_blk; sector_t 1642 fs/f2fs/data.c sector_t *last_block_in_bio, sector_t 1648 fs/f2fs/data.c sector_t block_in_file; sector_t 1649 fs/f2fs/data.c sector_t last_block; sector_t 1650 fs/f2fs/data.c sector_t last_block_in_file; sector_t 1651 fs/f2fs/data.c sector_t block_nr; sector_t 1654 fs/f2fs/data.c block_in_file = (sector_t)page_index(page); sector_t 1770 fs/f2fs/data.c sector_t last_block_in_bio = 0; sector_t 2079 fs/f2fs/data.c sector_t *last_block, sector_t 2255 fs/f2fs/data.c sector_t last_block; sector_t 2961 fs/f2fs/data.c static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) sector_t 3034 fs/f2fs/data.c struct file *swap_file, sector_t *span) sector_t 3041 fs/f2fs/data.c sector_t probe_block; sector_t 3042 fs/f2fs/data.c sector_t last_block; sector_t 3043 fs/f2fs/data.c sector_t lowest_block = -1; sector_t 3044 fs/f2fs/data.c sector_t highest_block = 0; sector_t 3061 fs/f2fs/data.c sector_t first_block; sector_t 3079 fs/f2fs/data.c sector_t block; sector_t 3126 fs/f2fs/data.c sector_t *span) sector_t 3159 fs/f2fs/data.c sector_t *span) sector_t 1068 fs/f2fs/f2fs.h sector_t *last_block; /* last block number in bio */ sector_t 1076 fs/f2fs/f2fs.h sector_t last_block_in_bio; /* last block number */ sector_t 1756 fs/f2fs/segment.c sector_t sector, nr_sects; sector_t 1804 fs/f2fs/segment.c sector_t start = blkstart, len = 0; sector_t 117 fs/f2fs/segment.h (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK) sector_t 2868 fs/f2fs/super.c sector_t nr_sectors = bdev->bd_part->nr_sects; sector_t 2869 fs/f2fs/super.c sector_t sector = 0; sector_t 310 fs/fat/cache.c int fat_get_mapped_cluster(struct inode *inode, sector_t sector, sector_t 311 fs/fat/cache.c sector_t last_block, sector_t 312 fs/fat/cache.c unsigned long *mapped_blocks, sector_t *bmap) sector_t 333 fs/fat/cache.c static int is_exceed_eof(struct inode *inode, sector_t sector, sector_t 334 fs/fat/cache.c sector_t *last_block, int create) sector_t 358 fs/fat/cache.c int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, sector_t 362 fs/fat/cache.c sector_t last_block; sector_t 49 fs/fat/dir.c static inline void fat_dir_readahead(struct inode *dir, sector_t iblock, sector_t 50 fs/fat/dir.c sector_t phys) sector_t 86 fs/fat/dir.c sector_t phys, iblock; sector_t 1085 fs/fat/dir.c static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used, sector_t 1089 fs/fat/dir.c sector_t last_blknr = blknr + MSDOS_SB(sb)->sec_per_clus; sector_t 1143 fs/fat/dir.c sector_t blknr; sector_t 1209 fs/fat/dir.c sector_t blknr, start_blknr, last_blknr; sector_t 245 fs/fat/fat.h static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) sector_t 247 fs/fat/fat.h return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus sector_t 252 fs/fat/fat.h loff_t i_pos, sector_t *blknr, int *offset) sector_t 317 fs/fat/fat.h extern int fat_get_mapped_cluster(struct inode *inode, sector_t sector, sector_t 318 fs/fat/fat.h sector_t last_block, sector_t 319 fs/fat/fat.h unsigned long *mapped_blocks, sector_t *bmap); sector_t 320 fs/fat/fat.h extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, sector_t 11 fs/fat/fatent.c void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); sector_t 14 fs/fat/fatent.c int, sector_t); sector_t 23 fs/fat/fatent.c int *offset, sector_t *blocknr) sector_t 33 fs/fat/fatent.c int *offset, sector_t *blocknr) sector_t 69 fs/fat/fatent.c int offset, sector_t blocknr) sector_t 101 fs/fat/fatent.c int offset, sector_t blocknr) sector_t 319 fs/fat/fatent.c int offset, sector_t blocknr) sector_t 353 fs/fat/fatent.c sector_t blocknr; sector_t 383 fs/fat/fatent.c sector_t backup_fat = sbi->fat_length * copy; sector_t 438 fs/fat/fatent.c sector_t blocknr; sector_t 642 fs/fat/fatent.c sector_t blocknr; sector_t 118 fs/fat/inode.c static inline int __fat_get_block(struct inode *inode, sector_t iblock, sector_t 125 fs/fat/inode.c sector_t phys, last_block; sector_t 182 fs/fat/inode.c static int fat_get_block(struct inode *inode, sector_t iblock, sector_t 295 fs/fat/inode.c static int fat_get_block_bmap(struct inode *inode, sector_t iblock, sector_t 301 fs/fat/inode.c sector_t bmap; sector_t 320 fs/fat/inode.c static sector_t _fat_bmap(struct address_space *mapping, sector_t block) sector_t 322 fs/fat/inode.c sector_t blocknr; sector_t 854 fs/fat/inode.c sector_t blocknr; sector_t 1542 fs/fat/inode.c sector_t bd_sects; sector_t 68 fs/fat/nfs.c sector_t blocknr; sector_t 222 fs/fat/nfs.c sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); sector_t 42 fs/freevxfs/vxfs_subr.c static sector_t vxfs_bmap(struct address_space *, sector_t); sector_t 129 fs/freevxfs/vxfs_subr.c vxfs_getblk(struct inode *ip, sector_t iblock, sector_t 179 fs/freevxfs/vxfs_subr.c static sector_t sector_t 180 fs/freevxfs/vxfs_subr.c vxfs_bmap(struct address_space *mapping, sector_t block) sector_t 2470 fs/fuse/file.c static sector_t fuse_bmap(struct address_space *mapping, sector_t block) sector_t 71 fs/gfs2/aops.c static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock, sector_t 688 fs/gfs2/aops.c static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) sector_t 692 fs/gfs2/aops.c sector_t dblock = 0; sector_t 393 fs/gfs2/bmap.c static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp) sector_t 395 fs/gfs2/bmap.c sector_t factor = 1, block = 0; sector_t 585 fs/gfs2/bmap.c static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, sector_t 850 fs/gfs2/bmap.c sector_t lblock; sector_t 851 fs/gfs2/bmap.c sector_t lblock_stop; sector_t 1285 fs/gfs2/bmap.c int gfs2_block_map(struct inode *inode, sector_t lblock, sector_t 49 fs/gfs2/bmap.h extern int gfs2_block_map(struct inode *inode, sector_t lblock, sector_t 536 fs/gfs2/lops.c sector_t sector = dblock << sdp->sd_fsb2bb_shift; sector_t 236 fs/gfs2/ops_fstype.c static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) sector_t 1324 fs/gfs2/rgrp.c sector_t start = 0; sector_t 1325 fs/gfs2/rgrp.c sector_t nr_blks = 0; sector_t 333 fs/gfs2/trace_gfs2.h __field( sector_t, block ) sector_t 441 fs/gfs2/trace_gfs2.h sector_t lblock, int create, int errno), sector_t 447 fs/gfs2/trace_gfs2.h __field( sector_t, lblock ) sector_t 448 fs/gfs2/trace_gfs2.h __field( sector_t, pblock ) sector_t 518 fs/gfs2/trace_gfs2.h __field( sector_t, pblock ) sector_t 336 fs/hfs/extent.c int hfs_get_block(struct inode *inode, sector_t block, sector_t 78 fs/hfs/hfs_fs.h sector_t fs_blocks; sector_t 198 fs/hfs/hfs_fs.h extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 224 fs/hfs/hfs_fs.h extern int hfs_part_find(struct super_block *, sector_t *, sector_t *); sector_t 272 fs/hfs/hfs_fs.h sector_t __block; \ sector_t 67 fs/hfs/inode.c static sector_t hfs_bmap(struct address_space *mapping, sector_t block) sector_t 33 fs/hfs/mdb.c sector_t *start, sector_t *size) sector_t 48 fs/hfs/mdb.c *start = (sector_t)te.cdte_addr.lba << 2; sector_t 57 fs/hfs/mdb.c *start = (sector_t)ms_info.addr.lba << 2; sector_t 74 fs/hfs/mdb.c sector_t part_start, part_size; sector_t 302 fs/hfs/mdb.c sector_t block; sector_t 59 fs/hfs/part_tbl.c sector_t *part_start, sector_t *part_size) sector_t 225 fs/hfsplus/extents.c int hfsplus_get_block(struct inode *inode, sector_t iblock, sector_t 233 fs/hfsplus/extents.c sector_t sector; sector_t 282 fs/hfsplus/extents.c sector = ((sector_t)dblock << sbi->fs_shift) + sector_t 159 fs/hfsplus/hfsplus_fs.h sector_t part_start; sector_t 160 fs/hfsplus/hfsplus_fs.h sector_t sect_count; sector_t 243 fs/hfsplus/hfsplus_fs.h sector_t fs_blocks; sector_t 470 fs/hfsplus/hfsplus_fs.h int hfsplus_get_block(struct inode *inode, sector_t iblock, sector_t 506 fs/hfsplus/hfsplus_fs.h int hfs_part_find(struct super_block *sb, sector_t *part_start, sector_t 507 fs/hfsplus/hfsplus_fs.h sector_t *part_size); sector_t 532 fs/hfsplus/hfsplus_fs.h int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf, sector_t 61 fs/hfsplus/inode.c static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) sector_t 71 fs/hfsplus/part_tbl.c sector_t *part_start, sector_t *part_size) sector_t 92 fs/hfsplus/part_tbl.c struct new_pmap *pm, sector_t *part_start, sector_t *part_size) sector_t 130 fs/hfsplus/part_tbl.c sector_t *part_start, sector_t *part_size) sector_t 447 fs/hfsplus/super.c if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || sector_t 48 fs/hfsplus/wrapper.c int hfsplus_submit_bio(struct super_block *sb, sector_t sector, sector_t 128 fs/hfsplus/wrapper.c sector_t *start, sector_t *size) sector_t 144 fs/hfsplus/wrapper.c *start = (sector_t)te.cdte_addr.lba << 2; sector_t 154 fs/hfsplus/wrapper.c *start = (sector_t)ms_info.addr.lba << 2; sector_t 164 fs/hfsplus/wrapper.c sector_t part_start, part_size; sector_t 202 fs/hfsplus/wrapper.c part_start += (sector_t)wd.ablk_start + sector_t 203 fs/hfsplus/wrapper.c (sector_t)wd.embed_start * wd.ablk_size; sector_t 204 fs/hfsplus/wrapper.c part_size = (sector_t)wd.embed_count * wd.ablk_size; sector_t 77 fs/hpfs/file.c static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) sector_t 188 fs/hpfs/file.c static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) sector_t 1613 fs/inode.c sector_t bmap(struct inode *inode, sector_t block) sector_t 1615 fs/inode.c sector_t res = 0; sector_t 260 fs/ioctl.c static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) sector_t 265 fs/ioctl.c static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) sector_t 295 fs/ioctl.c sector_t start_blk, last_blk; sector_t 216 fs/iomap/buffered-io.c sector_t sector; sector_t 116 fs/iomap/fiemap.c sector_t *bno = data, addr; sector_t 129 fs/iomap/fiemap.c sector_t sector_t 130 fs/iomap/fiemap.c iomap_bmap(struct address_space *mapping, sector_t bno, sector_t 135 fs/iomap/swapfile.c struct file *swap_file, sector_t *pagespan, sector_t 140 fs/iomap/swapfile.c .lowest_ppage = (sector_t)-1ULL, sector_t 1057 fs/isofs/inode.c int isofs_get_blocks(struct inode *inode, sector_t iblock, sector_t 1148 fs/isofs/inode.c static int isofs_get_block(struct inode *inode, sector_t iblock, sector_t 1162 fs/isofs/inode.c static int isofs_bmap(struct inode *inode, sector_t block) sector_t 1175 fs/isofs/inode.c struct buffer_head *isofs_bread(struct inode *inode, sector_t block) sector_t 1177 fs/isofs/inode.c sector_t blknr = isofs_bmap(inode, block); sector_t 1194 fs/isofs/inode.c static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) sector_t 122 fs/isofs/isofs.h extern struct buffer_head *isofs_bread(struct inode *, sector_t); sector_t 123 fs/isofs/isofs.h extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long); sector_t 338 fs/jbd2/journal.c sector_t blocknr) sector_t 197 fs/jfs/inode.c int jfs_get_block(struct inode *ip, sector_t lblock, sector_t 329 fs/jfs/inode.c static sector_t jfs_bmap(struct address_space *mapping, sector_t block) sector_t 28 fs/jfs/jfs_inode.h extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 234 fs/jfs/jfs_metapage.c static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock, sector_t 240 fs/jfs/jfs_metapage.c sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >> sector_t 251 fs/jfs/jfs_metapage.c lblock = (sector_t)xaddr; sector_t 345 fs/jfs/jfs_metapage.c sector_t lblock; sector_t 347 fs/jfs/jfs_metapage.c sector_t pblock; sector_t 348 fs/jfs/jfs_metapage.c sector_t next_block = 0; sector_t 349 fs/jfs/jfs_metapage.c sector_t page_start; sector_t 355 fs/jfs/jfs_metapage.c page_start = (sector_t)page->index << sector_t 477 fs/jfs/jfs_metapage.c sector_t page_start; /* address of page in fs blocks */ sector_t 478 fs/jfs/jfs_metapage.c sector_t pblock; sector_t 484 fs/jfs/jfs_metapage.c page_start = (sector_t)page->index << sector_t 766 fs/jfs/jfs_metapage.c sector_t lblock; sector_t 23 fs/jfs/jfs_metapage.h sector_t index; /* block address of page */ sector_t 739 fs/jfs/super.c sector_t blk = off >> sb->s_blocksize_bits; sector_t 784 fs/jfs/super.c sector_t blk = off >> sb->s_blocksize_bits; sector_t 1071 fs/libfs.c if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || sector_t 372 fs/minix/inode.c static int minix_get_block(struct inode *inode, sector_t block, sector_t 420 fs/minix/inode.c static sector_t minix_bmap(struct address_space *mapping, sector_t block) sector_t 146 fs/minix/itree_common.c static int get_block(struct inode * inode, sector_t block, sector_t 72 fs/mpage.c sector_t first_sector, int nr_vecs, sector_t 141 fs/mpage.c sector_t last_block_in_bio; sector_t 164 fs/mpage.c sector_t block_in_file; sector_t 165 fs/mpage.c sector_t last_block; sector_t 166 fs/mpage.c sector_t last_block_in_file; sector_t 167 fs/mpage.c sector_t blocks[MAX_BUF_PER_PAGE]; sector_t 189 fs/mpage.c block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); sector_t 452 fs/mpage.c sector_t last_block_in_bio; sector_t 506 fs/mpage.c sector_t last_block; sector_t 507 fs/mpage.c sector_t block_in_file; sector_t 508 fs/mpage.c sector_t blocks[MAX_BUF_PER_PAGE]; sector_t 513 fs/mpage.c sector_t boundary_block = 0; sector_t 575 fs/mpage.c block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); sector_t 119 fs/nfs/blocklayout/blocklayout.c bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector, sector_t 146 fs/nfs/blocklayout/blocklayout.c do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, sector_t 196 fs/nfs/blocklayout/blocklayout.c sector_t isect, extent_length = 0; sector_t 259 fs/nfs/blocklayout/blocklayout.c sector_t isect, extent_length = 0; sector_t 281 fs/nfs/blocklayout/blocklayout.c isect = (sector_t) (f_offset >> SECTOR_SHIFT); sector_t 404 fs/nfs/blocklayout/blocklayout.c sector_t isect, extent_length = 0; sector_t 572 fs/nfs/blocklayout/blocklayout.c static int decode_sector_number(__be32 **rp, sector_t *sp) sector_t 770 fs/nfs/blocklayout/blocklayout.c sector_t offset = range->offset >> SECTOR_SHIFT, end; sector_t 128 fs/nfs/blocklayout/blocklayout.h sector_t be_f_offset; /* the starting offset in the file */ sector_t 129 fs/nfs/blocklayout/blocklayout.h sector_t be_length; /* the size of the extent */ sector_t 130 fs/nfs/blocklayout/blocklayout.h sector_t be_v_offset; /* the starting offset in the volume */ sector_t 182 fs/nfs/blocklayout/blocklayout.h int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start, sector_t 183 fs/nfs/blocklayout/blocklayout.h sector_t end); sector_t 184 fs/nfs/blocklayout/blocklayout.h int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start, sector_t 185 fs/nfs/blocklayout/blocklayout.h sector_t len, u64 lwb); sector_t 186 fs/nfs/blocklayout/blocklayout.h bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect, sector_t 39 fs/nfs/blocklayout/extent_tree.c static inline sector_t sector_t 46 fs/nfs/blocklayout/extent_tree.c __ext_tree_search(struct rb_root *root, sector_t start) sector_t 178 fs/nfs/blocklayout/extent_tree.c sector_t start, sector_t end, struct list_head *tmp) sector_t 181 fs/nfs/blocklayout/extent_tree.c sector_t len1 = 0, len2 = 0; sector_t 182 fs/nfs/blocklayout/extent_tree.c sector_t orig_v_offset; sector_t 183 fs/nfs/blocklayout/extent_tree.c sector_t orig_len; sector_t 285 fs/nfs/blocklayout/extent_tree.c sector_t new_len = ext_f_end(new) - ext_f_end(be); sector_t 286 fs/nfs/blocklayout/extent_tree.c sector_t diff = new->be_length - new_len; sector_t 298 fs/nfs/blocklayout/extent_tree.c sector_t new_len = ext_f_end(new) - ext_f_end(be); sector_t 299 fs/nfs/blocklayout/extent_tree.c sector_t diff = new->be_length - new_len; sector_t 322 fs/nfs/blocklayout/extent_tree.c __ext_tree_lookup(struct rb_root *root, sector_t isect, sector_t 345 fs/nfs/blocklayout/extent_tree.c ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect, sector_t 361 fs/nfs/blocklayout/extent_tree.c sector_t start, sector_t end) sector_t 381 fs/nfs/blocklayout/extent_tree.c sector_t split) sector_t 384 fs/nfs/blocklayout/extent_tree.c sector_t orig_len = be->be_length; sector_t 405 fs/nfs/blocklayout/extent_tree.c ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start, sector_t 406 fs/nfs/blocklayout/extent_tree.c sector_t len, u64 lwb) sector_t 409 fs/nfs/blocklayout/extent_tree.c sector_t end = start + len; sector_t 436 fs/nfs/blocklayout/extent_tree.c sector_t diff = start - be->be_f_offset; sector_t 454 fs/nfs/blocklayout/extent_tree.c sector_t diff = end - be->be_f_offset; sector_t 490 fs/nfs/file.c sector_t *span) sector_t 65 fs/nilfs2/bmap.c sector_t blocknr; sector_t 62 fs/nilfs2/bmap.h sector_t, sector_t 56 fs/nilfs2/btnode.c sector_t pblocknr, int mode, int mode_flags, sector_t 57 fs/nilfs2/btnode.c struct buffer_head **pbh, sector_t *submit_ptr) sector_t 36 fs/nilfs2/btnode.h int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int, sector_t 37 fs/nilfs2/btnode.h int, struct buffer_head **, sector_t *); sector_t 340 fs/nilfs2/btree.c sector_t blocknr) sector_t 475 fs/nilfs2/btree.c sector_t submit_ptr = 0; sector_t 699 fs/nilfs2/btree.c sector_t blocknr; sector_t 2176 fs/nilfs2/btree.c sector_t blocknr, sector_t 2217 fs/nilfs2/btree.c sector_t blocknr, sector_t 2246 fs/nilfs2/btree.c sector_t blocknr, sector_t 2285 fs/nilfs2/btree.c sector_t blocknr, sector_t 127 fs/nilfs2/dat.c sector_t blocknr) sector_t 145 fs/nilfs2/dat.c sector_t blocknr; sector_t 177 fs/nilfs2/dat.c sector_t blocknr; sector_t 202 fs/nilfs2/dat.c sector_t blocknr; sector_t 313 fs/nilfs2/dat.c int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) sector_t 383 fs/nilfs2/dat.c int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) sector_t 387 fs/nilfs2/dat.c sector_t blocknr; sector_t 21 fs/nilfs2/dat.h int nilfs_dat_translate(struct inode *, __u64, sector_t *); sector_t 28 fs/nilfs2/dat.h sector_t); sector_t 41 fs/nilfs2/dat.h int nilfs_dat_move(struct inode *, __u64, sector_t); sector_t 56 fs/nilfs2/direct.c sector_t blocknr; sector_t 290 fs/nilfs2/direct.c sector_t blocknr, sector_t 310 fs/nilfs2/direct.c sector_t blocknr, sector_t 323 fs/nilfs2/direct.c sector_t blocknr, sector_t 58 fs/nilfs2/gcinode.c int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t 59 fs/nilfs2/gcinode.c sector_t pbn, __u64 vbn, sector_t 126 fs/nilfs2/gcinode.c int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, sector_t 70 fs/nilfs2/inode.c int nilfs_get_block(struct inode *inode, sector_t blkoff, sector_t 1002 fs/nilfs2/inode.c sector_t blkoff, end_blkoff; sector_t 1003 fs/nilfs2/inode.c sector_t delalloc_blkoff; sector_t 1054 fs/nilfs2/inode.c maxblocks = min_t(sector_t, delalloc_blkoff - blkoff, sector_t 147 fs/nilfs2/mdt.c map_bh(bh, inode->i_sb, (sector_t)blknum); sector_t 255 fs/nilfs2/nilfs.h extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 339 fs/nilfs2/nilfs.h int nilfs_gccache_submit_read_data(struct inode *, sector_t, sector_t, __u64, sector_t 341 fs/nilfs2/nilfs.h int nilfs_gccache_submit_read_node(struct inode *, sector_t, __u64, sector_t 492 fs/nilfs2/page.c sector_t start_blk, sector_t 493 fs/nilfs2/page.c sector_t *blkoff) sector_t 499 fs/nilfs2/page.c sector_t b; sector_t 50 fs/nilfs2/page.h sector_t start_blk, sector_t 51 fs/nilfs2/page.h sector_t *blkoff); sector_t 41 fs/nilfs2/recovery.c sector_t blocknr; /* block number */ sector_t 95 fs/nilfs2/recovery.c sector_t start, unsigned long nblock) sector_t 130 fs/nilfs2/recovery.c int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, sector_t 181 fs/nilfs2/recovery.c nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, sector_t 247 fs/nilfs2/recovery.c sector_t blocknr; sector_t 283 fs/nilfs2/recovery.c sector_t blocknr = (*pbh)->b_blocknr; sector_t 304 fs/nilfs2/recovery.c static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, sector_t 311 fs/nilfs2/recovery.c sector_t blocknr; sector_t 574 fs/nilfs2/recovery.c sector_t pseg_start; sector_t 575 fs/nilfs2/recovery.c sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */ sector_t 806 fs/nilfs2/recovery.c sector_t pseg_start, pseg_end, sr_pseg_start = 0; sector_t 807 fs/nilfs2/recovery.c sector_t seg_start, seg_end; /* range of full segment (block number) */ sector_t 808 fs/nilfs2/recovery.c sector_t b, end; sector_t 27 fs/nilfs2/segbuf.c sector_t blocknr; sector_t 383 fs/nilfs2/segbuf.c static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start, sector_t 41 fs/nilfs2/segbuf.h sector_t next; sector_t 70 fs/nilfs2/segbuf.h sector_t sb_fseg_start, sb_fseg_end; sector_t 71 fs/nilfs2/segbuf.h sector_t sb_pseg_start; sector_t 1547 fs/nilfs2/segment.c sector_t blocknr; sector_t 37 fs/nilfs2/segment.h sector_t ri_super_root; sector_t 40 fs/nilfs2/segment.h sector_t ri_lsegs_start; sector_t 41 fs/nilfs2/segment.h sector_t ri_lsegs_end; sector_t 44 fs/nilfs2/segment.h sector_t ri_pseg_start; sector_t 241 fs/nilfs2/segment.h extern int nilfs_read_super_root_block(struct the_nilfs *, sector_t, sector_t 1024 fs/nilfs2/sufile.c sector_t seg_start, seg_end, start_block, end_block; sector_t 1025 fs/nilfs2/sufile.c sector_t start = 0, nblocks = 0; sector_t 227 fs/nilfs2/super.c sector_t nfreeblocks; sector_t 337 fs/nilfs2/super.c sector_t blocknr, newblocknr; sector_t 596 fs/nilfs2/super.c sector_t nfreeblocks; sector_t 460 fs/nilfs2/sysfs.c sector_t last_pseg; sector_t 879 fs/nilfs2/sysfs.c sector_t free_blocks = 0; sector_t 29 fs/nilfs2/the_nilfs.c sector_t start_blocknr, u64 seq, __u64 cno) sector_t 97 fs/nilfs2/the_nilfs.c struct super_block *sb, sector_t sr_block) sector_t 660 fs/nilfs2/the_nilfs.c sector_t seg_start, seg_end; sector_t 661 fs/nilfs2/the_nilfs.c sector_t start = 0, nblocks = 0; sector_t 694 fs/nilfs2/the_nilfs.c int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) sector_t 701 fs/nilfs2/the_nilfs.c *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; sector_t 135 fs/nilfs2/the_nilfs.h sector_t ns_last_pseg; sector_t 274 fs/nilfs2/the_nilfs.h void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64); sector_t 282 fs/nilfs2/the_nilfs.h int nilfs_count_free_blocks(struct the_nilfs *, sector_t *); sector_t 309 fs/nilfs2/the_nilfs.h sector_t *seg_start, sector_t *seg_end) sector_t 311 fs/nilfs2/the_nilfs.h *seg_start = (sector_t)nilfs->ns_blocks_per_segment * segnum; sector_t 317 fs/nilfs2/the_nilfs.h static inline sector_t sector_t 321 fs/nilfs2/the_nilfs.h (sector_t)nilfs->ns_blocks_per_segment * segnum; sector_t 325 fs/nilfs2/the_nilfs.h nilfs_get_segnum_of_block(struct the_nilfs *nilfs, sector_t blocknr) sector_t 327 fs/nilfs2/the_nilfs.h sector_t segnum = blocknr; sector_t 334 fs/nilfs2/the_nilfs.h nilfs_terminate_segment(struct the_nilfs *nilfs, sector_t seg_start, sector_t 335 fs/nilfs2/the_nilfs.h sector_t seg_end) sector_t 178 fs/ntfs/aops.c sector_t iblock, lblock, zblock; sector_t 539 fs/ntfs/aops.c sector_t block, dblock, iblock; sector_t 905 fs/ntfs/aops.c sector_t block, dblock, rec_block; sector_t 955 fs/ntfs/aops.c rec_block = block = (sector_t)page->index << sector_t 1549 fs/ntfs/aops.c static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) sector_t 576 fs/ntfs/file.c sector_t lcn_block; sector_t 763 fs/ntfs/logfile.c sector_t block, end_block; sector_t 667 fs/ntfs/super.c sector_t nr_blocks = NTFS_SB(sb)->nr_blocks; sector_t 43 fs/ocfs2/aops.c static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, sector_t 124 fs/ocfs2/aops.c static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock, sector_t 137 fs/ocfs2/aops.c int ocfs2_get_block(struct inode *inode, sector_t iblock, sector_t 457 fs/ocfs2/aops.c static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) sector_t 459 fs/ocfs2/aops.c sector_t status; sector_t 2140 fs/ocfs2/aops.c static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, sector_t 2152 fs/ocfs2/aops.c sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits; sector_t 50 fs/ocfs2/aops.h int ocfs2_get_block(struct inode *inode, sector_t iblock, sector_t 408 fs/ocfs2/buffer_head_io.c sector_t blkno) sector_t 76 fs/ocfs2/ocfs2.h sector_t ci_array[OCFS2_CACHE_INFO_MAX_ARRAY]; sector_t 174 fs/ocfs2/quota_global.c sector_t blk = off >> sb->s_blocksize_bits; sector_t 223 fs/ocfs2/quota_global.c sector_t blk = off >> sb->s_blocksize_bits; sector_t 583 fs/ocfs2/super.c BUILD_BUG_ON(sizeof(sector_t) != 8); sector_t 1653 fs/ocfs2/super.c buf->f_blocks = ((sector_t) numbits) * sector_t 1655 fs/ocfs2/super.c buf->f_bfree = ((sector_t) freebits) * sector_t 53 fs/ocfs2/uptodate.c sector_t c_block; sector_t 194 fs/ocfs2/uptodate.c sector_t item) sector_t 210 fs/ocfs2/uptodate.c sector_t block) sector_t 290 fs/ocfs2/uptodate.c sector_t block) sector_t 308 fs/ocfs2/uptodate.c sector_t block = new->c_block; sector_t 387 fs/ocfs2/uptodate.c sector_t block, sector_t 524 fs/ocfs2/uptodate.c sector_t *array = ci->ci_cache.ci_array; sector_t 540 fs/ocfs2/uptodate.c bytes = sizeof(sector_t) * (ci->ci_num_cached - index); sector_t 558 fs/ocfs2/uptodate.c sector_t block) sector_t 592 fs/ocfs2/uptodate.c sector_t block = bh->b_blocknr; sector_t 599 fs/ocfs2/uptodate.c sector_t block, sector_t 65 fs/ocfs2/uptodate.h sector_t block, sector_t 191 fs/omfs/file.c static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent, sector_t 192 fs/omfs/file.c sector_t block, int count, int *left) sector_t 195 fs/omfs/file.c sector_t searched = 0; sector_t 217 fs/omfs/file.c static int omfs_get_block(struct inode *inode, sector_t block, sector_t 221 fs/omfs/file.c sector_t next, offset; sector_t 333 fs/omfs/file.c static sector_t omfs_bmap(struct address_space *mapping, sector_t block) sector_t 24 fs/omfs/inode.c struct buffer_head *omfs_bread(struct super_block *sb, sector_t block) sector_t 344 fs/omfs/inode.c sector_t block; sector_t 30 fs/omfs/omfs.h static inline sector_t clus_to_blk(struct omfs_sb_info *sbi, sector_t block) sector_t 62 fs/omfs/omfs.h extern struct buffer_head *omfs_bread(struct super_block *sb, sector_t block); sector_t 65 fs/omfs/omfs.h extern int omfs_reserve_block(struct super_block *sb, sector_t block); sector_t 193 fs/orangefs/orangefs-kernel.h sector_t last_failed_block_index_read; sector_t 207 fs/orangefs/super.c buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total; sector_t 208 fs/orangefs/super.c buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail; sector_t 209 fs/orangefs/super.c buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail; sector_t 210 fs/orangefs/super.c buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total; sector_t 211 fs/orangefs/super.c buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail; sector_t 55 fs/qnx4/inode.c static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create ) sector_t 254 fs/qnx4/inode.c static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) sector_t 72 fs/qnx6/inode.c static int qnx6_get_block(struct inode *inode, sector_t iblock, sector_t 496 fs/qnx6/inode.c static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) sector_t 349 fs/reiserfs/bitmap.c int min, int max, int unfm, sector_t file_block) sector_t 241 fs/reiserfs/inode.c static int file_capable(struct inode *inode, sector_t block) sector_t 285 fs/reiserfs/inode.c static int _get_block_create_0(struct inode *inode, sector_t block, sector_t 472 fs/reiserfs/inode.c static int reiserfs_bmap(struct inode *inode, sector_t block, sector_t 503 fs/reiserfs/inode.c static int reiserfs_get_block_create_0(struct inode *inode, sector_t block, sector_t 515 fs/reiserfs/inode.c sector_t iblock, sector_t 640 fs/reiserfs/inode.c sector_t block, sector_t 657 fs/reiserfs/inode.c int reiserfs_get_block(struct inode *inode, sector_t block, sector_t 2534 fs/reiserfs/inode.c sector_t last_block; sector_t 2890 fs/reiserfs/inode.c static sector_t reiserfs_aop_bmap(struct address_space *as, sector_t block) sector_t 3071 fs/reiserfs/reiserfs.h int reiserfs_get_block(struct inode *inode, sector_t block, sector_t 3270 fs/reiserfs/reiserfs.h sector_t block; /* file offset, in blocks */ sector_t 3350 fs/reiserfs/reiserfs.h sector_t block) sector_t 3368 fs/reiserfs/reiserfs.h sector_t block) sector_t 203 fs/sysv/itree.c static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) sector_t 492 fs/sysv/itree.c static sector_t sysv_bmap(struct address_space *mapping, sector_t block) sector_t 668 fs/udf/balloc.c ((sector_t)count) << sb->s_blocksize_bits); sector_t 57 fs/udf/dir.c sector_t offset; sector_t 26 fs/udf/directory.c sector_t *offset) sector_t 59 fs/udf/inode.c static sector_t inode_getblk(struct inode *, sector_t, int *, int *); sector_t 69 fs/udf/inode.c static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); sector_t 230 fs/udf/inode.c static sector_t udf_bmap(struct address_space *mapping, sector_t block) sector_t 418 fs/udf/inode.c static int udf_get_block(struct inode *inode, sector_t block, sector_t 422 fs/udf/inode.c sector_t phys = 0; sector_t 632 fs/udf/inode.c sector_t first_block = newsize >> sb->s_blocksize_bits, offset; sector_t 688 fs/udf/inode.c static sector_t inode_getblk(struct inode *inode, sector_t block, sector_t 699 fs/udf/inode.c sector_t offset = 0; sector_t 2329 fs/udf/inode.c int8_t inode_bmap(struct inode *inode, sector_t block, sector_t 2331 fs/udf/inode.c uint32_t *elen, sector_t *offset) sector_t 2361 fs/udf/inode.c udf_pblk_t udf_block_map(struct inode *inode, sector_t block) sector_t 2365 fs/udf/inode.c sector_t offset; sector_t 176 fs/udf/namei.c sector_t offset; sector_t 342 fs/udf/namei.c sector_t offset; sector_t 730 fs/udf/namei.c sector_t offset; sector_t 287 fs/udf/partition.c sector_t ext_offset; sector_t 851 fs/udf/super.c static int udf_load_pvoldesc(struct super_block *sb, sector_t block) sector_t 1151 fs/udf/super.c int type1_index, sector_t start_block) sector_t 1155 fs/udf/super.c sector_t vat_block; sector_t 1184 fs/udf/super.c sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >> sector_t 1232 fs/udf/super.c static int udf_load_partdesc(struct super_block *sb, sector_t block) sector_t 1378 fs/udf/super.c static int udf_load_logicalvol(struct super_block *sb, sector_t block, sector_t 1657 fs/udf/super.c sector_t block, sector_t lastblock, sector_t 1775 fs/udf/super.c sector_t main_s, main_e, reserve_s, reserve_e; sector_t 1815 fs/udf/super.c static int udf_check_anchor_block(struct super_block *sb, sector_t block, sector_t 1845 fs/udf/super.c static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock, sector_t 1848 fs/udf/super.c sector_t last[6]; sector_t 1921 fs/udf/super.c sector_t lastblock = sbi->s_last_block; sector_t 209 fs/udf/truncate.c sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; sector_t 157 fs/udf/udfdecl.h extern udf_pblk_t udf_block_map(struct inode *inode, sector_t block); sector_t 158 fs/udf/udfdecl.h extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *, sector_t 159 fs/udf/udfdecl.h struct kernel_lb_addr *, uint32_t *, sector_t *); sector_t 247 fs/udf/udfdecl.h sector_t *); sector_t 239 fs/ufs/balloc.c static void ufs_change_blocknr(struct inode *inode, sector_t beg, sector_t 240 fs/ufs/balloc.c unsigned int count, sector_t oldb, sector_t 241 fs/ufs/balloc.c sector_t newb, struct page *locked_page) sector_t 249 fs/ufs/balloc.c sector_t end, i; sector_t 325 fs/ufs/balloc.c static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n, sector_t 329 fs/ufs/balloc.c sector_t end = beg + n; sector_t 135 fs/ufs/ialloc.c sector_t beg = uspi->s_sbbase + sector_t 138 fs/ufs/ialloc.c sector_t end = beg + uspi->s_fpb; sector_t 46 fs/ufs/inode.c static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4]) sector_t 256 fs/ufs/inode.c sector_t new_fragment, int *err, sector_t 335 fs/ufs/inode.c unsigned index, sector_t new_fragment, int *err, sector_t 396 fs/ufs/inode.c static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) sector_t 523 fs/ufs/inode.c static sector_t ufs_bmap(struct address_space *mapping, sector_t block) sector_t 1055 fs/ufs/inode.c sector_t lastfrag; sector_t 1131 fs/ufs/inode.c sector_t last = (inode->i_size - 1) >> uspi->s_bshift; sector_t 700 fs/xfs/xfs_aops.c sector_t sector, sector_t 770 fs/xfs/xfs_aops.c sector_t sector; sector_t 1135 fs/xfs/xfs_aops.c STATIC sector_t sector_t 1138 fs/xfs/xfs_aops.c sector_t block) sector_t 1182 fs/xfs/xfs_aops.c sector_t *span) sector_t 15 fs/xfs/xfs_bio_io.c sector_t sector, sector_t 62 fs/xfs/xfs_bmap_util.c sector_t block = XFS_BB_TO_FSBT(mp, sector); sector_t 1271 fs/xfs/xfs_buf.c sector_t sector = bp->b_maps[map].bm_bn; sector_t 222 fs/xfs/xfs_linux.h int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count, sector_t 533 fs/xfs/xfs_super.c ASSERT(sizeof(sector_t) == 8); sector_t 40 include/linux/badblocks.h sector_t sector; sector_t 41 include/linux/badblocks.h sector_t size; /* in sectors */ sector_t 44 include/linux/badblocks.h int badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t 45 include/linux/badblocks.h sector_t *first_bad, int *bad_sectors); sector_t 46 include/linux/badblocks.h int badblocks_set(struct badblocks *bb, sector_t s, int sectors, sector_t 48 include/linux/badblocks.h int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); sector_t 345 include/linux/bio.h static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) sector_t 351 include/linux/bio.h sector_t seed) sector_t 126 include/linux/blk_types.h static inline sector_t bio_issue_size(struct bio_issue *issue) sector_t 132 include/linux/blk_types.h sector_t size) sector_t 145 include/linux/blkdev.h sector_t __sector; /* sector cursor */ sector_t 361 include/linux/blkdev.h sector_t sector, struct blk_zone *zones, sector_t 363 include/linux/blkdev.h extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors, sector_t 364 include/linux/blkdev.h sector_t nr_sectors, gfp_t gfp_mask); sector_t 706 include/linux/blkdev.h static inline sector_t blk_queue_zone_sectors(struct request_queue *q) sector_t 718 include/linux/blkdev.h sector_t sector) sector_t 726 include/linux/blkdev.h sector_t sector) sector_t 925 include/linux/blkdev.h static inline sector_t blk_rq_pos(const struct request *rq) sector_t 1014 include/linux/blkdev.h sector_t offset) sector_t 1024 include/linux/blkdev.h sector_t offset) sector_t 1095 include/linux/blkdev.h sector_t offset); sector_t 1097 include/linux/blkdev.h sector_t offset); sector_t 1099 include/linux/blkdev.h sector_t offset); sector_t 1210 include/linux/blkdev.h extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); sector_t 1211 include/linux/blkdev.h extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t 1212 include/linux/blkdev.h sector_t nr_sects, gfp_t gfp_mask, struct page *page); sector_t 1216 include/linux/blkdev.h extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t 1217 include/linux/blkdev.h sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); sector_t 1218 include/linux/blkdev.h extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t 1219 include/linux/blkdev.h sector_t nr_sects, gfp_t gfp_mask, int flags, sector_t 1225 include/linux/blkdev.h extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t 1226 include/linux/blkdev.h sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, sector_t 1228 include/linux/blkdev.h extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t 1229 include/linux/blkdev.h sector_t nr_sects, gfp_t gfp_mask, unsigned flags); sector_t 1231 include/linux/blkdev.h static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t 1232 include/linux/blkdev.h sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) sector_t 1241 include/linux/blkdev.h static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t 1242 include/linux/blkdev.h sector_t nr_blocks, gfp_t gfp_mask) sector_t 1350 include/linux/blkdev.h static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) sector_t 1380 include/linux/blkdev.h static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) sector_t 1453 include/linux/blkdev.h static inline sector_t bdev_zone_sectors(struct block_device *bdev) sector_t 1492 include/linux/blkdev.h unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); sector_t 1520 include/linux/blkdev.h sector_t seed; sector_t 1699 include/linux/blkdev.h int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); sector_t 1711 include/linux/blkdev.h int (*report_zones)(struct gendisk *, sector_t sector, sector_t 1719 include/linux/blkdev.h extern int bdev_read_page(struct block_device *, sector_t, struct page *); sector_t 1720 include/linux/blkdev.h extern int bdev_write_page(struct block_device *, sector_t, struct page *, sector_t 1816 include/linux/blkdev.h sector_t *error_sector) sector_t 125 include/linux/blktrace_api.h static inline sector_t blk_rq_trace_sector(struct request *rq) sector_t 131 include/linux/blktrace_api.h if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) sector_t 68 include/linux/buffer_head.h sector_t b_blocknr; /* start block number */ sector_t 175 include/linux/buffer_head.h void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t 176 include/linux/buffer_head.h sector_t len); sector_t 185 include/linux/buffer_head.h struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, sector_t 187 include/linux/buffer_head.h struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, sector_t 191 include/linux/buffer_head.h void __breadahead(struct block_device *, sector_t block, unsigned int size); sector_t 192 include/linux/buffer_head.h void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, sector_t 195 include/linux/buffer_head.h sector_t block, unsigned size, gfp_t gfp); sector_t 207 include/linux/buffer_head.h sector_t bblock, unsigned blocksize); sector_t 258 include/linux/buffer_head.h sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); sector_t 307 include/linux/buffer_head.h sb_bread(struct super_block *sb, sector_t block) sector_t 313 include/linux/buffer_head.h sb_bread_unmovable(struct super_block *sb, sector_t block) sector_t 319 include/linux/buffer_head.h sb_breadahead(struct super_block *sb, sector_t block) sector_t 325 include/linux/buffer_head.h sb_breadahead_unmovable(struct super_block *sb, sector_t block) sector_t 331 include/linux/buffer_head.h sb_getblk(struct super_block *sb, sector_t block) sector_t 338 include/linux/buffer_head.h sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp) sector_t 344 include/linux/buffer_head.h sb_find_get_block(struct super_block *sb, sector_t block) sector_t 350 include/linux/buffer_head.h map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) sector_t 378 include/linux/buffer_head.h sector_t block, sector_t 385 include/linux/buffer_head.h sector_t block, sector_t 402 include/linux/buffer_head.h __bread(struct block_device *bdev, sector_t block, unsigned size) sector_t 25 include/linux/bvec.h sector_t bi_sector; /* device address in 512 byte sector_t 21 include/linux/cmdline-parser.h sector_t from; sector_t 22 include/linux/cmdline-parser.h sector_t size; sector_t 41 include/linux/cmdline-parser.h int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, sector_t 30 include/linux/dax.h sector_t, sector_t); sector_t 113 include/linux/dax.h int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); sector_t 122 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, sector_t 123 include/linux/dax.h sector_t sectors); sector_t 125 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, sector_t 126 include/linux/dax.h sector_t sectors) sector_t 157 include/linux/dax.h struct block_device *bdev, int blocksize, sector_t start, sector_t 158 include/linux/dax.h sector_t sectors) sector_t 207 include/linux/dax.h int blocksize, sector_t start, sector_t len); sector_t 226 include/linux/dax.h struct dax_device *dax_dev, sector_t sector, sector_t 230 include/linux/dax.h struct dax_device *dax_dev, sector_t sector, sector_t 96 include/linux/device-mapper.h typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector, sector_t 112 include/linux/device-mapper.h sector_t start, sector_t len, sector_t 256 include/linux/device-mapper.h sector_t begin; sector_t 257 include/linux/device-mapper.h sector_t len; sector_t 425 include/linux/device-mapper.h void dm_remap_zone_report(struct dm_target *ti, sector_t start, sector_t 459 include/linux/device-mapper.h sector_t start, sector_t len, char *params); sector_t 487 include/linux/device-mapper.h int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); sector_t 499 include/linux/device-mapper.h sector_t dm_table_get_size(struct dm_table *t); sector_t 586 include/linux/device-mapper.h sector_t _r = ((n) + (sz) - 1); \ sector_t 606 include/linux/device-mapper.h static inline sector_t to_sector(unsigned long long n) sector_t 611 include/linux/device-mapper.h static inline unsigned long to_bytes(sector_t n) sector_t 39 include/linux/dm-bufio.h void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); sector_t 55 include/linux/dm-bufio.h void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, sector_t 62 include/linux/dm-bufio.h void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, sector_t 69 include/linux/dm-bufio.h void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, sector_t 78 include/linux/dm-bufio.h sector_t block, unsigned n_blocks); sector_t 125 include/linux/dm-bufio.h void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); sector_t 132 include/linux/dm-bufio.h void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); sector_t 140 include/linux/dm-bufio.h sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); sector_t 141 include/linux/dm-bufio.h sector_t dm_bufio_get_block_number(struct dm_buffer *b); sector_t 18 include/linux/dm-dirty-log.h typedef sector_t region_t; sector_t 19 include/linux/dm-io.h sector_t sector; sector_t 20 include/linux/dm-io.h sector_t count; /* If this is zero the region is ignored. */ sector_t 40 include/linux/dm-region-hash.h sector_t target_begin, unsigned max_recovery, sector_t 51 include/linux/dm-region-hash.h sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); sector_t 57 include/linux/dm-region-hash.h sector_t dm_rh_get_region_size(struct dm_region_hash *rh); sector_t 96 include/linux/elevator.h struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); sector_t 151 include/linux/elevator.h extern struct request *elv_rb_find(struct rb_root *, sector_t); sector_t 90 include/linux/fs.h typedef int (get_block_t)(struct inode *inode, sector_t iblock, sector_t 387 include/linux/fs.h sector_t (*bmap)(struct address_space *, sector_t); sector_t 408 include/linux/fs.h sector_t *span); sector_t 2874 include/linux/fs.h extern sector_t bmap(struct inode *, sector_t); sector_t 249 include/linux/fscrypt.h extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, sector_t 493 include/linux/fscrypt.h sector_t pblk, unsigned int len) sector_t 109 include/linux/genhd.h sector_t start_sect; sector_t 115 include/linux/genhd.h sector_t nr_sects; sector_t 117 include/linux/genhd.h sector_t alignment_offset; sector_t 288 include/linux/genhd.h sector_t sector); sector_t 458 include/linux/genhd.h static inline sector_t get_start_sect(struct block_device *bdev) sector_t 462 include/linux/genhd.h static inline sector_t get_capacity(struct gendisk *disk) sector_t 466 include/linux/genhd.h static inline void set_capacity(struct gendisk *disk, sector_t size) sector_t 628 include/linux/genhd.h int partno, sector_t start, sector_t 629 include/linux/genhd.h sector_t len, int flags, sector_t 723 include/linux/genhd.h static inline sector_t part_nr_sects_read(struct hd_struct *part) sector_t 726 include/linux/genhd.h sector_t nr_sects; sector_t 734 include/linux/genhd.h sector_t nr_sects; sector_t 750 include/linux/genhd.h static inline void part_nr_sects_write(struct hd_struct *part, sector_t size) sector_t 413 include/linux/ide.h sector_t); sector_t 1069 include/linux/ide.h ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); sector_t 73 include/linux/iomap.h static inline sector_t sector_t 183 include/linux/iomap.h sector_t iomap_bmap(struct address_space *mapping, sector_t bno, sector_t 206 include/linux/iomap.h struct file *swap_file, sector_t *pagespan, sector_t 1327 include/linux/jbd2.h sector_t blocknr); sector_t 1181 include/linux/libata.h sector_t capacity, int geom[]); sector_t 89 include/linux/lightnvm.h typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, sector_t 351 include/linux/lightnvm.h sector_t total_secs; /* across channels */ sector_t 635 include/linux/lightnvm.h typedef sector_t (nvm_tgt_capacity_fn)(void *); sector_t 73 include/linux/pktcdvd.h sector_t last_write; /* The sector where the last write ended */ sector_t 120 include/linux/pktcdvd.h sector_t sector; /* First sector in this packet */ sector_t 186 include/linux/pktcdvd.h sector_t current_sector; /* Keep track of where the elevator is */ sector_t 154 include/linux/swap.h sector_t start_block; sector_t 396 include/linux/swap.h unsigned long nr_pages, sector_t start_block); sector_t 398 include/linux/swap.h sector_t *); sector_t 460 include/linux/swap.h extern int swap_type_of(dev_t, sector_t, struct block_device **); sector_t 462 include/linux/swap.h extern sector_t map_swap_page(struct page *, struct block_device **); sector_t 463 include/linux/swap.h extern sector_t swapdev_block(int, pgoff_t); sector_t 670 include/scsi/libsas.h sector_t capacity, int *hsc); sector_t 282 include/scsi/scsi_cmnd.h static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) sector_t 292 include/scsi/scsi_host.h sector_t, int []); sector_t 157 include/scsi/scsi_transport_iscsi.h u8 (*check_protection)(struct iscsi_task *task, sector_t *sector); sector_t 16 include/scsi/scsicam.h extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip); sector_t 43 include/target/target_core_backend.h sector_t (*get_blocks)(struct se_device *); sector_t 44 include/target/target_core_backend.h sector_t (*get_alignment_offset_lbas)(struct se_device *); sector_t 65 include/target/target_core_backend.h sector_t lba, sector_t nolb); sector_t 84 include/target/target_core_backend.h sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); sector_t 86 include/target/target_core_backend.h sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int, sector_t 110 include/target/target_core_backend.h sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); sector_t 537 include/target/target_core_base.h sector_t bad_sector; sector_t 18 include/trace/events/bcache.h __field(sector_t, sector ) sector_t 96 include/trace/events/bcache.h __field(sector_t, sector ) sector_t 129 include/trace/events/bcache.h __field(sector_t, sector ) sector_t 159 include/trace/events/bcache.h __field(sector_t, sector ) sector_t 230 include/trace/events/bcache.h __field(sector_t, sector ) sector_t 23 include/trace/events/block.h __field( sector_t, sector ) sector_t 82 include/trace/events/block.h __field( sector_t, sector ) sector_t 124 include/trace/events/block.h __field( sector_t, sector ) sector_t 156 include/trace/events/block.h __field( sector_t, sector ) sector_t 233 include/trace/events/block.h __field( sector_t, sector ) sector_t 270 include/trace/events/block.h __field( sector_t, sector ) sector_t 298 include/trace/events/block.h __field( sector_t, sector ) sector_t 365 include/trace/events/block.h __field( sector_t, sector ) sector_t 393 include/trace/events/block.h __field( sector_t, sector ) sector_t 528 include/trace/events/block.h __field( sector_t, sector ) sector_t 529 include/trace/events/block.h __field( sector_t, new_sector ) sector_t 562 include/trace/events/block.h sector_t from), sector_t 568 include/trace/events/block.h __field( sector_t, sector ) sector_t 571 include/trace/events/block.h __field( sector_t, old_sector ) sector_t 606 include/trace/events/block.h sector_t from), sector_t 612 include/trace/events/block.h __field( sector_t, sector ) sector_t 615 include/trace/events/block.h __field( sector_t, old_sector ) sector_t 1074 include/trace/events/f2fs.h __field(sector_t, sector) sector_t 46 kernel/power/hibernate.c sector_t swsusp_resume_block; sector_t 103 kernel/power/power.h extern sector_t swsusp_resume_block; sector_t 160 kernel/power/power.h extern sector_t alloc_swapdev_block(int swap); sector_t 61 kernel/power/swap.c #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) sector_t 81 kernel/power/swap.c sector_t entries[MAP_PAGE_ENTRIES]; sector_t 82 kernel/power/swap.c sector_t next_swap; sector_t 98 kernel/power/swap.c sector_t cur_swap; sector_t 99 kernel/power/swap.c sector_t first_sector; sector_t 106 kernel/power/swap.c char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - sector_t 109 kernel/power/swap.c sector_t image; sector_t 176 kernel/power/swap.c sector_t alloc_swapdev_block(int swap) sector_t 368 kernel/power/swap.c static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) sector_t 444 kernel/power/swap.c sector_t offset; sector_t 966 kernel/power/swap.c sector_t offset; sector_t 1010 kernel/power/swap.c sector_t offset; sector_t 207 kernel/power/user.c sector_t offset; sector_t 183 kernel/trace/blktrace.c static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, sector_t 213 kernel/trace/blktrace.c static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, sector_t 1023 kernel/trace/blktrace.c dev_t dev, sector_t from) sector_t 1061 kernel/trace/blktrace.c sector_t from) sector_t 148 mm/page_io.c sector_t *span) sector_t 155 mm/page_io.c sector_t probe_block; sector_t 156 mm/page_io.c sector_t last_block; sector_t 157 mm/page_io.c sector_t lowest_block = -1; sector_t 158 mm/page_io.c sector_t highest_block = 0; sector_t 175 mm/page_io.c sector_t first_block; sector_t 193 mm/page_io.c sector_t block; sector_t 263 mm/page_io.c static sector_t swap_page_sector(struct page *page) sector_t 265 mm/page_io.c return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9); sector_t 51 mm/swapfile.c static sector_t map_swap_entry(swp_entry_t, struct block_device**); sector_t 174 mm/swapfile.c sector_t start_block; sector_t 175 mm/swapfile.c sector_t nr_blocks; sector_t 181 mm/swapfile.c nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); sector_t 192 mm/swapfile.c nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); sector_t 235 mm/swapfile.c sector_t start_block = se->start_block + offset; sector_t 236 mm/swapfile.c sector_t nr_blocks = se->nr_pages - offset; sector_t 1760 mm/swapfile.c int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) sector_t 1806 mm/swapfile.c sector_t swapdev_block(int type, pgoff_t offset) sector_t 2257 mm/swapfile.c static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) sector_t 2274 mm/swapfile.c sector_t map_swap_page(struct page *page, struct block_device **bdev) sector_t 2312 mm/swapfile.c unsigned long nr_pages, sector_t start_block) sector_t 2381 mm/swapfile.c static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) sector_t 3008 mm/swapfile.c sector_t *span) sector_t 3108 mm/swapfile.c sector_t span; sector_t 160 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0, txlen = 0; sector_t 177 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0, txlen = 0; sector_t 201 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0, txlen = 0; sector_t 223 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0, txlen = 0; sector_t 253 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0, txlen = 0; sector_t 318 tools/lib/traceevent/plugins/plugin_scsi.c sector_t lba = 0;