blk_rq_pos 1364 arch/um/drivers/ubd_kern.c u64 off = (u64)blk_rq_pos(req) << SECTOR_SHIFT;
blk_rq_pos 1397 arch/um/drivers/ubd_kern.c ret = ubd_queue_one_vec(hctx, req, (u64)blk_rq_pos(req) << 9, NULL);
blk_rq_pos 464 block/bfq-iosched.c s1 = blk_rq_pos(rq1);
blk_rq_pos 465 block/bfq-iosched.c s2 = blk_rq_pos(rq2);
blk_rq_pos 570 block/bfq-iosched.c if (sector > blk_rq_pos(bfqq->next_rq))
blk_rq_pos 572 block/bfq-iosched.c else if (sector < blk_rq_pos(bfqq->next_rq))
blk_rq_pos 636 block/bfq-iosched.c blk_rq_pos(bfqq->next_rq), &parent, &p);
blk_rq_pos 914 block/bfq-iosched.c return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
blk_rq_pos 2131 block/bfq-iosched.c return abs(blk_rq_pos(rq) - last_pos);
blk_rq_pos 2268 block/bfq-iosched.c blk_rq_pos(req) <
blk_rq_pos 2269 block/bfq-iosched.c blk_rq_pos(container_of(rb_prev(&req->rb_node),
blk_rq_pos 2398 block/bfq-iosched.c return blk_rq_pos(io_struct);
blk_rq_pos 2438 block/bfq-iosched.c if (blk_rq_pos(__bfqq->next_rq) < sector)
blk_rq_pos 3229 block/bfq-iosched.c bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
blk_rq_pos 5353 block/bfq-iosched.c bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
blk_rq_pos 223 block/blk-core.c blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
blk_rq_pos 252 block/blk-core.c (unsigned long long)blk_rq_pos(rq),
blk_rq_pos 653 block/blk-core.c blk_rq_get_max_sectors(req, blk_rq_pos(req)))
blk_rq_pos 1361 block/blk-core.c part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
blk_rq_pos 1586 block/blk-core.c dst->__sector = blk_rq_pos(src);
blk_rq_pos 1834 block/blk-iocost.c if (blk_rq_pos(rq) < bio_end &&
blk_rq_pos 1835 block/blk-iocost.c blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
blk_rq_pos 580 block/blk-merge.c blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
blk_rq_pos 612 block/blk-merge.c blk_rq_get_max_sectors(req, blk_rq_pos(req)))
blk_rq_pos 634 block/blk-merge.c blk_rq_get_max_sectors(req, blk_rq_pos(req)))
blk_rq_pos 714 block/blk-merge.c else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
blk_rq_pos 890 block/blk-merge.c else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
blk_rq_pos 892 block/blk-merge.c else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
blk_rq_pos 1725 block/blk-mq.c return blk_rq_pos(rqa) > blk_rq_pos(rqb);
blk_rq_pos 54 block/elevator.c #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
blk_rq_pos 265 block/elevator.c if (blk_rq_pos(rq) < blk_rq_pos(__rq))
blk_rq_pos 267 block/elevator.c else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
blk_rq_pos 292 block/elevator.c if (sector < blk_rq_pos(rq))
blk_rq_pos 294 block/elevator.c else if (sector > blk_rq_pos(rq))
blk_rq_pos 378 block/elevator.c __rq = elv_rqhash_find(q, blk_rq_pos(rq));
blk_rq_pos 451 block/mq-deadline.c BUG_ON(sector != blk_rq_pos(__rq));
blk_rq_pos 1468 drivers/block/amiflop.c blk_rq_pos(rq), cnt,
blk_rq_pos 1471 drivers/block/amiflop.c block = blk_rq_pos(rq) + cnt;
blk_rq_pos 1545 drivers/block/ataflop.c ReqBlock = blk_rq_pos(fd_request);
blk_rq_pos 2255 drivers/block/floppy.c block = current_count_sectors + blk_rq_pos(req);
blk_rq_pos 2266 drivers/block/floppy.c DRWE->first_error_sector = blk_rq_pos(req);
blk_rq_pos 2269 drivers/block/floppy.c DRWE->last_error_sector = blk_rq_pos(req);
blk_rq_pos 2553 drivers/block/floppy.c TRACK = (int)blk_rq_pos(current_req) / max_sector;
blk_rq_pos 2554 drivers/block/floppy.c fsector_t = (int)blk_rq_pos(current_req) % max_sector;
blk_rq_pos 2901 drivers/block/floppy.c current_req, (long)blk_rq_pos(current_req),
blk_rq_pos 588 drivers/block/loop.c loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
blk_rq_pos 2065 drivers/block/mtip32xx/mtip32xx.c u64 start = blk_rq_pos(rq);
blk_rq_pos 3436 drivers/block/mtip32xx/mtip32xx.c if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
blk_rq_pos 437 drivers/block/nbd.c (unsigned long long)blk_rq_pos(req) << 9,
blk_rq_pos 569 drivers/block/nbd.c request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
blk_rq_pos 579 drivers/block/nbd.c (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
blk_rq_pos 1063 drivers/block/null_blk_main.c sector = blk_rq_pos(rq);
blk_rq_pos 1330 drivers/block/null_blk_main.c sector_t sector = blk_rq_pos(bd->rq);
blk_rq_pos 806 drivers/block/paride/pcd.c pcd_sector = blk_rq_pos(pcd_req);
blk_rq_pos 493 drivers/block/paride/pd.c pd_block = blk_rq_pos(pd_req);
blk_rq_pos 846 drivers/block/paride/pf.c pf_block = blk_rq_pos(pf_req);
blk_rq_pos 131 drivers/block/ps3disk.c start_sector = blk_rq_pos(req) * priv->blocking_factor;
blk_rq_pos 4800 drivers/block/rbd.c u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
blk_rq_pos 490 drivers/block/skd_main.c const u32 lba = blk_rq_pos(req);
blk_rq_pos 3581 drivers/block/skd_main.c u32 lba = blk_rq_pos(req);
blk_rq_pos 513 drivers/block/sunvdc.c desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
blk_rq_pos 543 drivers/block/swim.c err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_pos 348 drivers/block/swim3.c fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
blk_rq_pos 349 drivers/block/swim3.c x = ((long)blk_rq_pos(req)) % fs->secpercyl;
blk_rq_pos 632 drivers/block/swim3.c (long)blk_rq_pos(fs->cur_req));
blk_rq_pos 750 drivers/block/swim3.c (long)blk_rq_pos(req), err);
blk_rq_pos 762 drivers/block/sx8.c msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
blk_rq_pos 763 drivers/block/sx8.c tmp = (blk_rq_pos(rq) >> 16) >> 16;
blk_rq_pos 332 drivers/block/virtio_blk.c 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
blk_rq_pos 556 drivers/block/xen-blkfront.c ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
blk_rq_pos 763 drivers/block/xen-blkfront.c ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
blk_rq_pos 767 drivers/block/xen-blkfront.c ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
blk_rq_pos 665 drivers/block/xsysace.c (unsigned long long)blk_rq_pos(req),
blk_rq_pos 672 drivers/block/xsysace.c ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
blk_rq_pos 73 drivers/block/z2ram.c unsigned long start = blk_rq_pos(req) << 9;
blk_rq_pos 81 drivers/block/z2ram.c (unsigned long long)blk_rq_pos(req),
blk_rq_pos 581 drivers/cdrom/gdrom.c block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
blk_rq_pos 534 drivers/ide/ide-cd.c long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
blk_rq_pos 826 drivers/ide/ide-cd.c (blk_rq_pos(rq) & (sectors_per_frame - 1)))
blk_rq_pos 260 drivers/ide/ide-floppy.c if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
blk_rq_pos 380 drivers/ide/ide-io.c return drv->do_request(drive, rq, blk_rq_pos(rq));
blk_rq_pos 75 drivers/ide/ide-lib.c (unsigned long long)blk_rq_pos(rq));
blk_rq_pos 577 drivers/ide/ide-tape.c req->cmd[0], (unsigned long long)blk_rq_pos(rq),
blk_rq_pos 137 drivers/md/dm-rq.c blk_rq_pos(orig), tio->n_sectors, true,
blk_rq_pos 407 drivers/md/dm-rq.c blk_rq_pos(rq));
blk_rq_pos 457 drivers/md/dm-rq.c blk_rq_pos(orig), tio->n_sectors, false, 0,
blk_rq_pos 1900 drivers/memstick/core/ms_block.c lba = blk_rq_pos(req);
blk_rq_pos 721 drivers/memstick/core/mspro_block.c t_off = blk_rq_pos(msb->block_req);
blk_rq_pos 1099 drivers/mmc/core/block.c from = blk_rq_pos(req);
blk_rq_pos 1137 drivers/mmc/core/block.c from = blk_rq_pos(req);
blk_rq_pos 1216 drivers/mmc/core/block.c if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
blk_rq_pos 1316 drivers/mmc/core/block.c brq->data.blk_addr = blk_rq_pos(req);
blk_rq_pos 1340 drivers/mmc/core/block.c (blk_rq_pos(req) + blk_rq_sectors(req) ==
blk_rq_pos 1560 drivers/mmc/core/block.c brq->cmd.arg = blk_rq_pos(req);
blk_rq_pos 72 drivers/mtd/mtd_blkdevs.c block = blk_rq_pos(req) << 9 >> tr->blkshift;
blk_rq_pos 81 drivers/mtd/mtd_blkdevs.c if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
blk_rq_pos 192 drivers/mtd/ubi/block.c pos = blk_rq_pos(req) << 9;
blk_rq_pos 677 drivers/nvme/host/core.c cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
blk_rq_pos 701 drivers/nvme/host/core.c cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
blk_rq_pos 522 drivers/s390/block/dasd_diag.c first_rec = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 524 drivers/s390/block/dasd_diag.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 3099 drivers/s390/block/dasd_eckd.c first_trk = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 3102 drivers/s390/block/dasd_eckd.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 3182 drivers/s390/block/dasd_eckd.c first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 3185 drivers/s390/block/dasd_eckd.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 4550 drivers/s390/block/dasd_eckd.c first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 4553 drivers/s390/block/dasd_eckd.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 4632 drivers/s390/block/dasd_eckd.c start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
blk_rq_pos 4633 drivers/s390/block/dasd_eckd.c end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
blk_rq_pos 4646 drivers/s390/block/dasd_eckd.c first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
blk_rq_pos 4647 drivers/s390/block/dasd_eckd.c last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
blk_rq_pos 4775 drivers/s390/block/dasd_eckd.c recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
blk_rq_pos 346 drivers/s390/block/dasd_fba.c first_rec = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 348 drivers/s390/block/dasd_fba.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 463 drivers/s390/block/dasd_fba.c first_rec = blk_rq_pos(req) >> block->s2b_shift;
blk_rq_pos 465 drivers/s390/block/dasd_fba.c (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
blk_rq_pos 501 drivers/s390/block/dasd_fba.c block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
blk_rq_pos 196 drivers/s390/block/scm_blk.c msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
blk_rq_pos 828 drivers/scsi/sd.c u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 863 drivers/scsi/sd.c u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 894 drivers/scsi/sd.c u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 925 drivers/scsi/sd.c u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 1021 drivers/scsi/sd.c u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 1166 drivers/scsi/sd.c sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
blk_rq_pos 1185 drivers/scsi/sd.c if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
blk_rq_pos 1190 drivers/scsi/sd.c if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
blk_rq_pos 1251 drivers/scsi/sd.c (unsigned long long)blk_rq_pos(rq),
blk_rq_pos 1930 drivers/scsi/sd.c start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
blk_rq_pos 220 drivers/scsi/sd_zbc.c sector_t sector = blk_rq_pos(rq);
blk_rq_pos 360 drivers/scsi/sr.c blk_rq_pos(SCpnt->request)) << 9;
blk_rq_pos 475 drivers/scsi/sr.c if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
blk_rq_pos 491 drivers/scsi/sr.c block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
blk_rq_pos 960 include/linux/blkdev.h return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
blk_rq_pos 965 include/linux/blkdev.h return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
blk_rq_pos 131 include/linux/blktrace_api.h if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
blk_rq_pos 133 include/linux/blktrace_api.h return blk_rq_pos(rq);
blk_rq_pos 163 include/linux/elevator.h #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
blk_rq_pos 48 include/linux/t10-pi.h return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
blk_rq_pos 284 include/scsi/scsi_cmnd.h return blk_rq_pos(scmd->request);
blk_rq_pos 133 include/trace/events/block.h __entry->sector = blk_rq_pos(rq);
blk_rq_pos 622 include/trace/events/block.h __entry->sector = blk_rq_pos(rq);
blk_rq_pos 1077 kernel/trace/blktrace.c __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
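Every caller above treats the value returned by blk_rq_pos() as the request's starting sector in 512-byte units: drivers shift it left by SECTOR_SHIFT (or 9) to get a byte offset into the backing store, add blk_rq_sectors() to get the first sector past the request, or rescale it when the device's logical block size is not 512 bytes (sectors_to_logical() in drivers/scsi/sd.c, the s2b_shift shifts in the DASD drivers). The fragment below is a minimal sketch of those two recurring patterns inside a blk-mq .queue_rq handler, written against the same kernel version as this listing; my_queue_rq() is a hypothetical name, it is not taken from any file above, and it performs no real I/O.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* blk_rq_pos() returns the start sector of the request (512-byte units). */
	sector_t start = blk_rq_pos(rq);

	/* Byte offset into the backing store, as in loop, ubd, rbd and z2ram above. */
	loff_t off = (loff_t)start << SECTOR_SHIFT;

	/* First sector past the request, as in the elevator/merge/bfq callers above. */
	sector_t end = start + blk_rq_sectors(rq);

	if (end > get_capacity(rq->rq_disk))
		return BLK_STS_IOERR;	/* request runs past the end of the device */

	blk_mq_start_request(rq);
	/* A real driver would transfer blk_rq_bytes(rq) bytes starting at 'off' here. */
	(void)off;
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

The capacity check mirrors the test at drivers/scsi/sd.c:1185 in the listing; completing the request synchronously with blk_mq_end_request() follows the null_blk pattern and is only meant to keep the sketch self-contained.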