bvec 65 arch/m68k/emu/nfblock.c struct bio_vec bvec;
bvec 72 arch/m68k/emu/nfblock.c bio_for_each_segment(bvec, bio, iter) {
bvec 73 arch/m68k/emu/nfblock.c len = bvec.bv_len;
bvec 76 arch/m68k/emu/nfblock.c page_to_phys(bvec.bv_page) + bvec.bv_offset);
bvec 1313 arch/um/drivers/ubd_kern.c u64 off, struct bio_vec *bvec)
bvec 1330 arch/um/drivers/ubd_kern.c if (bvec != NULL) {
bvec 1331 arch/um/drivers/ubd_kern.c io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
bvec 1332 arch/um/drivers/ubd_kern.c io_req->length = bvec->bv_len;
bvec 1362 arch/um/drivers/ubd_kern.c struct bio_vec bvec;
bvec 1366 arch/um/drivers/ubd_kern.c rq_for_each_segment(bvec, req, iter) {
bvec 1367 arch/um/drivers/ubd_kern.c ret = ubd_queue_one_vec(hctx, req, off, &bvec);
bvec 1370 arch/um/drivers/ubd_kern.c off += bvec.bv_len;
bvec 107 arch/xtensa/platforms/iss/simdisk.c struct bio_vec bvec;
bvec 111 arch/xtensa/platforms/iss/simdisk.c bio_for_each_segment(bvec, bio, iter) {
bvec 112 arch/xtensa/platforms/iss/simdisk.c char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
bvec 113 arch/xtensa/platforms/iss/simdisk.c unsigned len = bvec.bv_len >> SECTOR_SHIFT;
bvec 737 block/bio.c struct bio_vec *bvec;
bvec 756 block/bio.c bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
bvec 757 block/bio.c if (bvec_gap_to_prev(q, bvec, offset))
bvec 767 block/bio.c bvec = &bio->bi_io_vec[bio->bi_vcnt];
bvec 768 block/bio.c bvec->bv_page = page;
bvec 769 block/bio.c bvec->bv_len = len;
bvec 770 block/bio.c bvec->bv_offset = offset;
bvec 878 block/bio.c struct bio_vec *bvec;
bvec 883 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 884 block/bio.c if (mark_dirty && !PageCompound(bvec->bv_page))
bvec 885 block/bio.c set_page_dirty_lock(bvec->bv_page);
bvec 886 block/bio.c put_page(bvec->bv_page);
bvec 892 block/bio.c const struct bio_vec *bv = iter->bvec;
bvec 1170 block/bio.c struct bio_vec *bvec;
bvec 1173 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 1176 block/bio.c ret = copy_page_from_iter(bvec->bv_page,
bvec 1177 block/bio.c bvec->bv_offset,
bvec 1178 block/bio.c bvec->bv_len,
bvec 1184 block/bio.c if (ret < bvec->bv_len)
bvec 1201 block/bio.c struct bio_vec *bvec;
bvec 1204 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 1207 block/bio.c ret = copy_page_to_iter(bvec->bv_page,
bvec 1208 block/bio.c bvec->bv_offset,
bvec 1209 block/bio.c bvec->bv_len,
bvec 1215 block/bio.c if (ret < bvec->bv_len)
bvec 1224 block/bio.c struct bio_vec *bvec;
bvec 1227 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all)
bvec 1228 block/bio.c __free_page(bvec->bv_page);
bvec 1577 block/bio.c struct bio_vec *bvec;
bvec 1580 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 1581 block/bio.c memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
bvec 1582 block/bio.c p += bvec->bv_len;
bvec 1687 block/bio.c struct bio_vec *bvec;
bvec 1690 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 1691 block/bio.c if (!PageCompound(bvec->bv_page))
bvec 1692 block/bio.c set_page_dirty_lock(bvec->bv_page);
bvec 1735 block/bio.c struct bio_vec *bvec;
bvec 1739 block/bio.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 1740 block/bio.c if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
bvec 1525 block/blk-core.c struct bio_vec bvec;
bvec 1527 block/blk-core.c rq_for_each_segment(bvec, rq, iter)
bvec 1528 block/blk-core.c flush_dcache_page(bvec.bv_page);
bvec 399 block/blk-merge.c struct bio_vec *bvec, struct scatterlist *sglist,
bvec 402 block/blk-merge.c unsigned nbytes = bvec->bv_len;
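The arch/m68k, arch/um, and arch/xtensa entries above all walk a bio the same way: declare a stack bio_vec and a bvec_iter, then let bio_for_each_segment() hand back one page-sized segment per pass. A minimal sketch of that shape, not any one of those drivers; my_dev_transfer() is a hypothetical device-copy helper:

/*
 * Illustrative only: bio_for_each_segment() in the nfblock/simdisk
 * style above. my_dev_transfer() is hypothetical, not a kernel API.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>

static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;
        sector_t sector = bio->bi_iter.bi_sector;

        /* each pass yields one segment no larger than a page */
        bio_for_each_segment(bvec, bio, iter) {
                char *buf = kmap_atomic(bvec.bv_page) + bvec.bv_offset;

                my_dev_transfer(q->queuedata, sector, buf, bvec.bv_len,
                                op_is_write(bio_op(bio)));
                sector += bvec.bv_len >> SECTOR_SHIFT;
                kunmap_atomic(buf - bvec.bv_offset);
        }
        bio_endio(bio);
        return BLK_QC_T_NONE;
}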
bvec 406 block/blk-merge.c unsigned offset = bvec->bv_offset + total;
bvec 408 block/blk-merge.c struct page *page = bvec->bv_page;
bvec 442 block/blk-merge.c __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
bvec 446 block/blk-merge.c int nbytes = bvec->bv_len;
bvec 454 block/blk-merge.c if (!biovec_phys_mergeable(q, bvprv, bvec))
bvec 466 block/blk-merge.c struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
bvec 472 block/blk-merge.c bio_for_each_bvec(bvec, bio, iter) {
bvec 479 block/blk-merge.c __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
bvec 482 block/blk-merge.c if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
bvec 483 block/blk-merge.c nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
bvec 485 block/blk-merge.c nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
bvec 490 block/blk-merge.c bvprv = bvec;
bvec 165 block/bounce.c struct bio_vec *bvec, orig_vec;
bvec 172 block/bounce.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 174 block/bounce.c if (bvec->bv_page != orig_vec.bv_page) {
bvec 175 block/bounce.c dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
bvec 176 block/bounce.c mempool_free(bvec->bv_page, pool);
bvec 288 drivers/block/brd.c struct bio_vec bvec;
bvec 296 drivers/block/brd.c bio_for_each_segment(bvec, bio, iter) {
bvec 297 drivers/block/brd.c unsigned int len = bvec.bv_len;
bvec 300 drivers/block/brd.c err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bvec 1590 drivers/block/drbd/drbd_main.c struct bio_vec bvec;
bvec 1594 drivers/block/drbd/drbd_main.c bio_for_each_segment(bvec, bio, iter) {
bvec 1597 drivers/block/drbd/drbd_main.c err = _drbd_no_send_page(peer_device, bvec.bv_page,
bvec 1598 drivers/block/drbd/drbd_main.c bvec.bv_offset, bvec.bv_len,
bvec 1599 drivers/block/drbd/drbd_main.c bio_iter_last(bvec, iter)
bvec 1612 drivers/block/drbd/drbd_main.c struct bio_vec bvec;
bvec 1616 drivers/block/drbd/drbd_main.c bio_for_each_segment(bvec, bio, iter) {
bvec 1619 drivers/block/drbd/drbd_main.c err = _drbd_send_page(peer_device, bvec.bv_page,
bvec 1620 drivers/block/drbd/drbd_main.c bvec.bv_offset, bvec.bv_len,
bvec 1621 drivers/block/drbd/drbd_main.c bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
bvec 2011 drivers/block/drbd/drbd_receiver.c struct bio_vec bvec;
bvec 2034 drivers/block/drbd/drbd_receiver.c bio_for_each_segment(bvec, bio, iter) {
bvec 2035 drivers/block/drbd/drbd_receiver.c void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
bvec 2036 drivers/block/drbd/drbd_receiver.c expect = min_t(int, data_size, bvec.bv_len);
bvec 2038 drivers/block/drbd/drbd_receiver.c kunmap(bvec.bv_page);
bvec 318 drivers/block/drbd/drbd_worker.c struct bio_vec bvec;
bvec 325 drivers/block/drbd/drbd_worker.c bio_for_each_segment(bvec, bio, iter) {
bvec 328 drivers/block/drbd/drbd_worker.c src = kmap_atomic(bvec.bv_page);
bvec 329 drivers/block/drbd/drbd_worker.c crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
bvec 267 drivers/block/loop.c static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
bvec 272 drivers/block/loop.c iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
bvec 278 drivers/block/loop.c if (likely(bw == bvec->bv_len))
bvec 283 drivers/block/loop.c (unsigned long long)*ppos, bvec->bv_len);
bvec 292 drivers/block/loop.c struct bio_vec bvec;
bvec 296 drivers/block/loop.c rq_for_each_segment(bvec, rq, iter) {
bvec 297 drivers/block/loop.c ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
bvec 314 drivers/block/loop.c struct bio_vec bvec, b;
bvec 323 drivers/block/loop.c rq_for_each_segment(bvec, rq, iter) {
bvec 324 drivers/block/loop.c ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
bvec 325 drivers/block/loop.c bvec.bv_offset, bvec.bv_len, pos >> 9);
bvec 331 drivers/block/loop.c b.bv_len = bvec.bv_len;
bvec 344 drivers/block/loop.c struct bio_vec bvec;
bvec 349 drivers/block/loop.c rq_for_each_segment(bvec, rq, iter) {
bvec 350 drivers/block/loop.c iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
bvec 355 drivers/block/loop.c flush_dcache_page(bvec.bv_page);
bvec 357 drivers/block/loop.c if (len != bvec.bv_len) {
bvec 373 drivers/block/loop.c struct bio_vec bvec, b;
bvec 384 drivers/block/loop.c rq_for_each_segment(bvec, rq, iter) {
bvec 389 drivers/block/loop.c b.bv_len = bvec.bv_len;
bvec 398 drivers/block/loop.c ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
bvec 399 drivers/block/loop.c bvec.bv_offset, len, offset >> 9);
bvec 403 drivers/block/loop.c flush_dcache_page(bvec.bv_page);
bvec 405 drivers/block/loop.c if (len != bvec.bv_len) {
bvec 498 drivers/block/loop.c kfree(cmd->bvec);
bvec 499 drivers/block/loop.c cmd->bvec = NULL;
bvec 518 drivers/block/loop.c struct bio_vec *bvec;
bvec 532 drivers/block/loop.c bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
bvec 534 drivers/block/loop.c if (!bvec)
bvec 536 drivers/block/loop.c cmd->bvec = bvec;
bvec 545 drivers/block/loop.c *bvec = tmp;
bvec 546 drivers/block/loop.c bvec++;
bvec 548 drivers/block/loop.c bvec = cmd->bvec;
bvec 557 drivers/block/loop.c bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
bvec 561 drivers/block/loop.c iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
bvec 73 drivers/block/loop.h struct bio_vec *bvec;
bvec 609 drivers/block/nbd.c struct bio_vec bvec;
bvec 611 drivers/block/nbd.c bio_for_each_segment(bvec, bio, iter) {
bvec 612 drivers/block/nbd.c bool is_last = !next && bio_iter_last(bvec, iter);
bvec 616 drivers/block/nbd.c req, bvec.bv_len);
bvec 617 drivers/block/nbd.c iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
bvec 735 drivers/block/nbd.c struct bio_vec bvec;
bvec 737 drivers/block/nbd.c rq_for_each_segment(bvec, req, iter) {
bvec 738 drivers/block/nbd.c iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
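The loop and nbd entries above show the other common idiom: wrapping one segment at a time in a single-element iov_iter so the VFS or socket layer does the copying. A sketch of the loop.c-style read path under that pattern, with error handling trimmed:

/*
 * Sketch of the loop.c read path above: one iov_iter per request
 * segment, the VFS does the copy. "file" is the backing file, as in
 * the loop driver.
 */
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/uio.h>

static int my_read_request(struct file *file, struct request *rq, loff_t pos)
{
        struct bio_vec bvec;
        struct req_iterator iter;
        struct iov_iter i;
        ssize_t len;

        rq_for_each_segment(bvec, rq, iter) {
                iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
                len = vfs_iter_read(file, &i, &pos, 0);
                if (len < 0)
                        return len;
                flush_dcache_page(bvec.bv_page);
                if (len != bvec.bv_len)
                        return -EIO;    /* short read from backing file */
        }
        return 0;
}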
bvec 759 drivers/block/nbd.c req, bvec.bv_len);
bvec 1061 drivers/block/null_blk_main.c struct bio_vec bvec;
bvec 1071 drivers/block/null_blk_main.c rq_for_each_segment(bvec, rq, iter) {
bvec 1072 drivers/block/null_blk_main.c len = bvec.bv_len;
bvec 1073 drivers/block/null_blk_main.c err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
bvec 1094 drivers/block/null_blk_main.c struct bio_vec bvec;
bvec 1106 drivers/block/null_blk_main.c bio_for_each_segment(bvec, bio, iter) {
bvec 1107 drivers/block/null_blk_main.c len = bvec.bv_len;
bvec 1108 drivers/block/null_blk_main.c err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
bvec 86 drivers/block/ps3disk.c struct bio_vec bvec;
bvec 91 drivers/block/ps3disk.c rq_for_each_segment(bvec, req, iter) {
bvec 97 drivers/block/ps3disk.c size = bvec.bv_len;
bvec 98 drivers/block/ps3disk.c buf = bvec_kmap_irq(&bvec, &flags);
bvec 104 drivers/block/ps3disk.c flush_kernel_dcache_page(bvec.bv_page);
bvec 545 drivers/block/ps3vram.c struct bio_vec bvec;
bvec 549 drivers/block/ps3vram.c bio_for_each_segment(bvec, bio, iter) {
bvec 551 drivers/block/ps3vram.c char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
bvec 552 drivers/block/ps3vram.c size_t len = bvec.bv_len, retlen;
bvec 673 drivers/block/rsxx/dma.c struct bio_vec bvec;
bvec 712 drivers/block/rsxx/dma.c bio_for_each_segment(bvec, bio, iter) {
bvec 713 drivers/block/rsxx/dma.c bv_len = bvec.bv_len;
bvec 714 drivers/block/rsxx/dma.c bv_off = bvec.bv_offset;
bvec 726 drivers/block/rsxx/dma.c laddr, bvec.bv_page,
bvec 55 drivers/block/zram/zram_drv.c static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
bvec 145 drivers/block/zram/zram_drv.c static inline bool is_partial_io(struct bio_vec *bvec)
bvec 147 drivers/block/zram/zram_drv.c return bvec->bv_len != PAGE_SIZE;
bvec 150 drivers/block/zram/zram_drv.c static inline bool is_partial_io(struct bio_vec *bvec)
bvec 180 drivers/block/zram/zram_drv.c static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
bvec 182 drivers/block/zram/zram_drv.c *index += (*offset + bvec->bv_len) / PAGE_SIZE;
bvec 183 drivers/block/zram/zram_drv.c *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
bvec 589 drivers/block/zram/zram_drv.c static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
bvec 600 drivers/block/zram/zram_drv.c if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
bvec 658 drivers/block/zram/zram_drv.c struct bio_vec bvec;
bvec 660 drivers/block/zram/zram_drv.c bvec.bv_page = page;
bvec 661 drivers/block/zram/zram_drv.c bvec.bv_len = PAGE_SIZE;
bvec 662 drivers/block/zram/zram_drv.c bvec.bv_offset = 0;
bvec 703 drivers/block/zram/zram_drv.c if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
bvec 716 drivers/block/zram/zram_drv.c bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
bvec 717 drivers/block/zram/zram_drv.c bvec.bv_offset);
bvec 777 drivers/block/zram/zram_drv.c struct bio_vec bvec;
bvec 788 drivers/block/zram/zram_drv.c read_from_bdev_async(zram, &zw->bvec, entry, bio);
bvec 796 drivers/block/zram/zram_drv.c static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
bvec 801 drivers/block/zram/zram_drv.c work.bvec = *bvec;
bvec 814 drivers/block/zram/zram_drv.c static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
bvec 822 drivers/block/zram/zram_drv.c static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
bvec 827 drivers/block/zram/zram_drv.c return read_from_bdev_sync(zram, bvec, entry, parent);
bvec 829 drivers/block/zram/zram_drv.c return read_from_bdev_async(zram, bvec, entry, parent);
bvec 833 drivers/block/zram/zram_drv.c static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
bvec 1222 drivers/block/zram/zram_drv.c struct bio_vec bvec;
bvec 1226 drivers/block/zram/zram_drv.c bvec.bv_page = page;
bvec 1227 drivers/block/zram/zram_drv.c bvec.bv_len = PAGE_SIZE;
bvec 1228 drivers/block/zram/zram_drv.c bvec.bv_offset = 0;
bvec 1229 drivers/block/zram/zram_drv.c return read_from_bdev(zram, &bvec,
bvec 1273 drivers/block/zram/zram_drv.c static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
bvec 1279 drivers/block/zram/zram_drv.c page = bvec->bv_page;
bvec 1280 drivers/block/zram/zram_drv.c if (is_partial_io(bvec)) {
bvec 1287 drivers/block/zram/zram_drv.c ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
bvec 1291 drivers/block/zram/zram_drv.c if (is_partial_io(bvec)) {
bvec 1292 drivers/block/zram/zram_drv.c void *dst = kmap_atomic(bvec->bv_page);
bvec 1295 drivers/block/zram/zram_drv.c memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
bvec 1300 drivers/block/zram/zram_drv.c if (is_partial_io(bvec))
bvec 1306 drivers/block/zram/zram_drv.c static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
bvec 1315 drivers/block/zram/zram_drv.c struct page *page = bvec->bv_page;
bvec 1422 drivers/block/zram/zram_drv.c static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
bvec 1430 drivers/block/zram/zram_drv.c vec = *bvec;
bvec 1431 drivers/block/zram/zram_drv.c if (is_partial_io(bvec)) {
bvec 1445 drivers/block/zram/zram_drv.c src = kmap_atomic(bvec->bv_page);
bvec 1447 drivers/block/zram/zram_drv.c memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
bvec 1458 drivers/block/zram/zram_drv.c if (is_partial_io(bvec))
bvec 1506 drivers/block/zram/zram_drv.c static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
bvec 1513 drivers/block/zram/zram_drv.c generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT,
bvec 1518 drivers/block/zram/zram_drv.c ret = zram_bvec_read(zram, bvec, index, offset, bio);
bvec 1519 drivers/block/zram/zram_drv.c flush_dcache_page(bvec->bv_page);
bvec 1522 drivers/block/zram/zram_drv.c ret = zram_bvec_write(zram, bvec, index, offset, bio);
bvec 1545 drivers/block/zram/zram_drv.c struct bio_vec bvec;
bvec 1562 drivers/block/zram/zram_drv.c bio_for_each_segment(bvec, bio, iter) {
bvec 1563 drivers/block/zram/zram_drv.c struct bio_vec bv = bvec;
bvec 1564 drivers/block/zram/zram_drv.c unsigned int unwritten = bvec.bv_len;
bvec 293 drivers/md/dm-flakey.c struct bio_vec bvec;
bvec 302 drivers/md/dm-flakey.c bio_for_each_segment(bvec, bio, iter) {
bvec 211 drivers/md/dm-io.c struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
bvec 214 drivers/md/dm-io.c *p = bvec.bv_page;
bvec 215 drivers/md/dm-io.c *len = bvec.bv_len;
bvec 216 drivers/md/dm-io.c *offset = bvec.bv_offset;
bvec 219 drivers/md/dm-io.c dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
bvec 171 drivers/nvdimm/blk.c struct bio_vec bvec;
bvec 182 drivers/nvdimm/blk.c bio_for_each_segment(bvec, bio, iter) {
bvec 183 drivers/nvdimm/blk.c unsigned int len = bvec.bv_len;
bvec 186 drivers/nvdimm/blk.c err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
bvec 187 drivers/nvdimm/blk.c bvec.bv_offset, rw, iter.bi_sector);
bvec 1448 drivers/nvdimm/btt.c struct bio_vec bvec;
bvec 1456 drivers/nvdimm/btt.c bio_for_each_segment(bvec, bio, iter) {
bvec 1457 drivers/nvdimm/btt.c unsigned int len = bvec.bv_len;
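The zram entries above (lines 658-662 and 1222-1228 of zram_drv.c) also show how a caller fabricates a one-segment bio_vec on the stack when it holds a full page rather than a bio. A condensed sketch of that idiom; it reuses zram_bvec_read() and the zram types from the listing, so it would live in zram_drv.c, and the caller itself is illustrative:

/*
 * Sketch only: a stack bio_vec describing one whole page, so
 * is_partial_io() (bv_len != PAGE_SIZE) is false.
 */
static int my_read_page(struct zram *zram, struct page *page, u32 index)
{
        struct bio_vec bvec;

        bvec.bv_page = page;
        bvec.bv_len = PAGE_SIZE;
        bvec.bv_offset = 0;

        return zram_bvec_read(zram, &bvec, index, 0, NULL);
}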
bvec 1467 drivers/nvdimm/btt.c err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
bvec 191 drivers/nvdimm/pmem.c struct bio_vec bvec;
bvec 200 drivers/nvdimm/pmem.c bio_for_each_segment(bvec, bio, iter) {
bvec 201 drivers/nvdimm/pmem.c rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
bvec 202 drivers/nvdimm/pmem.c bvec.bv_offset, bio_op(bio), iter.bi_sector);
bvec 179 drivers/nvme/host/tcp.c return req->iter.bvec->bv_page;
bvec 184 drivers/nvme/host/tcp.c return req->iter.bvec->bv_offset + req->iter.iov_offset;
bvec 189 drivers/nvme/host/tcp.c return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
bvec 108 drivers/nvme/target/io-cmd-file.c iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
bvec 122 drivers/nvme/target/io-cmd-file.c if (req->f.bvec != req->inline_bvec) {
bvec 124 drivers/nvme/target/io-cmd-file.c kfree(req->f.bvec);
bvec 126 drivers/nvme/target/io-cmd-file.c mempool_free(req->f.bvec, req->ns->bvec_pool);
bvec 156 drivers/nvme/target/io-cmd-file.c nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
bvec 157 drivers/nvme/target/io-cmd-file.c len += req->f.bvec[bv_cnt].bv_len;
bvec 158 drivers/nvme/target/io-cmd-file.c total_len += req->f.bvec[bv_cnt].bv_len;
bvec 241 drivers/nvme/target/io-cmd-file.c req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
bvec 244 drivers/nvme/target/io-cmd-file.c req->f.bvec = req->inline_bvec;
bvec 246 drivers/nvme/target/io-cmd-file.c if (unlikely(!req->f.bvec)) {
bvec 248 drivers/nvme/target/io-cmd-file.c req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
bvec 302 drivers/nvme/target/nvmet.h struct bio_vec *bvec;
bvec 858 drivers/s390/block/dcssblk.c struct bio_vec bvec;
bvec 895 drivers/s390/block/dcssblk.c bio_for_each_segment(bvec, bio, iter) {
bvec 897 drivers/s390/block/dcssblk.c page_address(bvec.bv_page) + bvec.bv_offset;
bvec 899 drivers/s390/block/dcssblk.c if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
bvec 904 drivers/s390/block/dcssblk.c bvec.bv_len);
bvec 907 drivers/s390/block/dcssblk.c bvec.bv_len);
bvec 909 drivers/s390/block/dcssblk.c bytes_done += bvec.bv_len;
bvec 188 drivers/s390/block/xpram.c struct bio_vec bvec;
bvec 206 drivers/s390/block/xpram.c bio_for_each_segment(bvec, bio, iter) {
bvec 208 drivers/s390/block/xpram.c kmap(bvec.bv_page) + bvec.bv_offset;
bvec 209 drivers/s390/block/xpram.c bytes = bvec.bv_len;
bvec 271 drivers/target/target_core_file.c struct bio_vec *bvec;
bvec 279 drivers/target/target_core_file.c bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
bvec 280 drivers/target/target_core_file.c if (!bvec) {
bvec 286 drivers/target/target_core_file.c bvec[i].bv_page = sg_page(sg);
bvec 287 drivers/target/target_core_file.c bvec[i].bv_len = sg->length;
bvec 288 drivers/target/target_core_file.c bvec[i].bv_offset = sg->offset;
bvec 293 drivers/target/target_core_file.c iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
bvec 310 drivers/target/target_core_file.c kfree(bvec);
bvec 324 drivers/target/target_core_file.c struct bio_vec *bvec;
bvec 329 drivers/target/target_core_file.c bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
bvec 330 drivers/target/target_core_file.c if (!bvec) {
bvec 336 drivers/target/target_core_file.c bvec[i].bv_page = sg_page(sg);
bvec 337 drivers/target/target_core_file.c bvec[i].bv_len = sg->length;
bvec 338 drivers/target/target_core_file.c bvec[i].bv_offset = sg->offset;
bvec 343 drivers/target/target_core_file.c iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
bvec 387 drivers/target/target_core_file.c kfree(bvec);
bvec 444 drivers/target/target_core_file.c struct bio_vec *bvec;
bvec 468 drivers/target/target_core_file.c bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
bvec 469 drivers/target/target_core_file.c if (!bvec)
bvec 473 drivers/target/target_core_file.c bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
bvec 474 drivers/target/target_core_file.c bvec[i].bv_len = cmd->t_data_sg[0].length;
bvec 475 drivers/target/target_core_file.c bvec[i].bv_offset = cmd->t_data_sg[0].offset;
bvec 480 drivers/target/target_core_file.c iov_iter_bvec(&iter, READ, bvec, nolb, len);
bvec 483 drivers/target/target_core_file.c kfree(bvec);
bvec 42 fs/9p/vfs_addr.c struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
bvec 54 fs/9p/vfs_addr.c iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
bvec 154 fs/9p/vfs_addr.c struct bio_vec bvec;
bvec 162 fs/9p/vfs_addr.c bvec.bv_page = page;
bvec 163 fs/9p/vfs_addr.c bvec.bv_offset = 0;
bvec 164 fs/9p/vfs_addr.c bvec.bv_len = len;
bvec 165 fs/9p/vfs_addr.c iov_iter_bvec(&from, WRITE, &bvec, 1, len);
bvec 368 fs/afs/fsclient.c call->bvec[0].bv_len = size;
bvec 369 fs/afs/fsclient.c call->bvec[0].bv_offset = req->offset;
bvec 370 fs/afs/fsclient.c call->bvec[0].bv_page = req->pages[req->index];
bvec 371 fs/afs/fsclient.c iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
bvec 383 fs/afs/fsclient.c req->remain -= call->bvec[0].bv_len;
bvec 384 fs/afs/fsclient.c req->offset += call->bvec[0].bv_len;
bvec 122 fs/afs/internal.h struct bio_vec bvec[1];
bvec 480 fs/afs/yfsclient.c call->bvec[0].bv_len = size;
bvec 481 fs/afs/yfsclient.c call->bvec[0].bv_offset = req->offset;
bvec 482 fs/afs/yfsclient.c call->bvec[0].bv_page = req->pages[req->index];
bvec 483 fs/afs/yfsclient.c iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
bvec 495 fs/afs/yfsclient.c req->remain -= call->bvec[0].bv_len;
bvec 496 fs/afs/yfsclient.c req->offset += call->bvec[0].bv_len;
bvec 2807 fs/btrfs/check-integrity.c struct bio_vec bvec;
bvec 2828 fs/btrfs/check-integrity.c bio_for_each_segment(bvec, bio, iter) {
bvec 2829 fs/btrfs/check-integrity.c BUG_ON(bvec.bv_len != PAGE_SIZE);
bvec 2830 fs/btrfs/check-integrity.c mapped_datav[i] = kmap(bvec.bv_page);
bvec 2836 fs/btrfs/check-integrity.c i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
bvec 2837 fs/btrfs/check-integrity.c cur_bytenr += bvec.bv_len;
bvec 2843 fs/btrfs/check-integrity.c bio_for_each_segment(bvec, bio, iter)
bvec 2844 fs/btrfs/check-integrity.c kunmap(bvec.bv_page);
bvec 186 fs/btrfs/compression.c struct bio_vec *bvec;
bvec 194 fs/btrfs/compression.c bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
bvec 195 fs/btrfs/compression.c SetPageChecked(bvec->bv_page);
bvec 1134 fs/btrfs/compression.c struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
bvec 1140 fs/btrfs/compression.c start_byte = page_offset(bvec.bv_page) - disk_start;
bvec 1160 fs/btrfs/compression.c bytes = min_t(unsigned long, bvec.bv_len,
bvec 1164 fs/btrfs/compression.c kaddr = kmap_atomic(bvec.bv_page);
bvec 1165 fs/btrfs/compression.c memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
bvec 1167 fs/btrfs/compression.c flush_dcache_page(bvec.bv_page);
bvec 1177 fs/btrfs/compression.c bvec = bio_iter_iovec(bio, bio->bi_iter);
bvec 1179 fs/btrfs/compression.c start_byte = page_offset(bvec.bv_page) - disk_start;
bvec 842 fs/btrfs/disk-io.c struct bio_vec *bvec;
bvec 848 fs/btrfs/disk-io.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 849 fs/btrfs/disk-io.c root = BTRFS_I(bvec->bv_page->mapping->host)->root;
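The drivers/target/target_core_file.c entries above mirror a scatterlist into a kcalloc'd bio_vec array before handing it to vfs_iter_write()/vfs_iter_read(). A simplified sketch of that conversion; the function name is illustrative, not the driver's:

/*
 * Sketch of the target_core_file pattern: mirror a scatterlist into a
 * bio_vec array and let an iov_iter drive the backing file.
 */
#include <linux/fs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uio.h>

static ssize_t my_sgl_write(struct file *file, struct scatterlist *sgl,
                            u32 sgl_nents, loff_t pos)
{
        struct scatterlist *sg;
        struct bio_vec *bvec;
        struct iov_iter iter;
        size_t len = 0;
        ssize_t ret;
        int i;

        bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
        if (!bvec)
                return -ENOMEM;

        /* a bio_vec and an sg entry describe the same (page, off, len) */
        for_each_sg(sgl, sg, sgl_nents, i) {
                bvec[i].bv_page = sg_page(sg);
                bvec[i].bv_len = sg->length;
                bvec[i].bv_offset = sg->offset;
                len += sg->length;
        }

        iov_iter_bvec(&iter, WRITE, bvec, sgl_nents, len);
        ret = vfs_iter_write(file, &iter, &pos, 0);
        kfree(bvec);
        return ret;
}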
bvec 850 fs/btrfs/disk-io.c ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
bvec 2647 fs/btrfs/extent_io.c struct bio_vec *bvec;
bvec 2653 fs/btrfs/extent_io.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 2654 fs/btrfs/extent_io.c struct page *page = bvec->bv_page;
bvec 2663 fs/btrfs/extent_io.c if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
bvec 2664 fs/btrfs/extent_io.c if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
bvec 2667 fs/btrfs/extent_io.c bvec->bv_offset, bvec->bv_len);
bvec 2671 fs/btrfs/extent_io.c bvec->bv_offset, bvec->bv_len);
bvec 2675 fs/btrfs/extent_io.c end = start + bvec->bv_offset + bvec->bv_len - 1;
bvec 2709 fs/btrfs/extent_io.c struct bio_vec *bvec;
bvec 2724 fs/btrfs/extent_io.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 2725 fs/btrfs/extent_io.c struct page *page = bvec->bv_page;
bvec 2743 fs/btrfs/extent_io.c if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
bvec 2744 fs/btrfs/extent_io.c if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
bvec 2747 fs/btrfs/extent_io.c bvec->bv_offset, bvec->bv_len);
bvec 2751 fs/btrfs/extent_io.c bvec->bv_offset, bvec->bv_len);
bvec 2755 fs/btrfs/extent_io.c end = start + bvec->bv_offset + bvec->bv_len - 1;
bvec 2756 fs/btrfs/extent_io.c len = bvec->bv_len;
bvec 3821 fs/btrfs/extent_io.c struct bio_vec *bvec;
bvec 3827 fs/btrfs/extent_io.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 3828 fs/btrfs/extent_io.c struct page *page = bvec->bv_page;
bvec 155 fs/btrfs/file-item.c struct bio_vec bvec;
bvec 211 fs/btrfs/file-item.c bio_for_each_segment(bvec, bio, iter) {
bvec 212 fs/btrfs/file-item.c page_bytes_left = bvec.bv_len;
bvec 217 fs/btrfs/file-item.c offset = page_offset(bvec.bv_page) + bvec.bv_offset;
bvec 442 fs/btrfs/file-item.c struct bio_vec bvec;
bvec 473 fs/btrfs/file-item.c bio_for_each_segment(bvec, bio, iter) {
bvec 475 fs/btrfs/file-item.c offset = page_offset(bvec.bv_page) + bvec.bv_offset;
bvec 483 fs/btrfs/file-item.c bvec.bv_len + fs_info->sectorsize
bvec 513 fs/btrfs/file-item.c data = kmap_atomic(bvec.bv_page);
bvec 514 fs/btrfs/file-item.c crypto_shash_update(shash, data + bvec.bv_offset
bvec 8023 fs/btrfs/inode.c struct bio_vec bvec;
bvec 8039 fs/btrfs/inode.c bio_get_first_bvec(failed_bio, &bvec);
bvec 8041 fs/btrfs/inode.c (bvec.bv_len > btrfs_inode_sectorsize(inode)))
bvec 8074 fs/btrfs/inode.c struct bio_vec *bvec;
bvec 8088 fs/btrfs/inode.c bio_for_each_segment_all(bvec, bio, iter_all)
bvec 8090 fs/btrfs/inode.c io_tree, done->start, bvec->bv_page,
bvec 8101 fs/btrfs/inode.c struct bio_vec bvec;
bvec 8118 fs/btrfs/inode.c bio_for_each_segment(bvec, &io_bio->bio, iter) {
bvec 8119 fs/btrfs/inode.c nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
bvec 8120 fs/btrfs/inode.c pgoff = bvec.bv_offset;
bvec 8127 fs/btrfs/inode.c ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
bvec 8163 fs/btrfs/inode.c struct bio_vec *bvec;
bvec 8181 fs/btrfs/inode.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 8182 fs/btrfs/inode.c ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
bvec 8183 fs/btrfs/inode.c bvec->bv_offset, done->start,
bvec 8184 fs/btrfs/inode.c bvec->bv_len);
bvec 8188 fs/btrfs/inode.c bvec->bv_page,
bvec 8190 fs/btrfs/inode.c bvec->bv_offset);
bvec 8206 fs/btrfs/inode.c struct bio_vec bvec;
bvec 8227 fs/btrfs/inode.c bio_for_each_segment(bvec, &io_bio->bio, iter) {
bvec 8228 fs/btrfs/inode.c nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
bvec 8230 fs/btrfs/inode.c pgoff = bvec.bv_offset;
bvec 8235 fs/btrfs/inode.c bvec.bv_page, pgoff, start, sectorsize);
bvec 8244 fs/btrfs/inode.c status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
bvec 1167 fs/btrfs/raid56.c struct bio_vec bvec;
bvec 1178 fs/btrfs/raid56.c bio_for_each_segment(bvec, bio, iter) {
bvec 1179 fs/btrfs/raid56.c rbio->bio_pages[page_index + i] = bvec.bv_page;
bvec 1460 fs/btrfs/raid56.c struct bio_vec *bvec;
bvec 1465 fs/btrfs/raid56.c bio_for_each_segment_all(bvec, bio, iter_all)
bvec 1466 fs/btrfs/raid56.c SetPageUptodate(bvec->bv_page);
bvec 3924 fs/cifs/smb2ops.c struct bio_vec *bvec;
bvec 3927 fs/cifs/smb2ops.c bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
bvec 3928 fs/cifs/smb2ops.c if (!bvec)
bvec 3932 fs/cifs/smb2ops.c bvec[i].bv_page = pages[i];
bvec 3933 fs/cifs/smb2ops.c bvec[i].bv_offset = (i == 0) ? cur_off : 0;
bvec 3934 fs/cifs/smb2ops.c bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
bvec 3935 fs/cifs/smb2ops.c data_size -= bvec[i].bv_len;
bvec 3940 fs/cifs/smb2ops.c kfree(bvec);
bvec 3944 fs/cifs/smb2ops.c *page_vec = bvec;
bvec 3960 fs/cifs/smb2ops.c struct bio_vec *bvec = NULL;
bvec 4050 fs/cifs/smb2ops.c cur_off, &bvec);
bvec 4056 fs/cifs/smb2ops.c iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
bvec 4073 fs/cifs/smb2ops.c kfree(bvec);
bvec 2044 fs/cifs/smbdirect.c page = msg->msg_iter.bvec->bv_page;
bvec 2045 fs/cifs/smbdirect.c page_offset = msg->msg_iter.bvec->bv_offset;
bvec 2046 fs/cifs/smbdirect.c to_read = msg->msg_iter.bvec->bv_len;
bvec 402 fs/cifs/transport.c struct bio_vec bvec;
bvec 404 fs/cifs/transport.c bvec.bv_page = rqst[j].rq_pages[i];
bvec 405 fs/cifs/transport.c rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
bvec 406 fs/cifs/transport.c &bvec.bv_offset);
bvec 409 fs/cifs/transport.c &bvec, 1, bvec.bv_len);
bvec 14 fs/erofs/data.c struct bio_vec *bvec;
bvec 18 fs/erofs/data.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 19 fs/erofs/data.c struct page *page = bvec->bv_page;
bvec 725 fs/erofs/zdata.c struct bio_vec *bvec;
bvec 728 fs/erofs/zdata.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 729 fs/erofs/zdata.c struct page *page = bvec->bv_page;
bvec 64 fs/ext4/page-io.c struct bio_vec *bvec;
bvec 67 fs/ext4/page-io.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 68 fs/ext4/page-io.c struct page *page = bvec->bv_page;
bvec 71 fs/ext4/page-io.c unsigned bio_start = bvec->bv_offset;
bvec 72 fs/ext4/page-io.c unsigned bio_end = bio_start + bvec->bv_len;
bvec 190 fs/f2fs/data.c struct bio_vec *bvec;
bvec 198 fs/f2fs/data.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 199 fs/f2fs/data.c struct page *page = bvec->bv_page;
bvec 380 fs/f2fs/data.c struct bio_vec *bvec;
bvec 390 fs/f2fs/data.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 392 fs/f2fs/data.c target = bvec->bv_page;
bvec 171 fs/gfs2/lops.c struct bio_vec *bvec,
bvec 175 fs/gfs2/lops.c struct page *page = bvec->bv_page;
bvec 179 fs/gfs2/lops.c size = bvec->bv_len;
bvec 180 fs/gfs2/lops.c while (bh_offset(bh) < bvec->bv_offset)
bvec 206 fs/gfs2/lops.c struct bio_vec *bvec;
bvec 216 fs/gfs2/lops.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 217 fs/gfs2/lops.c page = bvec->bv_page;
bvec 219 fs/gfs2/lops.c gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
bvec 387 fs/gfs2/lops.c struct bio_vec *bvec;
bvec 390 fs/gfs2/lops.c bio_for_each_segment_all(bvec, bio, iter_all) {
bvec 391 fs/gfs2/lops.c page = bvec->bv_page;
bvec 188 fs/gfs2/meta_io.c struct bio_vec *bvec;
bvec 191 fs/gfs2/meta_io.c bio_for_each_segment_all(bvec, bio, iter_all) {
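The ext4, f2fs, gfs2, and xfs entries in this stretch are all I/O-completion handlers built on the bvec_iter_all form of bio_for_each_segment_all(). A generic sketch of such a write end_io callback, not taken from any one of these filesystems:

/*
 * Sketch only: the end_io shape shared by the filesystem entries
 * above, using the 5.x bvec_iter_all iteration form.
 */
#include <linux/bio.h>
#include <linux/pagemap.h>

static void my_write_end_io(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                if (bio->bi_status)
                        SetPageError(page);
                end_page_writeback(page);
        }
        bio_put(bio);
}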
bvec 192 fs/gfs2/meta_io.c struct page *page = bvec->bv_page;
bvec 194 fs/gfs2/meta_io.c unsigned int len = bvec->bv_len;
bvec 196 fs/gfs2/meta_io.c while (bh_offset(bh) < bvec->bv_offset)
bvec 162 fs/io_uring.c struct bio_vec *bvec;
bvec 1199 fs/io_uring.c iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
bvec 1218 fs/io_uring.c const struct bio_vec *bvec = imu->bvec;
bvec 1220 fs/io_uring.c if (offset <= bvec->bv_len) {
bvec 1226 fs/io_uring.c offset -= bvec->bv_len;
bvec 1229 fs/io_uring.c iter->bvec = bvec + seg_skip;
bvec 1231 fs/io_uring.c iter->count -= bvec->bv_len + offset;
bvec 1362 fs/io_uring.c iovec.iov_base = kmap(iter->bvec->bv_page)
bvec 1365 fs/io_uring.c iter->bvec->bv_len - iter->iov_offset);
bvec 1377 fs/io_uring.c kunmap(iter->bvec->bv_page);
bvec 3407 fs/io_uring.c put_user_page(imu->bvec[j].bv_page);
bvec 3411 fs/io_uring.c kvfree(imu->bvec);
bvec 3517 fs/io_uring.c imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
bvec 3520 fs/io_uring.c if (!imu->bvec) {
bvec 3555 fs/io_uring.c kvfree(imu->bvec);
bvec 3565 fs/io_uring.c imu->bvec[j].bv_page = pages[j];
bvec 3566 fs/io_uring.c imu->bvec[j].bv_len = vec_len;
bvec 3567 fs/io_uring.c imu->bvec[j].bv_offset = off;
bvec 151 fs/iomap/buffered-io.c iomap_read_page_end_io(struct bio_vec *bvec, int error)
bvec 153 fs/iomap/buffered-io.c struct page *page = bvec->bv_page;
bvec 160 fs/iomap/buffered-io.c iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
bvec 170 fs/iomap/buffered-io.c struct bio_vec *bvec;
bvec 173 fs/iomap/buffered-io.c bio_for_each_segment_all(bvec, bio, iter_all)
bvec 174 fs/iomap/buffered-io.c iomap_read_page_end_io(bvec, error);
bvec 532 fs/iomap/buffered-io.c struct bio_vec bvec;
bvec 541 fs/iomap/buffered-io.c bio_init(&bio, &bvec, 1);
bvec 61 fs/xfs/xfs_aops.c struct bio_vec *bvec,
bvec 64 fs/xfs/xfs_aops.c struct iomap_page *iop = to_iomap_page(bvec->bv_page);
bvec 67 fs/xfs/xfs_aops.c SetPageError(bvec->bv_page);
bvec 75 fs/xfs/xfs_aops.c end_page_writeback(bvec->bv_page);
bvec 95 fs/xfs/xfs_aops.c struct bio_vec *bvec;
bvec 108 fs/xfs/xfs_aops.c bio_for_each_segment_all(bvec, bio, iter_all)
bvec 109 fs/xfs/xfs_aops.c xfs_finish_page_writeback(inode, bvec, error);
bvec 172 include/linux/bio.h #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
bvec 532 include/linux/bio.h static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
bvec 541 include/linux/bio.h addr = (unsigned long) kmap_atomic(bvec->bv_page);
bvec 545 include/linux/bio.h return (char *) addr + bvec->bv_offset;
bvec 557 include/linux/bio.h static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
bvec 559 include/linux/bio.h return page_address(bvec->bv_page) + bvec->bv_offset;
bvec 833 include/linux/blkdev.h #define rq_iter_last(bvec, _iter) \
bvec 835 include/linux/blkdev.h bio_iter_last(bvec, _iter.iter))
bvec 45 include/linux/bvec.h #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
bvec 48 include/linux/bvec.h #define mp_bvec_iter_page(bvec, iter) \
bvec 49 include/linux/bvec.h (__bvec_iter_bvec((bvec), (iter))->bv_page)
bvec 51 include/linux/bvec.h #define mp_bvec_iter_len(bvec, iter) \
bvec 53 include/linux/bvec.h __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
bvec 55 include/linux/bvec.h #define mp_bvec_iter_offset(bvec, iter) \
bvec 56 include/linux/bvec.h (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
bvec 58 include/linux/bvec.h #define mp_bvec_iter_page_idx(bvec, iter) \
bvec 59 include/linux/bvec.h (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
bvec 61 include/linux/bvec.h #define mp_bvec_iter_bvec(bvec, iter) \
bvec 63 include/linux/bvec.h .bv_page = mp_bvec_iter_page((bvec), (iter)), \
bvec 64 include/linux/bvec.h .bv_len = mp_bvec_iter_len((bvec), (iter)), \
bvec 65 include/linux/bvec.h .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
bvec 69 include/linux/bvec.h #define bvec_iter_offset(bvec, iter) \
bvec 70 include/linux/bvec.h (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
bvec 72 include/linux/bvec.h #define bvec_iter_len(bvec, iter) \
bvec 73 include/linux/bvec.h min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \
bvec 74 include/linux/bvec.h PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
bvec 76 include/linux/bvec.h #define bvec_iter_page(bvec, iter) \
bvec 77 include/linux/bvec.h (mp_bvec_iter_page((bvec), (iter)) + \
bvec 78 include/linux/bvec.h mp_bvec_iter_page_idx((bvec), (iter)))
bvec 80 include/linux/bvec.h #define bvec_iter_bvec(bvec, iter) \
bvec 82 include/linux/bvec.h .bv_page = bvec_iter_page((bvec), (iter)), \
bvec 83 include/linux/bvec.h .bv_len = bvec_iter_len((bvec), (iter)), \
bvec 84 include/linux/bvec.h .bv_offset = bvec_iter_offset((bvec), (iter)), \
bvec 136 include/linux/bvec.h static inline void bvec_advance(const struct bio_vec *bvec,
bvec 145 include/linux/bvec.h bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
bvec 146 include/linux/bvec.h bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
bvec 149 include/linux/bvec.h bvec->bv_len - iter_all->done);
bvec 152 include/linux/bvec.h if (iter_all->done == bvec->bv_len) {
bvec 162 include/linux/bvec.h static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
bvec 165 include/linux/bvec.h unsigned total = bvec->bv_offset + bvec->bv_len;
bvec 168 include/linux/bvec.h seg->bv_page = bvec->bv_page + last_page;
bvec 171 include/linux/bvec.h if (bvec->bv_offset >= last_page * PAGE_SIZE) {
bvec 172 include/linux/bvec.h seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
bvec 173 include/linux/bvec.h seg->bv_len = bvec->bv_len;
bvec 56 include/linux/sunrpc/xdr.h struct bio_vec *bvec;
bvec 42 include/linux/uio.h const struct bio_vec *bvec;
bvec 220 include/linux/uio.h void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
bvec 70 lib/iov_iter.c for_each_bvec(__v, i->bvec, __bi, __start) { \
bvec 103 lib/iov_iter.c const struct bio_vec *bvec = i->bvec; \
bvec 107 lib/iov_iter.c i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
bvec 108 lib/iov_iter.c i->nr_segs -= i->bvec - bvec; \
bvec 1086 lib/iov_iter.c const struct bio_vec *bvec = i->bvec;
bvec 1088 lib/iov_iter.c size_t n = (--bvec)->bv_len;
bvec 1091 lib/iov_iter.c i->bvec = bvec;
bvec 1125 lib/iov_iter.c return min(i->count, i->bvec->bv_len - i->iov_offset);
bvec 1145 lib/iov_iter.c const struct bio_vec *bvec, unsigned long nr_segs,
bvec 1150 lib/iov_iter.c i->bvec = bvec;
bvec 1606 lib/iov_iter.c return new->bvec = kmemdup(new->bvec,
bvec 529 net/ceph/messenger.c struct bio_vec bvec = {
bvec 538 net/ceph/messenger.c iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
bvec 147 net/sunrpc/xdr.c if (n != 0 && buf->bvec == NULL) {
bvec 148 net/sunrpc/xdr.c buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
bvec 149 net/sunrpc/xdr.c if (!buf->bvec)
bvec 152 net/sunrpc/xdr.c buf->bvec[i].bv_page = buf->pages[i];
bvec 153 net/sunrpc/xdr.c buf->bvec[i].bv_len = PAGE_SIZE;
bvec 154 net/sunrpc/xdr.c buf->bvec[i].bv_offset = 0;
bvec 163 net/sunrpc/xdr.c kfree(buf->bvec);
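The include/linux/bvec.h macros above split a multi-page bvec into single-page segments purely by arithmetic on bi_bvec_done. A worked example of that arithmetic, assuming 4 KiB pages, followed by a minimal for_each_bvec() walk; pr_debug() stands in for real per-page work:

/*
 * Worked example of the single-page macros, assuming PAGE_SIZE == 4096.
 * For a multi-page bvec { .bv_offset = 512, .bv_len = 11776 } spanning
 * three pages, bvec_iter_bvec() yields:
 *
 *   bi_bvec_done =    0 -> page 0, offset 512, len 3584
 *   bi_bvec_done = 3584 -> page 1, offset   0, len 4096
 *   bi_bvec_done = 7680 -> page 2, offset   0, len 4096
 *
 * mp_bvec_iter_offset() is bv_offset + bi_bvec_done; dividing by
 * PAGE_SIZE picks the page, the remainder is the in-page offset, and
 * bvec_iter_len() clamps each step at the page boundary.
 */
#include <linux/bvec.h>
#include <linux/kernel.h>

static void walk_one_bvec(const struct bio_vec *mp_bvec)
{
        struct bvec_iter start = { .bi_size = mp_bvec->bv_len };
        struct bvec_iter iter;
        struct bio_vec bv;      /* one single-page segment per pass */

        for_each_bvec(bv, mp_bvec, iter, start)
                pr_debug("page %p offset %u len %u\n",
                         bv.bv_page, bv.bv_offset, bv.bv_len);
}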
bvec 164 net/sunrpc/xdr.c buf->bvec = NULL;
bvec 1729 net/sunrpc/xprt.c req->rq_snd_buf.bvec = NULL;
bvec 1730 net/sunrpc/xprt.c req->rq_rcv_buf.bvec = NULL;
bvec 337 net/sunrpc/xprtsock.c buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
bvec 366 net/sunrpc/xprtsock.c struct bio_vec *bvec, unsigned long nr, size_t count,
bvec 369 net/sunrpc/xprtsock.c iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
bvec 383 net/sunrpc/xprtsock.c xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
bvec 390 net/sunrpc/xprtsock.c bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
bvec 391 net/sunrpc/xprtsock.c for_each_bvec(bv, bvec, bi, bi)
bvec 396 net/sunrpc/xprtsock.c xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
bvec 428 net/sunrpc/xprtsock.c ret = xs_read_bvec(sock, msg, flags, buf->bvec,
bvec 434 net/sunrpc/xprtsock.c xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
bvec 773 net/sunrpc/xprtsock.c iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec,
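Finally, the sunrpc entries show the same iov_iter_bvec() idiom applied to sockets: buf->bvec mirrors the xdr_buf page array, so one msghdr can describe a receive into every page at once. A sketch along the lines of xs_read_bvec() above; the wrapper name and the MSG_DONTWAIT flag are assumptions:

/*
 * Sketch only: receive directly into a page array via its bio_vec
 * mirror. Compare xs_read_bvec() in net/sunrpc/xprtsock.c.
 */
#include <linux/uio.h>
#include <net/sock.h>

static ssize_t my_recv_pages(struct socket *sock, struct bio_vec *bvec,
                             unsigned long nr, size_t count)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

        iov_iter_bvec(&msg.msg_iter, READ, bvec, nr, count);
        return sock_recvmsg(sock, &msg, msg.msg_flags);
}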