/linux-4.4.14/block/ |
D | bio.c |
    715   struct bio_vec *bvec;  in bio_add_pc_page() local
    756   bvec = &bio->bi_io_vec[bio->bi_vcnt];  in bio_add_pc_page()
    757   bvec->bv_page = page;  in bio_add_pc_page()
    758   bvec->bv_len = len;  in bio_add_pc_page()
    759   bvec->bv_offset = offset;  in bio_add_pc_page()
    779   if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))  in bio_add_pc_page()
    786   bvec->bv_page = NULL;  in bio_add_pc_page()
    787   bvec->bv_len = 0;  in bio_add_pc_page()
    788   bvec->bv_offset = 0;  in bio_add_pc_page()
    1018  struct bio_vec *bvec;  in bio_copy_from_iter() local
    [all …]
|
D | blk-merge.c |
    336   __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,  in __blk_segment_map_sg() argument
    341   int nbytes = bvec->bv_len;  in __blk_segment_map_sg()
    347   if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))  in __blk_segment_map_sg()
    349   if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))  in __blk_segment_map_sg()
    372   sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);  in __blk_segment_map_sg()
    375   *bvprv = *bvec;  in __blk_segment_map_sg()
    382   struct bio_vec bvec, bvprv = { NULL };  in __blk_bios_map_sg() local
    407   bvec = bio_iovec(bio);  in __blk_bios_map_sg()
    408   sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);  in __blk_bios_map_sg()
    413   bio_for_each_segment(bvec, bio, iter)  in __blk_bios_map_sg()
    [all …]
|
D | bounce.c | 129 struct bio_vec *bvec, *org_vec; in bounce_end_io() local 136 bio_for_each_segment_all(bvec, bio, i) { in bounce_end_io() 139 if (bvec->bv_page == org_vec->bv_page) in bounce_end_io() 142 dec_zone_page_state(bvec->bv_page, NR_BOUNCE); in bounce_end_io() 143 mempool_free(bvec->bv_page, pool); in bounce_end_io()
|
D | blk-core.c | 2993 struct bio_vec bvec; in rq_flush_dcache_pages() local 2995 rq_for_each_segment(bvec, rq, iter) in rq_flush_dcache_pages() 2996 flush_dcache_page(bvec.bv_page); in rq_flush_dcache_pages()
|
/linux-4.4.14/drivers/block/zram/ |
D | zram_drv.c |
    109   static inline bool is_partial_io(struct bio_vec *bvec)  in is_partial_io() argument
    111   return bvec->bv_len != PAGE_SIZE;  in is_partial_io()
    138   static void update_position(u32 *index, int *offset, struct bio_vec *bvec)  in update_position() argument
    140   if (*offset + bvec->bv_len >= PAGE_SIZE)  in update_position()
    142   *offset = (*offset + bvec->bv_len) % PAGE_SIZE;  in update_position()
    175   static void handle_zero_page(struct bio_vec *bvec)  in handle_zero_page() argument
    177   struct page *page = bvec->bv_page;  in handle_zero_page()
    181   if (is_partial_io(bvec))  in handle_zero_page()
    182   memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);  in handle_zero_page()
    598   static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,  in zram_bvec_read() argument
    [all …]
|
/linux-4.4.14/include/linux/ |
D | bio.h |
    64    #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])  argument
    66    #define bvec_iter_page(bvec, iter) \  argument
    67    (__bvec_iter_bvec((bvec), (iter))->bv_page)
    69    #define bvec_iter_len(bvec, iter) \  argument
    71    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
    73    #define bvec_iter_offset(bvec, iter) \  argument
    74    (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
    76    #define bvec_iter_bvec(bvec, iter) \  argument
    78    .bv_page = bvec_iter_page((bvec), (iter)), \
    79    .bv_len = bvec_iter_len((bvec), (iter)), \
    [all …]
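The macros above only index the bvec array and clip by iter.bi_bvec_done; a minimal sketch of how they are consumed (hypothetical driver code, not part of the listing) via bio_for_each_segment(), which hands out a bio_vec already adjusted by the iterator:

    #include <linux/bio.h>
    #include <linux/kernel.h>

    static void sketch_walk_bio(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            bio_for_each_segment(bv, bio, iter) {
                    /* here bv == bvec_iter_bvec(bio->bi_io_vec, iter) */
                    pr_debug("seg: page=%p off=%u len=%u sector=%llu\n",
                             bv.bv_page, bv.bv_offset, bv.bv_len,
                             (unsigned long long)iter.bi_sector);
            }
    }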
|
D | uio.h | 35 const struct bio_vec *bvec; member 94 void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
|
D | blkdev.h | 749 #define rq_iter_last(bvec, _iter) \ argument 751 bio_iter_last(bvec, _iter.iter))
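rq_iter_last() answers "is this the final segment of the request?". A minimal sketch of the usual pattern (hypothetical helper, modeled loosely on the nbd.c hits further down; send_seg is a stand-in callback): keep MSG_MORE set on every segment except the last one.

    #include <linux/blkdev.h>
    #include <linux/socket.h>

    static int sketch_send_rq(struct request *req,
                              int (*send_seg)(struct bio_vec *bv, int msg_flags))
    {
            struct req_iterator iter;
            struct bio_vec bvec;
            int ret;

            rq_for_each_segment(bvec, req, iter) {
                    /* MSG_MORE on every segment except the final one */
                    int flags = rq_iter_last(bvec, iter) ? 0 : MSG_MORE;

                    ret = send_seg(&bvec, flags);
                    if (ret < 0)
                            return ret;
            }
            return 0;
    }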
|
/linux-4.4.14/drivers/target/ |
D | target_core_file.c |
    255   struct bio_vec *bvec;  in fd_do_rw() local
    260   bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);  in fd_do_rw()
    261   if (!bvec) {  in fd_do_rw()
    267   bvec[i].bv_page = sg_page(sg);  in fd_do_rw()
    268   bvec[i].bv_len = sg->length;  in fd_do_rw()
    269   bvec[i].bv_offset = sg->offset;  in fd_do_rw()
    274   iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);  in fd_do_rw()
    280   kfree(bvec);  in fd_do_rw()
    364   struct bio_vec *bvec;  in fd_execute_write_same() local
    388   bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);  in fd_execute_write_same()
    [all …]
|
/linux-4.4.14/fs/9p/ |
D | vfs_addr.c |
    55    struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};  in v9fs_fid_readpage() local
    67    iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);  in v9fs_fid_readpage()
    166   struct bio_vec bvec;  in v9fs_vfs_writepage_locked() local
    174   bvec.bv_page = page;  in v9fs_vfs_writepage_locked()
    175   bvec.bv_offset = 0;  in v9fs_vfs_writepage_locked()
    176   bvec.bv_len = len;  in v9fs_vfs_writepage_locked()
    177   iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);  in v9fs_vfs_writepage_locked()
|
/linux-4.4.14/drivers/block/ |
D | nbd.c |
    224   static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,  in sock_send_bvec() argument
    228   void *kaddr = kmap(bvec->bv_page);  in sock_send_bvec()
    229   result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,  in sock_send_bvec()
    230   bvec->bv_len, flags);  in sock_send_bvec()
    231   kunmap(bvec->bv_page);  in sock_send_bvec()
    276   struct bio_vec bvec;  in nbd_send_req() local
    281   rq_for_each_segment(bvec, req, iter) {  in nbd_send_req()
    283   if (!rq_iter_last(bvec, iter))  in nbd_send_req()
    286   req, bvec.bv_len);  in nbd_send_req()
    287   result = sock_send_bvec(nbd, &bvec, flags);  in nbd_send_req()
    [all …]
|
D | loop.c |
    261   static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)  in lo_write_bvec() argument
    266   iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);  in lo_write_bvec()
    272   if (likely(bw == bvec->bv_len))  in lo_write_bvec()
    277   (unsigned long long)*ppos, bvec->bv_len);  in lo_write_bvec()
    286   struct bio_vec bvec;  in lo_write_simple() local
    290   rq_for_each_segment(bvec, rq, iter) {  in lo_write_simple()
    291   ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);  in lo_write_simple()
    308   struct bio_vec bvec, b;  in lo_write_transfer() local
    317   rq_for_each_segment(bvec, rq, iter) {  in lo_write_transfer()
    318   ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,  in lo_write_transfer()
    [all …]
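A condensed sketch of the single-bvec write path shown in the loop.c hits above, using the 4.4-era interfaces; the error handling at the end is a simplification of my own, not the driver's. One bio_vec is wrapped in an ITER_BVEC iov_iter and handed to the backing file with vfs_iter_write().

    #include <linux/bio.h>
    #include <linux/fs.h>
    #include <linux/uio.h>

    static int sketch_write_bvec(struct file *file, struct bio_vec *bvec,
                                 loff_t *ppos)
    {
            struct iov_iter i;
            ssize_t bw;

            /* One-segment ITER_BVEC iterator over the page described by *bvec. */
            iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);

            file_start_write(file);
            bw = vfs_iter_write(file, &i, ppos);
            file_end_write(file);

            if (likely(bw == bvec->bv_len))
                    return 0;
            return bw < 0 ? bw : -EIO;
    }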
|
D | brd.c | 331 struct bio_vec bvec; in brd_make_request() local 351 bio_for_each_segment(bvec, bio, iter) { in brd_make_request() 352 unsigned int len = bvec.bv_len; in brd_make_request() 355 err = brd_do_bvec(brd, bvec.bv_page, len, in brd_make_request() 356 bvec.bv_offset, rw, sector); in brd_make_request()
|
D | ps3disk.c | 97 struct bio_vec bvec; in ps3disk_scatter_gather() local 102 rq_for_each_segment(bvec, req, iter) { in ps3disk_scatter_gather() 108 size = bvec.bv_len; in ps3disk_scatter_gather() 109 buf = bvec_kmap_irq(&bvec, &flags); in ps3disk_scatter_gather() 115 flush_kernel_dcache_page(bvec.bv_page); in ps3disk_scatter_gather()
|
D | ps3vram.c | 558 struct bio_vec bvec; in ps3vram_do_bio() local 562 bio_for_each_segment(bvec, bio, iter) { in ps3vram_do_bio() 564 char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; in ps3vram_do_bio() 565 size_t len = bvec.bv_len, retlen; in ps3vram_do_bio()
|
D | pktcdvd.c |
    954   static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)  in pkt_make_local_copy() argument
    962   if (bvec[f].bv_page != pkt->pages[p]) {  in pkt_make_local_copy()
    963   void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;  in pkt_make_local_copy()
    967   bvec[f].bv_page = pkt->pages[p];  in pkt_make_local_copy()
    968   bvec[f].bv_offset = offs;  in pkt_make_local_copy()
    970   BUG_ON(bvec[f].bv_offset != offs);  in pkt_make_local_copy()
    1301  struct bio_vec *bvec = pkt->w_bio->bi_io_vec;  in pkt_start_write() local
    1311  bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];  in pkt_start_write()
    1312  bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;  in pkt_start_write()
    1313  if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))  in pkt_start_write()
    [all …]
|
/linux-4.4.14/fs/btrfs/ |
D | compression.h | 37 struct bio_vec *bvec, int vcnt, 48 void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt, 69 struct bio_vec *bvec,
|
D | file-item.c |
    165   struct bio_vec *bvec = bio->bi_io_vec;  in __btrfs_lookup_bio_sums() local
    225   offset = page_offset(bvec->bv_page) + bvec->bv_offset;  in __btrfs_lookup_bio_sums()
    246   offset + bvec->bv_len - 1,  in __btrfs_lookup_bio_sums()
    286   disk_bytenr += bvec->bv_len;  in __btrfs_lookup_bio_sums()
    287   offset += bvec->bv_len;  in __btrfs_lookup_bio_sums()
    288   bvec++;  in __btrfs_lookup_bio_sums()
    432   struct bio_vec *bvec = bio->bi_io_vec;  in btrfs_csum_one_bio() local
    451   offset = page_offset(bvec->bv_page) + bvec->bv_offset;  in btrfs_csum_one_bio()
    460   offset = page_offset(bvec->bv_page) + bvec->bv_offset;  in btrfs_csum_one_bio()
    483   data = kmap_atomic(bvec->bv_page);  in btrfs_csum_one_bio()
    [all …]
|
D | compression.c |
    85    u64 disk_start, struct bio_vec *bvec,
    201   struct bio_vec *bvec;  in end_compressed_bio_read() local
    207   bio_for_each_segment_all(bvec, cb->orig_bio, i)  in end_compressed_bio_read()
    208   SetPageChecked(bvec->bv_page);  in end_compressed_bio_read()
    930   u64 disk_start, struct bio_vec *bvec,  in btrfs_decompress_biovec() argument
    942   bvec, vcnt, srclen);  in btrfs_decompress_biovec()
    984   struct bio_vec *bvec, int vcnt,  in btrfs_decompress_buf2page() argument
    994   struct page *page_out = bvec[*pg_index].bv_page;  in btrfs_decompress_buf2page()
    1039  page_out = bvec[*pg_index].bv_page;  in btrfs_decompress_buf2page()
    1074  void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,  in btrfs_clear_biovec_end() argument
    [all …]
|
D | lzo.c | 260 struct bio_vec *bvec, in lzo_decompress_biovec() argument 369 bvec, vcnt, in lzo_decompress_biovec() 377 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); in lzo_decompress_biovec()
|
D | extent_io.c |
    2554  struct bio_vec *bvec;  in end_bio_extent_writepage() local
    2559  bio_for_each_segment_all(bvec, bio, i) {  in end_bio_extent_writepage()
    2560  struct page *page = bvec->bv_page;  in end_bio_extent_writepage()
    2567  if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {  in end_bio_extent_writepage()
    2568  if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)  in end_bio_extent_writepage()
    2571  bvec->bv_offset, bvec->bv_len);  in end_bio_extent_writepage()
    2576  bvec->bv_offset, bvec->bv_len);  in end_bio_extent_writepage()
    2580  end = start + bvec->bv_offset + bvec->bv_len - 1;  in end_bio_extent_writepage()
    2616  struct bio_vec *bvec;  in end_bio_extent_readpage() local
    2630  bio_for_each_segment_all(bvec, bio, i) {  in end_bio_extent_readpage()
    [all …]
|
D | zlib.c | 215 struct bio_vec *bvec, in zlib_decompress_biovec() argument 269 bvec, vcnt, in zlib_decompress_biovec() 303 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset); in zlib_decompress_biovec()
|
D | inode.c |
    7837  struct bio_vec *bvec;  in btrfs_retry_endio_nocsum() local
    7844  bio_for_each_segment_all(bvec, bio, i)  in btrfs_retry_endio_nocsum()
    7845  clean_io_failure(done->inode, done->start, bvec->bv_page, 0);  in btrfs_retry_endio_nocsum()
    7854  struct bio_vec *bvec;  in __btrfs_correct_data_nocsum() local
    7863  bio_for_each_segment_all(bvec, &io_bio->bio, i) {  in __btrfs_correct_data_nocsum()
    7869  ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,  in __btrfs_correct_data_nocsum()
    7870  start + bvec->bv_len - 1,  in __btrfs_correct_data_nocsum()
    7883  start += bvec->bv_len;  in __btrfs_correct_data_nocsum()
    7893  struct bio_vec *bvec;  in btrfs_retry_endio() local
    7902  bio_for_each_segment_all(bvec, bio, i) {  in btrfs_retry_endio()
    [all …]
|
D | disk-io.c | 877 struct bio_vec *bvec; in btree_csum_one_bio() local 881 bio_for_each_segment_all(bvec, bio, i) { in btree_csum_one_bio() 882 root = BTRFS_I(bvec->bv_page->mapping->host)->root; in btree_csum_one_bio() 883 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); in btree_csum_one_bio()
|
/linux-4.4.14/lib/ |
D | iov_iter.c |
    61    __p = i->bvec; \
    87    const struct bio_vec *bvec; \
    89    iterate_bvec(i, n, v, bvec, skip, (B)) \
    104   const struct bio_vec *bvec; \
    106   iterate_bvec(i, n, v, bvec, skip, (B)) \
    107   if (skip == bvec->bv_len) { \
    108   bvec++; \
    111   i->nr_segs -= bvec - i->bvec; \
    112   i->bvec = bvec; \
    523   return min(i->count, i->bvec->bv_len - i->iov_offset);  in iov_iter_single_seg_count()
    [all …]
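Consumer-side sketch (hypothetical helper, not from iov_iter.c): the iterate_bvec machinery above is what lets generic copy helpers such as copy_page_to_iter() operate on an iov_iter backed by bio_vecs rather than user iovecs.

    #include <linux/bio.h>
    #include <linux/fs.h>
    #include <linux/uio.h>

    static size_t sketch_copy_to_bvec_iter(struct page *src, struct bio_vec *bv,
                                           unsigned int nr_segs, size_t bytes)
    {
            struct iov_iter iter;

            /* Destination is a bvec-backed iterator (read direction). */
            iov_iter_bvec(&iter, ITER_BVEC | READ, bv, nr_segs, bytes);

            /* Internally walks the bvec array via the iterate_* macros above. */
            return copy_page_to_iter(src, 0, bytes, &iter);
    }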
|
/linux-4.4.14/arch/m68k/emu/ |
D | nfblock.c | 65 struct bio_vec bvec; in nfhd_make_request() local 72 bio_for_each_segment(bvec, bio, iter) { in nfhd_make_request() 73 len = bvec.bv_len; in nfhd_make_request() 76 bvec_to_phys(&bvec)); in nfhd_make_request()
|
/linux-4.4.14/drivers/s390/block/ |
D | xpram.c | 187 struct bio_vec bvec; in xpram_make_request() local 205 bio_for_each_segment(bvec, bio, iter) { in xpram_make_request() 207 kmap(bvec.bv_page) + bvec.bv_offset; in xpram_make_request() 208 bytes = bvec.bv_len; in xpram_make_request()
|
D | dcssblk.c |
    823   struct bio_vec bvec;  in dcssblk_make_request() local
    861   bio_for_each_segment(bvec, bio, iter) {  in dcssblk_make_request()
    863   page_address(bvec.bv_page) + bvec.bv_offset;  in dcssblk_make_request()
    865   if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)  in dcssblk_make_request()
    870   bvec.bv_len);  in dcssblk_make_request()
    873   bvec.bv_len);  in dcssblk_make_request()
    875   bytes_done += bvec.bv_len;  in dcssblk_make_request()
|
/linux-4.4.14/Documentation/block/ |
D | biovecs.txt |
    20    bytes completed in the current bvec.
    50    exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
    58    coding bvec iterators before, and having common implementation considerably
    63    it somewhere else if there was an error) had to save the entire bvec array
    66    * Biovecs can be shared between multiple bios - a bvec iter can represent an
    74    bios with more than a single bvec! Now, we can efficiently split arbitrary
    86    fine to _most_ devices, but since accessing the raw bvec array was the
    88    since all drivers _must_ go through the bvec iterator - and have been
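A short illustrative sketch of the point biovecs.txt is making (hypothetical code, 4.4-era bio.h interfaces): because the iterator is separate from the bvec array, partial completion is tracked in bi_bvec_done without ever rewriting bi_io_vec, so the same biovec array can back several iterators at once.

    #include <linux/bio.h>
    #include <linux/kernel.h>

    static void sketch_peek_after(struct bio *bio, unsigned int bytes)
    {
            struct bvec_iter iter = bio->bi_iter;   /* private copy; the bio stays untouched */
            struct bio_vec bv;

            /* Account 'bytes' as completed: updates bi_sector, bi_size and bi_bvec_done. */
            bio_advance_iter(bio, &iter, bytes);

            if (iter.bi_size) {
                    /* First unconsumed segment, already clipped by bi_bvec_done. */
                    bv = bio_iter_iovec(bio, iter);
                    pr_debug("next seg: len=%u off=%u\n", bv.bv_len, bv.bv_offset);
            }
    }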
|
D | data-integrity.txt | 136 information (bvec pool, vector count, etc.)
|
/linux-4.4.14/drivers/nvdimm/ |
D | blk.c | 172 struct bio_vec bvec; in nd_blk_make_request() local 191 bio_for_each_segment(bvec, bio, iter) { in nd_blk_make_request() 192 unsigned int len = bvec.bv_len; in nd_blk_make_request() 195 err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len, in nd_blk_make_request() 196 bvec.bv_offset, rw, iter.bi_sector); in nd_blk_make_request()
|
D | pmem.c | 71 struct bio_vec bvec; in pmem_make_request() local 77 bio_for_each_segment(bvec, bio, iter) in pmem_make_request() 78 pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset, in pmem_make_request()
|
D | btt.c | 1159 struct bio_vec bvec; in btt_make_request() local 1176 bio_for_each_segment(bvec, bio, iter) { in btt_make_request() 1177 unsigned int len = bvec.bv_len; in btt_make_request() 1185 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, in btt_make_request()
|
/linux-4.4.14/drivers/md/ |
D | dm-io.c | 207 struct bio_vec *bvec = dp->context_ptr; in bio_get_page() local 208 *p = bvec->bv_page; in bio_get_page() 209 *len = bvec->bv_len - dp->context_u; in bio_get_page() 210 *offset = bvec->bv_offset + dp->context_u; in bio_get_page() 215 struct bio_vec *bvec = dp->context_ptr; in bio_next_page() local 216 dp->context_ptr = bvec + 1; in bio_next_page()
|
D | dm-log-writes.c | 152 struct bio_vec *bvec; in log_end_io() local 164 bio_for_each_segment_all(bvec, bio, i) in log_end_io() 165 __free_page(bvec->bv_page); in log_end_io()
|
D | raid1.c |
    973   struct bio_vec *bvec;  in alloc_behind_pages() local
    979   bio_for_each_segment_all(bvec, bio, i) {  in alloc_behind_pages()
    980   bvecs[i] = *bvec;  in alloc_behind_pages()
    984   memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,  in alloc_behind_pages()
    985   kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);  in alloc_behind_pages()
    987   kunmap(bvec->bv_page);  in alloc_behind_pages()
    1361  struct bio_vec *bvec;  in make_request() local
    1367  bio_for_each_segment_all(bvec, mbio, j)  in make_request()
    1368  bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;  in make_request()
|
D | dm-crypt.c | 995 struct bio_vec *bvec; in crypt_alloc_buffer() local 1020 bvec = &clone->bi_io_vec[clone->bi_vcnt++]; in crypt_alloc_buffer() 1021 bvec->bv_page = page; in crypt_alloc_buffer() 1022 bvec->bv_len = len; in crypt_alloc_buffer() 1023 bvec->bv_offset = 0; in crypt_alloc_buffer()
|
D | raid10.c | 4496 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; in handle_reshape_read_error() local 4522 bvec[idx].bv_page, in handle_reshape_read_error()
|
/linux-4.4.14/fs/gfs2/ |
D | lops.c |
    172   static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,  in gfs2_end_log_write_bh() argument
    176   struct page *page = bvec->bv_page;  in gfs2_end_log_write_bh()
    180   size = bvec->bv_len;  in gfs2_end_log_write_bh()
    181   while (bh_offset(bh) < bvec->bv_offset)  in gfs2_end_log_write_bh()
    208   struct bio_vec *bvec;  in gfs2_end_log_write() local
    217   bio_for_each_segment_all(bvec, bio, i) {  in gfs2_end_log_write()
    218   page = bvec->bv_page;  in gfs2_end_log_write()
    220   gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);  in gfs2_end_log_write()
|
/linux-4.4.14/fs/ext4/ |
D | page-io.c | 64 struct bio_vec *bvec; in ext4_finish_bio() local 66 bio_for_each_segment_all(bvec, bio, i) { in ext4_finish_bio() 67 struct page *page = bvec->bv_page; in ext4_finish_bio() 73 unsigned bio_start = bvec->bv_offset; in ext4_finish_bio() 74 unsigned bio_end = bio_start + bvec->bv_len; in ext4_finish_bio()
|
/linux-4.4.14/fs/logfs/ |
D | dev_bdev.c | 58 struct bio_vec *bvec; in writeseg_end_io() local 65 bio_for_each_segment_all(bvec, bio, i) { in writeseg_end_io() 66 end_page_writeback(bvec->bv_page); in writeseg_end_io() 67 page_cache_release(bvec->bv_page); in writeseg_end_io()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | lloop.c | 195 struct bio_vec bvec; in do_bio_lustrebacked() local 220 bio_for_each_segment(bvec, bio, iter) { in do_bio_lustrebacked() 221 BUG_ON(bvec.bv_offset != 0); in do_bio_lustrebacked() 222 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); in do_bio_lustrebacked() 224 pages[page_count] = bvec.bv_page; in do_bio_lustrebacked() 227 offset += bvec.bv_len; in do_bio_lustrebacked()
|
/linux-4.4.14/drivers/scsi/mpt3sas/ |
D | mpt3sas_transport.c |
    1923  struct bio_vec bvec;  in _transport_smp_handler() local
    1964  bio_for_each_segment(bvec, req->bio, iter) {  in _transport_smp_handler()
    1966  page_address(bvec.bv_page) + bvec.bv_offset,  in _transport_smp_handler()
    1967  bvec.bv_len);  in _transport_smp_handler()
    1968  offset += bvec.bv_len;  in _transport_smp_handler()
    2093  bio_for_each_segment(bvec, rsp->bio, iter) {  in _transport_smp_handler()
    2094  if (bytes_to_copy <= bvec.bv_len) {  in _transport_smp_handler()
    2095  memcpy(page_address(bvec.bv_page) +  in _transport_smp_handler()
    2096  bvec.bv_offset, pci_addr_in +  in _transport_smp_handler()
    2100  memcpy(page_address(bvec.bv_page) +  in _transport_smp_handler()
    [all …]
|
/linux-4.4.14/arch/xtensa/platforms/iss/ |
D | simdisk.c | 107 struct bio_vec bvec; in simdisk_make_request() local 111 bio_for_each_segment(bvec, bio, iter) { in simdisk_make_request() 113 unsigned len = bvec.bv_len >> SECTOR_SHIFT; in simdisk_make_request()
|
/linux-4.4.14/Documentation/device-mapper/ |
D | dm-io.txt | 41 int rw, struct bio_vec *bvec, 44 int rw, struct bio_vec *bvec,
|
/linux-4.4.14/drivers/block/rsxx/ |
D | dma.c | 687 struct bio_vec bvec; in rsxx_dma_queue_bio() local 726 bio_for_each_segment(bvec, bio, iter) { in rsxx_dma_queue_bio() 727 bv_len = bvec.bv_len; in rsxx_dma_queue_bio() 728 bv_off = bvec.bv_offset; in rsxx_dma_queue_bio() 740 laddr, bvec.bv_page, in rsxx_dma_queue_bio()
|
/linux-4.4.14/fs/f2fs/ |
D | data.c | 32 struct bio_vec *bvec; in f2fs_read_end_io() local 44 bio_for_each_segment_all(bvec, bio, i) { in f2fs_read_end_io() 45 struct page *page = bvec->bv_page; in f2fs_read_end_io() 61 struct bio_vec *bvec; in f2fs_write_end_io() local 64 bio_for_each_segment_all(bvec, bio, i) { in f2fs_write_end_io() 65 struct page *page = bvec->bv_page; in f2fs_write_end_io()
|
D | segment.c | 1435 struct bio_vec *bvec; in is_merged_page() local 1445 bio_for_each_segment_all(bvec, io->bio, i) { in is_merged_page() 1447 if (bvec->bv_page->mapping) { in is_merged_page() 1448 target = bvec->bv_page; in is_merged_page() 1454 bvec->bv_page); in is_merged_page()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_main.c |
    1550  struct bio_vec bvec;  in _drbd_send_bio() local
    1554  bio_for_each_segment(bvec, bio, iter) {  in _drbd_send_bio()
    1557  err = _drbd_no_send_page(peer_device, bvec.bv_page,  in _drbd_send_bio()
    1558  bvec.bv_offset, bvec.bv_len,  in _drbd_send_bio()
    1559  bio_iter_last(bvec, iter)  in _drbd_send_bio()
    1569  struct bio_vec bvec;  in _drbd_send_zc_bio() local
    1573  bio_for_each_segment(bvec, bio, iter) {  in _drbd_send_zc_bio()
    1576  err = _drbd_send_page(peer_device, bvec.bv_page,  in _drbd_send_zc_bio()
    1577  bvec.bv_offset, bvec.bv_len,  in _drbd_send_zc_bio()
    1578  bio_iter_last(bvec, iter) ? 0 : MSG_MORE);  in _drbd_send_zc_bio()
|
D | drbd_worker.c | 303 struct bio_vec bvec; in drbd_csum_bio() local 312 bio_for_each_segment(bvec, bio, iter) { in drbd_csum_bio() 313 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in drbd_csum_bio()
|
D | drbd_receiver.c | 1709 struct bio_vec bvec; in recv_dless_read() local 1732 bio_for_each_segment(bvec, bio, iter) { in recv_dless_read() 1733 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; in recv_dless_read() 1734 expect = min_t(int, data_size, bvec.bv_len); in recv_dless_read() 1736 kunmap(bvec.bv_page); in recv_dless_read()
|
/linux-4.4.14/fs/ |
D | direct-io.c | 467 struct bio_vec *bvec; in dio_bio_complete() local 478 bio_for_each_segment_all(bvec, bio, i) { in dio_bio_complete() 479 struct page *page = bvec->bv_page; in dio_bio_complete()
|
D | buffer.c | 2966 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in guard_bio_eod() local 2990 bvec->bv_len -= truncated_bytes; in guard_bio_eod() 2994 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, in guard_bio_eod()
|