/linux-4.1.27/block/ |
D | bounce.c | 58 vto = kmap_atomic(to->bv_page); in bounce_copy_vec() 67 memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) 108 if (tovec.bv_page != fromvec->bv_page) { in copy_to_high_bio_irq() 114 vfrom = page_address(fromvec->bv_page) + in copy_to_high_bio_irq() 118 flush_dcache_page(tovec.bv_page); in copy_to_high_bio_irq() 139 if (bvec->bv_page == org_vec->bv_page) in bounce_end_io() 142 dec_zone_page_state(bvec->bv_page, NR_BOUNCE); in bounce_end_io() 143 mempool_free(bvec->bv_page, pool); in bounce_end_io() 211 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) in __blk_queue_bounce() 219 struct page *page = to->bv_page; in __blk_queue_bounce() [all …]
|
D | bio.c | 511 flush_dcache_page(bv.bv_page); in zero_fill_bio() 726 if (page == prev->bv_page && in __bio_add_page() 771 bvec->bv_page = page; in __bio_add_page() 821 bvec->bv_page = NULL; in __bio_add_page() 951 bv->bv_page = alloc_page(gfp_mask); in bio_alloc_pages() 952 if (!bv->bv_page) { in bio_alloc_pages() 954 __free_page(bv->bv_page); in bio_alloc_pages() 1007 src_p = kmap_atomic(src_bv.bv_page); in bio_copy_data() 1008 dst_p = kmap_atomic(dst_bv.bv_page); in bio_copy_data() 1055 ret = copy_page_from_iter(bvec->bv_page, in bio_copy_from_iter() [all …]
|
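Most of the block/ hits above (bounce.c, bio.c) share one access pattern: walk the bio segment by segment, map bv_page into the kernel address space, and touch only the bv_offset..bv_offset+bv_len window of that page. A minimal sketch of that loop, modeled on zero_fill_bio() in bio.c — the helper name below is made up for illustration, and kmap_atomic() stands in for the bvec_kmap_irq() wrapper the tree sometimes uses:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper mirroring the zero_fill_bio() pattern from bio.c. */
static void example_zero_bio(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                char *p = kmap_atomic(bv.bv_page);

                /* bv_offset/bv_len bound the valid bytes within bv_page */
                memset(p + bv.bv_offset, 0, bv.bv_len);
                kunmap_atomic(p);
                flush_dcache_page(bv.bv_page);
        }
}
|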
D | bio-integrity.c | 104 kfree(page_address(bip->bip_vec->bv_page) + in bio_integrity_free() 143 iv->bv_page = page; in bio_integrity_add_page() 223 void *prot_buf = page_address(bip->bip_vec->bv_page) + in bio_integrity_process() 232 void *kaddr = kmap_atomic(bv.bv_page); in bio_integrity_process()
|
D | blk-merge.c | 54 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); in __blk_recalc_rq_segments() 195 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); in __blk_segment_map_sg() 231 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in __blk_bios_map_sg()
|
D | blk-lib.c | 190 bio->bi_io_vec->bv_page = page; in blkdev_issue_write_same()
|
D | blk-integrity.c | 120 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); in blk_rq_map_integrity_sg()
|
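The blk-merge.c and blk-integrity.c hits are the scatter-gather side of the same member: each biovec's page/len/offset triple is copied into a scatterlist entry via sg_set_page(). A simplified sketch of that mapping under the 4.1 immutable-biovec API — one sg entry per segment, whereas the in-tree code also merges physically contiguous segments; the helper name is an illustrative assumption:

#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Illustrative helper, not from the tree: one sg entry per bio segment. */
static int example_bio_to_sg(struct bio *bio, struct scatterlist *sgl,
                             unsigned int nents)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist *sg = sgl;
        int n = 0;

        sg_init_table(sgl, nents);
        bio_for_each_segment(bv, bio, iter) {
                if (n >= nents)
                        return -EINVAL;
                /* Same call the blk-merge.c / blk-integrity.c hits make. */
                sg_set_page(sg, bv.bv_page, bv.bv_len, bv.bv_offset);
                sg = sg_next(sg);
                n++;
        }
        return n;
}
|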
D | blk-core.c | 1449 bio->bi_io_vec->bv_page = page; in blk_add_request_payload() 2874 flush_dcache_page(bvec.bv_page); in rq_flush_dcache_pages()
|
/linux-4.1.27/drivers/xen/ |
D | biomerge.c | 9 unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page)); in xen_biovec_phys_mergeable() 10 unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page)); in xen_biovec_phys_mergeable()
|
/linux-4.1.27/lib/ |
D | iov_iter.c | 64 __v.bv_page = __p->bv_page; \ 75 __v.bv_page = __p->bv_page; \ 398 memcpy_to_page(v.bv_page, v.bv_offset, in copy_to_iter() 419 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, in copy_from_iter() 440 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, in copy_from_iter_nocache() 485 memzero_page(v.bv_page, v.bv_offset, v.bv_len), in iov_iter_zero() 500 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, in iov_iter_copy_from_user_atomic() 599 get_page(*pages = v.bv_page); in iov_iter_get_pages() 653 get_page(*p = v.bv_page); in iov_iter_get_pages_alloc() 687 char *p = kmap_atomic(v.bv_page); in csum_and_copy_from_iter() [all …]
|
/linux-4.1.27/fs/logfs/ |
D | dev_bdev.c | 25 bio_vec.bv_page = page; in sync_request() 68 end_page_writeback(bvec->bv_page); in writeseg_end_io() 69 page_cache_release(bvec->bv_page); in writeseg_end_io() 113 bio->bi_io_vec[i].bv_page = page; in __bdev_writeseg() 203 bio->bi_io_vec[i].bv_page = super->s_erase_page; in do_erase()
|
/linux-4.1.27/mm/ |
D | page_io.c | 36 bio->bi_io_vec[0].bv_page = page; in get_swap_bio() 49 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_write() 75 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_read() 268 .bv_page = page, in __swap_writepage()
|
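The page_io.c hits above (and blk-lib.c / blk-core.c earlier) show the other recurring pattern: a freshly allocated bio whose single biovec is filled in by hand rather than through bio_add_page(). A sketch of that construction, patterned loosely on get_swap_bio(); the helper name is hypothetical, and a real caller would still set bi_bdev and bi_end_io before submitting:

#include <linux/bio.h>
#include <linux/mm.h>

/* Illustrative single-page bio setup, modeled on mm/page_io.c. */
static struct bio *example_one_page_bio(struct page *page, sector_t sector,
                                        gfp_t gfp)
{
        struct bio *bio = bio_alloc(gfp, 1);

        if (!bio)
                return NULL;

        bio->bi_iter.bi_sector = sector;
        bio->bi_io_vec[0].bv_page = page;       /* the member this listing tracks */
        bio->bi_io_vec[0].bv_len = PAGE_SIZE;
        bio->bi_io_vec[0].bv_offset = 0;
        bio->bi_vcnt = 1;
        bio->bi_iter.bi_size = PAGE_SIZE;
        return bio;
}
|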
/linux-4.1.27/include/linux/ |
D | bio.h | 67 (__bvec_iter_bvec((bvec), (iter))->bv_page) 78 .bv_page = bvec_iter_page((bvec), (iter)), \ 154 #define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset) 163 (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \ 493 addr = (unsigned long) kmap_atomic(bvec->bv_page); in bvec_kmap_irq() 511 return page_address(bvec->bv_page) + bvec->bv_offset; in bvec_kmap_irq()
|
D | blk_types.h | 24 struct page *bv_page; member
|
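For reference, the blk_types.h hit above is the definition site every other entry in this listing dereferences; in the 4.1 tree the struct is just three fields (reproduced here for context, not part of the search output):

/* include/linux/blk_types.h (linux-4.1.x) */
struct bio_vec {
        struct page     *bv_page;       /* page backing this segment */
        unsigned int    bv_len;         /* number of bytes in the segment */
        unsigned int    bv_offset;      /* byte offset of the data within bv_page */
};
|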
/linux-4.1.27/drivers/md/bcache/ |
D | debug.c | 123 void *p1 = kmap_atomic(bv.bv_page); in bch_data_verify() 124 void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); in bch_data_verify() 138 __free_page(bv2->bv_page); in bch_data_verify()
|
D | util.c | 241 bv->bv_page = is_vmalloc_addr(base) in bch_bio_map()
|
D | movinggc.c | 51 __free_page(bv->bv_page); in write_moving_finish()
|
D | request.c | 45 void *d = kmap(bv.bv_page) + bv.bv_offset; in bio_csum() 47 kunmap(bv.bv_page); in bio_csum() 693 __free_page(bv->bv_page); in cached_dev_cache_miss_done()
|
D | super.c | 234 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); in __write_super() 1187 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_bdev() 1829 if (ca->sb_bio.bi_inline_vecs[0].bv_page) in bch_cache_release() 1830 put_page(ca->sb_bio.bi_io_vec[0].bv_page); in bch_cache_release() 1889 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_cache()
|
D | writeback.c | 136 __free_page(bv->bv_page); in write_dirty_finish()
|
D | btree.c | 369 __free_page(bv->bv_page); in btree_node_write_done() 429 memcpy(page_address(bv->bv_page), in do_btree_node_write()
|
/linux-4.1.27/drivers/md/ |
D | raid1.c | 137 r1_bio->bios[j]->bi_io_vec[i].bv_page = in r1buf_pool_alloc() 138 r1_bio->bios[0]->bi_io_vec[i].bv_page; in r1buf_pool_alloc() 150 __free_page(bv->bv_page); in r1buf_pool_alloc() 169 r1bio->bios[j]->bi_io_vec[i].bv_page != in r1buf_pool_free() 170 r1bio->bios[0]->bi_io_vec[i].bv_page) in r1buf_pool_free() 171 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); in r1buf_pool_free() 372 safe_put_page(r1_bio->behind_bvecs[i].bv_page); in close_write() 1013 bvecs[i].bv_page = alloc_page(GFP_NOIO); in alloc_behind_pages() 1014 if (unlikely(!bvecs[i].bv_page)) in alloc_behind_pages() 1016 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset, in alloc_behind_pages() [all …]
|
D | dm-log-writes.c | 165 __free_page(bvec->bv_page); in log_end_io() 181 if (block->vecs[i].bv_page) in free_pending_block() 182 __free_page(block->vecs[i].bv_page); in free_pending_block() 280 ret = bio_add_page(bio, block->vecs[i].bv_page, in log_one_block() 297 ret = bio_add_page(bio, block->vecs[i].bv_page, in log_one_block() 644 src = kmap_atomic(bv.bv_page); in log_writes_map() 649 block->vecs[i].bv_page = page; in log_writes_map()
|
D | raid10.c | 184 page = rbio->bi_io_vec[i].bv_page; in r10buf_pool_alloc() 191 bio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc() 193 rbio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc() 201 safe_put_page(bio->bi_io_vec[i-1].bv_page); in r10buf_pool_alloc() 204 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc() 228 safe_put_page(bio->bi_io_vec[i].bv_page); in r10buf_pool_free() 229 bio->bi_io_vec[i].bv_page = NULL; in r10buf_pool_free() 2077 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), in sync_request_write() 2078 page_address(tbio->bi_io_vec[j].bv_page), in sync_request_write() 2107 memcpy(page_address(tbio->bi_io_vec[j].bv_page), in sync_request_write() [all …]
|
D | dm-crypt.c | 852 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, in crypt_convert_block() 856 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, in crypt_convert_block() 1007 bvec->bv_page = page; in crypt_alloc_buffer() 1029 BUG_ON(!bv->bv_page); in crypt_free_buffer_pages() 1030 mempool_free(bv->bv_page, cc->page_pool); in crypt_free_buffer_pages() 1031 bv->bv_page = NULL; in crypt_free_buffer_pages()
|
D | dm-io.c | 207 *p = bvec->bv_page; in bio_get_page()
|
D | dm-verity.c | 411 page = kmap_atomic(bv.bv_page); in verity_verify_io()
|
D | raid5.c | 1019 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io() 1068 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io() 1142 bio_page = bvl.bv_page; in async_copy_data()
|
/linux-4.1.27/drivers/scsi/ |
D | sd_dif.c | 137 pi = kmap_atomic(iv.bv_page) + iv.bv_offset; in sd_dif_prepare() 184 pi = kmap_atomic(iv.bv_page) + iv.bv_offset; in sd_dif_complete()
|
/linux-4.1.27/fs/btrfs/ |
D | compression.c | 211 SetPageChecked(bvec->bv_page); in end_compressed_bio_read() 462 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; in add_ra_bio_pages() 595 page_offset(bio->bi_io_vec->bv_page), in btrfs_submit_compressed_read() 989 struct page *page_out = bvec[*pg_index].bv_page; in btrfs_decompress_buf2page() 1034 page_out = bvec[*pg_index].bv_page; in btrfs_decompress_buf2page() 1074 struct page *page = bvec[pg_index].bv_page; in btrfs_clear_biovec_end()
|
D | file-item.c | 225 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in __btrfs_lookup_bio_sums() 451 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio() 460 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio() 483 data = kmap_atomic(bvec->bv_page); in btrfs_csum_one_bio()
|
D | raid56.c | 1150 p = bio->bi_io_vec[i].bv_page; in index_rbio_pages() 1431 p = bio->bi_io_vec[i].bv_page; in set_bio_pages_uptodate()
|
D | check-integrity.c | 3000 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio() 3004 kunmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio() 3023 kunmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio()
|
D | extent_io.c | 2492 struct page *page = bvec->bv_page; in end_bio_extent_writepage() 2566 struct page *page = bvec->bv_page; in end_bio_extent_readpage() 2754 struct page *page = bvec->bv_page; in submit_one_bio() 3754 struct page *page = bvec->bv_page; in end_bio_extent_buffer_writepage()
|
D | inode.c | 7750 clean_io_failure(done->inode, done->start, bvec->bv_page, 0); in btrfs_retry_endio_nocsum() 7774 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, in __btrfs_correct_data_nocsum() 7809 bvec->bv_page, 0, in btrfs_retry_endio() 7813 bvec->bv_page, 0); in btrfs_retry_endio() 7839 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, in __btrfs_subio_endio_read() 7848 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, in __btrfs_subio_endio_read() 8144 bio_add_page(bio, bvec->bv_page, bvec->bv_len, in btrfs_submit_direct_hook()
|
D | disk-io.c | 879 root = BTRFS_I(bvec->bv_page->mapping->host)->root; in btree_csum_one_bio() 880 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); in btree_csum_one_bio()
|
D | volumes.c | 5796 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, in breakup_stripe_bio()
|
/linux-4.1.27/fs/ext4/ |
D | readpage.c | 63 struct page *page = bv->bv_page; in completion_pages() 119 struct page *page = bv->bv_page; in mpage_end_io()
|
D | page-io.c | 68 struct page *page = bvec->bv_page; in ext4_finish_bio()
|
/linux-4.1.27/fs/9p/ |
D | vfs_addr.c | 55 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage() 174 bvec.bv_page = page; in v9fs_vfs_writepage_locked()
|
/linux-4.1.27/kernel/power/ |
D | block_io.c | 93 page = bio->bi_io_vec[0].bv_page; in hib_wait_on_bio_chain()
|
/linux-4.1.27/drivers/block/zram/ |
D | zram_drv.c | 433 struct page *page = bvec->bv_page; in handle_zero_page() 521 page = bvec->bv_page; in zram_bvec_read() 592 page = bvec->bv_page; in zram_bvec_write() 950 bv.bv_page = bvec.bv_page; in __zram_make_request() 1037 bv.bv_page = page; in zram_rw_page()
|
/linux-4.1.27/drivers/block/ |
D | nbd.c | 220 void *kaddr = kmap(bvec->bv_page); in sock_send_bvec() 223 kunmap(bvec->bv_page); in sock_send_bvec() 306 void *kaddr = kmap(bvec->bv_page); in sock_recv_bvec() 309 kunmap(bvec->bv_page); in sock_recv_bvec()
|
D | loop.c | 262 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, in lo_write_transfer() 267 b.bv_page = page; in lo_write_transfer() 293 flush_dcache_page(bvec.bv_page); in lo_read_simple() 325 b.bv_page = page; in lo_read_transfer() 336 ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, in lo_read_transfer() 341 flush_dcache_page(bvec.bv_page); in lo_read_transfer()
|
D | pmem.c | 80 pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset, in pmem_make_request()
|
D | pktcdvd.c | 961 if (bvec[f].bv_page != pkt->pages[p]) { in pkt_make_local_copy() 962 void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset; in pkt_make_local_copy() 966 bvec[f].bv_page = pkt->pages[p]; in pkt_make_local_copy() 1310 bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; in pkt_start_write() 1312 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) in pkt_start_write()
|
D | brd.c | 352 err = brd_do_bvec(brd, bvec.bv_page, len, in brd_make_request()
|
D | ps3disk.c | 115 flush_kernel_dcache_page(bvec.bv_page); in ps3disk_scatter_gather()
|
D | ps3vram.c | 564 char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; in ps3vram_do_bio()
|
D | floppy.c | 2377 if (page_address(bv.bv_page) + bv.bv_offset != base + size) in buffer_chain_size() 2447 buffer = page_address(bv.bv_page) + bv.bv_offset; in copy_buffer() 3808 bio_vec.bv_page = page; in __floppy_read_block_0()
|
D | umem.c | 369 vec.bv_page, in add_bio()
|
D | nvme-core.c | 519 pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; in nvme_dif_remap()
|
D | rbd.c | 1263 flush_dcache_page(bv.bv_page); in zero_bio_chain()
|
/linux-4.1.27/drivers/s390/block/ |
D | dasd_fba.c | 290 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp() 327 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_fba_build_cp() 400 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_fba_free_cp()
|
D | scm_blk_cluster.c | 184 aidaw->data_addr = (u64) page_address(bv.bv_page); in scm_prepare_cluster_request()
|
D | xpram.c | 205 kmap(bvec.bv_page) + bvec.bv_offset; in xpram_make_request()
|
D | dasd_diag.c | 548 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_diag_build_cp()
|
D | scm_blk.c | 206 aidaw->data_addr = (u64) page_address(bv.bv_page); in scm_request_prepare()
|
D | dasd_eckd.c | 2615 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_eckd_build_cp_cmd_single() 2686 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_cmd_single() 2849 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_cmd_track() 3161 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_tpm_track() 3194 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_tpm_track() 3414 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_raw_build_cp() 3478 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_free_cp()
|
D | dcssblk.c | 860 page_address(bvec.bv_page) + bvec.bv_offset; in dcssblk_make_request()
|
/linux-4.1.27/arch/powerpc/sysdev/ |
D | axonram.c | 126 user_mem = page_address(vec.bv_page) + vec.bv_offset; in axon_ram_make_request()
|
/linux-4.1.27/fs/gfs2/ |
D | lops.c | 176 struct page *page = bvec->bv_page; in gfs2_end_log_write_bh() 218 page = bvec->bv_page; in gfs2_end_log_write()
|
/linux-4.1.27/drivers/target/ |
D | target_core_file.c | 339 bvec[i].bv_page = sg_page(sg); in fd_do_rw() 465 bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]); in fd_execute_write_same()
|
/linux-4.1.27/drivers/block/aoe/ |
D | aoecmd.c | 303 skb_fill_page_desc(skb, frag++, bv.bv_page, in skb_fillup() 877 page = compound_head(bv.bv_page); in bio_pageinc() 890 page = compound_head(bv.bv_page); in bio_pagedec() 1095 char *p = page_address(bv.bv_page) + bv.bv_offset; in bvcpy()
|
/linux-4.1.27/fs/ |
D | mpage.c | 51 struct page *page = bv->bv_page; in mpage_end_io()
|
D | direct-io.c | 471 struct page *page = bvec->bv_page; in dio_bio_complete()
|
D | buffer.c | 2995 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, in guard_bio_eod() 3025 bio->bi_io_vec[0].bv_page = bh->b_page; in _submit_bh()
|
D | splice.c | 1006 array[n].bv_page = buf->page; in iter_file_splice_write()
|
/linux-4.1.27/fs/exofs/ |
D | ore.c | 414 clear_highpage(bv->bv_page); in _clear_bio() 416 zero_user(bv->bv_page, bv->bv_offset, this_count); in _clear_bio()
|
D | ore_raid.c | 441 struct page *page = bv->bv_page; in _mark_read4write_pages_uptodate()
|
/linux-4.1.27/drivers/scsi/mpt3sas/ |
D | mpt3sas_transport.c | 1942 page_address(bvec.bv_page) + bvec.bv_offset, in _transport_smp_handler() 2071 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler() 2076 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler()
|
/linux-4.1.27/drivers/scsi/mpt2sas/ |
D | mpt2sas_transport.c | 1959 page_address(bvec.bv_page) + bvec.bv_offset, in _transport_smp_handler() 2110 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler() 2115 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_bitmap.c | 949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); in drbd_bm_endio() 982 mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); in drbd_bm_endio()
|
D | drbd_main.c | 1557 err = _drbd_no_send_page(peer_device, bvec.bv_page, in _drbd_send_bio() 1576 err = _drbd_send_page(peer_device, bvec.bv_page, in _drbd_send_zc_bio()
|
D | drbd_worker.c | 335 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in drbd_csum_bio()
|
D | drbd_receiver.c | 1732 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; in recv_dless_read() 1735 kunmap(bvec.bv_page); in recv_dless_read()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | lloop.c | 224 pages[page_count] = bvec.bv_page; in do_bio_lustrebacked()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_logmgr.c | 2002 bio->bi_io_vec[0].bv_page = bp->l_page; in lbmRead() 2148 bio->bi_io_vec[0].bv_page = bp->l_page; in lbmStartIO()
|
/linux-4.1.27/fs/f2fs/ |
D | data.c | 37 struct page *page = bvec->bv_page; in f2fs_read_end_io() 57 struct page *page = bvec->bv_page; in f2fs_write_end_io()
|
D | segment.c | 1317 if (page == bvec->bv_page) { in is_merged_page()
|
/linux-4.1.27/drivers/block/rsxx/ |
D | dma.c | 740 laddr, bvec.bv_page, in rsxx_dma_queue_bio()
|
/linux-4.1.27/net/ceph/ |
D | messenger.c | 870 return bio_vec.bv_page; in ceph_msg_data_bio_next()
|
/linux-4.1.27/Documentation/block/ |
D | biodoc.txt | 423 struct page *bv_page;
|