
Searched refs:bv_page (Results 1 – 82 of 82) sorted by relevance

/linux-4.4.14/block/
bounce.c
59 vto = kmap_atomic(to->bv_page); in bounce_copy_vec()
68 memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
109 if (tovec.bv_page != fromvec->bv_page) { in copy_to_high_bio_irq()
115 vfrom = page_address(fromvec->bv_page) + in copy_to_high_bio_irq()
119 flush_dcache_page(tovec.bv_page); in copy_to_high_bio_irq()
139 if (bvec->bv_page == org_vec->bv_page) in bounce_end_io()
142 dec_zone_page_state(bvec->bv_page, NR_BOUNCE); in bounce_end_io()
143 mempool_free(bvec->bv_page, pool); in bounce_end_io()
192 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) in __blk_queue_bounce()
200 struct page *page = to->bv_page; in __blk_queue_bounce()
[all …]
bio.c
524 flush_dcache_page(bv.bv_page); in zero_fill_bio()
734 if (page == prev->bv_page && in bio_add_pc_page()
757 bvec->bv_page = page; in bio_add_pc_page()
786 bvec->bv_page = NULL; in bio_add_pc_page()
825 if (page == bv->bv_page && in bio_add_page()
836 bv->bv_page = page; in bio_add_page()
919 bv->bv_page = alloc_page(gfp_mask); in bio_alloc_pages()
920 if (!bv->bv_page) { in bio_alloc_pages()
922 __free_page(bv->bv_page); in bio_alloc_pages()
975 src_p = kmap_atomic(src_bv.bv_page); in bio_copy_data()
[all …]
bio-integrity.c
109 kfree(page_address(bip->bip_vec->bv_page) + in bio_integrity_free()
153 iv->bv_page = page; in bio_integrity_add_page()
233 void *prot_buf = page_address(bip->bip_vec->bv_page) + in bio_integrity_process()
242 void *kaddr = kmap_atomic(bv.bv_page); in bio_integrity_process()
blk-lib.c
177 bio->bi_io_vec->bv_page = page; in blkdev_issue_write_same()
blk-merge.c
372 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); in __blk_segment_map_sg()
408 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in __blk_bios_map_sg()
blk-integrity.c
117 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); in blk_rq_map_integrity_sg()
blk-core.c
1530 bio->bi_io_vec->bv_page = page; in blk_add_request_payload()
2996 flush_dcache_page(bvec.bv_page); in rq_flush_dcache_pages()
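
The block/ hits above share one access pattern: walk the bio segment by segment and map each bv_page before touching its data. A minimal sketch of that pattern follows (illustrative only, not code from the tree; the helper name is invented), roughly what zero_fill_bio() in bio.c does:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Zero every byte a bio covers by mapping each segment's bv_page. */
static void example_zero_bio(struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter) {
                char *data = kmap_atomic(bvec.bv_page);

                memset(data + bvec.bv_offset, 0, bvec.bv_len);
                kunmap_atomic(data);
                flush_dcache_page(bvec.bv_page);
        }
}
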
/linux-4.4.14/drivers/xen/
biomerge.c
10 unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); in xen_biovec_phys_mergeable()
11 unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); in xen_biovec_phys_mergeable()
/linux-4.4.14/lib/
iov_iter.c
64 __v.bv_page = __p->bv_page; \
75 __v.bv_page = __p->bv_page; \
398 memcpy_to_page(v.bv_page, v.bv_offset, in copy_to_iter()
419 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, in copy_from_iter()
440 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, in copy_from_iter_nocache()
485 memzero_page(v.bv_page, v.bv_offset, v.bv_len), in iov_iter_zero()
500 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, in iov_iter_copy_from_user_atomic()
599 get_page(*pages = v.bv_page); in iov_iter_get_pages()
653 get_page(*p = v.bv_page); in iov_iter_get_pages_alloc()
687 char *p = kmap_atomic(v.bv_page); in csum_and_copy_from_iter()
[all …]
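
The iov_iter.c hits are the internal ITER_BVEC branches of the copy helpers; callers reach them by wrapping a bio_vec array in an iterator. A hedged sketch of such a caller (the function name is invented; verify iov_iter_bvec()'s direction flags against your tree before relying on this):

#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* Copy a kernel buffer into the page described by a single bio_vec. */
static size_t example_copy_to_bvec(struct page *page, unsigned int offset,
                                   void *src, size_t len)
{
        struct bio_vec bvec = {
                .bv_page   = page,
                .bv_len    = len,
                .bv_offset = offset,
        };
        struct iov_iter iter;

        /* READ: the iterator describes the destination of the copy. */
        iov_iter_bvec(&iter, ITER_BVEC | READ, &bvec, 1, len);
        return copy_to_iter(src, len, &iter);
}
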
/linux-4.4.14/fs/logfs/
dev_bdev.c
25 bio_vec.bv_page = page; in sync_request()
66 end_page_writeback(bvec->bv_page); in writeseg_end_io()
67 page_cache_release(bvec->bv_page); in writeseg_end_io()
111 bio->bi_io_vec[i].bv_page = page; in __bdev_writeseg()
199 bio->bi_io_vec[i].bv_page = super->s_erase_page; in do_erase()
/linux-4.4.14/drivers/md/
dm-log-writes.c
165 __free_page(bvec->bv_page); in log_end_io()
181 if (block->vecs[i].bv_page) in free_pending_block()
182 __free_page(block->vecs[i].bv_page); in free_pending_block()
278 ret = bio_add_page(bio, block->vecs[i].bv_page, in log_one_block()
294 ret = bio_add_page(bio, block->vecs[i].bv_page, in log_one_block()
646 src = kmap_atomic(bv.bv_page); in log_writes_map()
651 block->vecs[i].bv_page = page; in log_writes_map()
raid1.c
139 r1_bio->bios[j]->bi_io_vec[i].bv_page = in r1buf_pool_alloc()
140 r1_bio->bios[0]->bi_io_vec[i].bv_page; in r1buf_pool_alloc()
152 __free_page(bv->bv_page); in r1buf_pool_alloc()
171 r1bio->bios[j]->bi_io_vec[i].bv_page != in r1buf_pool_free()
172 r1bio->bios[0]->bi_io_vec[i].bv_page) in r1buf_pool_free()
173 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); in r1buf_pool_free()
375 safe_put_page(r1_bio->behind_bvecs[i].bv_page); in close_write()
981 bvecs[i].bv_page = alloc_page(GFP_NOIO); in alloc_behind_pages()
982 if (unlikely(!bvecs[i].bv_page)) in alloc_behind_pages()
984 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset, in alloc_behind_pages()
[all …]
dm-crypt.c
853 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, in crypt_convert_block()
857 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, in crypt_convert_block()
1021 bvec->bv_page = page; in crypt_alloc_buffer()
1043 BUG_ON(!bv->bv_page); in crypt_free_buffer_pages()
1044 mempool_free(bv->bv_page, cc->page_pool); in crypt_free_buffer_pages()
1045 bv->bv_page = NULL; in crypt_free_buffer_pages()
raid10.c
185 page = rbio->bi_io_vec[i].bv_page; in r10buf_pool_alloc()
192 bio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc()
194 rbio->bi_io_vec[i].bv_page = page; in r10buf_pool_alloc()
202 safe_put_page(bio->bi_io_vec[i-1].bv_page); in r10buf_pool_alloc()
205 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); in r10buf_pool_alloc()
229 safe_put_page(bio->bi_io_vec[i].bv_page); in r10buf_pool_free()
230 bio->bi_io_vec[i].bv_page = NULL; in r10buf_pool_free()
1973 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), in sync_request_write()
1974 page_address(tbio->bi_io_vec[j].bv_page), in sync_request_write()
2080 bio->bi_io_vec[idx].bv_page, in fix_recovery_read_error()
[all …]
dm-io.c
208 *p = bvec->bv_page; in bio_get_page()
dm-verity.c
407 page = kmap_atomic(bv.bv_page); in verity_verify_io()
raid5.c
1023 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1072 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1146 bio_page = bvl.bv_page; in async_copy_data()
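
Several of the md/dm hits (raid1's alloc_behind_pages(), dm-crypt's crypt_alloc_buffer(), dm-log-writes) follow the same allocate-copy-free shape: allocate a private page per segment, copy the write data into it, and release it with __free_page() on completion. A rough sketch of the allocation half, with invented names and simplified error handling:

#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Snapshot a bio's data into freshly allocated pages, one per segment.
 * nr_vecs must be at least bio_segments(bio); the caller supplies it.
 */
static struct bio_vec *example_copy_bio_pages(struct bio *bio, int nr_vecs)
{
        struct bio_vec *copy, bvec;
        struct bvec_iter iter;
        int i = 0;

        copy = kcalloc(nr_vecs, sizeof(*copy), GFP_NOIO);
        if (!copy)
                return NULL;

        bio_for_each_segment(bvec, bio, iter) {
                copy[i].bv_page = alloc_page(GFP_NOIO);
                if (!copy[i].bv_page)
                        goto free_pages;
                copy[i].bv_len = bvec.bv_len;
                copy[i].bv_offset = bvec.bv_offset;
                memcpy(kmap(copy[i].bv_page) + bvec.bv_offset,
                       kmap(bvec.bv_page) + bvec.bv_offset, bvec.bv_len);
                kunmap(bvec.bv_page);
                kunmap(copy[i].bv_page);
                i++;
        }
        return copy;

free_pages:
        while (--i >= 0)
                __free_page(copy[i].bv_page);
        kfree(copy);
        return NULL;
}
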
/linux-4.4.14/include/linux/
bio.h
67 (__bvec_iter_bvec((bvec), (iter))->bv_page)
78 .bv_page = bvec_iter_page((bvec), (iter)), \
154 #define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
163 (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
551 addr = (unsigned long) kmap_atomic(bvec->bv_page); in bvec_kmap_irq()
569 return page_address(bvec->bv_page) + bvec->bv_offset; in bvec_kmap_irq()
blk_types.h
24 struct page *bv_page; member
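
For reference, the structure behind the blk_types.h hit is small; as of 4.4 it is essentially the following (bv_len and bv_offset are the companion fields used throughout the hits above):

/* include/linux/blk_types.h (4.4): one contiguous chunk of a page. */
struct bio_vec {
        struct page     *bv_page;       /* the page holding the data     */
        unsigned int    bv_len;         /* number of bytes in this chunk */
        unsigned int    bv_offset;      /* byte offset within bv_page    */
};
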
/linux-4.4.14/drivers/md/bcache/
debug.c
123 void *p1 = kmap_atomic(bv.bv_page); in bch_data_verify()
124 void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); in bch_data_verify()
138 __free_page(bv2->bv_page); in bch_data_verify()
util.c
241 bv->bv_page = is_vmalloc_addr(base) in bch_bio_map()
movinggc.c
51 __free_page(bv->bv_page); in write_moving_finish()
request.c
46 void *d = kmap(bv.bv_page) + bv.bv_offset; in bio_csum()
48 kunmap(bv.bv_page); in bio_csum()
702 __free_page(bv->bv_page); in cached_dev_cache_miss_done()
super.c
211 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); in __write_super()
1156 dc->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_bdev()
1796 if (ca->sb_bio.bi_inline_vecs[0].bv_page) in bch_cache_release()
1797 put_page(ca->sb_bio.bi_io_vec[0].bv_page); in bch_cache_release()
1855 ca->sb_bio.bi_io_vec[0].bv_page = sb_page; in register_cache()
writeback.c
136 __free_page(bv->bv_page); in write_dirty_finish()
btree.c
369 __free_page(bv->bv_page); in btree_node_write_done()
429 memcpy(page_address(bv->bv_page), in do_btree_node_write()
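
bcache's util.c hit (bch_bio_map()) shows another common use: pointing bv_page at the pages that back an existing linear kernel buffer rather than allocating new ones. A simplified, hedged sketch of that idea; it assumes the bio already has enough bi_io_vec slots to cover the buffer:

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Describe a linear kernel buffer with the bio's own bio_vec array. */
static void example_map_buffer(struct bio *bio, void *base, size_t size)
{
        struct bio_vec *bv = bio->bi_io_vec;

        bio->bi_vcnt = 0;
        bio->bi_iter.bi_size = size;

        for (; size; bv++, bio->bi_vcnt++) {
                bv->bv_offset = offset_in_page(base);
                bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, size);
                /* vmalloc memory needs a page-table lookup; kmalloc does not. */
                bv->bv_page = is_vmalloc_addr(base) ? vmalloc_to_page(base)
                                                    : virt_to_page(base);
                base += bv->bv_len;
                size -= bv->bv_len;
        }
}
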
/linux-4.4.14/drivers/scsi/
sd_dif.c
142 pi = kmap_atomic(iv.bv_page) + iv.bv_offset; in sd_dif_prepare()
189 pi = kmap_atomic(iv.bv_page) + iv.bv_offset; in sd_dif_complete()
/linux-4.4.14/mm/
page_io.c
46 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_write()
71 struct page *page = bio->bi_io_vec[0].bv_page; in end_swap_bio_read()
264 .bv_page = page, in __swap_writepage()
/linux-4.4.14/fs/btrfs/
compression.c
208 SetPageChecked(bvec->bv_page); in end_compressed_bio_read()
459 page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; in add_ra_bio_pages()
591 page_offset(bio->bi_io_vec->bv_page), in btrfs_submit_compressed_read()
994 struct page *page_out = bvec[*pg_index].bv_page; in btrfs_decompress_buf2page()
1039 page_out = bvec[*pg_index].bv_page; in btrfs_decompress_buf2page()
1079 struct page *page = bvec[pg_index].bv_page; in btrfs_clear_biovec_end()
file-item.c
225 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in __btrfs_lookup_bio_sums()
451 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio()
460 offset = page_offset(bvec->bv_page) + bvec->bv_offset; in btrfs_csum_one_bio()
483 data = kmap_atomic(bvec->bv_page); in btrfs_csum_one_bio()
raid56.c
1161 p = bio->bi_io_vec[i].bv_page; in index_rbio_pages()
1441 p = bio->bi_io_vec[i].bv_page; in set_bio_pages_uptodate()
check-integrity.c
3000 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio()
3004 kunmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio()
3023 kunmap(bio->bi_io_vec[i].bv_page); in __btrfsic_submit_bio()
extent_io.c
2560 struct page *page = bvec->bv_page; in end_bio_extent_writepage()
2631 struct page *page = bvec->bv_page; in end_bio_extent_readpage()
2821 struct page *page = bvec->bv_page; in submit_one_bio()
3825 struct page *page = bvec->bv_page; in end_bio_extent_buffer_writepage()
inode.c
7845 clean_io_failure(done->inode, done->start, bvec->bv_page, 0); in btrfs_retry_endio_nocsum()
7869 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, in __btrfs_correct_data_nocsum()
7904 bvec->bv_page, 0, in btrfs_retry_endio()
7908 bvec->bv_page, 0); in btrfs_retry_endio()
7934 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page, in __btrfs_subio_endio_read()
7943 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start, in __btrfs_subio_endio_read()
8238 bio_add_page(bio, bvec->bv_page, bvec->bv_len, in btrfs_submit_direct_hook()
disk-io.c
882 root = BTRFS_I(bvec->bv_page->mapping->host)->root; in btree_csum_one_bio()
883 ret = csum_dirty_buffer(root->fs_info, bvec->bv_page); in btree_csum_one_bio()
/linux-4.4.14/fs/ext4/
readpage.c
63 struct page *page = bv->bv_page; in completion_pages()
119 struct page *page = bv->bv_page; in mpage_end_io()
page-io.c
67 struct page *page = bvec->bv_page; in ext4_finish_bio()
/linux-4.4.14/fs/9p/
vfs_addr.c
55 struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; in v9fs_fid_readpage()
174 bvec.bv_page = page; in v9fs_vfs_writepage_locked()
/linux-4.4.14/drivers/nvdimm/
blk.c
93 iobuf = kmap_atomic(bv.bv_page); in nd_blk_rw_integrity()
195 err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len, in nd_blk_make_request()
pmem.c
78 pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset, in pmem_make_request()
btt.c
933 mem = kmap_atomic(bv.bv_page); in btt_rw_integrity()
1185 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, in btt_make_request()
/linux-4.4.14/drivers/s390/block/
dasd_fba.c
290 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp()
327 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_fba_build_cp()
400 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_fba_free_cp()
scm_blk_cluster.c
184 aidaw->data_addr = (u64) page_address(bv.bv_page); in scm_prepare_cluster_request()
xpram.c
207 kmap(bvec.bv_page) + bvec.bv_offset; in xpram_make_request()
scm_blk.c
206 aidaw->data_addr = (u64) page_address(bv.bv_page); in scm_request_prepare()
dasd_diag.c
555 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_diag_build_cp()
dasd_eckd.c
2653 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_eckd_build_cp_cmd_single()
2724 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_cmd_single()
2887 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_cmd_track()
3199 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_tpm_track()
3232 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_build_cp_tpm_track()
3452 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_raw_build_cp()
3516 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_eckd_free_cp()
dcssblk.c
863 page_address(bvec.bv_page) + bvec.bv_offset; in dcssblk_make_request()
/linux-4.4.14/drivers/block/zram/
zram_drv.c
177 struct page *page = bvec->bv_page; in handle_zero_page()
605 page = bvec->bv_page; in zram_bvec_read()
660 page = bvec->bv_page; in zram_bvec_write()
869 bv.bv_page = bvec.bv_page; in __zram_make_request()
958 bv.bv_page = page; in zram_rw_page()
/linux-4.4.14/drivers/block/
nbd.c
228 void *kaddr = kmap(bvec->bv_page); in sock_send_bvec()
231 kunmap(bvec->bv_page); in sock_send_bvec()
325 void *kaddr = kmap(bvec->bv_page); in sock_recv_bvec()
328 kunmap(bvec->bv_page); in sock_recv_bvec()
loop.c
318 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, in lo_write_transfer()
323 b.bv_page = page; in lo_write_transfer()
349 flush_dcache_page(bvec.bv_page); in lo_read_simple()
381 b.bv_page = page; in lo_read_transfer()
392 ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page, in lo_read_transfer()
397 flush_dcache_page(bvec.bv_page); in lo_read_transfer()
pktcdvd.c
962 if (bvec[f].bv_page != pkt->pages[p]) { in pkt_make_local_copy()
963 void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset; in pkt_make_local_copy()
967 bvec[f].bv_page = pkt->pages[p]; in pkt_make_local_copy()
1311 bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; in pkt_start_write()
1313 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) in pkt_start_write()
brd.c
355 err = brd_do_bvec(brd, bvec.bv_page, len, in brd_make_request()
ps3disk.c
115 flush_kernel_dcache_page(bvec.bv_page); in ps3disk_scatter_gather()
ps3vram.c
564 char *ptr = page_address(bvec.bv_page) + bvec.bv_offset; in ps3vram_do_bio()
floppy.c
2377 if (page_address(bv.bv_page) + bv.bv_offset != base + size) in buffer_chain_size()
2447 buffer = page_address(bv.bv_page) + bv.bv_offset; in copy_buffer()
3809 bio_vec.bv_page = page; in __floppy_read_block_0()
umem.c
369 vec.bv_page, in add_bio()
rbd.c
1265 flush_dcache_page(bv.bv_page); in zero_bio_chain()
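
The network block drivers above (nbd here, drbd and aoe later in the listing) all push bv_page data to a socket or skb; nbd's sock_send_bvec() kmaps the page and hands it to kernel_sendmsg(). A hedged sketch of that step, with an invented function name and no partial-send handling:

#include <linux/blk_types.h>
#include <linux/highmem.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Send one bio_vec's worth of data over a kernel socket. */
static int example_send_bvec(struct socket *sock, struct bio_vec *bvec)
{
        struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
        struct kvec iov;
        void *kaddr = kmap(bvec->bv_page);
        int result;

        iov.iov_base = kaddr + bvec->bv_offset;
        iov.iov_len = bvec->bv_len;
        result = kernel_sendmsg(sock, &msg, &iov, 1, bvec->bv_len);
        kunmap(bvec->bv_page);
        return result;
}
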
/linux-4.4.14/drivers/target/
target_core_file.c
267 bvec[i].bv_page = sg_page(sg); in fd_do_rw()
393 bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]); in fd_execute_write_same()
/linux-4.4.14/arch/powerpc/sysdev/
axonram.c
126 user_mem = page_address(vec.bv_page) + vec.bv_offset; in axon_ram_make_request()
/linux-4.4.14/fs/f2fs/
crypto.c
158 struct page *page = bv->bv_page; in completion_pages()
data.c
45 struct page *page = bvec->bv_page; in f2fs_read_end_io()
65 struct page *page = bvec->bv_page; in f2fs_write_end_io()
segment.c
1447 if (bvec->bv_page->mapping) { in is_merged_page()
1448 target = bvec->bv_page; in is_merged_page()
1454 bvec->bv_page); in is_merged_page()
/linux-4.4.14/fs/gfs2/
lops.c
176 struct page *page = bvec->bv_page; in gfs2_end_log_write_bh()
218 page = bvec->bv_page; in gfs2_end_log_write()
/linux-4.4.14/drivers/block/aoe/
aoecmd.c
303 skb_fill_page_desc(skb, frag++, bv.bv_page, in skb_fillup()
877 page = compound_head(bv.bv_page); in bio_pageinc()
890 page = compound_head(bv.bv_page); in bio_pagedec()
1095 char *p = page_address(bv.bv_page) + bv.bv_offset; in bvcpy()
/linux-4.4.14/fs/exofs/
ore.c
414 clear_highpage(bv->bv_page); in _clear_bio()
416 zero_user(bv->bv_page, bv->bv_offset, this_count); in _clear_bio()
ore_raid.c
441 struct page *page = bv->bv_page; in _mark_read4write_pages_uptodate()
/linux-4.4.14/fs/
mpage.c
51 struct page *page = bv->bv_page; in mpage_end_io()
direct-io.c
479 struct page *page = bvec->bv_page; in dio_bio_complete()
splice.c
1015 array[n].bv_page = buf->page; in iter_file_splice_write()
buffer.c
2994 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len, in guard_bio_eod()
/linux-4.4.14/drivers/scsi/mpt3sas/
mpt3sas_transport.c
1966 page_address(bvec.bv_page) + bvec.bv_offset, in _transport_smp_handler()
2095 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler()
2100 memcpy(page_address(bvec.bv_page) + in _transport_smp_handler()
/linux-4.4.14/drivers/block/drbd/
drbd_bitmap.c
949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); in drbd_bm_endio()
973 mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); in drbd_bm_endio()
drbd_main.c
1557 err = _drbd_no_send_page(peer_device, bvec.bv_page, in _drbd_send_bio()
1576 err = _drbd_send_page(peer_device, bvec.bv_page, in _drbd_send_zc_bio()
drbd_worker.c
313 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); in drbd_csum_bio()
drbd_receiver.c
1733 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; in recv_dless_read()
1736 kunmap(bvec.bv_page); in recv_dless_read()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
lloop.c
224 pages[page_count] = bvec.bv_page; in do_bio_lustrebacked()
/linux-4.4.14/drivers/block/rsxx/
dma.c
740 laddr, bvec.bv_page, in rsxx_dma_queue_bio()
/linux-4.4.14/kernel/power/
swap.c
233 struct page *page = bio->bi_io_vec[0].bv_page; in hib_end_io()
/linux-4.4.14/net/ceph/
messenger.c
867 return bio_vec.bv_page; in ceph_msg_data_bio_next()
/linux-4.4.14/drivers/nvme/host/
pci.c
537 pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; in nvme_dif_remap()
/linux-4.4.14/Documentation/block/
biodoc.txt
423 struct page *bv_page;