bv                159 block/bio-integrity.c 	struct bio_vec bv;
bv                170 block/bio-integrity.c 	__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
bv                171 block/bio-integrity.c 		void *kaddr = kmap_atomic(bv.bv_page);
bv                173 block/bio-integrity.c 		iter.data_buf = kaddr + bv.bv_offset;
bv                174 block/bio-integrity.c 		iter.data_size = bv.bv_len;
bv                152 block/bio.c    void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
bv                161 block/bio.c    		mempool_free(bv, pool);
bv                165 block/bio.c    		kmem_cache_free(bvs->slab, bv);
bv                529 block/bio.c    	struct bio_vec bv;
bv                532 block/bio.c    	__bio_for_each_segment(bv, bio, iter, start) {
bv                533 block/bio.c    		char *data = bvec_kmap_irq(&bv, &flags);
bv                534 block/bio.c    		memset(data, 0, bv.bv_len);
bv                535 block/bio.c    		flush_dcache_page(bv.bv_page);
bv                553 block/bio.c    	struct bio_vec bv;
bv                564 block/bio.c    	bio_for_each_segment(bv, bio, iter) {
bv                565 block/bio.c    		if (done + bv.bv_len > new_size) {
bv                572 block/bio.c    			zero_user(bv.bv_page, offset, bv.bv_len - offset);
bv                575 block/bio.c    		done += bv.bv_len;
bv                682 block/bio.c    static inline bool page_is_mergeable(const struct bio_vec *bv,
bv                686 block/bio.c    	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
bv                687 block/bio.c    		bv->bv_offset + bv->bv_len - 1;
bv                692 block/bio.c    	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
bv                705 block/bio.c    	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
bv                707 block/bio.c    	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
bv                712 block/bio.c    	if (bv->bv_len + len > queue_max_segment_size(q))
bv                807 block/bio.c    		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
bv                809 block/bio.c    		if (page_is_mergeable(bv, page, len, off, same_page)) {
bv                812 block/bio.c    			bv->bv_len += len;
bv                834 block/bio.c    	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
bv                839 block/bio.c    	bv->bv_page = page;
bv                840 block/bio.c    	bv->bv_offset = off;
bv                841 block/bio.c    	bv->bv_len = len;
bv                892 block/bio.c    	const struct bio_vec *bv = iter->bvec;
bv                896 block/bio.c    	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
bv                899 block/bio.c    	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
bv                900 block/bio.c    	size = bio_add_page(bio, bv->bv_page, len,
bv                901 block/bio.c    				bv->bv_offset + iter->iov_offset);
bv                924 block/bio.c    	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
bv                925 block/bio.c    	struct page **pages = (struct page **)bv;
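
Most of the block/bio.c hits above share one shape: an on-stack struct bio_vec copied out by value while a struct bvec_iter walks the bio, as in the zero-fill lines listed. A minimal sketch of that pattern, with a hypothetical helper name:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: zero every payload byte a bio describes. */
static void example_zero_bio(struct bio *bio)
{
	struct bio_vec bv;	/* current segment, copied by value */
	struct bvec_iter iter;	/* cursor over bio->bi_io_vec */

	bio_for_each_segment(bv, bio, iter) {
		void *kaddr = kmap_atomic(bv.bv_page);

		memset(kaddr + bv.bv_offset, 0, bv.bv_len);
		kunmap_atomic(kaddr);
		flush_dcache_page(bv.bv_page);
	}
}
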
bv                 22 block/blk-map.c 	struct bio_vec bv;
bv                 27 block/blk-map.c 	bio_for_each_bvec(bv, *bio, iter)
bv                194 block/blk-merge.c 			    const struct bio_vec *bv, unsigned *nsegs,
bv                199 block/blk-merge.c 	unsigned len = min(bv->bv_len, max_len);
bv                204 block/blk-merge.c 		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
bv                211 block/blk-merge.c 		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
bv                218 block/blk-merge.c 	return len > 0 || bv->bv_len > max_len;
bv                245 block/blk-merge.c 	struct bio_vec bv, bvprv, *bvprvp = NULL;
bv                251 block/blk-merge.c 	bio_for_each_bvec(bv, bio, iter) {
bv                256 block/blk-merge.c 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
bv                260 block/blk-merge.c 		    sectors + (bv.bv_len >> 9) <= max_sectors &&
bv                261 block/blk-merge.c 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
bv                263 block/blk-merge.c 			sectors += bv.bv_len >> 9;
bv                264 block/blk-merge.c 		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
bv                269 block/blk-merge.c 		bvprv = bv;
bv                361 block/blk-merge.c 	struct bio_vec bv;
bv                375 block/blk-merge.c 	rq_for_each_bvec(bv, rq, iter)
bv                376 block/blk-merge.c 		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
bv                432 block/blk-merge.c static inline int __blk_bvec_map_sg(struct bio_vec bv,
bv                436 block/blk-merge.c 	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
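
The blk-merge.c hits iterate with bio_for_each_bvec(), which yields whole (possibly multi-page) bvecs rather than the single-page segments bio_for_each_segment() produces. A minimal sketch of that walk, function name hypothetical:

#include <linux/bio.h>

/* Hypothetical helper: total the payload bytes of a bio, bvec by bvec. */
static unsigned int example_bio_payload(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_bvec(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}
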
bv                221 block/bounce.c 	struct bio_vec bv;
bv                265 block/bounce.c 		bio_for_each_segment(bv, bio_src, iter)
bv                266 block/bounce.c 			bio->bi_io_vec[bio->bi_vcnt++] = bv;
bv                300 drivers/block/aoe/aoecmd.c 	struct bio_vec bv;
bv                302 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter)
bv                303 drivers/block/aoe/aoecmd.c 		skb_fill_page_desc(skb, frag++, bv.bv_page,
bv                304 drivers/block/aoe/aoecmd.c 				   bv.bv_offset, bv.bv_len);
bv               1026 drivers/block/aoe/aoecmd.c 	struct bio_vec bv;
bv               1030 drivers/block/aoe/aoecmd.c 	__bio_for_each_segment(bv, bio, iter, iter) {
bv               1031 drivers/block/aoe/aoecmd.c 		char *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
bv               1032 drivers/block/aoe/aoecmd.c 		skb_copy_bits(skb, soff, p, bv.bv_len);
bv               1034 drivers/block/aoe/aoecmd.c 		soff += bv.bv_len;
bv               2373 drivers/block/floppy.c 	struct bio_vec bv;
bv               2381 drivers/block/floppy.c 	rq_for_each_segment(bv, current_req, iter) {
bv               2382 drivers/block/floppy.c 		if (page_address(bv.bv_page) + bv.bv_offset != base + size)
bv               2385 drivers/block/floppy.c 		size += bv.bv_len;
bv               2411 drivers/block/floppy.c 	struct bio_vec bv;
bv               2445 drivers/block/floppy.c 	rq_for_each_segment(bv, current_req, iter) {
bv               2449 drivers/block/floppy.c 		size = bv.bv_len;
bv               2452 drivers/block/floppy.c 		buffer = page_address(bv.bv_page) + bv.bv_offset;
bv                121 drivers/block/ps3disk.c 	struct bio_vec bv;
bv                124 drivers/block/ps3disk.c 	rq_for_each_segment(bv, req, iter)
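
The floppy and ps3disk hits (and the s390 dasd/scm_blk ones further down) use the request-level variant: rq_for_each_segment() walks every bio_vec of every bio attached to a struct request. A minimal sketch, function name hypothetical:

#include <linux/blkdev.h>

/* Hypothetical helper: count the payload bytes queued in a request. */
static unsigned int example_rq_payload(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, req, iter)
		bytes += bv.bv_len;

	return bytes;
}
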
bv               1352 drivers/block/rbd.c static void zero_bvec(struct bio_vec *bv)
bv               1357 drivers/block/rbd.c 	buf = bvec_kmap_irq(bv, &flags);
bv               1358 drivers/block/rbd.c 	memset(buf, 0, bv->bv_len);
bv               1359 drivers/block/rbd.c 	flush_dcache_page(bv->bv_page);
bv               1369 drivers/block/rbd.c 		zero_bvec(&bv);
bv               1379 drivers/block/rbd.c 		zero_bvec(&bv);
bv               2783 drivers/block/rbd.c 		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
bv               2784 drivers/block/rbd.c 		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
bv               2843 drivers/block/rbd.c 		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
bv               2844 drivers/block/rbd.c 		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
bv               3139 drivers/block/rbd.c 		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
bv               3140 drivers/block/rbd.c 			       bv.bv_len))
bv               1563 drivers/block/zram/zram_drv.c 		struct bio_vec bv = bvec;
bv               1567 drivers/block/zram/zram_drv.c 			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
bv               1569 drivers/block/zram/zram_drv.c 			if (zram_bvec_rw(zram, &bv, index, offset,
bv               1573 drivers/block/zram/zram_drv.c 			bv.bv_offset += bv.bv_len;
bv               1574 drivers/block/zram/zram_drv.c 			unwritten -= bv.bv_len;
bv               1576 drivers/block/zram/zram_drv.c 			update_position(&index, &offset, &bv);
bv               1631 drivers/block/zram/zram_drv.c 	struct bio_vec bv;
bv               1646 drivers/block/zram/zram_drv.c 	bv.bv_page = page;
bv               1647 drivers/block/zram/zram_drv.c 	bv.bv_len = PAGE_SIZE;
bv               1648 drivers/block/zram/zram_drv.c 	bv.bv_offset = 0;
bv               1650 drivers/block/zram/zram_drv.c 	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
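
The zram_drv.c loop above carves a bvec into chunks that never cross a PAGE_SIZE boundary of the target, clamping bv_len and advancing bv_offset as it goes. A simplified sketch of that carving, with hypothetical names and the per-chunk work abstracted into a callback:

#include <linux/bvec.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static void example_chunked_rw(struct bio_vec bvec, unsigned int offset,
			       void (*rw_chunk)(struct bio_vec *bv))
{
	struct bio_vec bv = bvec;		/* work on a copy, as zram does */
	unsigned int unwritten = bvec.bv_len;

	do {
		/* never cross the target's page boundary */
		bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, unwritten);
		rw_chunk(&bv);

		bv.bv_offset += bv.bv_len;
		unwritten -= bv.bv_len;
		offset = 0;	/* later chunks start page-aligned */
	} while (unwritten);
}
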
bv               2862 drivers/firmware/ti_sci.c 				      u8 proc_id, u64 *bv, u32 *cfg_flags,
bv               2902 drivers/firmware/ti_sci.c 		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
bv               1867 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
bv               1877 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
bv               1883 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
bv               1911 drivers/gpu/drm/amd/amdgpu/si_dpm.c 			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
bv                255 drivers/gpu/drm/amd/amdgpu/si_dpm.h 	u32 bv;
bv                 63 drivers/gpu/drm/i915/display/intel_tv.c 	u16 rv, gv, bv, av;
bv                191 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
bv                201 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
bv                211 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
bv                221 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
bv                231 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
bv                241 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
bv                251 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
bv                261 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
bv                271 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
bv                281 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
bv                294 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
bv                300 drivers/gpu/drm/i915/display/intel_tv.c 	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
bv               1412 drivers/gpu/drm/i915/display/intel_tv.c 	I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
bv                271 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	u8 bv;
bv                273 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);
bv                275 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	return bv;
bv                281 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	int bv;
bv                283 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	bv = level | (1 << 8);
bv                284 drivers/gpu/drm/panel/panel-sony-acx565akm.c 	acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);
bv                756 drivers/gpu/drm/radeon/ni_dpm.c 			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));
bv               4209 drivers/gpu/drm/radeon/ni_dpm.c 	ni_pi->cac_data.leakage_coefficients.bv = 2957;
bv                 80 drivers/gpu/drm/radeon/ni_dpm.h 	u32 bv;
bv               1776 drivers/gpu/drm/radeon/si_dpm.c 	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
bv               1786 drivers/gpu/drm/radeon/si_dpm.c 	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
bv               1792 drivers/gpu/drm/radeon/si_dpm.c 	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
bv               1820 drivers/gpu/drm/radeon/si_dpm.c 			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
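
In the amdgpu/radeon dpm hits, bv is not a bio_vec at all: it is the exponential coefficient of the leakage model, scaled into 32.32 fixed point and used as kv = av * exp(bv * vddc), as the si_dpm lines show. A sketch of that computation under those assumptions, with a hypothetical wrapper name:

#include <drm/drm_fixed.h>
#include <linux/math64.h>

/* Hypothetical wrapper around the fixed-point leakage formula above. */
static s64 example_leakage_kv(int av_raw, int bv_raw, s64 vddc)
{
	s64 av = div64_s64(drm_int2fixp(av_raw), 100000000);
	s64 bv = div64_s64(drm_int2fixp(bv_raw), 100000000);

	return drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
}
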
bv                326 drivers/lightnvm/pblk-core.c 	struct bio_vec *bv;
bv                331 drivers/lightnvm/pblk-core.c 		bv = &bio->bi_io_vec[i];
bv                332 drivers/lightnvm/pblk-core.c 		page = bv->bv_page;
bv                333 drivers/lightnvm/pblk-core.c 		for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
bv                432 drivers/md/bcache/btree.c 		struct bio_vec *bv;
bv                436 drivers/md/bcache/btree.c 		bio_for_each_segment_all(bv, b->bio, iter_all) {
bv                437 drivers/md/bcache/btree.c 			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
bv                111 drivers/md/bcache/debug.c 	struct bio_vec bv, cbv;
bv                129 drivers/md/bcache/debug.c 	bio_for_each_segment(bv, bio, iter) {
bv                130 drivers/md/bcache/debug.c 		void *p1 = kmap_atomic(bv.bv_page);
bv                136 drivers/md/bcache/debug.c 		cache_set_err_on(memcmp(p1 + bv.bv_offset,
bv                137 drivers/md/bcache/debug.c 					p2 + bv.bv_offset,
bv                138 drivers/md/bcache/debug.c 					bv.bv_len),
bv                145 drivers/md/bcache/debug.c 		bio_advance_iter(check, &citer, bv.bv_len);
bv                156 drivers/md/bcache/journal.h 	struct bio_vec		bv[8];
bv                 42 drivers/md/bcache/request.c 	struct bio_vec bv;
bv                 46 drivers/md/bcache/request.c 	bio_for_each_segment(bv, bio, iter) {
bv                 47 drivers/md/bcache/request.c 		void *d = kmap(bv.bv_page) + bv.bv_offset;
bv                 49 drivers/md/bcache/request.c 		csum = bch_crc64_update(csum, d, bv.bv_len);
bv                 50 drivers/md/bcache/request.c 		kunmap(bv.bv_page);
bv                234 drivers/md/bcache/util.c 	struct bio_vec *bv = bio->bi_io_vec;
bv                239 drivers/md/bcache/util.c 	bv->bv_offset = base ? offset_in_page(base) : 0;
bv                242 drivers/md/bcache/util.c 	for (; size; bio->bi_vcnt++, bv++) {
bv                243 drivers/md/bcache/util.c 		bv->bv_offset	= 0;
bv                244 drivers/md/bcache/util.c start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
bv                247 drivers/md/bcache/util.c 			bv->bv_page = is_vmalloc_addr(base)
bv                251 drivers/md/bcache/util.c 			base += bv->bv_len;
bv                254 drivers/md/bcache/util.c 		size -= bv->bv_len;
bv                271 drivers/md/bcache/util.c 	struct bio_vec *bv;
bv                277 drivers/md/bcache/util.c 	for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {
bv                278 drivers/md/bcache/util.c 		bv->bv_page = alloc_page(gfp_mask);
bv                279 drivers/md/bcache/util.c 		if (!bv->bv_page) {
bv                280 drivers/md/bcache/util.c 			while (--bv >= bio->bi_io_vec)
bv                281 drivers/md/bcache/util.c 				__free_page(bv->bv_page);
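
The bcache request.c hits checksum a bio by mapping each segment and folding its bytes into a running value. A minimal sketch of the same walk, assuming a hypothetical helper and substituting crc32() for bch_crc64_update():

#include <linux/bio.h>
#include <linux/crc32.h>
#include <linux/highmem.h>

static u32 example_bio_csum(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	u32 csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = crc32(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}
	return csum;
}
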
bv               1348 drivers/md/dm-crypt.c 	struct bio_vec *bv;
bv               1351 drivers/md/dm-crypt.c 	bio_for_each_segment_all(bv, clone, iter_all) {
bv               1352 drivers/md/dm-crypt.c 		BUG_ON(!bv->bv_page);
bv               1353 drivers/md/dm-crypt.c 		mempool_free(bv->bv_page, &cc->page_pool);
bv               1512 drivers/md/dm-integrity.c 		struct bio_vec bv;
bv               1535 drivers/md/dm-integrity.c 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
bv               1540 drivers/md/dm-integrity.c 			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
bv               1549 drivers/md/dm-integrity.c 			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
bv               1569 drivers/md/dm-integrity.c 			if (unlikely(pos < bv.bv_len)) {
bv               1570 drivers/md/dm-integrity.c 				bv.bv_offset += pos;
bv               1571 drivers/md/dm-integrity.c 				bv.bv_len -= pos;
bv               1654 drivers/md/dm-integrity.c 		struct bio_vec bv;
bv               1655 drivers/md/dm-integrity.c 		bio_for_each_segment(bv, bio, iter) {
bv               1656 drivers/md/dm-integrity.c 			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
bv               1658 drivers/md/dm-integrity.c 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
bv               1706 drivers/md/dm-integrity.c 		struct bio_vec bv = bio_iovec(bio);
bv               1709 drivers/md/dm-integrity.c 		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
bv               1710 drivers/md/dm-integrity.c 			bv.bv_len = n_sectors << SECTOR_SHIFT;
bv               1711 drivers/md/dm-integrity.c 		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
bv               1712 drivers/md/dm-integrity.c 		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
bv               1714 drivers/md/dm-integrity.c 		mem = kmap_atomic(bv.bv_page);
bv               1716 drivers/md/dm-integrity.c 			flush_dcache_page(bv.bv_page);
bv               1727 drivers/md/dm-integrity.c 					flush_dcache_page(bv.bv_page);
bv               1736 drivers/md/dm-integrity.c 				mem_ptr = mem + bv.bv_offset;
bv               1748 drivers/md/dm-integrity.c 					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
bv               1786 drivers/md/dm-integrity.c 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
bv               1814 drivers/md/dm-integrity.c 			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
bv               1815 drivers/md/dm-integrity.c 		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
bv               1818 drivers/md/dm-integrity.c 			flush_dcache_page(bv.bv_page);
bv                671 drivers/md/dm-log-writes.c 	struct bio_vec bv;
bv                754 drivers/md/dm-log-writes.c 	bio_for_each_segment(bv, bio, iter) {
bv                768 drivers/md/dm-log-writes.c 		src = kmap_atomic(bv.bv_page);
bv                770 drivers/md/dm-log-writes.c 		memcpy(dst, src + bv.bv_offset, bv.bv_len);
bv                774 drivers/md/dm-log-writes.c 		block->vecs[i].bv_len = bv.bv_len;
bv                378 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
bv                382 drivers/md/dm-verity-target.c 		len = bv.bv_len;
bv                391 drivers/md/dm-verity-target.c 		sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
bv                424 drivers/md/dm-verity-target.c 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
bv                426 drivers/md/dm-verity-target.c 		page = kmap_atomic(bv.bv_page);
bv                427 drivers/md/dm-verity-target.c 		len = bv.bv_len;
bv                432 drivers/md/dm-verity-target.c 		r = process(v, io, page + bv.bv_offset, len);
bv               1075 drivers/md/dm-writecache.c 		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
bv               1076 drivers/md/dm-writecache.c 		buf = bvec_kmap_irq(&bv, &flags);
bv               1077 drivers/md/dm-writecache.c 		size = bv.bv_len;
bv                 54 drivers/md/persistent-data/dm-bitset.c 	bool bv;
bv                 57 drivers/md/persistent-data/dm-bitset.c 		r = p->fn(index * 64 + bit, &bv, p->context);
bv                 61 drivers/md/persistent-data/dm-bitset.c 		if (bv)
bv                478 drivers/media/usb/usbvision/usbvision-core.c 	unsigned char rv, gv, bv;	/* RGB components */
bv                507 drivers/media/usb/usbvision/usbvision-core.c 			YUV_TO_RGB_BY_THE_BOOK(yuyv[0], yuyv[1], yuyv[3], rv, gv, bv);
bv                513 drivers/media/usb/usbvision/usbvision-core.c 					(0xF8 &  bv);
bv                518 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv                523 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv                530 drivers/media/usb/usbvision/usbvision-core.c 					(0x7C & (bv << 2));
bv                541 drivers/media/usb/usbvision/usbvision-core.c 			YUV_TO_RGB_BY_THE_BOOK(yuyv[2], yuyv[1], yuyv[3], rv, gv, bv);
bv                547 drivers/media/usb/usbvision/usbvision-core.c 					(0xF8 &  bv);
bv                552 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv                557 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv                564 drivers/media/usb/usbvision/usbvision-core.c 					(0x7C & (bv << 2));
bv                687 drivers/media/usb/usbvision/usbvision-core.c 	unsigned char rv, gv, bv;
bv                796 drivers/media/usb/usbvision/usbvision-core.c 			YUV_TO_RGB_BY_THE_BOOK(Y[idx], U[idx / 2], V[idx / 2], rv, gv, bv);
bv                805 drivers/media/usb/usbvision/usbvision-core.c 					(0x7C & (bv << 2));
bv                811 drivers/media/usb/usbvision/usbvision-core.c 					(0xF8 & bv);
bv                816 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv                821 drivers/media/usb/usbvision/usbvision-core.c 				*f++ = bv;
bv               4528 drivers/net/ethernet/netronome/nfp/bpf/jit.c void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
bv               4549 drivers/net/ethernet/netronome/nfp/bpf/jit.c 			br_add_offset(&prog[i], bv->start_off);
bv               4553 drivers/net/ethernet/netronome/nfp/bpf/jit.c 				      nfp_prog->tgt_out + bv->start_off);
bv               4557 drivers/net/ethernet/netronome/nfp/bpf/jit.c 				      nfp_prog->tgt_abort + bv->start_off);
bv               4565 drivers/net/ethernet/netronome/nfp/bpf/jit.c 			off = nfp_prog->tgt_call_push_regs + bv->start_off;
bv               4574 drivers/net/ethernet/netronome/nfp/bpf/jit.c 			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
bv               4578 drivers/net/ethernet/netronome/nfp/bpf/jit.c 			br_set_offset(&prog[i], bv->tgt_done);
bv               4605 drivers/net/ethernet/netronome/nfp/bpf/jit.c 			immed_add_value(&prog[i], bv->start_off);
bv                 66 drivers/net/ethernet/netronome/nfp/bpf/main.c 	struct nfp_bpf_vnic *bv;
bv                 79 drivers/net/ethernet/netronome/nfp/bpf/main.c 	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
bv                 80 drivers/net/ethernet/netronome/nfp/bpf/main.c 	if (!bv)
bv                 82 drivers/net/ethernet/netronome/nfp/bpf/main.c 	nn->app_priv = bv;
bv                 88 drivers/net/ethernet/netronome/nfp/bpf/main.c 	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
bv                 89 drivers/net/ethernet/netronome/nfp/bpf/main.c 	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
bv                 99 drivers/net/ethernet/netronome/nfp/bpf/main.c 	struct nfp_bpf_vnic *bv = nn->app_priv;
bv                101 drivers/net/ethernet/netronome/nfp/bpf/main.c 	WARN_ON(bv->tc_prog);
bv                102 drivers/net/ethernet/netronome/nfp/bpf/main.c 	kfree(bv);
bv                111 drivers/net/ethernet/netronome/nfp/bpf/main.c 	struct nfp_bpf_vnic *bv;
bv                143 drivers/net/ethernet/netronome/nfp/bpf/main.c 	bv = nn->app_priv;
bv                147 drivers/net/ethernet/netronome/nfp/bpf/main.c 	if (bv->tc_prog != oldprog) {
bv                158 drivers/net/ethernet/netronome/nfp/bpf/main.c 	bv->tc_prog = cls_bpf->prog;
bv                159 drivers/net/ethernet/netronome/nfp/bpf/main.c 	nn->port->tc_offload_cnt = !!bv->tc_prog;
bv                587 drivers/net/ethernet/netronome/nfp/bpf/main.h void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
bv                349 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	bool bv;
bv                399 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	bv = ieee80211_is_probe_resp(fc);
bv                400 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c 	if (bv)
bv               2024 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		u16 bv = bit_value_template;
bv               2026 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 			bv |= RF_DATA;
bv               2027 drivers/net/wireless/zydas/zd1211rw/zd_usb.c 		req->bit_values[i] = cpu_to_le16(bv);
bv                 81 drivers/nvdimm/blk.c 		struct bio_vec bv;
bv                 84 drivers/nvdimm/blk.c 		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
bv                 91 drivers/nvdimm/blk.c 		cur_len = min(len, bv.bv_len);
bv                 92 drivers/nvdimm/blk.c 		iobuf = kmap_atomic(bv.bv_page);
bv                 93 drivers/nvdimm/blk.c 		err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
bv               1155 drivers/nvdimm/btt.c 		struct bio_vec bv;
bv               1158 drivers/nvdimm/btt.c 		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
bv               1165 drivers/nvdimm/btt.c 		cur_len = min(len, bv.bv_len);
bv               1166 drivers/nvdimm/btt.c 		mem = kmap_atomic(bv.bv_page);
bv               1169 drivers/nvdimm/btt.c 					mem + bv.bv_offset, cur_len,
bv               1173 drivers/nvdimm/btt.c 					mem + bv.bv_offset, cur_len,
bv                760 drivers/nvme/host/pci.c 		struct bio_vec *bv)
bv                763 drivers/nvme/host/pci.c 	unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
bv                766 drivers/nvme/host/pci.c 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
bv                769 drivers/nvme/host/pci.c 	iod->dma_len = bv->bv_len;
bv                772 drivers/nvme/host/pci.c 	if (bv->bv_len > first_prp_len)
bv                779 drivers/nvme/host/pci.c 		struct bio_vec *bv)
bv                783 drivers/nvme/host/pci.c 	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
bv                786 drivers/nvme/host/pci.c 	iod->dma_len = bv->bv_len;
bv                803 drivers/nvme/host/pci.c 		struct bio_vec bv = req_bvec(req);
bv                805 drivers/nvme/host/pci.c 		if (!is_pci_p2pdma_page(bv.bv_page)) {
bv                806 drivers/nvme/host/pci.c 			if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
bv                808 drivers/nvme/host/pci.c 							     &cmnd->rw, &bv);
bv                813 drivers/nvme/host/pci.c 							     &cmnd->rw, &bv);
bv                 83 drivers/nvme/target/io-cmd-file.c static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
bv                 85 drivers/nvme/target/io-cmd-file.c 	bv->bv_page = sg_page(sg);
bv                 86 drivers/nvme/target/io-cmd-file.c 	bv->bv_offset = sg->offset;
bv                 87 drivers/nvme/target/io-cmd-file.c 	bv->bv_len = sg->length;
bv                507 drivers/s390/block/dasd_diag.c 	struct bio_vec bv;
bv                527 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
bv                528 drivers/s390/block/dasd_diag.c 		if (bv.bv_len & (blksize - 1))
bv                531 drivers/s390/block/dasd_diag.c 		count += bv.bv_len >> (block->s2b_shift + 9);
bv                548 drivers/s390/block/dasd_diag.c 	rq_for_each_segment(bv, req, iter) {
bv                549 drivers/s390/block/dasd_diag.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv                550 drivers/s390/block/dasd_diag.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv               3167 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               3209 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               3210 drivers/s390/block/dasd_eckd.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv               3211 drivers/s390/block/dasd_eckd.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv               3845 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               3866 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               3867 drivers/s390/block/dasd_eckd.c 		if (bv.bv_len & (blksize - 1))
bv               3870 drivers/s390/block/dasd_eckd.c 		count += bv.bv_len >> (block->s2b_shift + 9);
bv               3871 drivers/s390/block/dasd_eckd.c 		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
bv               3872 drivers/s390/block/dasd_eckd.c 			cidaw += bv.bv_len >> (block->s2b_shift + 9);
bv               3941 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               3942 drivers/s390/block/dasd_eckd.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv               3947 drivers/s390/block/dasd_eckd.c 				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
bv               3949 drivers/s390/block/dasd_eckd.c 				dst = copy + bv.bv_offset;
bv               3951 drivers/s390/block/dasd_eckd.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv               4035 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               4108 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               4109 drivers/s390/block/dasd_eckd.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv               4110 drivers/s390/block/dasd_eckd.c 		seg_len = bv.bv_len;
bv               4365 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               4401 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               4451 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
bv               4452 drivers/s390/block/dasd_eckd.c 			dst = page_address(bv.bv_page) + bv.bv_offset;
bv               4453 drivers/s390/block/dasd_eckd.c 			seg_len = bv.bv_len;
bv               4484 drivers/s390/block/dasd_eckd.c 		rq_for_each_segment(bv, req, iter) {
bv               4485 drivers/s390/block/dasd_eckd.c 			dst = page_address(bv.bv_page) + bv.bv_offset;
bv               4487 drivers/s390/block/dasd_eckd.c 						    dst, bv.bv_len);
bv               4621 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               4720 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               4721 drivers/s390/block/dasd_eckd.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv               4722 drivers/s390/block/dasd_eckd.c 		seg_len = bv.bv_len;
bv               4764 drivers/s390/block/dasd_eckd.c 	struct bio_vec bv;
bv               4781 drivers/s390/block/dasd_eckd.c 	rq_for_each_segment(bv, req, iter) {
bv               4782 drivers/s390/block/dasd_eckd.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv               4783 drivers/s390/block/dasd_eckd.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv               4794 drivers/s390/block/dasd_eckd.c 						memcpy(dst, cda, bv.bv_len);
bv                448 drivers/s390/block/dasd_fba.c 	struct bio_vec bv;
bv                469 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
bv                470 drivers/s390/block/dasd_fba.c 		if (bv.bv_len & (blksize - 1))
bv                473 drivers/s390/block/dasd_fba.c 		count += bv.bv_len >> (block->s2b_shift + 9);
bv                474 drivers/s390/block/dasd_fba.c 		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
bv                475 drivers/s390/block/dasd_fba.c 			cidaw += bv.bv_len / blksize;
bv                511 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
bv                512 drivers/s390/block/dasd_fba.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv                517 drivers/s390/block/dasd_fba.c 				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
bv                519 drivers/s390/block/dasd_fba.c 				dst = copy + bv.bv_offset;
bv                521 drivers/s390/block/dasd_fba.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv                580 drivers/s390/block/dasd_fba.c 	struct bio_vec bv;
bv                593 drivers/s390/block/dasd_fba.c 	rq_for_each_segment(bv, req, iter) {
bv                594 drivers/s390/block/dasd_fba.c 		dst = page_address(bv.bv_page) + bv.bv_offset;
bv                595 drivers/s390/block/dasd_fba.c 		for (off = 0; off < bv.bv_len; off += blksize) {
bv                606 drivers/s390/block/dasd_fba.c 						memcpy(dst, cda, bv.bv_len);
bv                188 drivers/s390/block/scm_blk.c 	struct bio_vec bv;
bv                201 drivers/s390/block/scm_blk.c 	rq_for_each_segment(bv, req, iter) {
bv                202 drivers/s390/block/scm_blk.c 		WARN_ON(bv.bv_offset);
bv                203 drivers/s390/block/scm_blk.c 		msb->blk_count += bv.bv_len >> 12;
bv                204 drivers/s390/block/scm_blk.c 		aidaw->data_addr = (u64) page_address(bv.bv_page);
bv                408 drivers/s390/net/ism_drv.c 	unsigned long *bv;
bv                410 drivers/s390/net/ism_drv.c 	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
bv                417 drivers/s390/net/ism_drv.c 		bit = find_next_bit_inv(bv, end, bit);
bv                421 drivers/s390/net/ism_drv.c 		clear_bit_inv(bit, bv);
bv               2467 drivers/usb/serial/io_ti.c 	int bv = 0;	/* Off */
bv               2470 drivers/usb/serial/io_ti.c 		bv = 1;	/* On */
bv               2471 drivers/usb/serial/io_ti.c 	status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
bv                759 drivers/video/fbdev/matrox/matroxfb_maven.c 	unsigned int a, bv, c;
bv                922 drivers/video/fbdev/matrox/matroxfb_maven.c 	DAC1064_calcclock(mt->pixclock, 450000, &a, &bv, &c);
bv                924 drivers/video/fbdev/matrox/matroxfb_maven.c 	m->regs[0x81] = bv;
bv                316 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	int bv;
bv                318 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	bv = level | (1 << 8);
bv                319 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
bv                329 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	u8 bv;
bv                331 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
bv                333 drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c 	return bv;
bv                283 fs/afs/rxrpc.c 			  struct bio_vec *bv, pgoff_t first, pgoff_t last,
bv                300 fs/afs/rxrpc.c 		bv[i].bv_page = pages[i];
bv                301 fs/afs/rxrpc.c 		bv[i].bv_len = to - offset;
bv                302 fs/afs/rxrpc.c 		bv[i].bv_offset = offset;
bv                307 fs/afs/rxrpc.c 	iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
bv                327 fs/afs/rxrpc.c 	struct bio_vec bv[AFS_BVEC_MAX];
bv                336 fs/afs/rxrpc.c 		afs_load_bvec(call, msg, bv, first, last, offset);
bv                346 fs/afs/rxrpc.c 			put_page(bv[loop].bv_page);
bv                105 fs/ceph/file.c 			struct bio_vec bv = {
bv                111 fs/ceph/file.c 			bvecs[bvec_idx] = bv;
bv                112 fs/ceph/file.c 			bytes -= bv.bv_len;
bv                131 fs/ceph/file.c 	struct bio_vec *bv;
bv                144 fs/ceph/file.c 	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
bv                145 fs/ceph/file.c 	if (!bv)
bv                148 fs/ceph/file.c 	bytes = __iter_get_bvecs(iter, maxsize, bv);
bv                153 fs/ceph/file.c 		kvfree(bv);
bv                157 fs/ceph/file.c 	*bvecs = bv;
bv                434 fs/cifs/cifs_debug.c 	bool bv;
bv                441 fs/cifs/cifs_debug.c 	rc = kstrtobool_from_user(buffer, count, &bv);
bv                729 fs/cifs/cifs_debug.c 	bool bv;
bv                735 fs/cifs/cifs_debug.c 	if (strtobool(c, &bv) == 0)
bv                736 fs/cifs/cifs_debug.c 		cifsFYI = bv;
bv                891 fs/cifs/cifs_debug.c 	bool bv;
bv                903 fs/cifs/cifs_debug.c 		if (strtobool(flags_string, &bv) == 0) {
bv                904 fs/cifs/cifs_debug.c 			global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
bv               1290 fs/cifs/cifsglob.h 	struct bio_vec		*bv;
bv                859 fs/cifs/connect.c 	struct bio_vec bv = {
bv                861 fs/cifs/connect.c 	iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
bv                822 fs/cifs/misc.c 	if (ctx->bv) {
bv                827 fs/cifs/misc.c 				set_page_dirty(ctx->bv[i].bv_page);
bv                828 fs/cifs/misc.c 			put_page(ctx->bv[i].bv_page);
bv                830 fs/cifs/misc.c 		kvfree(ctx->bv);
bv                851 fs/cifs/misc.c 	struct bio_vec *bv = NULL;
bv                861 fs/cifs/misc.c 		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
bv                864 fs/cifs/misc.c 	if (!bv) {
bv                865 fs/cifs/misc.c 		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
bv                866 fs/cifs/misc.c 		if (!bv)
bv                877 fs/cifs/misc.c 			kvfree(bv);
bv                910 fs/cifs/misc.c 			bv[npages + i].bv_page = pages[i];
bv                911 fs/cifs/misc.c 			bv[npages + i].bv_offset = start;
bv                912 fs/cifs/misc.c 			bv[npages + i].bv_len = len - start;
bv                921 fs/cifs/misc.c 	ctx->bv = bv;
bv                924 fs/cifs/misc.c 	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
bv                 31 fs/crypto/bio.c 	struct bio_vec *bv;
bv                 34 fs/crypto/bio.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bv                 35 fs/crypto/bio.c 		struct page *page = bv->bv_page;
bv                 36 fs/crypto/bio.c 		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
bv                 37 fs/crypto/bio.c 							   bv->bv_offset);
bv                837 fs/debugfs/file.c 	bool bv;
bv                842 fs/debugfs/file.c 	r = kstrtobool_from_user(user_buf, count, &bv);
bv                847 fs/debugfs/file.c 		*val = bv;
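
In the cifs and debugfs hits, bv is simply a parsed boolean fed by kstrtobool_from_user() or strtobool(). A minimal sketch of that pattern, with hypothetical function and target names:

#include <linux/kernel.h>
#include <linux/types.h>

static ssize_t example_write_bool(const char __user *buf, size_t count,
				  bool *target)
{
	bool bv;
	int rc;

	rc = kstrtobool_from_user(buf, count, &bv);
	if (rc)
		return rc;

	*target = bv;
	return count;
}
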
bv                 73 fs/ext4/readpage.c 	struct bio_vec *bv;
bv                 76 fs/ext4/readpage.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bv                 77 fs/ext4/readpage.c 		page = bv->bv_page;
bv                 90 fs/f2fs/data.c 	struct bio_vec *bv;
bv                 93 fs/f2fs/data.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bv                 94 fs/f2fs/data.c 		page = bv->bv_page;
bv                 49 fs/mpage.c     	struct bio_vec *bv;
bv                 52 fs/mpage.c     	bio_for_each_segment_all(bv, bio, iter_all) {
bv                 53 fs/mpage.c     		struct page *page = bv->bv_page;
bv                 24 fs/orangefs/inode.c 	struct bio_vec bv;
bv                 51 fs/orangefs/inode.c 	bv.bv_page = page;
bv                 52 fs/orangefs/inode.c 	bv.bv_len = wlen;
bv                 53 fs/orangefs/inode.c 	bv.bv_offset = off % PAGE_SIZE;
bv                 55 fs/orangefs/inode.c 	iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);
bv                 91 fs/orangefs/inode.c 	struct bio_vec *bv;
bv                109 fs/orangefs/inode.c 		ow->bv[i].bv_page = ow->pages[i];
bv                110 fs/orangefs/inode.c 		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
bv                114 fs/orangefs/inode.c 			ow->bv[i].bv_offset = ow->off -
bv                117 fs/orangefs/inode.c 			ow->bv[i].bv_offset = 0;
bv                119 fs/orangefs/inode.c 	iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);
bv                233 fs/orangefs/inode.c 	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
bv                234 fs/orangefs/inode.c 	if (!ow->bv) {
bv                245 fs/orangefs/inode.c 	kfree(ow->bv);
bv                256 fs/orangefs/inode.c 	struct bio_vec bv;
bv                308 fs/orangefs/inode.c 	bv.bv_page = page;
bv                309 fs/orangefs/inode.c 	bv.bv_len = PAGE_SIZE;
bv                310 fs/orangefs/inode.c 	bv.bv_offset = 0;
bv                311 fs/orangefs/inode.c 	iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);
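
The orangefs lines (like the cifs, afs and mm/page_io ones elsewhere in this listing) wrap pages in bio_vecs purely to drive an iov_iter via iov_iter_bvec(). A minimal sketch, function name hypothetical; note the bio_vec must stay live for as long as the iterator is used:

#include <linux/bvec.h>
#include <linux/mm.h>
#include <linux/uio.h>

static void example_iter_over_page(struct iov_iter *iter, struct bio_vec *bv,
				   struct page *page)
{
	bv->bv_page = page;
	bv->bv_len = PAGE_SIZE;
	bv->bv_offset = 0;
	iov_iter_bvec(iter, READ, bv, 1, PAGE_SIZE);
}
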
bv                226 fs/verity/verify.c 	struct bio_vec *bv;
bv                231 fs/verity/verify.c 		bio_for_each_segment_all(bv, bio, iter_all)
bv                232 fs/verity/verify.c 			SetPageError(bv->bv_page);
bv                236 fs/verity/verify.c 	bio_for_each_segment_all(bv, bio, iter_all) {
bv                237 fs/verity/verify.c 		struct page *page = bv->bv_page;
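
The fs/crypto, ext4, f2fs, mpage and fs/verity hits all walk a completed bio with bio_for_each_segment_all(), which hands back pointers into bi_io_vec and is therefore only safe once I/O has finished. A sketch of such a completion handler, name hypothetical:

#include <linux/bio.h>
#include <linux/pagemap.h>

static void example_read_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (bio->bi_status)
			SetPageError(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
}
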
bv                177 include/linux/bio.h 	struct bio_vec bv;
bv                196 include/linux/bio.h 	bio_for_each_segment(bv, bio, iter)
bv                247 include/linux/bio.h static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
bv                249 include/linux/bio.h 	*bv = bio_iovec(bio);
bv                252 include/linux/bio.h static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv                258 include/linux/bio.h 		*bv = bio_iovec(bio);
bv                269 include/linux/bio.h 	*bv = bio->bi_io_vec[idx];
bv                276 include/linux/bio.h 		bv->bv_len = iter.bi_bvec_done;
bv                680 include/linux/blkdev.h #define dma_map_bvec(dev, bv, dir, attrs) \
bv                681 include/linux/blkdev.h 	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
bv                 36 include/linux/bvec.h 	struct bio_vec	bv;
bv                 87 include/linux/bvec.h static inline bool bvec_iter_advance(const struct bio_vec *bv,
bv                 97 include/linux/bvec.h 		const struct bio_vec *cur = bv + iter->bi_idx;
bv                133 include/linux/bvec.h 	return &iter_all->bv;
bv                139 include/linux/bvec.h 	struct bio_vec *bv = &iter_all->bv;
bv                142 include/linux/bvec.h 		bv->bv_page++;
bv                143 include/linux/bvec.h 		bv->bv_offset = 0;
bv                145 include/linux/bvec.h 		bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
bv                146 include/linux/bvec.h 		bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
bv                148 include/linux/bvec.h 	bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
bv                150 include/linux/bvec.h 	iter_all->done += bv->bv_len;
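
The bvec.h hits define the iterator machinery itself; for_each_bvec() walks a bare bio_vec array with a bvec_iter, which is how the net/sunrpc lines near the end of this listing flush their pages. A minimal sketch under that assumption, function name hypothetical:

#include <linux/bvec.h>
#include <linux/highmem.h>

static void example_flush_bvecs(const struct bio_vec *bvec, size_t bytes)
{
	struct bio_vec bv;
	struct bvec_iter bi = {
		.bi_size = bytes,	/* total payload described by bvec[] */
	};

	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
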
bv                120 include/linux/ceph/messenger.h 		struct bio_vec bv;					      \
bv                125 include/linux/ceph/messenger.h 		__bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \
bv                153 include/linux/ceph/messenger.h 		struct bio_vec bv;					      \
bv                158 include/linux/ceph/messenger.h 		for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter)	      \
bv                289 mm/page_io.c   		struct bio_vec bv = {
bv                296 mm/page_io.c   		iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
bv                833 net/ceph/messenger.c 	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
bv                836 net/ceph/messenger.c 	*page_offset = bv.bv_offset;
bv                837 net/ceph/messenger.c 	*length = bv.bv_len;
bv                838 net/ceph/messenger.c 	return bv.bv_page;
bv                894 net/ceph/messenger.c 	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
bv                897 net/ceph/messenger.c 	*page_offset = bv.bv_offset;
bv                898 net/ceph/messenger.c 	*length = bv.bv_len;
bv                899 net/ceph/messenger.c 	return bv.bv_page;
bv                388 net/sunrpc/xprtsock.c 	struct bio_vec bv;
bv                391 net/sunrpc/xprtsock.c 	for_each_bvec(bv, bvec, bi, bi)
bv                392 net/sunrpc/xprtsock.c 		flush_dcache_page(bv.bv_page);
bv                203 scripts/sortextable.c 	int32_t bv = (int32_t)r(b);
bv                205 scripts/sortextable.c 	if (av < bv)
bv                207 scripts/sortextable.c 	if (av > bv)
bv                 81 scripts/sortextable.h 	Elf_Addr bv = _r(b);
bv                 83 scripts/sortextable.h 	if (av < bv)
bv                 85 scripts/sortextable.h 	if (av > bv)