rbi                52 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->outbound;
rbi                55 drivers/hv/ring_buffer.c 	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
rbi                64 drivers/hv/ring_buffer.c 	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
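
The first three hits are the writer-side signalling decision in hv_ringbuffer_write(): the host is interrupted only when it has not masked interrupts and the ring was empty before this write (the old write position equals the reader's index), i.e. on the empty-to-non-empty transition. A minimal sketch of that rule, with illustrative names of my own:

#include <stdint.h>
#include <stdbool.h>

/* Sketch (not the kernel's code) of the empty-to-non-empty rule
 * above: interrupt the consumer only if it has not masked
 * interrupts and it had already caught up to the old write
 * position, i.e. it may have gone idle with nothing to read. */
static bool writer_should_signal(uint32_t interrupt_mask,
				 uint32_t old_write_index,
				 uint32_t read_index)
{
	if (interrupt_mask)		/* consumer is busy polling */
		return false;
	return old_write_index == read_index;
}
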
rbi               140 drivers/hv/ring_buffer.c hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
rbi               146 drivers/hv/ring_buffer.c 	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
rbi               147 drivers/hv/ring_buffer.c 	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
rbi               148 drivers/hv/ring_buffer.c 	dsize = rbi->ring_datasize;
rbi               378 drivers/hv/ring_buffer.c static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
rbi               380 drivers/hv/ring_buffer.c 	u32 priv_read_loc = rbi->priv_read_index;
rbi               381 drivers/hv/ring_buffer.c 	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
rbi               386 drivers/hv/ring_buffer.c 		return (rbi->ring_datasize - priv_read_loc) + write_loc;
rbi               396 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi               399 drivers/hv/ring_buffer.c 	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
rbi               402 drivers/hv/ring_buffer.c 	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
rbi               420 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi               422 drivers/hv/ring_buffer.c 	u32 dsize = rbi->ring_datasize;
rbi               425 drivers/hv/ring_buffer.c 	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
rbi               426 drivers/hv/ring_buffer.c 	if (rbi->priv_read_index >= dsize)
rbi               427 drivers/hv/ring_buffer.c 		rbi->priv_read_index -= dsize;
rbi               435 drivers/hv/ring_buffer.c static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
rbi               438 drivers/hv/ring_buffer.c 	if (rbi->priv_read_index >= start_read_index)
rbi               439 drivers/hv/ring_buffer.c 		return rbi->priv_read_index - start_read_index;
rbi               441 drivers/hv/ring_buffer.c 		return rbi->ring_datasize - start_read_index +
rbi               442 drivers/hv/ring_buffer.c 			rbi->priv_read_index;
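
hv_get_ringbuffer_availbytes(), hv_pkt_iter_avail(), the iterator advance, and hv_pkt_iter_bytes_read() above all use the same modular arithmetic: a forward distance is b - a when b >= a and dsize - a + b otherwise, and the private read index wraps by a single compare-and-subtract because one step is always smaller than the ring. A self-contained userspace model (types and names are mine, not the kernel's):

#include <stdint.h>

struct ring_model {
	uint32_t read_index;	/* consumer-owned */
	uint32_t write_index;	/* producer-owned */
	uint32_t dsize;		/* bytes in the data area */
};

/* Forward distance from read to write: bytes ready to read. */
static uint32_t ring_avail_to_read(const struct ring_model *r)
{
	if (r->write_index >= r->read_index)
		return r->write_index - r->read_index;
	return (r->dsize - r->read_index) + r->write_index;
}

/* Advance past one packet; compare-and-subtract suffices instead
 * of '%' because a packet is always smaller than the ring. */
static void ring_advance_read(struct ring_model *r, uint32_t step)
{
	r->read_index += step;
	if (r->read_index >= r->dsize)
		r->read_index -= r->dsize;
}

/* Bytes consumed since a saved starting index, same modular form. */
static uint32_t ring_bytes_read(const struct ring_model *r,
				uint32_t start_read_index)
{
	if (r->read_index >= start_read_index)
		return r->read_index - start_read_index;
	return r->dsize - start_read_index + r->read_index;
}
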
rbi               467 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi               476 drivers/hv/ring_buffer.c 	start_read_index = rbi->ring_buffer->read_index;
rbi               477 drivers/hv/ring_buffer.c 	rbi->ring_buffer->read_index = rbi->priv_read_index;
rbi               484 drivers/hv/ring_buffer.c 	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
rbi               503 drivers/hv/ring_buffer.c 	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
rbi               512 drivers/hv/ring_buffer.c 	curr_write_sz = hv_get_bytes_to_write(rbi);
rbi               513 drivers/hv/ring_buffer.c 	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
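
The remaining ring_buffer.c hits are hv_pkt_iter_close(): the private read index is published to the shared read_index, and the host is signalled only when it has advertised a pending_send_sz and this read pushed the writable space across that threshold. A hedged model of the decision, mirroring the comparisons visible above:

#include <stdint.h>
#include <stdbool.h>

/* Model of the pending-send-size handshake: the writer publishes
 * how many bytes it is waiting for; the reader signals only when
 * the space it just freed crosses that threshold, so each stall
 * produces one wakeup rather than one per packet read. */
static bool reader_should_signal(uint32_t pending_sz,
				 uint32_t curr_write_sz, /* space now */
				 uint32_t bytes_read)	 /* just freed */
{
	if (!pending_sz)		/* writer is not waiting */
		return false;
	if (curr_write_sz <= pending_sz)/* still not enough room */
		return false;
	/* If there was already enough room before this read, the
	 * writer was unblocked by an earlier pass, not by us. */
	return curr_write_sz - bytes_read <= pending_sz;
}
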
rbi              1561 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->outbound;
rbi              1564 drivers/hv/vmbus_drv.c 	mutex_lock(&rbi->ring_buffer_mutex);
rbi              1565 drivers/hv/vmbus_drv.c 	if (!rbi->ring_buffer) {
rbi              1566 drivers/hv/vmbus_drv.c 		mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1570 drivers/hv/vmbus_drv.c 	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
rbi              1571 drivers/hv/vmbus_drv.c 	mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1578 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi              1581 drivers/hv/vmbus_drv.c 	mutex_lock(&rbi->ring_buffer_mutex);
rbi              1582 drivers/hv/vmbus_drv.c 	if (!rbi->ring_buffer) {
rbi              1583 drivers/hv/vmbus_drv.c 		mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1587 drivers/hv/vmbus_drv.c 	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
rbi              1588 drivers/hv/vmbus_drv.c 	mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1595 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi              1598 drivers/hv/vmbus_drv.c 	mutex_lock(&rbi->ring_buffer_mutex);
rbi              1599 drivers/hv/vmbus_drv.c 	if (!rbi->ring_buffer) {
rbi              1600 drivers/hv/vmbus_drv.c 		mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1604 drivers/hv/vmbus_drv.c 	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
rbi              1605 drivers/hv/vmbus_drv.c 	mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1612 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->outbound;
rbi              1615 drivers/hv/vmbus_drv.c 	mutex_lock(&rbi->ring_buffer_mutex);
rbi              1616 drivers/hv/vmbus_drv.c 	if (!rbi->ring_buffer) {
rbi              1617 drivers/hv/vmbus_drv.c 		mutex_unlock(&rbi->ring_buffer_mutex);
rbi              1621 drivers/hv/vmbus_drv.c 	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
rbi              1622 drivers/hv/vmbus_drv.c 	mutex_unlock(&rbi->ring_buffer_mutex);
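
All four vmbus_drv.c attribute callbacks above share one shape: take ring_buffer_mutex, bail out if the ring buffer has already been unmapped, format a single value, unlock. The mutex is what makes the NULL check meaningful against concurrent channel teardown. A pthread sketch of the same shape ('value' stands in for rbi->ring_buffer being non-NULL):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

struct attr_ring {
	pthread_mutex_t lock;
	uint32_t *value;	/* NULL once torn down */
};

static int attr_show(struct attr_ring *a, char *buf, size_t len)
{
	int ret;

	pthread_mutex_lock(&a->lock);
	if (!a->value) {
		pthread_mutex_unlock(&a->lock);
		return -1;	/* the kernel code returns -EINVAL */
	}
	ret = snprintf(buf, len, "%u\n", *a->value);
	pthread_mutex_unlock(&a->lock);
	return ret;
}
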
rbi               997 drivers/md/raid5.c 		struct bio *bi, *rbi;
rbi              1020 drivers/md/raid5.c 		rbi = &sh->dev[i].rreq; /* For writing to replacement */
rbi              1163 drivers/md/raid5.c 			bio_set_dev(rbi, rrdev->bdev);
rbi              1164 drivers/md/raid5.c 			bio_set_op_attrs(rbi, op, op_flags);
rbi              1166 drivers/md/raid5.c 			rbi->bi_end_io = raid5_end_write_request;
rbi              1167 drivers/md/raid5.c 			rbi->bi_private = sh;
rbi              1172 drivers/md/raid5.c 				rbi->bi_opf, i);
rbi              1177 drivers/md/raid5.c 				rbi->bi_iter.bi_sector = (sh->sector
rbi              1180 drivers/md/raid5.c 				rbi->bi_iter.bi_sector = (sh->sector
rbi              1185 drivers/md/raid5.c 			rbi->bi_vcnt = 1;
rbi              1186 drivers/md/raid5.c 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi              1187 drivers/md/raid5.c 			rbi->bi_io_vec[0].bv_offset = 0;
rbi              1188 drivers/md/raid5.c 			rbi->bi_iter.bi_size = STRIPE_SIZE;
rbi              1189 drivers/md/raid5.c 			rbi->bi_write_hint = sh->dev[i].write_hint;
rbi              1196 drivers/md/raid5.c 				rbi->bi_vcnt = 0;
rbi              1198 drivers/md/raid5.c 				trace_block_bio_remap(rbi->bi_disk->queue,
rbi              1199 drivers/md/raid5.c 						      rbi, disk_devt(conf->mddev->gendisk),
rbi              1202 drivers/md/raid5.c 				bio_list_add(&pending_bios, rbi);
rbi              1204 drivers/md/raid5.c 				generic_make_request(rbi);
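
In ops_run_io() above, rbi is the stripe's second bio (sh->dev[i].rreq): while a replacement drive is being rebuilt, each write is issued twice, to the original rdev and to the replacement, at the same device-relative sector. A rough userspace analogue, assuming two file descriptors and the usual 512-byte sector convention:

#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch only: mirror one stripe write to the replacement device
 * (fd_repl < 0 means no replacement is attached). */
static int write_stripe(int fd_primary, int fd_repl,
			const void *data, size_t len, uint64_t sector)
{
	off_t off = (off_t)sector * 512;

	if (pwrite(fd_primary, data, len, off) != (ssize_t)len)
		return -1;
	if (fd_repl >= 0 &&
	    pwrite(fd_repl, data, len, off) != (ssize_t)len)
		return -1;
	return 0;
}
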
rbi              1309 drivers/md/raid5.c 			struct bio *rbi, *rbi2;
rbi              1312 drivers/md/raid5.c 			rbi = dev->read;
rbi              1314 drivers/md/raid5.c 			while (rbi && rbi->bi_iter.bi_sector <
rbi              1316 drivers/md/raid5.c 				rbi2 = r5_next_bio(rbi, dev->sector);
rbi              1317 drivers/md/raid5.c 				bio_endio(rbi);
rbi              1318 drivers/md/raid5.c 				rbi = rbi2;
rbi              1341 drivers/md/raid5.c 			struct bio *rbi;
rbi              1343 drivers/md/raid5.c 			dev->read = rbi = dev->toread;
rbi              1346 drivers/md/raid5.c 			while (rbi && rbi->bi_iter.bi_sector <
rbi              1348 drivers/md/raid5.c 				tx = async_copy_data(0, rbi, &dev->page,
rbi              1350 drivers/md/raid5.c 				rbi = r5_next_bio(rbi, dev->sector);
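
The ops_complete_biofill()/ops_run_biofill() hits walk a sector-sorted chain of bios overlapping one stripe; note how r5_next_bio() is called to fetch the next link before bio_endio(), since completion may free the current bio. The generic shape, with illustrative types:

#include <stdint.h>

struct req {
	struct req *next;
	uint64_t sector;
};

/* Complete every request that overlaps [dev_sector,
 * dev_sector + stripe_sectors), grabbing 'next' before the
 * completion callback can free the current element. */
static void complete_overlapping(struct req *r, uint64_t dev_sector,
				 uint64_t stripe_sectors,
				 void (*endio)(struct req *))
{
	while (r && r->sector < dev_sector + stripe_sectors) {
		struct req *next = r->next;
		endio(r);
		r = next;
	}
}
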
rbi              1344 drivers/net/hyperv/netvsc.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi              1347 drivers/net/hyperv/netvsc.c 	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
rbi              1351 drivers/net/hyperv/netvsc.c 		hv_begin_read(rbi);
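
netvsc_poll() warms the cache line holding the next in-ring descriptor before parsing it, then masks host interrupts for the duration of the poll pass via hv_begin_read(). A small sketch of that entry sequence, using GCC's prefetch builtin as a stand-in for the kernel's prefetch():

#include <stdint.h>

struct polled_ring {			/* illustrative stand-in */
	volatile uint32_t interrupt_mask; /* visible to the producer */
	uint32_t read_index;
	uint8_t *data;
};

/* Prefetch the next descriptor's cache line, then raise
 * interrupt_mask so the host skips interrupts while we drain. */
static void poll_begin(struct polled_ring *r)
{
	__builtin_prefetch(r->data + r->read_index);
	r->interrupt_mask = 1;		/* analogue of hv_begin_read() */
}
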
rbi               575 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_buf_info *rbi;
rbi               578 drivers/net/vmxnet3/vmxnet3_drv.c 		rbi = rbi_base + ring->next2fill;
rbi               581 drivers/net/vmxnet3/vmxnet3_drv.c 		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
rbi               582 drivers/net/vmxnet3/vmxnet3_drv.c 			if (rbi->skb == NULL) {
rbi               583 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
rbi               584 drivers/net/vmxnet3/vmxnet3_drv.c 								       rbi->len,
rbi               586 drivers/net/vmxnet3/vmxnet3_drv.c 				if (unlikely(rbi->skb == NULL)) {
rbi               591 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->dma_addr = dma_map_single(
rbi               593 drivers/net/vmxnet3/vmxnet3_drv.c 						rbi->skb->data, rbi->len,
rbi               596 drivers/net/vmxnet3/vmxnet3_drv.c 						      rbi->dma_addr)) {
rbi               597 drivers/net/vmxnet3/vmxnet3_drv.c 					dev_kfree_skb_any(rbi->skb);
rbi               606 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
rbi               607 drivers/net/vmxnet3/vmxnet3_drv.c 			       rbi->len  != PAGE_SIZE);
rbi               609 drivers/net/vmxnet3/vmxnet3_drv.c 			if (rbi->page == NULL) {
rbi               610 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->page = alloc_page(GFP_ATOMIC);
rbi               611 drivers/net/vmxnet3/vmxnet3_drv.c 				if (unlikely(rbi->page == NULL)) {
rbi               615 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->dma_addr = dma_map_page(
rbi               617 drivers/net/vmxnet3/vmxnet3_drv.c 						rbi->page, 0, PAGE_SIZE,
rbi               620 drivers/net/vmxnet3/vmxnet3_drv.c 						      rbi->dma_addr)) {
rbi               621 drivers/net/vmxnet3/vmxnet3_drv.c 					put_page(rbi->page);
rbi               631 drivers/net/vmxnet3/vmxnet3_drv.c 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
rbi               633 drivers/net/vmxnet3/vmxnet3_drv.c 					   | val | rbi->len);
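
The vmxnet3 refill loop above allocates a buffer (skb or page), DMA-maps it, and only then publishes the address into the rx descriptor; on a mapping failure it frees the buffer and stops refilling rather than hand the device a bad address. A userspace model of that allocate/map/publish ordering (dma_map is an assumed callback standing in for dma_map_single()/dma_map_page()):

#include <stdlib.h>
#include <stdint.h>

struct rx_slot { void *buf; uint64_t dma_addr; };
struct rx_desc { uint64_t addr; uint32_t len; };

static int refill_one(struct rx_slot *slot, struct rx_desc *desc,
		      uint32_t len,
		      uint64_t (*dma_map)(void *, uint32_t))
{
	if (!slot->buf) {
		slot->buf = malloc(len);
		if (!slot->buf)
			return -1;	/* out of memory: retry later */
		slot->dma_addr = dma_map(slot->buf, len);
		if (!slot->dma_addr) {	/* mapping failed: roll back */
			free(slot->buf);
			slot->buf = NULL;
			return -1;
		}
	}
	desc->addr = slot->dma_addr;	/* publish only on success */
	desc->len  = len;
	return 0;
}
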
rbi               658 drivers/net/vmxnet3/vmxnet3_drv.c 		    struct vmxnet3_rx_buf_info *rbi)
rbi               664 drivers/net/vmxnet3/vmxnet3_drv.c 	__skb_frag_set_page(frag, rbi->page);
rbi              1296 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_buf_info *rbi;
rbi              1323 drivers/net/vmxnet3/vmxnet3_drv.c 		rbi = rq->buf_info[ring_idx] + idx;
rbi              1325 drivers/net/vmxnet3/vmxnet3_drv.c 		BUG_ON(rxd->addr != rbi->dma_addr ||
rbi              1326 drivers/net/vmxnet3/vmxnet3_drv.c 		       rxd->len != rbi->len);
rbi              1341 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
rbi              1342 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
rbi              1354 drivers/net/vmxnet3/vmxnet3_drv.c 			ctx->skb = rbi->skb;
rbi              1358 drivers/net/vmxnet3/vmxnet3_drv.c 			len = rxDataRingUsed ? rcd->len : rbi->len;
rbi              1382 drivers/net/vmxnet3/vmxnet3_drv.c 				ctx->skb = rbi->skb;
rbi              1386 drivers/net/vmxnet3/vmxnet3_drv.c 						       new_skb->data, rbi->len,
rbi              1403 drivers/net/vmxnet3/vmxnet3_drv.c 						 rbi->dma_addr,
rbi              1404 drivers/net/vmxnet3/vmxnet3_drv.c 						 rbi->len,
rbi              1408 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->skb = new_skb;
rbi              1409 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->dma_addr = new_dma_addr;
rbi              1410 drivers/net/vmxnet3/vmxnet3_drv.c 				rxd->addr = cpu_to_le64(rbi->dma_addr);
rbi              1411 drivers/net/vmxnet3/vmxnet3_drv.c 				rxd->len = rbi->len;
rbi              1440 drivers/net/vmxnet3/vmxnet3_drv.c 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
rbi              1478 drivers/net/vmxnet3/vmxnet3_drv.c 					       rbi->dma_addr, rbi->len,
rbi              1481 drivers/net/vmxnet3/vmxnet3_drv.c 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
rbi              1484 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->page = new_page;
rbi              1485 drivers/net/vmxnet3/vmxnet3_drv.c 				rbi->dma_addr = new_dma_addr;
rbi              1486 drivers/net/vmxnet3/vmxnet3_drv.c 				rxd->addr = cpu_to_le64(rbi->dma_addr);
rbi              1487 drivers/net/vmxnet3/vmxnet3_drv.c 				rxd->len = rbi->len;
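
The completion-path hits show the buffer-swap pattern: a replacement skb/page is allocated and mapped first; only if both steps succeed is the filled buffer unmapped and handed up the stack, with the replacement installed in the ring slot. On failure the packet is dropped and the old buffer recycled, so the ring never loses a slot. A sketch, repeating the rx_slot/rx_desc types from the refill sketch:

#include <stdlib.h>
#include <stdint.h>

struct rx_slot { void *buf; uint64_t dma_addr; };
struct rx_desc { uint64_t addr; uint32_t len; };

/* Returns the filled buffer to pass up the stack, or NULL if the
 * packet must be dropped and the old buffer reused. */
static void *rx_complete_one(struct rx_slot *slot, struct rx_desc *desc,
			     uint32_t len,
			     uint64_t (*dma_map)(void *, uint32_t))
{
	void *new_buf = malloc(len);
	uint64_t new_dma;
	void *filled;

	if (!new_buf)
		return NULL;		/* drop, recycle old buffer */
	new_dma = dma_map(new_buf, len);
	if (!new_dma) {
		free(new_buf);
		return NULL;
	}
	filled = slot->buf;		/* this one goes up the stack */
	slot->buf = new_buf;
	slot->dma_addr = new_dma;
	desc->addr = new_dma;		/* re-arm the descriptor */
	desc->len = len;
	return filled;
}
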
rbi               139 include/linux/hyperv.h static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
rbi               143 include/linux/hyperv.h 	dsize = rbi->ring_datasize;
rbi               144 include/linux/hyperv.h 	read_loc = rbi->ring_buffer->read_index;
rbi               145 include/linux/hyperv.h 	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
rbi               153 include/linux/hyperv.h static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
rbi               157 include/linux/hyperv.h 	dsize = rbi->ring_datasize;
rbi               158 include/linux/hyperv.h 	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
rbi               159 include/linux/hyperv.h 	write_loc = rbi->ring_buffer->write_index;
rbi               167 include/linux/hyperv.h 		const struct hv_ring_buffer_info *rbi)
rbi               169 include/linux/hyperv.h 	u32 avail_write = hv_get_bytes_to_write(rbi);
rbi               173 include/linux/hyperv.h 			rbi->ring_size_div10_reciprocal);
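
The hyperv.h helpers repeat the ring arithmetic with one asymmetry worth noting: each side reads its own index with a plain load but the peer-owned index through READ_ONCE(). The percent helper then forms avail * 10 as (avail << 3) + (avail << 1) and divides by ring_size/10 via a precomputed reciprocal, so the hot path never executes a hardware divide. Both in miniature (the reciprocal here is the simplified form of what linux/reciprocal_div.h provides, and is approximate):

#include <stdint.h>
#include <stdatomic.h>

/* Peer-owned index: loaded once, relaxed (the C11 spelling of
 * READ_ONCE); our own index needs no such care. */
static uint32_t bytes_to_read(uint32_t dsize, uint32_t read_index,
			      _Atomic uint32_t *write_index)
{
	uint32_t w = atomic_load_explicit(write_index,
					  memory_order_relaxed);
	return w >= read_index ? w - read_index
			       : dsize - read_index + w;
}

/* Precompute ~2^32/d once (d would be ring_size/10). */
static uint64_t recip_prepare(uint32_t d)
{
	return (((uint64_t)1 << 32) + d - 1) / d;
}

static uint32_t avail_write_percent(uint32_t avail_write, uint64_t m)
{
	/* avail * 10 by shifts, then multiply-and-shift instead of
	 * dividing by ring_size/10: yields a percentage. */
	uint32_t a10 = (avail_write << 3) + (avail_write << 1);
	return (uint32_t)(((uint64_t)a10 * m) >> 32);
}
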
rbi               483 include/linux/hyperv.h hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
rbi               485 include/linux/hyperv.h 	return rbi->ring_buffer->pending_send_sz;
rbi              1521 include/linux/hyperv.h static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
rbi              1523 include/linux/hyperv.h 	rbi->ring_buffer->interrupt_mask = 1;
rbi              1532 include/linux/hyperv.h static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
rbi              1535 include/linux/hyperv.h 	rbi->ring_buffer->interrupt_mask = 0;
rbi              1545 include/linux/hyperv.h 	return hv_get_bytes_to_read(rbi);
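
hv_begin_read()/hv_end_read() are the two ends of the interrupt-mask protocol: mask before draining, unmask when done, then re-check behind a barrier for bytes that raced in before the unmask became visible; a non-zero return from hv_end_read() tells the caller to keep polling, otherwise a packet could sit in the ring with no interrupt ever delivered. A self-contained sketch:

#include <stdint.h>

struct masked_ring {			/* minimal stand-in */
	volatile uint32_t interrupt_mask;
	uint32_t read_index;
	volatile uint32_t write_index;	/* producer-owned */
	uint32_t dsize;
};

static uint32_t avail_to_read(struct masked_ring *r)
{
	uint32_t w = r->write_index;

	return w >= r->read_index ? w - r->read_index
				  : r->dsize - r->read_index + w;
}

static void begin_read(struct masked_ring *r)
{
	r->interrupt_mask = 1;		/* producer: skip interrupts */
}

/* Unmask, then re-check: a producer that saw the mask set just
 * before we cleared it sends no interrupt, so any bytes it wrote
 * must be discovered here.  Non-zero means keep polling. */
static uint32_t end_read(struct masked_ring *r)
{
	r->interrupt_mask = 0;
	__sync_synchronize();		/* stands in for virt_mb() */
	return avail_to_read(r);
}
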
rbi               802 kernel/rcu/rcutorture.c 	struct rcu_boost_inflight rbi = { .inflight = 0 };
rbi               814 kernel/rcu/rcutorture.c 	init_rcu_head_on_stack(&rbi.rcu);
rbi               846 kernel/rcu/rcutorture.c 			if (!smp_load_acquire(&rbi.inflight)) {
rbi               848 kernel/rcu/rcutorture.c 				smp_store_release(&rbi.inflight, 1);
rbi               849 kernel/rcu/rcutorture.c 				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
rbi               866 kernel/rcu/rcutorture.c 		if (!failed && smp_load_acquire(&rbi.inflight))
rbi               892 kernel/rcu/rcutorture.c 	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
rbi               896 kernel/rcu/rcutorture.c 	destroy_rcu_head_on_stack(&rbi.rcu);
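
In rcutorture, rbi.inflight is an acquire/release flag guarding an on-stack rcu_head: the boost thread sets it with smp_store_release() before call_rcu(), the callback clears it the same way, and every reader uses smp_load_acquire(), so observing 0 makes it safe to reuse the head, and, at thread exit, to destroy it. A C11-atomics model of that flag:

#include <stdatomic.h>
#include <stdbool.h>

struct boost_inflight {
	atomic_int inflight;		/* 1 while a callback is queued */
};

static bool try_queue(struct boost_inflight *b)
{
	if (atomic_load_explicit(&b->inflight, memory_order_acquire))
		return false;		/* previous callback still out */
	atomic_store_explicit(&b->inflight, 1, memory_order_release);
	/* ... hand the record to the callback machinery here ... */
	return true;
}

/* Runs in callback context once the grace period has elapsed;
 * the release store pairs with the acquire loads above. */
static void boost_cb_done(struct boost_inflight *b)
{
	atomic_store_explicit(&b->inflight, 0, memory_order_release);
}
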