rb_page           296 drivers/net/ethernet/cavium/thunder/nic.h 	struct page		*rb_page;
rb_page            23 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!nic->rb_pageref || !nic->rb_page)
rb_page            26 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	page_ref_add(nic->rb_page, nic->rb_pageref);
rb_page           130 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nic->rb_page = page;
rb_page           183 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr->is_xdp && nic->rb_page &&
rb_page           190 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nic->rb_page = NULL;
rb_page           194 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!pgcache && !nic->rb_page) {
rb_page           207 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nic->rb_page = pgcache->page;
rb_page           213 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
rb_page           219 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 				__free_pages(nic->rb_page, 0);
rb_page           220 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			nic->rb_page = NULL;
rb_page           298 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nic->rb_page = NULL;
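
In the thunder nicvf driver hits above, nic->rb_page is a cached struct page pointer that the receive buffer ring (RBDR) refill path carves buffers out of; rather than taking one page reference per carved buffer, the driver counts pending references in rb_pageref and applies them in a single page_ref_add() in the check at lines 23-26 of nicvf_queues.c. Below is a minimal user-space sketch of that deferred-refcount pattern; the struct and function names (fake_page, fake_nic, fake_get_page, fake_alloc_rcv_buffer) are stand-ins for illustration, not the driver's own symbols.

    /*
     * User-space model of the batched page-refcount pattern shown above:
     * remember how many references a page still owes in rb_pageref, then
     * apply them all at once instead of bumping the count per buffer.
     * All names here are hypothetical; only the pattern mirrors the driver.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_page {                /* stand-in for struct page */
    	int refcount;
    };

    struct fake_nic {                 /* stand-in for the driver state */
    	struct fake_page *rb_page;    /* page currently being carved up */
    	unsigned int rb_pageref;      /* refs owed but not yet applied  */
    };

    /* Mirrors the "flush pending references" step (cf. lines 23-26 above). */
    static void fake_get_page(struct fake_nic *nic)
    {
    	if (!nic->rb_pageref || !nic->rb_page)
    		return;

    	nic->rb_page->refcount += nic->rb_pageref;  /* one batched add */
    	nic->rb_pageref = 0;
    }

    /* Carve one receive buffer out of the cached page, deferring the ref. */
    static void fake_alloc_rcv_buffer(struct fake_nic *nic)
    {
    	if (!nic->rb_page) {
    		nic->rb_page = calloc(1, sizeof(*nic->rb_page));
    		if (!nic->rb_page)
    			abort();
    		nic->rb_page->refcount = 1;
    	}
    	nic->rb_pageref++;   /* note the ref instead of taking it now */
    }

    int main(void)
    {
    	struct fake_nic nic = { 0 };

    	for (int i = 0; i < 4; i++)
    		fake_alloc_rcv_buffer(&nic);
    	fake_get_page(&nic);  /* refcount jumps by 4 in a single update */

    	printf("refcount = %d, pending = %u\n",
    	       nic.rb_page->refcount, nic.rb_pageref);
    	free(nic.rb_page);
    	return 0;
    }

The payoff of this design is fewer atomic operations on the page refcount in the hot refill path, which is why the driver also resets nic->rb_page to NULL (lines 190, 220, 298 above) whenever the cached page can no longer be carved further.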
rb_page           108 kernel/trace/ring_buffer_benchmark.c 	struct rb_page *rpage;
rb_page           127 kernel/trace/ring_buffer_benchmark.c 			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
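
The ring_buffer_benchmark.c hits refer to a different rb_page: a local struct the benchmark uses to interpret a page handed back by the ring buffer when reading a full page of events. The bound at line 127 stops the event walk once the offset reaches the usable data area, i.e. PAGE_SIZE minus the page header computed with offsetof(struct rb_page, data). The sketch below shows that offsetof() arithmetic with a stand-in layout; the header fields are illustrative, not the exact kernel definition.

    /*
     * Sketch of the bound used at line 127 above: the usable event area of a
     * ring-buffer page is PAGE_SIZE minus the header that precedes data[].
     * struct fake_rb_page is a hypothetical stand-in for the benchmark's
     * struct rb_page, and PAGE_SIZE is assumed to be 4096 for the example.
     */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096

    struct fake_rb_page {
    	uint64_t ts;        /* page timestamp                 */
    	uint64_t commit;    /* committed length on the page   */
    	char     data[];    /* event payload starts here      */
    };

    int main(void)
    {
    	size_t header = offsetof(struct fake_rb_page, data);
    	size_t usable = PAGE_SIZE - header;

    	/* A walker over data[] stays in bounds only while i < usable. */
    	printf("header bytes = %zu, usable data bytes per page = %zu\n",
    	       header, usable);
    	return 0;
    }

Using offsetof() here keeps the bound correct even if the header layout changes, since the benchmark never hard-codes the header size.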