rbdr               93 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 					       struct rbdr *rbdr, gfp_t gfp)
rbdr              100 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	pgcache = &rbdr->pgcache[rbdr->pgidx];
rbdr              110 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (rbdr->is_xdp) {
rbdr              128 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (rbdr->pgalloc >= rbdr->pgcnt) {
rbdr              138 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		rbdr->pgalloc++;
rbdr              142 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (rbdr->is_xdp) {
rbdr              162 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgidx++;
rbdr              163 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgidx &= (rbdr->pgcnt - 1);
rbdr              166 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	next = &rbdr->pgcache[rbdr->pgidx];
rbdr              175 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
rbdr              183 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr->is_xdp && nic->rb_page &&
rbdr              193 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
rbdr              202 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (rbdr->is_xdp)
rbdr              209 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
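
The nicvf_alloc_page()/nicvf_alloc_rcv_buffer() hits above are the driver's page-recycling path: each RBDR keeps an array of struct pgcache slots indexed by pgidx, a cached page is reused only when its reference count shows no other users (with separate accounting when the ring is in XDP mode), and the index then wraps with a power-of-two mask. Below is a minimal standalone sketch of that recycle-or-allocate step; the simplified types, malloc() in place of alloc_pages(), and the ref_count check are illustrative stand-ins, not the driver's code.

#include <stdlib.h>

/* Simplified stand-ins for the driver's page-cache bookkeeping. */
struct pgcache_slot {
	void *page;      /* cached buffer page, NULL if slot unused */
	int ref_count;   /* outstanding users of this page */
};

struct page_cache {
	struct pgcache_slot *slot;
	unsigned int pgidx;   /* next slot to try for recycling */
	unsigned int pgcnt;   /* number of slots, power of two */
	unsigned int pgalloc; /* slots populated so far */
};

/* Reuse the current slot's page if nothing still references it,
 * otherwise allocate a fresh page; then advance pgidx with a mask,
 * mirroring "rbdr->pgidx &= (rbdr->pgcnt - 1)" in the listing.
 */
static struct pgcache_slot *cache_get_page(struct page_cache *c)
{
	struct pgcache_slot *slot = &c->slot[c->pgidx];

	if (!slot->page || slot->ref_count > 0) {
		void *page = malloc(4096);

		if (!page)
			return NULL;
		/* A still-busy page stays with whoever holds it;
		 * the slot is simply repopulated with the new page.
		 */
		slot->page = page;
		slot->ref_count = 0;
		if (c->pgalloc < c->pgcnt)
			c->pgalloc++;
	}

	c->pgidx = (c->pgidx + 1) & (c->pgcnt - 1);
	return slot;
}
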
rbdr              252 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr              260 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
rbdr              266 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->desc = rbdr->dmem.base;
rbdr              268 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->dma_size = buf_size;
rbdr              269 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->enable = true;
rbdr              270 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->thresh = RBDR_THRESH;
rbdr              271 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->head = 0;
rbdr              272 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->tail = 0;
rbdr              284 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
rbdr              285 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		rbdr->is_xdp = false;
rbdr              287 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		rbdr->pgcnt = ring_len;
rbdr              288 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		rbdr->is_xdp = true;
rbdr              290 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
rbdr              291 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
rbdr              293 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr->pgcache)
rbdr              295 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgidx = 0;
rbdr              296 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->pgalloc = 0;
rbdr              300 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
rbdr              304 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			rbdr->tail = idx - 1;
rbdr              308 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		desc = GET_RBDR_DESC(rbdr, idx);
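
During nicvf_init_rbdr() (hits around nicvf_queues.c:284-291 above) the page-cache size is derived from the ring: without XDP several receive buffers are carved out of each page, so only ring_len / (PAGE_SIZE / buf_size) slots are needed, while with XDP every descriptor gets its own page; the count is then rounded up to a power of two so pgidx can wrap with a simple mask. A rough standalone illustration of that sizing, with a 4 KB page size and a local rounding helper standing in for roundup_pow_of_two():

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Number of page-cache slots for a ring, per the listing:
 * several buffers share a page in the normal case, one page
 * per descriptor in XDP mode, rounded up to a power of two.
 */
static unsigned int rbdr_pgcnt(unsigned int ring_len,
			       unsigned int buf_size, int is_xdp)
{
	unsigned int pgcnt;

	if (!is_xdp)
		pgcnt = ring_len / (DEMO_PAGE_SIZE / buf_size);
	else
		pgcnt = ring_len;

	return roundup_pow2(pgcnt);
}

int main(void)
{
	/* 8192-entry ring, 2048-byte buffers, no XDP:
	 * two buffers per 4 KB page -> 4096 slots.
	 */
	printf("%u\n", rbdr_pgcnt(8192, 2048, 0));
	return 0;
}
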
rbdr              318 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
rbdr              325 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr)
rbdr              328 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->enable = false;
rbdr              329 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr->dmem.base)
rbdr              332 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	head = rbdr->head;
rbdr              333 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	tail = rbdr->tail;
rbdr              337 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		desc = GET_RBDR_DESC(rbdr, head);
rbdr              345 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		head &= (rbdr->dmem.q_len - 1);
rbdr              348 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	desc = GET_RBDR_DESC(rbdr, tail);
rbdr              361 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	while (head < rbdr->pgcnt) {
rbdr              362 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		pgcache = &rbdr->pgcache[head];
rbdr              364 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			if (rbdr->is_xdp) {
rbdr              374 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
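
nicvf_free_rbdr() (hits around nicvf_queues.c:318-374) walks the descriptors from the saved head up to the tail, unmapping and releasing each buffer, then releases the tail entry separately and finally drops whatever pages are still parked in the page cache. The index wraps by masking with dmem.q_len - 1, so the queue length must be a power of two. A self-contained version of that head-to-tail traversal, with a placeholder release_buf() standing in for the driver's dma_unmap_page_attrs() + put_page() pair:

#include <stdio.h>

struct demo_desc {
	unsigned long buf_addr; /* stands in for the descriptor's DMA address */
};

static void release_buf(struct demo_desc *d)
{
	/* In the driver this is dma_unmap_page_attrs() + put_page(). */
	printf("release %#lx\n", d->buf_addr);
	d->buf_addr = 0;
}

/* Release every buffer from head up to and including tail, wrapping
 * the index with q_len - 1 (q_len is a power of two).
 */
static void free_ring_bufs(struct demo_desc *ring, unsigned int q_len,
			   unsigned int head, unsigned int tail)
{
	while (head != tail) {
		release_buf(&ring[head]);
		head = (head + 1) & (q_len - 1);
	}
	release_buf(&ring[tail]);
}

int main(void)
{
	struct demo_desc ring[8] = {
		{0x10}, {0x11}, {0x12}, {0x13},
		{0x14}, {0x15}, {0x16}, {0x17},
	};

	free_ring_bufs(ring, 8, 6, 2); /* releases 6, 7, 0, 1, then tail 2 */
	return 0;
}
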
rbdr              385 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct rbdr *rbdr;
rbdr              394 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr = &qs->rbdr[rbdr_idx];
rbdr              396 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rbdr->enable)
rbdr              415 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		tail &= (rbdr->dmem.q_len - 1);
rbdr              417 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
rbdr              420 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		desc = GET_RBDR_DESC(rbdr, tail);
rbdr              442 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!nic->rb_alloc_fail && rbdr->enable &&
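
The refill path (nicvf_refill_rbdr(), hits around nicvf_queues.c:385-442) reads the hardware tail, allocates a receive buffer for each empty descriptor while advancing the tail with the same power-of-two mask, rings the doorbell with the number actually filled, and reschedules itself if allocations failed while the ring is still enabled. A simplified standalone sketch of that fill-from-tail loop; alloc_buf() is a stand-in for nicvf_alloc_rcv_buffer(), which can fail under memory pressure:

#include <stdio.h>

struct demo_desc {
	unsigned long buf_addr;
};

/* Stand-in buffer allocator; returns 0 on success. */
static int alloc_buf(unsigned long *addr)
{
	static unsigned long next = 0x1000;

	*addr = next;
	next += 0x1000;
	return 0;
}

/* Fill up to 'count' descriptors starting just past 'tail', wrapping
 * with a power-of-two mask, and return how many were filled; the
 * driver rings the doorbell with that number.
 */
static unsigned int refill_from_tail(struct demo_desc *ring,
				     unsigned int q_len,
				     unsigned int tail, unsigned int count)
{
	unsigned int filled = 0;
	unsigned long addr;

	while (count--) {
		if (alloc_buf(&addr))
			break;
		tail = (tail + 1) & (q_len - 1);
		ring[tail].buf_addr = addr;
		filled++;
	}
	return filled;
}

int main(void)
{
	struct demo_desc ring[8] = {{ 0 }};
	unsigned int n = refill_from_tail(ring, 8, 5, 4); /* fills 6, 7, 0, 1 */

	printf("filled %u descriptors\n", n);
	return 0;
}
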
rbdr              660 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			       struct rbdr *rbdr, int qidx)
rbdr              666 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->head = nicvf_queue_reg_read(nic,
rbdr              669 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr->tail = nicvf_queue_reg_read(nic,
rbdr              919 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct rbdr *rbdr;
rbdr              922 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr = &qs->rbdr[qidx];
rbdr              923 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	nicvf_reclaim_rbdr(nic, rbdr, qidx);
rbdr              929 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			      qidx, (u64)(rbdr->dmem.phys_base));
rbdr              939 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rbdr_cfg.lines = rbdr->dma_size / 128;
rbdr              949 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			      qidx, rbdr->thresh - 1);
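
nicvf_rbdr_config() (hits around nicvf_queues.c:919-949) first reclaims the ring via nicvf_reclaim_rbdr(), which saves the hardware head and tail pointers, then programs the descriptor ring's DMA base address, the buffer size expressed in 128-byte lines (rbdr->dma_size / 128), and an interrupt threshold of thresh - 1. A tiny arithmetic illustration of that size encoding; only the 128-byte unit comes from the listing, the example value is arbitrary:

#include <stdio.h>

/* The hardware field holds the receive-buffer size in 128-byte lines,
 * matching "rbdr_cfg.lines = rbdr->dma_size / 128" in the listing.
 */
static unsigned int rbdr_size_lines(unsigned int dma_size)
{
	return dma_size / 128;
}

int main(void)
{
	printf("%u\n", rbdr_size_lines(2048)); /* 2048-byte buffer -> 16 lines */
	return 0;
}
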
rbdr              995 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
rbdr             1013 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
rbdr              241 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 	struct	rbdr	*rbdr_start;
rbdr              242 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 	struct	rbdr	*rbdr_cont;
rbdr              302 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
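
The header hits show how the pieces relate: a receive queue only points at the RBDR supplying its first-buffer pointers (rbdr_start) and the one supplying continuation buffers (rbdr_cont), while the queue set owns the RBDR instances themselves. A condensed sketch of that layout; the struct rbdr fields shown are the ones referenced throughout the listing (enable, is_xdp, dma_size, thresh, head, tail, pgcache, pgidx, pgcnt, pgalloc), everything else, including the array bound value, is elided or assumed:

#include <stdbool.h>

#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 /* illustrative value */

struct pgcache; /* per-page recycling bookkeeping */

/* Condensed view of the ring state referenced throughout the listing. */
struct rbdr {
	bool enable;
	bool is_xdp;
	unsigned int dma_size;          /* receive buffer size */
	unsigned int thresh;            /* interrupt threshold */
	unsigned int head, tail;
	struct pgcache *pgcache;        /* page-recycling slots */
	unsigned int pgidx, pgcnt, pgalloc;
	/* ... descriptor memory (dmem), desc pointer, etc. elided ... */
};

/* A receive queue only points at RBDRs... */
struct rcv_queue {
	struct rbdr *rbdr_start; /* ring holding first-buffer pointers */
	struct rbdr *rbdr_cont;  /* ring holding continuation buffers */
	/* ... other fields elided ... */
};

/* ...while the queue set owns the RBDR instances. */
struct queue_set {
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
	/* ... rq/sq/cq arrays elided ... */
};
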