ring             1351 arch/um/drivers/vector_kern.c 				struct ethtool_ringparam *ring)
ring             1355 arch/um/drivers/vector_kern.c 	ring->rx_max_pending = vp->rx_queue->max_depth;
ring             1356 arch/um/drivers/vector_kern.c 	ring->tx_max_pending = vp->tx_queue->max_depth;
ring             1357 arch/um/drivers/vector_kern.c 	ring->rx_pending = vp->rx_queue->max_depth;
ring             1358 arch/um/drivers/vector_kern.c 	ring->tx_pending = vp->tx_queue->max_depth;
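The vector_kern.c hits above come from the driver's ethtool get_ringparam callback: the maximum and the currently configured depths are all reported from the queues' max_depth, so the rings are effectively fixed-size. A minimal userspace sketch of that reporting pattern; the ringparam/queue/vec_priv types are hypothetical stand-ins for struct ethtool_ringparam and the driver's private queue bookkeeping:

#include <stdio.h>

/* Hypothetical stand-ins for struct ethtool_ringparam and the
 * driver's private RX/TX queue state. */
struct ringparam { unsigned rx_max_pending, tx_max_pending,
		   rx_pending, tx_pending; };
struct queue    { unsigned max_depth; };
struct vec_priv { struct queue rx_queue, tx_queue; };

/* Mirrors the listing: current depth == maximum depth, because the
 * ring size is fixed once the device is set up. */
static void get_ringparam(const struct vec_priv *vp, struct ringparam *ring)
{
	ring->rx_max_pending = vp->rx_queue.max_depth;
	ring->tx_max_pending = vp->tx_queue.max_depth;
	ring->rx_pending = vp->rx_queue.max_depth;
	ring->tx_pending = vp->tx_queue.max_depth;
}

int main(void)
{
	struct vec_priv vp = { .rx_queue = { 256 }, .tx_queue = { 64 } };
	struct ringparam rp;

	get_ringparam(&vp, &rp);
	printf("rx %u/%u tx %u/%u\n", rp.rx_pending, rp.rx_max_pending,
	       rp.tx_pending, rp.tx_max_pending);
	return 0;
}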
ring              371 drivers/ata/libata-eh.c 	ent = &ering->ring[ering->cursor];
ring              379 drivers/ata/libata-eh.c 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
ring              395 drivers/ata/libata-eh.c 		ent = &ering->ring[idx];
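The libata-eh.c lines show the ATA error ring: a fixed array of entries addressed through a cursor that always points at the most recent record. A small self-contained model of that cursor discipline, with ERING_SIZE as an assumed stand-in for the kernel's ATA_ERING_SIZE:

#include <stdio.h>

#define ERING_SIZE 32  /* assumed; the kernel uses ATA_ERING_SIZE */

struct ent { int err; unsigned long when; };

struct ering {
	int cursor;                   /* index of the most recent entry */
	struct ent ring[ERING_SIZE];
};

/* Record an error: advance the cursor with wraparound, then fill the
 * slot it now points at, mirroring `ent = &ering->ring[ering->cursor]`. */
static struct ent *ering_record(struct ering *e, int err, unsigned long now)
{
	struct ent *ent;

	e->cursor++;
	e->cursor %= ERING_SIZE;
	ent = &e->ring[e->cursor];
	ent->err = err;
	ent->when = now;
	return ent;
}

int main(void)
{
	struct ering e = { .cursor = ERING_SIZE - 1 };

	ering_record(&e, -5, 100);   /* wraps the cursor back to slot 0 */
	printf("latest err %d at slot %d\n", e.ring[e.cursor].err, e.cursor);
	return 0;
}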
ring              653 drivers/atm/zatm.c 		dsc = zatm_vcc->ring+zatm_vcc->ring_curr;
ring              911 drivers/atm/zatm.c 	kfree(zatm_vcc->ring);
ring              959 drivers/atm/zatm.c 	zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
ring              960 drivers/atm/zatm.c 	if (!zatm_vcc->ring) return -ENOMEM;
ring              961 drivers/atm/zatm.c 	loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
ring              964 drivers/atm/zatm.c 	loop[3] = virt_to_bus(zatm_vcc->ring);
ring              968 drivers/atm/zatm.c 	zpokel(zatm_dev,virt_to_bus(zatm_vcc->ring),
ring               52 drivers/atm/zatm.h 	u32 *ring;			/* transmit ring */
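In zatm.c the transmit ring is a flat u32 array: RING_ENTRIES descriptors of RING_WORDS words each, plus one trailing "loop" descriptor whose word 3 holds the bus address of the ring base, so hardware that follows the descriptor chain wraps around automatically. A userspace sketch of that layout, with assumed geometry constants and a plain pointer cast standing in for virt_to_bus():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Assumed geometry; the driver derives RING_SIZE from its own values. */
#define RING_ENTRIES 32
#define RING_WORDS   4
#define RING_SIZE    (sizeof(uint32_t) * (RING_ENTRIES + 1) * RING_WORDS)

/* Allocate the ring plus one extra loop descriptor pointing back at
 * the base.  The truncating cast is only a model of the 32-bit bus
 * address the driver stores via virt_to_bus(). */
static uint32_t *tx_ring_alloc(void)
{
	uint32_t *ring = calloc(1, RING_SIZE);
	uint32_t *loop;

	if (!ring)
		return NULL;
	loop = ring + RING_ENTRIES * RING_WORDS;
	loop[3] = (uint32_t)(uintptr_t)ring;  /* back-pointer to ring start */
	return ring;
}

int main(void)
{
	uint32_t *ring = tx_ring_alloc();

	printf("loop descriptor points back at base: %d\n",
	       ring && ring[RING_ENTRIES * RING_WORDS + 3] ==
		       (uint32_t)(uintptr_t)ring);
	free(ring);
	return 0;
}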
ring              145 drivers/block/xen-blkback/blkback.c static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
ring              149 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->free_pages_lock, flags);
ring              150 drivers/block/xen-blkback/blkback.c 	if (list_empty(&ring->free_pages)) {
ring              151 drivers/block/xen-blkback/blkback.c 		BUG_ON(ring->free_pages_num != 0);
ring              152 drivers/block/xen-blkback/blkback.c 		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
ring              155 drivers/block/xen-blkback/blkback.c 	BUG_ON(ring->free_pages_num == 0);
ring              156 drivers/block/xen-blkback/blkback.c 	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
ring              158 drivers/block/xen-blkback/blkback.c 	ring->free_pages_num--;
ring              159 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
ring              164 drivers/block/xen-blkback/blkback.c static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
ring              170 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->free_pages_lock, flags);
ring              172 drivers/block/xen-blkback/blkback.c 		list_add(&page[i]->lru, &ring->free_pages);
ring              173 drivers/block/xen-blkback/blkback.c 	ring->free_pages_num += num;
ring              174 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
ring              177 drivers/block/xen-blkback/blkback.c static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
ring              184 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->free_pages_lock, flags);
ring              185 drivers/block/xen-blkback/blkback.c 	while (ring->free_pages_num > num) {
ring              186 drivers/block/xen-blkback/blkback.c 		BUG_ON(list_empty(&ring->free_pages));
ring              187 drivers/block/xen-blkback/blkback.c 		page[num_pages] = list_first_entry(&ring->free_pages,
ring              190 drivers/block/xen-blkback/blkback.c 		ring->free_pages_num--;
ring              192 drivers/block/xen-blkback/blkback.c 			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
ring              194 drivers/block/xen-blkback/blkback.c 			spin_lock_irqsave(&ring->free_pages_lock, flags);
ring              198 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
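The first block of blkback.c hits is the per-ring free page pool: a counted list guarded by free_pages_lock, with get/put operations and a shrinker that frees pages down to a target. A simplified userspace model follows; a pthread mutex stands in for spin_lock_irqsave() and a malloc'd node for struct page (the kernel also drops the lock periodically inside the shrink loop and batches the frees, which this sketch omits):

#include <pthread.h>
#include <stdlib.h>

struct page { struct page *next; };

struct pool {
	pthread_mutex_t lock;
	struct page *free_list;
	unsigned free_num;
};

static int pool_get(struct pool *p, struct page **page)
{
	pthread_mutex_lock(&p->lock);
	if (!p->free_list) {             /* empty: caller must allocate */
		pthread_mutex_unlock(&p->lock);
		return 1;
	}
	*page = p->free_list;
	p->free_list = (*page)->next;
	p->free_num--;
	pthread_mutex_unlock(&p->lock);
	return 0;
}

static void pool_put(struct pool *p, struct page *page)
{
	pthread_mutex_lock(&p->lock);
	page->next = p->free_list;
	p->free_list = page;
	p->free_num++;
	pthread_mutex_unlock(&p->lock);
}

/* Free pages until at most `num` remain, like shrink_free_pagepool(). */
static void pool_shrink(struct pool *p, unsigned num)
{
	struct page *page;

	pthread_mutex_lock(&p->lock);
	while (p->free_num > num) {
		page = p->free_list;
		p->free_list = page->next;
		p->free_num--;
		free(page);
	}
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
	struct page *pg;

	pool_put(&p, malloc(sizeof(*pg)));
	if (!pool_get(&p, &pg))
		pool_put(&p, pg);
	pool_shrink(&p, 0);
	return 0;
}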
ring              205 drivers/block/xen-blkback/blkback.c static int do_block_io_op(struct xen_blkif_ring *ring);
ring              206 drivers/block/xen-blkback/blkback.c static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
ring              209 drivers/block/xen-blkback/blkback.c static void make_response(struct xen_blkif_ring *ring, u64 id,
ring              230 drivers/block/xen-blkback/blkback.c static int add_persistent_gnt(struct xen_blkif_ring *ring,
ring              235 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring              237 drivers/block/xen-blkback/blkback.c 	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
ring              243 drivers/block/xen-blkback/blkback.c 	new = &ring->persistent_gnts.rb_node;
ring              261 drivers/block/xen-blkback/blkback.c 	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
ring              262 drivers/block/xen-blkback/blkback.c 	ring->persistent_gnt_c++;
ring              263 drivers/block/xen-blkback/blkback.c 	atomic_inc(&ring->persistent_gnt_in_use);
ring              267 drivers/block/xen-blkback/blkback.c static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
ring              273 drivers/block/xen-blkback/blkback.c 	node = ring->persistent_gnts.rb_node;
ring              287 drivers/block/xen-blkback/blkback.c 			atomic_inc(&ring->persistent_gnt_in_use);
ring              294 drivers/block/xen-blkback/blkback.c static void put_persistent_gnt(struct xen_blkif_ring *ring,
ring              301 drivers/block/xen-blkback/blkback.c 	atomic_dec(&ring->persistent_gnt_in_use);
ring              304 drivers/block/xen-blkback/blkback.c static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
ring              335 drivers/block/xen-blkback/blkback.c 			put_free_pages(ring, pages, segs_to_unmap);
ring              352 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
ring              359 drivers/block/xen-blkback/blkback.c 	while(!list_empty(&ring->persistent_purge_list)) {
ring              360 drivers/block/xen-blkback/blkback.c 		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
ring              375 drivers/block/xen-blkback/blkback.c 			put_free_pages(ring, pages, segs_to_unmap);
ring              383 drivers/block/xen-blkback/blkback.c 		put_free_pages(ring, pages, segs_to_unmap);
ring              387 drivers/block/xen-blkback/blkback.c static void purge_persistent_gnt(struct xen_blkif_ring *ring)
ring              395 drivers/block/xen-blkback/blkback.c 	if (work_busy(&ring->persistent_purge_work)) {
ring              400 drivers/block/xen-blkback/blkback.c 	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
ring              401 drivers/block/xen-blkback/blkback.c 	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
ring              402 drivers/block/xen-blkback/blkback.c 	    !ring->blkif->vbd.overflow_max_grants)) {
ring              406 drivers/block/xen-blkback/blkback.c 		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
ring              408 drivers/block/xen-blkback/blkback.c 		num_clean = min(ring->persistent_gnt_c, num_clean);
ring              424 drivers/block/xen-blkback/blkback.c 	BUG_ON(!list_empty(&ring->persistent_purge_list));
ring              425 drivers/block/xen-blkback/blkback.c 	root = &ring->persistent_gnts;
ring              440 drivers/block/xen-blkback/blkback.c 			 &ring->persistent_purge_list);
ring              455 drivers/block/xen-blkback/blkback.c 		ring->persistent_gnt_c -= total;
ring              456 drivers/block/xen-blkback/blkback.c 		ring->blkif->vbd.overflow_max_grants = 0;
ring              459 drivers/block/xen-blkback/blkback.c 		schedule_work(&ring->persistent_purge_work);
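purge_persistent_gnt() above only fires once the persistent-grant cache is full (or has overflowed): it then reclaims the overshoot past xen_blkif_max_pgrants plus a fixed percentage of the limit, clamped to what is actually cached. A sketch of just that sizing rule, with assumed values standing in for the tunable and for LRU_PERCENT_CLEAN:

#include <stdio.h>

/* Assumed values mirroring blkback's tunables. */
#define MAX_PGRANTS        1056  /* xen_blkif_max_pgrants stand-in */
#define LRU_PERCENT_CLEAN  5

/* How many cached grants to purge once the pool is full: the amount
 * over the limit plus LRU_PERCENT_CLEAN% of the limit, never more
 * than what is cached. */
static unsigned num_to_clean(unsigned cached, int overflowed)
{
	unsigned num_clean;

	if (cached < MAX_PGRANTS ||
	    (cached == MAX_PGRANTS && !overflowed))
		return 0;  /* not under pressure, skip the purge */

	num_clean = (MAX_PGRANTS / 100) * LRU_PERCENT_CLEAN;
	num_clean = cached - MAX_PGRANTS + num_clean;
	if (num_clean > cached)
		num_clean = cached;
	return num_clean;
}

int main(void)
{
	/* Exactly at the limit and overflowed: purge the LRU share. */
	printf("%u\n", num_to_clean(MAX_PGRANTS, 1));
	return 0;
}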
ring              470 drivers/block/xen-blkback/blkback.c static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
ring              475 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->pending_free_lock, flags);
ring              476 drivers/block/xen-blkback/blkback.c 	if (!list_empty(&ring->pending_free)) {
ring              477 drivers/block/xen-blkback/blkback.c 		req = list_entry(ring->pending_free.next, struct pending_req,
ring              481 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
ring              489 drivers/block/xen-blkback/blkback.c static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
ring              494 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->pending_free_lock, flags);
ring              495 drivers/block/xen-blkback/blkback.c 	was_empty = list_empty(&ring->pending_free);
ring              496 drivers/block/xen-blkback/blkback.c 	list_add(&req->free_list, &ring->pending_free);
ring              497 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
ring              499 drivers/block/xen-blkback/blkback.c 		wake_up(&ring->pending_free_wq);
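alloc_req()/free_req() manage the ring's fixed set of pending_req structures as a locked freelist; note that free_req() wakes pending_free_wq only when the list was empty, i.e. exactly when a submitter may be blocked waiting for a request slot. A userspace model using a condition variable in place of the wait queue:

#include <pthread.h>
#include <stddef.h>

struct req { struct req *next; };

struct req_pool {
	pthread_mutex_t lock;
	pthread_cond_t  wait;   /* stands in for pending_free_wq */
	struct req *free_list;
};

static struct req *alloc_req(struct req_pool *p)
{
	struct req *req = NULL;

	pthread_mutex_lock(&p->lock);
	if (p->free_list) {
		req = p->free_list;
		p->free_list = req->next;
	}
	pthread_mutex_unlock(&p->lock);
	return req;                    /* NULL means "out of requests" */
}

static void free_req(struct req_pool *p, struct req *req)
{
	int was_empty;

	pthread_mutex_lock(&p->lock);
	was_empty = (p->free_list == NULL);
	req->next = p->free_list;
	p->free_list = req;
	pthread_mutex_unlock(&p->lock);
	if (was_empty)                 /* only then can a waiter proceed */
		pthread_cond_broadcast(&p->wait);
}

int main(void)
{
	struct req_pool p = { PTHREAD_MUTEX_INITIALIZER,
			      PTHREAD_COND_INITIALIZER, NULL };
	struct req r = { NULL };

	free_req(&p, &r);
	return alloc_req(&p) != &r;    /* exits 0 on success */
}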
ring              579 drivers/block/xen-blkback/blkback.c static void blkif_notify_work(struct xen_blkif_ring *ring)
ring              581 drivers/block/xen-blkback/blkback.c 	ring->waiting_reqs = 1;
ring              582 drivers/block/xen-blkback/blkback.c 	wake_up(&ring->wq);
ring              595 drivers/block/xen-blkback/blkback.c static void print_stats(struct xen_blkif_ring *ring)
ring              599 drivers/block/xen-blkback/blkback.c 		 current->comm, ring->st_oo_req,
ring              600 drivers/block/xen-blkback/blkback.c 		 ring->st_rd_req, ring->st_wr_req,
ring              601 drivers/block/xen-blkback/blkback.c 		 ring->st_f_req, ring->st_ds_req,
ring              602 drivers/block/xen-blkback/blkback.c 		 ring->persistent_gnt_c,
ring              604 drivers/block/xen-blkback/blkback.c 	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
ring              605 drivers/block/xen-blkback/blkback.c 	ring->st_rd_req = 0;
ring              606 drivers/block/xen-blkback/blkback.c 	ring->st_wr_req = 0;
ring              607 drivers/block/xen-blkback/blkback.c 	ring->st_oo_req = 0;
ring              608 drivers/block/xen-blkback/blkback.c 	ring->st_ds_req = 0;
ring              613 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = arg;
ring              614 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring              629 drivers/block/xen-blkback/blkback.c 			ring->wq,
ring              630 drivers/block/xen-blkback/blkback.c 			ring->waiting_reqs || kthread_should_stop(),
ring              635 drivers/block/xen-blkback/blkback.c 			ring->pending_free_wq,
ring              636 drivers/block/xen-blkback/blkback.c 			!list_empty(&ring->pending_free) ||
ring              642 drivers/block/xen-blkback/blkback.c 		ring->waiting_reqs = 0;
ring              645 drivers/block/xen-blkback/blkback.c 		ret = do_block_io_op(ring);
ring              647 drivers/block/xen-blkback/blkback.c 			ring->waiting_reqs = 1;
ring              649 drivers/block/xen-blkback/blkback.c 			wait_event_interruptible(ring->shutdown_wq,
ring              654 drivers/block/xen-blkback/blkback.c 		    time_after(jiffies, ring->next_lru)) {
ring              655 drivers/block/xen-blkback/blkback.c 			purge_persistent_gnt(ring);
ring              656 drivers/block/xen-blkback/blkback.c 			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
ring              660 drivers/block/xen-blkback/blkback.c 		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
ring              662 drivers/block/xen-blkback/blkback.c 		if (log_stats && time_after(jiffies, ring->st_print))
ring              663 drivers/block/xen-blkback/blkback.c 			print_stats(ring);
ring              667 drivers/block/xen-blkback/blkback.c 	flush_work(&ring->persistent_purge_work);
ring              670 drivers/block/xen-blkback/blkback.c 		print_stats(ring);
ring              672 drivers/block/xen-blkback/blkback.c 	ring->xenblkd = NULL;
ring              680 drivers/block/xen-blkback/blkback.c void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
ring              683 drivers/block/xen-blkback/blkback.c 	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
ring              684 drivers/block/xen-blkback/blkback.c 		free_persistent_gnts(ring, &ring->persistent_gnts,
ring              685 drivers/block/xen-blkback/blkback.c 			ring->persistent_gnt_c);
ring              687 drivers/block/xen-blkback/blkback.c 	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
ring              688 drivers/block/xen-blkback/blkback.c 	ring->persistent_gnt_c = 0;
ring              691 drivers/block/xen-blkback/blkback.c 	shrink_free_pagepool(ring, 0 /* All */);
ring              695 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring,
ring              705 drivers/block/xen-blkback/blkback.c 			put_persistent_gnt(ring, pages[i]->persistent_gnt);
ring              723 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = pending_req->ring;
ring              724 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring              730 drivers/block/xen-blkback/blkback.c 	put_free_pages(ring, data->pages, data->count);
ring              731 drivers/block/xen-blkback/blkback.c 	make_response(ring, pending_req->id,
ring              733 drivers/block/xen-blkback/blkback.c 	free_req(ring, pending_req);
ring              746 drivers/block/xen-blkback/blkback.c 	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
ring              755 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = req->ring;
ring              759 drivers/block/xen-blkback/blkback.c 	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
ring              780 drivers/block/xen-blkback/blkback.c static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
ring              792 drivers/block/xen-blkback/blkback.c 		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
ring              797 drivers/block/xen-blkback/blkback.c 			put_free_pages(ring, unmap_pages, invcount);
ring              804 drivers/block/xen-blkback/blkback.c static int xen_blkbk_map(struct xen_blkif_ring *ring,
ring              817 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring              832 drivers/block/xen-blkback/blkback.c 				ring,
ring              844 drivers/block/xen-blkback/blkback.c 			if (get_free_page(ring, &pages[i]->page))
ring              877 drivers/block/xen-blkback/blkback.c 				put_free_pages(ring, &pages[seg_idx]->page, 1);
ring              887 drivers/block/xen-blkback/blkback.c 		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
ring              905 drivers/block/xen-blkback/blkback.c 			if (add_persistent_gnt(ring,
ring              913 drivers/block/xen-blkback/blkback.c 				 persistent_gnt->gnt, ring->persistent_gnt_c,
ring              938 drivers/block/xen-blkback/blkback.c 	put_free_pages(ring, pages_to_gnt, segs_to_map);
ring              948 drivers/block/xen-blkback/blkback.c 	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
ring              961 drivers/block/xen-blkback/blkback.c 	struct xen_blkif_ring *ring = pending_req->ring;
ring              972 drivers/block/xen-blkback/blkback.c 	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
ring             1004 drivers/block/xen-blkback/blkback.c 	xen_blkbk_unmap(ring, pages, indirect_grefs);
ring             1008 drivers/block/xen-blkback/blkback.c static int dispatch_discard_io(struct xen_blkif_ring *ring,
ring             1013 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring             1030 drivers/block/xen-blkback/blkback.c 	ring->st_ds_req++;
ring             1046 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.discard.id, req->operation, status);
ring             1051 drivers/block/xen-blkback/blkback.c static int dispatch_other_io(struct xen_blkif_ring *ring,
ring             1055 drivers/block/xen-blkback/blkback.c 	free_req(ring, pending_req);
ring             1056 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.other.id, req->operation,
ring             1061 drivers/block/xen-blkback/blkback.c static void xen_blk_drain_io(struct xen_blkif_ring *ring)
ring             1063 drivers/block/xen-blkback/blkback.c 	struct xen_blkif *blkif = ring->blkif;
ring             1067 drivers/block/xen-blkback/blkback.c 		if (atomic_read(&ring->inflight) == 0)
ring             1085 drivers/block/xen-blkback/blkback.c 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
ring             1090 drivers/block/xen-blkback/blkback.c 		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
ring             1124 drivers/block/xen-blkback/blkback.c __do_block_io_op(struct xen_blkif_ring *ring)
ring             1126 drivers/block/xen-blkback/blkback.c 	union blkif_back_rings *blk_rings = &ring->blk_rings;
ring             1139 drivers/block/xen-blkback/blkback.c 			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
ring             1152 drivers/block/xen-blkback/blkback.c 		pending_req = alloc_req(ring);
ring             1154 drivers/block/xen-blkback/blkback.c 			ring->st_oo_req++;
ring             1159 drivers/block/xen-blkback/blkback.c 		switch (ring->blkif->blk_protocol) {
ring             1183 drivers/block/xen-blkback/blkback.c 			if (dispatch_rw_block_io(ring, &req, pending_req))
ring             1187 drivers/block/xen-blkback/blkback.c 			free_req(ring, pending_req);
ring             1188 drivers/block/xen-blkback/blkback.c 			if (dispatch_discard_io(ring, &req))
ring             1192 drivers/block/xen-blkback/blkback.c 			if (dispatch_other_io(ring, &req, pending_req))
ring             1205 drivers/block/xen-blkback/blkback.c do_block_io_op(struct xen_blkif_ring *ring)
ring             1207 drivers/block/xen-blkback/blkback.c 	union blkif_back_rings *blk_rings = &ring->blk_rings;
ring             1211 drivers/block/xen-blkback/blkback.c 		more_to_do = __do_block_io_op(ring);
ring             1224 drivers/block/xen-blkback/blkback.c static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
ring             1253 drivers/block/xen-blkback/blkback.c 		ring->st_rd_req++;
ring             1257 drivers/block/xen-blkback/blkback.c 		ring->st_wr_req++;
ring             1265 drivers/block/xen-blkback/blkback.c 		ring->st_f_req++;
ring             1291 drivers/block/xen-blkback/blkback.c 	pending_req->ring      = ring;
ring             1318 drivers/block/xen-blkback/blkback.c 	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
ring             1323 drivers/block/xen-blkback/blkback.c 			 ring->blkif->vbd.pdevice);
ring             1335 drivers/block/xen-blkback/blkback.c 				 ring->blkif->domid);
ring             1344 drivers/block/xen-blkback/blkback.c 		xen_blk_drain_io(pending_req->ring);
ring             1359 drivers/block/xen-blkback/blkback.c 	xen_blkif_get(ring->blkif);
ring             1360 drivers/block/xen-blkback/blkback.c 	atomic_inc(&ring->inflight);
ring             1410 drivers/block/xen-blkback/blkback.c 		ring->st_rd_sect += preq.nr_sects;
ring             1412 drivers/block/xen-blkback/blkback.c 		ring->st_wr_sect += preq.nr_sects;
ring             1417 drivers/block/xen-blkback/blkback.c 	xen_blkbk_unmap(ring, pending_req->segments,
ring             1421 drivers/block/xen-blkback/blkback.c 	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
ring             1422 drivers/block/xen-blkback/blkback.c 	free_req(ring, pending_req);
ring             1440 drivers/block/xen-blkback/blkback.c static void make_response(struct xen_blkif_ring *ring, u64 id,
ring             1448 drivers/block/xen-blkback/blkback.c 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
ring             1449 drivers/block/xen-blkback/blkback.c 	blk_rings = &ring->blk_rings;
ring             1451 drivers/block/xen-blkback/blkback.c 	switch (ring->blkif->blk_protocol) {
ring             1474 drivers/block/xen-blkback/blkback.c 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
ring             1476 drivers/block/xen-blkback/blkback.c 		notify_remote_via_irq(ring->irq);
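make_response() at the end of blkback.c is the producer side of the Xen shared ring: fill the slot at the private rsp_prod_pvt under blk_ring_lock, publish the new producer index, and notify the frontend's event channel only if the new responses crossed the rsp_event index the frontend advertised. A toy single-ring model of that publish-and-maybe-notify step, using C11 atomics in place of the kernel's barriers and the RING_PUSH_RESPONSES_AND_CHECK_NOTIFY macro:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 32  /* power of two, like the Xen shared rings */

struct sring {
	_Atomic unsigned rsp_prod;
	_Atomic unsigned rsp_event;  /* frontend: "wake me at this rsp" */
	int rsp[RING_SIZE];
};

static void make_response(struct sring *s, unsigned *rsp_prod_pvt,
			  int status, int *notify)
{
	unsigned old = atomic_load_explicit(&s->rsp_prod,
					    memory_order_relaxed);

	s->rsp[*rsp_prod_pvt % RING_SIZE] = status;
	(*rsp_prod_pvt)++;
	/* Publish after the slot is written (release ~= kernel wmb()). */
	atomic_store_explicit(&s->rsp_prod, *rsp_prod_pvt,
			      memory_order_release);
	/* Notify iff the new responses cross the frontend's event index;
	 * unsigned wraparound math, as in the Xen ring macros. */
	*notify = (*rsp_prod_pvt - atomic_load(&s->rsp_event)) <
		  (*rsp_prod_pvt - old);
}

int main(void)
{
	struct sring s = { 0, 1, { 0 } };
	unsigned pvt = 0;
	int notify;

	make_response(&s, &pvt, 0 /* "OKAY" */, &notify);
	printf("notify=%d\n", notify);  /* 1: frontend asked for rsp 1 */
	return 0;
}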
ring              343 drivers/block/xen-blkback/common.h 	struct xen_blkif_ring   *ring;
ring              385 drivers/block/xen-blkback/common.h void xen_blkbk_free_caches(struct xen_blkif_ring *ring);
ring               80 drivers/block/xen-blkback/xenbus.c 	struct xen_blkif_ring *ring;
ring              110 drivers/block/xen-blkback/xenbus.c 		ring = &blkif->rings[i];
ring              111 drivers/block/xen-blkback/xenbus.c 		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d", name, i);
ring              112 drivers/block/xen-blkback/xenbus.c 		if (IS_ERR(ring->xenblkd)) {
ring              113 drivers/block/xen-blkback/xenbus.c 			err = PTR_ERR(ring->xenblkd);
ring              114 drivers/block/xen-blkback/xenbus.c 			ring->xenblkd = NULL;
ring              124 drivers/block/xen-blkback/xenbus.c 		ring = &blkif->rings[i];
ring              125 drivers/block/xen-blkback/xenbus.c 		kthread_stop(ring->xenblkd);
ring              140 drivers/block/xen-blkback/xenbus.c 		struct xen_blkif_ring *ring = &blkif->rings[r];
ring              142 drivers/block/xen-blkback/xenbus.c 		spin_lock_init(&ring->blk_ring_lock);
ring              143 drivers/block/xen-blkback/xenbus.c 		init_waitqueue_head(&ring->wq);
ring              144 drivers/block/xen-blkback/xenbus.c 		INIT_LIST_HEAD(&ring->pending_free);
ring              145 drivers/block/xen-blkback/xenbus.c 		INIT_LIST_HEAD(&ring->persistent_purge_list);
ring              146 drivers/block/xen-blkback/xenbus.c 		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
ring              147 drivers/block/xen-blkback/xenbus.c 		spin_lock_init(&ring->free_pages_lock);
ring              148 drivers/block/xen-blkback/xenbus.c 		INIT_LIST_HEAD(&ring->free_pages);
ring              150 drivers/block/xen-blkback/xenbus.c 		spin_lock_init(&ring->pending_free_lock);
ring              151 drivers/block/xen-blkback/xenbus.c 		init_waitqueue_head(&ring->pending_free_wq);
ring              152 drivers/block/xen-blkback/xenbus.c 		init_waitqueue_head(&ring->shutdown_wq);
ring              153 drivers/block/xen-blkback/xenbus.c 		ring->blkif = blkif;
ring              154 drivers/block/xen-blkback/xenbus.c 		ring->st_print = jiffies;
ring              155 drivers/block/xen-blkback/xenbus.c 		ring->active = true;
ring              188 drivers/block/xen-blkback/xenbus.c static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
ring              192 drivers/block/xen-blkback/xenbus.c 	struct xen_blkif *blkif = ring->blkif;
ring              195 drivers/block/xen-blkback/xenbus.c 	if (ring->irq)
ring              199 drivers/block/xen-blkback/xenbus.c 				     &ring->blk_ring);
ring              207 drivers/block/xen-blkback/xenbus.c 		sring = (struct blkif_sring *)ring->blk_ring;
ring              208 drivers/block/xen-blkback/xenbus.c 		BACK_RING_INIT(&ring->blk_rings.native, sring,
ring              215 drivers/block/xen-blkback/xenbus.c 		sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring;
ring              216 drivers/block/xen-blkback/xenbus.c 		BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
ring              223 drivers/block/xen-blkback/xenbus.c 		sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring;
ring              224 drivers/block/xen-blkback/xenbus.c 		BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
ring              234 drivers/block/xen-blkback/xenbus.c 						    "blkif-backend", ring);
ring              236 drivers/block/xen-blkback/xenbus.c 		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring              237 drivers/block/xen-blkback/xenbus.c 		ring->blk_rings.common.sring = NULL;
ring              240 drivers/block/xen-blkback/xenbus.c 	ring->irq = err;
ring              252 drivers/block/xen-blkback/xenbus.c 		struct xen_blkif_ring *ring = &blkif->rings[r];
ring              255 drivers/block/xen-blkback/xenbus.c 		if (!ring->active)
ring              258 drivers/block/xen-blkback/xenbus.c 		if (ring->xenblkd) {
ring              259 drivers/block/xen-blkback/xenbus.c 			kthread_stop(ring->xenblkd);
ring              260 drivers/block/xen-blkback/xenbus.c 			wake_up(&ring->shutdown_wq);
ring              267 drivers/block/xen-blkback/xenbus.c 		if (atomic_read(&ring->inflight) > 0) {
ring              272 drivers/block/xen-blkback/xenbus.c 		if (ring->irq) {
ring              273 drivers/block/xen-blkback/xenbus.c 			unbind_from_irqhandler(ring->irq, ring);
ring              274 drivers/block/xen-blkback/xenbus.c 			ring->irq = 0;
ring              277 drivers/block/xen-blkback/xenbus.c 		if (ring->blk_rings.common.sring) {
ring              278 drivers/block/xen-blkback/xenbus.c 			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring              279 drivers/block/xen-blkback/xenbus.c 			ring->blk_rings.common.sring = NULL;
ring              283 drivers/block/xen-blkback/xenbus.c 		xen_blkbk_free_caches(ring);
ring              286 drivers/block/xen-blkback/xenbus.c 		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
ring              299 drivers/block/xen-blkback/xenbus.c 		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
ring              300 drivers/block/xen-blkback/xenbus.c 		BUG_ON(!list_empty(&ring->persistent_purge_list));
ring              301 drivers/block/xen-blkback/xenbus.c 		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
ring              302 drivers/block/xen-blkback/xenbus.c 		BUG_ON(!list_empty(&ring->free_pages));
ring              303 drivers/block/xen-blkback/xenbus.c 		BUG_ON(ring->free_pages_num != 0);
ring              304 drivers/block/xen-blkback/xenbus.c 		BUG_ON(ring->persistent_gnt_c != 0);
ring              306 drivers/block/xen-blkback/xenbus.c 		ring->active = false;
ring              365 drivers/block/xen-blkback/xenbus.c 			struct xen_blkif_ring *ring = &blkif->rings[i];	\
ring              367 drivers/block/xen-blkback/xenbus.c 			result += ring->st_##name;			\
ring              924 drivers/block/xen-blkback/xenbus.c static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
ring              929 drivers/block/xen-blkback/xenbus.c 	struct xen_blkif *blkif = ring->blkif;
ring              983 drivers/block/xen-blkback/xenbus.c 		list_add_tail(&req->free_list, &ring->pending_free);
ring              998 drivers/block/xen-blkback/xenbus.c 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
ring             1007 drivers/block/xen-blkback/xenbus.c 	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
ring              176 drivers/block/xen-blkfront.c 	struct blkif_front_ring ring;
ring              531 drivers/block/xen-blkfront.c 	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
ring              532 drivers/block/xen-blkfront.c 	rinfo->ring.req_prod_pvt++;
ring              866 drivers/block/xen-blkfront.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
ring              894 drivers/block/xen-blkfront.c 	if (RING_FULL(&rinfo->ring))
ring             1222 drivers/block/xen-blkfront.c 	if (!RING_FULL(&rinfo->ring))
ring             1335 drivers/block/xen-blkfront.c 	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
ring             1336 drivers/block/xen-blkfront.c 	rinfo->ring.sring = NULL;
ring             1562 drivers/block/xen-blkfront.c 	rp = rinfo->ring.sring->rsp_prod;
ring             1565 drivers/block/xen-blkfront.c 	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
ring             1568 drivers/block/xen-blkfront.c 		bret = RING_GET_RESPONSE(&rinfo->ring, i);
ring             1652 drivers/block/xen-blkfront.c 	rinfo->ring.rsp_cons = i;
ring             1654 drivers/block/xen-blkfront.c 	if (i != rinfo->ring.req_prod_pvt) {
ring             1656 drivers/block/xen-blkfront.c 		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
ring             1660 drivers/block/xen-blkfront.c 		rinfo->ring.sring->rsp_event = i + 1;
ring             1689 drivers/block/xen-blkfront.c 	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
ring             1691 drivers/block/xen-blkfront.c 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
ring             1694 drivers/block/xen-blkfront.c 		rinfo->ring.sring = NULL;
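The blkfront lines show the matching frontend half: requests are staged at the private req_prod_pvt (RING_GET_REQUEST), so several can be queued before RING_PUSH_REQUESTS_AND_CHECK_NOTIFY publishes them at once, and responses are consumed from rsp_cons up to the shared rsp_prod. A toy model of the private-index batching (barriers and the notify check are elided):

#include <stdio.h>

#define RING_SIZE 32  /* power of two */

struct front_ring {
	unsigned req_prod_pvt;  /* private: where the next request goes */
	unsigned req_prod;      /* shared:  what the backend may read  */
	unsigned rsp_cons;      /* private: next response to consume   */
	int ring[RING_SIZE];
};

static int *ring_get_request(struct front_ring *r)
{
	int *slot = &r->ring[r->req_prod_pvt % RING_SIZE];

	r->req_prod_pvt++;   /* staged, not yet visible to the backend */
	return slot;
}

static void ring_push_requests(struct front_ring *r)
{
	/* A real implementation issues a write barrier here before
	 * publishing, then checks whether the backend needs a kick. */
	r->req_prod = r->req_prod_pvt;
}

static int ring_full(const struct front_ring *r)
{
	return r->req_prod_pvt - r->rsp_cons >= RING_SIZE;
}

int main(void)
{
	struct front_ring r = { 0 };

	*ring_get_request(&r) = 42;
	*ring_get_request(&r) = 43;     /* batched second request */
	ring_push_requests(&r);
	printf("published %u requests, full=%d\n", r.req_prod, ring_full(&r));
	return 0;
}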
ring              574 drivers/crypto/caam/ctrl.c 	int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
ring              764 drivers/crypto/caam/ctrl.c 	ring = 0;
ring              768 drivers/crypto/caam/ctrl.c 			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
ring              770 drivers/crypto/caam/ctrl.c 					     (ring + JR_BLOCK_NUMBER) *
ring              774 drivers/crypto/caam/ctrl.c 			ring++;
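The caam/ctrl.c loop maps each job ring's register block at a fixed stride from the controller base; the pointer arithmetic is the whole trick. A sketch with assumed values for JR_BLOCK_NUMBER and the block size (mainline derives the latter from the configured page size):

#include <stdio.h>
#include <stdint.h>

/* Assumed layout constants standing in for the CAAM register map. */
#define JR_BLOCK_NUMBER 1       /* job rings start at block 1  */
#define BLOCK_SIZE      0x1000  /* one register block per ring */

/* Each job ring's registers sit at a fixed stride from the base,
 * which is all the loop in ctrl.c computes per ring. */
static uintptr_t jr_regs(uintptr_t ctrl_base, int ring)
{
	return ctrl_base + (uintptr_t)(ring + JR_BLOCK_NUMBER) * BLOCK_SIZE;
}

int main(void)
{
	printf("ring 0 at %#lx, ring 1 at %#lx\n",
	       (unsigned long)jr_regs(0xffe30000, 0),
	       (unsigned long)jr_regs(0xffe30000, 1));
	return 0;
}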
ring              104 drivers/crypto/cavium/nitrox/nitrox_dev.h 	int ring;
ring              146 drivers/crypto/cavium/nitrox/nitrox_dev.h 	int ring;
ring               64 drivers/crypto/cavium/nitrox/nitrox_hal.c static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
ring               72 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = NPS_PKT_IN_INSTR_CTLX(ring);
ring               87 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = NPS_PKT_IN_DONE_CNTSX(ring);
ring               93 drivers/crypto/cavium/nitrox/nitrox_hal.c void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
ring              100 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = NPS_PKT_IN_INSTR_CTLX(ring);
ring              308 drivers/crypto/cavium/nitrox/nitrox_hal.c static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
ring              317 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = AQMQ_ENX(ring);
ring              324 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = AQMQ_ACTIVITY_STATX(ring);
ring              333 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = AQMQ_CMP_CNTX(ring);
ring              339 drivers/crypto/cavium/nitrox/nitrox_hal.c void enable_aqm_ring(struct nitrox_device *ndev, int ring)
ring              344 drivers/crypto/cavium/nitrox/nitrox_hal.c 	offset = AQMQ_ENX(ring);
ring              353 drivers/crypto/cavium/nitrox/nitrox_hal.c 	int ring;
ring              355 drivers/crypto/cavium/nitrox/nitrox_hal.c 	for (ring = 0; ring < ndev->nr_queues; ring++) {
ring              356 drivers/crypto/cavium/nitrox/nitrox_hal.c 		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
ring              363 drivers/crypto/cavium/nitrox/nitrox_hal.c 		reset_aqm_ring(ndev, ring);
ring              366 drivers/crypto/cavium/nitrox/nitrox_hal.c 		offset = AQMQ_DRBLX(ring);
ring              374 drivers/crypto/cavium/nitrox/nitrox_hal.c 		offset = AQMQ_NXT_CMDX(ring);
ring              378 drivers/crypto/cavium/nitrox/nitrox_hal.c 		offset = AQMQ_BADRX(ring);
ring              382 drivers/crypto/cavium/nitrox/nitrox_hal.c 		offset = AQMQ_QSZX(ring);
ring              388 drivers/crypto/cavium/nitrox/nitrox_hal.c 		offset = AQMQ_CMP_THRX(ring);
ring              394 drivers/crypto/cavium/nitrox/nitrox_hal.c 		enable_aqm_ring(ndev, ring);
ring               22 drivers/crypto/cavium/nitrox/nitrox_hal.h void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
ring              328 drivers/crypto/cavium/nitrox/nitrox_isr.c 		qvec->ring = i / NR_RING_VECTORS;
ring              329 drivers/crypto/cavium/nitrox/nitrox_isr.c 		if (qvec->ring >= ndev->nr_queues)
ring              332 drivers/crypto/cavium/nitrox/nitrox_isr.c 		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
ring              333 drivers/crypto/cavium/nitrox/nitrox_isr.c 		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
ring              339 drivers/crypto/cavium/nitrox/nitrox_isr.c 				qvec->ring);
ring              342 drivers/crypto/cavium/nitrox/nitrox_isr.c 		cpu = qvec->ring % num_online_cpus();
ring               36 drivers/crypto/cavium/nitrox/nitrox_mbx.c static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
ring               40 drivers/crypto/cavium/nitrox/nitrox_mbx.c 	reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
ring               45 drivers/crypto/cavium/nitrox/nitrox_mbx.c 				    int ring)
ring               49 drivers/crypto/cavium/nitrox/nitrox_mbx.c 	reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
ring               86 drivers/crypto/cavium/nitrox/nitrox_mbx.c 	pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
ring              127 drivers/crypto/cavium/nitrox/nitrox_mbx.c 		vfdev->ring = i;
ring              129 drivers/crypto/cavium/nitrox/nitrox_mbx.c 		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
ring              149 drivers/crypto/cavium/nitrox/nitrox_mbx.c 		vfdev->ring = (i + 64);
ring              151 drivers/crypto/cavium/nitrox/nitrox_mbx.c 		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
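Across the nitrox files, every per-ring resource is a CSR array indexed by the ring number: NPS_PKT_IN_INSTR_CTLX(ring), AQMQ_ENX(ring), the per-VF mailbox registers, and so on (the second mbx.c loop simply continues the numbering at i + 64 for the upper VF group). A sketch of that addressing pattern; the base, stride, and write_csr() helper here are hypothetical, standing in for the real CSR headers and the driver's register accessors:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stride/base: the real offsets live in the nitrox CSR
 * headers, but every per-ring register follows base + ring * stride. */
#define AQMQ_ENX_BASE    0x1800ULL
#define AQMQ_ENX_STRIDE  0x40ULL
#define AQMQ_ENX(ring)   (AQMQ_ENX_BASE + (ring) * AQMQ_ENX_STRIDE)

/* Mock MMIO write standing in for the driver's 64-bit CSR write. */
static void write_csr(uint64_t offset, uint64_t value)
{
	printf("csr[%#llx] <- %#llx\n",
	       (unsigned long long)offset, (unsigned long long)value);
}

/* Enable one AQM ring by setting its queue-enable bit, mirroring the
 * shape of enable_aqm_ring() in nitrox_hal.c. */
static void enable_aqm_ring(int ring)
{
	write_csr(AQMQ_ENX(ring), 1 /* queue enable */);
}

int main(void)
{
	for (int ring = 0; ring < 4; ring++)
		enable_aqm_ring(ring);
	return 0;
}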
ring              477 drivers/crypto/inside-secure/safexcel.c 		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
ring              479 drivers/crypto/inside-secure/safexcel.c 		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
ring              525 drivers/crypto/inside-secure/safexcel.c 		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
ring              527 drivers/crypto/inside-secure/safexcel.c 		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
ring              756 drivers/crypto/inside-secure/safexcel.c 				       int ring)
ring              758 drivers/crypto/inside-secure/safexcel.c 	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
ring              766 drivers/crypto/inside-secure/safexcel.c 	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
ring              769 drivers/crypto/inside-secure/safexcel.c void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
ring              778 drivers/crypto/inside-secure/safexcel.c 	req = priv->ring[ring].req;
ring              779 drivers/crypto/inside-secure/safexcel.c 	backlog = priv->ring[ring].backlog;
ring              784 drivers/crypto/inside-secure/safexcel.c 		spin_lock_bh(&priv->ring[ring].queue_lock);
ring              785 drivers/crypto/inside-secure/safexcel.c 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
ring              786 drivers/crypto/inside-secure/safexcel.c 		req = crypto_dequeue_request(&priv->ring[ring].queue);
ring              787 drivers/crypto/inside-secure/safexcel.c 		spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              790 drivers/crypto/inside-secure/safexcel.c 			priv->ring[ring].req = NULL;
ring              791 drivers/crypto/inside-secure/safexcel.c 			priv->ring[ring].backlog = NULL;
ring              797 drivers/crypto/inside-secure/safexcel.c 		ret = ctx->send(req, ring, &commands, &results);
ring              820 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].req = req;
ring              821 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].backlog = backlog;
ring              827 drivers/crypto/inside-secure/safexcel.c 	spin_lock_bh(&priv->ring[ring].lock);
ring              829 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].requests += nreq;
ring              831 drivers/crypto/inside-secure/safexcel.c 	if (!priv->ring[ring].busy) {
ring              832 drivers/crypto/inside-secure/safexcel.c 		safexcel_try_push_requests(priv, ring);
ring              833 drivers/crypto/inside-secure/safexcel.c 		priv->ring[ring].busy = true;
ring              836 drivers/crypto/inside-secure/safexcel.c 	spin_unlock_bh(&priv->ring[ring].lock);
ring              840 drivers/crypto/inside-secure/safexcel.c 	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
ring              844 drivers/crypto/inside-secure/safexcel.c 	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
ring              885 drivers/crypto/inside-secure/safexcel.c 				 int ring,
ring              889 drivers/crypto/inside-secure/safexcel.c 	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
ring              891 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].rdr_req[i] = req;
ring              895 drivers/crypto/inside-secure/safexcel.c safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
ring              897 drivers/crypto/inside-secure/safexcel.c 	int i = safexcel_ring_first_rdr_index(priv, ring);
ring              899 drivers/crypto/inside-secure/safexcel.c 	return priv->ring[ring].rdr_req[i];
ring              902 drivers/crypto/inside-secure/safexcel.c void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
ring              908 drivers/crypto/inside-secure/safexcel.c 		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
ring              930 drivers/crypto/inside-secure/safexcel.c 			      dma_addr_t ctxr_dma, int ring)
ring              937 drivers/crypto/inside-secure/safexcel.c 	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
ring              947 drivers/crypto/inside-secure/safexcel.c 	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
ring              954 drivers/crypto/inside-secure/safexcel.c 	safexcel_rdr_req_set(priv, ring, rdesc, async);
ring              959 drivers/crypto/inside-secure/safexcel.c 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
ring              965 drivers/crypto/inside-secure/safexcel.c 						     int ring)
ring              975 drivers/crypto/inside-secure/safexcel.c 	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
ring              982 drivers/crypto/inside-secure/safexcel.c 		req = safexcel_rdr_req_get(priv, ring);
ring              985 drivers/crypto/inside-secure/safexcel.c 		ndesc = ctx->handle_result(priv, ring, req,
ring             1007 drivers/crypto/inside-secure/safexcel.c 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
ring             1016 drivers/crypto/inside-secure/safexcel.c 	spin_lock_bh(&priv->ring[ring].lock);
ring             1018 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].requests -= handled;
ring             1019 drivers/crypto/inside-secure/safexcel.c 	safexcel_try_push_requests(priv, ring);
ring             1021 drivers/crypto/inside-secure/safexcel.c 	if (!priv->ring[ring].requests)
ring             1022 drivers/crypto/inside-secure/safexcel.c 		priv->ring[ring].busy = false;
ring             1024 drivers/crypto/inside-secure/safexcel.c 	spin_unlock_bh(&priv->ring[ring].lock);
ring             1032 drivers/crypto/inside-secure/safexcel.c 	safexcel_dequeue(data->priv, data->ring);
ring             1037 drivers/crypto/inside-secure/safexcel.c 	int ring;
ring             1044 drivers/crypto/inside-secure/safexcel.c 	int ring = irq_data->ring, rc = IRQ_NONE;
ring             1047 drivers/crypto/inside-secure/safexcel.c 	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
ring             1052 drivers/crypto/inside-secure/safexcel.c 	if (status & EIP197_RDR_IRQ(ring)) {
ring             1053 drivers/crypto/inside-secure/safexcel.c 		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
ring             1068 drivers/crypto/inside-secure/safexcel.c 		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
ring             1072 drivers/crypto/inside-secure/safexcel.c 	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
ring             1081 drivers/crypto/inside-secure/safexcel.c 	int ring = irq_data->ring;
ring             1083 drivers/crypto/inside-secure/safexcel.c 	safexcel_handle_result_descriptor(priv, ring);
ring             1085 drivers/crypto/inside-secure/safexcel.c 	queue_work(priv->ring[ring].workqueue,
ring             1086 drivers/crypto/inside-secure/safexcel.c 		   &priv->ring[ring].work_data.work);
ring             1449 drivers/crypto/inside-secure/safexcel.c 	priv->ring = devm_kcalloc(dev, priv->config.rings,
ring             1450 drivers/crypto/inside-secure/safexcel.c 				  sizeof(*priv->ring),
ring             1452 drivers/crypto/inside-secure/safexcel.c 	if (!priv->ring)
ring             1461 drivers/crypto/inside-secure/safexcel.c 						     &priv->ring[i].cdr,
ring             1462 drivers/crypto/inside-secure/safexcel.c 						     &priv->ring[i].rdr);
ring             1468 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].rdr_req = devm_kcalloc(dev,
ring             1470 drivers/crypto/inside-secure/safexcel.c 			sizeof(priv->ring[i].rdr_req),
ring             1472 drivers/crypto/inside-secure/safexcel.c 		if (!priv->ring[i].rdr_req)
ring             1480 drivers/crypto/inside-secure/safexcel.c 		ring_irq->ring = i;
ring             1493 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].work_data.priv = priv;
ring             1494 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].work_data.ring = i;
ring             1495 drivers/crypto/inside-secure/safexcel.c 		INIT_WORK(&priv->ring[i].work_data.work,
ring             1499 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].workqueue =
ring             1501 drivers/crypto/inside-secure/safexcel.c 		if (!priv->ring[i].workqueue)
ring             1504 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].requests = 0;
ring             1505 drivers/crypto/inside-secure/safexcel.c 		priv->ring[i].busy = false;
ring             1507 drivers/crypto/inside-secure/safexcel.c 		crypto_init_queue(&priv->ring[i].queue,
ring             1510 drivers/crypto/inside-secure/safexcel.c 		spin_lock_init(&priv->ring[i].lock);
ring             1511 drivers/crypto/inside-secure/safexcel.c 		spin_lock_init(&priv->ring[i].queue_lock);
ring             1631 drivers/crypto/inside-secure/safexcel.c 		destroy_workqueue(priv->ring[i].workqueue);
ring             1770 drivers/crypto/inside-secure/safexcel.c 		destroy_workqueue(priv->ring[i].workqueue);
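The safexcel.c lines around priv->ring[ring].requests and .busy implement per-ring interrupt coalescing: submissions bump the in-flight count under the ring lock and arm the result-count threshold only when the ring was idle; the result handler subtracts what it reaped, re-arms, and clears busy once the ring drains. A simplified model of that bookkeeping (the actual threshold write to EIP197_HIA_xDR_THRESH is elided):

#include <pthread.h>

#define MAX_BATCH_SZ 64  /* EIP197_MAX_BATCH_SZ stand-in */

struct ring_state {
	pthread_mutex_t lock;
	int requests;   /* descriptors in flight on this ring */
	int busy;       /* coalescing threshold currently armed? */
};

static int min_int(int a, int b) { return a < b ? a : b; }

/* Would write the coalescing count to the RDR threshold register. */
static void push_threshold(struct ring_state *r)
{
	int coal = min_int(r->requests, MAX_BATCH_SZ);
	(void)coal;  /* hardware write elided in this model */
}

static void on_submit(struct ring_state *r, int nreq)
{
	pthread_mutex_lock(&r->lock);
	r->requests += nreq;
	if (!r->busy) {              /* ring was idle: arm the IRQ */
		push_threshold(r);
		r->busy = 1;
	}
	pthread_mutex_unlock(&r->lock);
}

static void on_results(struct ring_state *r, int handled)
{
	pthread_mutex_lock(&r->lock);
	r->requests -= handled;
	push_threshold(r);           /* re-arm for what is still queued */
	if (!r->requests)
		r->busy = 0;
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring_state r = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	on_submit(&r, 2);
	on_results(&r, 2);
	return r.busy;               /* 0: ring drained and disarmed */
}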
ring              612 drivers/crypto/inside-secure/safexcel.h 	int ring;
ring              729 drivers/crypto/inside-secure/safexcel.h 	struct safexcel_ring *ring;
ring              733 drivers/crypto/inside-secure/safexcel.h 	int (*send)(struct crypto_async_request *req, int ring,
ring              735 drivers/crypto/inside-secure/safexcel.h 	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
ring              741 drivers/crypto/inside-secure/safexcel.h 	int ring;
ring              779 drivers/crypto/inside-secure/safexcel.h void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
ring              782 drivers/crypto/inside-secure/safexcel.h void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
ring              785 drivers/crypto/inside-secure/safexcel.h 			      dma_addr_t ctxr_dma, int ring);
ring              791 drivers/crypto/inside-secure/safexcel.h 			      struct safexcel_desc_ring *ring);
ring              792 drivers/crypto/inside-secure/safexcel.h void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int  ring);
ring              794 drivers/crypto/inside-secure/safexcel.h 				 struct safexcel_desc_ring *ring);
ring              806 drivers/crypto/inside-secure/safexcel.h 				  int ring);
ring              808 drivers/crypto/inside-secure/safexcel.h 				  int ring,
ring              811 drivers/crypto/inside-secure/safexcel.h 			  int ring,
ring              815 drivers/crypto/inside-secure/safexcel.h safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
ring              488 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
ring              508 drivers/crypto/inside-secure/safexcel_cipher.c 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
ring              522 drivers/crypto/inside-secure/safexcel_cipher.c 	safexcel_complete(priv, ring);
ring              548 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_send_req(struct crypto_async_request *base, int ring,
ring              652 drivers/crypto/inside-secure/safexcel_cipher.c 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
ring              677 drivers/crypto/inside-secure/safexcel_cipher.c 		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
ring              710 drivers/crypto/inside-secure/safexcel_cipher.c 			rdesc = safexcel_add_rdesc(priv, ring, first, last,
ring              716 drivers/crypto/inside-secure/safexcel_cipher.c 			rdesc = safexcel_add_rdesc(priv, ring, first, last,
ring              739 drivers/crypto/inside-secure/safexcel_cipher.c 		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
ring              749 drivers/crypto/inside-secure/safexcel_cipher.c 	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
ring              757 drivers/crypto/inside-secure/safexcel_cipher.c 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
ring              760 drivers/crypto/inside-secure/safexcel_cipher.c 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
ring              773 drivers/crypto/inside-secure/safexcel_cipher.c 				      int ring,
ring              788 drivers/crypto/inside-secure/safexcel_cipher.c 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
ring              802 drivers/crypto/inside-secure/safexcel_cipher.c 	safexcel_complete(priv, ring);
ring              813 drivers/crypto/inside-secure/safexcel_cipher.c 	ring = safexcel_select_ring(priv);
ring              814 drivers/crypto/inside-secure/safexcel_cipher.c 	ctx->base.ring = ring;
ring              816 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring              817 drivers/crypto/inside-secure/safexcel_cipher.c 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
ring              818 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              823 drivers/crypto/inside-secure/safexcel_cipher.c 	queue_work(priv->ring[ring].workqueue,
ring              824 drivers/crypto/inside-secure/safexcel_cipher.c 		   &priv->ring[ring].work_data.work);
ring              832 drivers/crypto/inside-secure/safexcel_cipher.c 					   int ring,
ring              842 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_inv_result(priv, ring, async, sreq,
ring              845 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_req_result(priv, ring, async, req->src,
ring              854 drivers/crypto/inside-secure/safexcel_cipher.c 				       int ring,
ring              865 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_inv_result(priv, ring, async, sreq,
ring              868 drivers/crypto/inside-secure/safexcel_cipher.c 		err = safexcel_handle_req_result(priv, ring, async, req->src,
ring              878 drivers/crypto/inside-secure/safexcel_cipher.c 				    int ring, int *commands, int *results)
ring              884 drivers/crypto/inside-secure/safexcel_cipher.c 	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
ring              894 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
ring              906 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_cipher_send_inv(async, ring, commands, results);
ring              917 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_send_req(async, ring, sreq, req->src,
ring              926 drivers/crypto/inside-secure/safexcel_cipher.c static int safexcel_aead_send(struct crypto_async_request *async, int ring,
ring              939 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_cipher_send_inv(async, ring, commands, results);
ring              941 drivers/crypto/inside-secure/safexcel_cipher.c 		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
ring              956 drivers/crypto/inside-secure/safexcel_cipher.c 	int ring = ctx->base.ring;
ring              964 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring              965 drivers/crypto/inside-secure/safexcel_cipher.c 	crypto_enqueue_request(&priv->ring[ring].queue, base);
ring              966 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              968 drivers/crypto/inside-secure/safexcel_cipher.c 	queue_work(priv->ring[ring].workqueue,
ring              969 drivers/crypto/inside-secure/safexcel_cipher.c 		   &priv->ring[ring].work_data.work);
ring             1019 drivers/crypto/inside-secure/safexcel_cipher.c 	int ret, ring;
ring             1030 drivers/crypto/inside-secure/safexcel_cipher.c 		ctx->base.ring = safexcel_select_ring(priv);
ring             1038 drivers/crypto/inside-secure/safexcel_cipher.c 	ring = ctx->base.ring;
ring             1040 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring             1041 drivers/crypto/inside-secure/safexcel_cipher.c 	ret = crypto_enqueue_request(&priv->ring[ring].queue, base);
ring             1042 drivers/crypto/inside-secure/safexcel_cipher.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring             1044 drivers/crypto/inside-secure/safexcel_cipher.c 	queue_work(priv->ring[ring].workqueue,
ring             1045 drivers/crypto/inside-secure/safexcel_cipher.c 		   &priv->ring[ring].work_data.work);
ring              177 drivers/crypto/inside-secure/safexcel_hash.c 				      int ring,
ring              190 drivers/crypto/inside-secure/safexcel_hash.c 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
ring              199 drivers/crypto/inside-secure/safexcel_hash.c 	safexcel_complete(priv, ring);
ring              254 drivers/crypto/inside-secure/safexcel_hash.c static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
ring              307 drivers/crypto/inside-secure/safexcel_hash.c 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
ring              343 drivers/crypto/inside-secure/safexcel_hash.c 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
ring              376 drivers/crypto/inside-secure/safexcel_hash.c 	rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
ring              383 drivers/crypto/inside-secure/safexcel_hash.c 	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
ring              398 drivers/crypto/inside-secure/safexcel_hash.c 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
ring              411 drivers/crypto/inside-secure/safexcel_hash.c 				      int ring,
ring              423 drivers/crypto/inside-secure/safexcel_hash.c 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
ring              432 drivers/crypto/inside-secure/safexcel_hash.c 	safexcel_complete(priv, ring);
ring              442 drivers/crypto/inside-secure/safexcel_hash.c 	ring = safexcel_select_ring(priv);
ring              443 drivers/crypto/inside-secure/safexcel_hash.c 	ctx->base.ring = ring;
ring              445 drivers/crypto/inside-secure/safexcel_hash.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring              446 drivers/crypto/inside-secure/safexcel_hash.c 	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
ring              447 drivers/crypto/inside-secure/safexcel_hash.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              452 drivers/crypto/inside-secure/safexcel_hash.c 	queue_work(priv->ring[ring].workqueue,
ring              453 drivers/crypto/inside-secure/safexcel_hash.c 		   &priv->ring[ring].work_data.work);
ring              460 drivers/crypto/inside-secure/safexcel_hash.c static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
ring              472 drivers/crypto/inside-secure/safexcel_hash.c 		err = safexcel_handle_inv_result(priv, ring, async,
ring              475 drivers/crypto/inside-secure/safexcel_hash.c 		err = safexcel_handle_req_result(priv, ring, async,
ring              483 drivers/crypto/inside-secure/safexcel_hash.c 				   int ring, int *commands, int *results)
ring              490 drivers/crypto/inside-secure/safexcel_hash.c 					ctx->base.ctxr_dma, ring);
ring              501 drivers/crypto/inside-secure/safexcel_hash.c 			       int ring, int *commands, int *results)
ring              508 drivers/crypto/inside-secure/safexcel_hash.c 		ret = safexcel_ahash_send_inv(async, ring, commands, results);
ring              510 drivers/crypto/inside-secure/safexcel_hash.c 		ret = safexcel_ahash_send_req(async, ring, commands, results);
ring              522 drivers/crypto/inside-secure/safexcel_hash.c 	int ring = ctx->base.ring;
ring              536 drivers/crypto/inside-secure/safexcel_hash.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring              537 drivers/crypto/inside-secure/safexcel_hash.c 	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
ring              538 drivers/crypto/inside-secure/safexcel_hash.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              540 drivers/crypto/inside-secure/safexcel_hash.c 	queue_work(priv->ring[ring].workqueue,
ring              541 drivers/crypto/inside-secure/safexcel_hash.c 		   &priv->ring[ring].work_data.work);
ring              587 drivers/crypto/inside-secure/safexcel_hash.c 	int ret, ring;
ring              618 drivers/crypto/inside-secure/safexcel_hash.c 		ctx->base.ring = safexcel_select_ring(priv);
ring              626 drivers/crypto/inside-secure/safexcel_hash.c 	ring = ctx->base.ring;
ring              628 drivers/crypto/inside-secure/safexcel_hash.c 	spin_lock_bh(&priv->ring[ring].queue_lock);
ring              629 drivers/crypto/inside-secure/safexcel_hash.c 	ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
ring              630 drivers/crypto/inside-secure/safexcel_hash.c 	spin_unlock_bh(&priv->ring[ring].queue_lock);
ring              632 drivers/crypto/inside-secure/safexcel_hash.c 	queue_work(priv->ring[ring].workqueue,
ring              633 drivers/crypto/inside-secure/safexcel_hash.c 		   &priv->ring[ring].work_data.work);
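The cipher and hash paths above funnel requests identically: pick a ring once per transform context (ctx->base.ring), enqueue under that ring's queue_lock, then queue_work the ring's worker. A sketch of the flow; the round-robin selector here is only a stand-in, since the driver's safexcel_select_ring() balances by ring load:

#include <pthread.h>
#include <stdio.h>

#define NUM_RINGS 4

struct ring_queue {
	pthread_mutex_t lock;
	int pending;            /* stands in for the crypto queue */
};

static struct ring_queue rings[NUM_RINGS];
static int ring_rr;

static int select_ring(void)
{
	return ring_rr++ % NUM_RINGS;  /* round-robin stand-in */
}

static void kick_worker(int ring)
{
	printf("queue_work on ring %d\n", ring);  /* workqueue elided */
}

static void enqueue_request(int *ctx_ring)
{
	if (*ctx_ring < 0)
		*ctx_ring = select_ring();  /* sticky per-context choice */

	pthread_mutex_lock(&rings[*ctx_ring].lock);
	rings[*ctx_ring].pending++;
	pthread_mutex_unlock(&rings[*ctx_ring].lock);

	kick_worker(*ctx_ring);
}

int main(void)
{
	int ctx_ring = -1;

	for (int i = 0; i < NUM_RINGS; i++)
		pthread_mutex_init(&rings[i].lock, NULL);
	enqueue_request(&ctx_ring);
	enqueue_request(&ctx_ring);     /* lands on the same ring */
	return 0;
}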
ring               46 drivers/crypto/inside-secure/safexcel_ring.c 				     struct safexcel_desc_ring *ring)
ring               48 drivers/crypto/inside-secure/safexcel_ring.c 	void *ptr = ring->write;
ring               50 drivers/crypto/inside-secure/safexcel_ring.c 	if ((ring->write == ring->read - ring->offset) ||
ring               51 drivers/crypto/inside-secure/safexcel_ring.c 	    (ring->read == ring->base && ring->write == ring->base_end))
ring               54 drivers/crypto/inside-secure/safexcel_ring.c 	if (ring->write == ring->base_end)
ring               55 drivers/crypto/inside-secure/safexcel_ring.c 		ring->write = ring->base;
ring               57 drivers/crypto/inside-secure/safexcel_ring.c 		ring->write += ring->offset;
ring               63 drivers/crypto/inside-secure/safexcel_ring.c 			      struct safexcel_desc_ring *ring)
ring               65 drivers/crypto/inside-secure/safexcel_ring.c 	void *ptr = ring->read;
ring               67 drivers/crypto/inside-secure/safexcel_ring.c 	if (ring->write == ring->read)
ring               70 drivers/crypto/inside-secure/safexcel_ring.c 	if (ring->read == ring->base_end)
ring               71 drivers/crypto/inside-secure/safexcel_ring.c 		ring->read = ring->base;
ring               73 drivers/crypto/inside-secure/safexcel_ring.c 		ring->read += ring->offset;
ring               79 drivers/crypto/inside-secure/safexcel_ring.c 				     int ring)
ring               81 drivers/crypto/inside-secure/safexcel_ring.c 	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
ring               87 drivers/crypto/inside-secure/safexcel_ring.c 					 int ring)
ring               89 drivers/crypto/inside-secure/safexcel_ring.c 	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
ring               95 drivers/crypto/inside-secure/safexcel_ring.c 					 int ring,
ring               98 drivers/crypto/inside-secure/safexcel_ring.c 	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
ring              104 drivers/crypto/inside-secure/safexcel_ring.c 				 struct safexcel_desc_ring *ring)
ring              106 drivers/crypto/inside-secure/safexcel_ring.c 	if (ring->write == ring->read)
ring              109 drivers/crypto/inside-secure/safexcel_ring.c 	if (ring->write == ring->base)
ring              110 drivers/crypto/inside-secure/safexcel_ring.c 		ring->write = ring->base_end;
ring              112 drivers/crypto/inside-secure/safexcel_ring.c 		ring->write -= ring->offset;
ring              124 drivers/crypto/inside-secure/safexcel_ring.c 	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
ring              175 drivers/crypto/inside-secure/safexcel_ring.c 	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
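safexcel_ring.c is the heart of this listing: descriptor rings walked by raw read/write pointers over [base, base_end] in steps of the element size, with one slot sacrificed so that full (write just behind read) and empty (write == read) stay distinguishable. A self-contained model of the next_wptr/next_rptr logic (the full-check pointer math mirrors the kernel code):

#include <stdio.h>

#define ELEMS   4
#define ELEM_SZ 16

struct desc_ring {
	char base[ELEMS * ELEM_SZ];
	char *base_end;    /* last valid element, not one-past-the-end */
	char *write, *read;
};

static void ring_init(struct desc_ring *r)
{
	r->base_end = r->base + (ELEMS - 1) * ELEM_SZ;
	r->write = r->read = r->base;
}

static void *ring_next_wptr(struct desc_ring *r)
{
	void *ptr = r->write;

	if (r->write == r->read - ELEM_SZ ||
	    (r->read == r->base && r->write == r->base_end))
		return NULL;                      /* ring full */

	if (r->write == r->base_end)
		r->write = r->base;               /* wrap to the start */
	else
		r->write += ELEM_SZ;
	return ptr;
}

static void *ring_next_rptr(struct desc_ring *r)
{
	void *ptr = r->read;

	if (r->write == r->read)
		return NULL;                      /* ring empty */

	if (r->read == r->base_end)
		r->read = r->base;
	else
		r->read += ELEM_SZ;
	return ptr;
}

int main(void)
{
	struct desc_ring r;
	int produced = 0;

	ring_init(&r);
	while (ring_next_wptr(&r))
		produced++;
	printf("capacity %d of %d slots\n", produced, ELEMS);  /* 3 of 4 */
	return 0;
}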
ring              279 drivers/crypto/mediatek/mtk-aes.c 	struct mtk_ring *ring = cryp->ring[aes->id];
ring              287 drivers/crypto/mediatek/mtk-aes.c 		cmd = ring->cmd_next;
ring              300 drivers/crypto/mediatek/mtk-aes.c 		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
ring              301 drivers/crypto/mediatek/mtk-aes.c 			ring->cmd_next = ring->cmd_base;
ring              307 drivers/crypto/mediatek/mtk-aes.c 		res = ring->res_next;
ring              315 drivers/crypto/mediatek/mtk-aes.c 		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
ring              316 drivers/crypto/mediatek/mtk-aes.c 			ring->res_next = ring->res_base;
ring              321 drivers/crypto/mediatek/mtk-aes.c 	ring->res_prev = res;
ring              870 drivers/crypto/mediatek/mtk-aes.c 	u32 status = cryp->ring[aes->id]->res_prev->ct;
ring              230 drivers/crypto/mediatek/mtk-platform.c 	writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));
ring              270 drivers/crypto/mediatek/mtk-platform.c 	writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));
ring              433 drivers/crypto/mediatek/mtk-platform.c 				  cryp->ring[i]->res_base,
ring              434 drivers/crypto/mediatek/mtk-platform.c 				  cryp->ring[i]->res_dma);
ring              436 drivers/crypto/mediatek/mtk-platform.c 				  cryp->ring[i]->cmd_base,
ring              437 drivers/crypto/mediatek/mtk-platform.c 				  cryp->ring[i]->cmd_dma);
ring              438 drivers/crypto/mediatek/mtk-platform.c 		kfree(cryp->ring[i]);
ring              444 drivers/crypto/mediatek/mtk-platform.c 	struct mtk_ring **ring = cryp->ring;
ring              448 drivers/crypto/mediatek/mtk-platform.c 		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
ring              449 drivers/crypto/mediatek/mtk-platform.c 		if (!ring[i])
ring              452 drivers/crypto/mediatek/mtk-platform.c 		ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
ring              454 drivers/crypto/mediatek/mtk-platform.c 						       &ring[i]->cmd_dma,
ring              456 drivers/crypto/mediatek/mtk-platform.c 		if (!ring[i]->cmd_base)
ring              459 drivers/crypto/mediatek/mtk-platform.c 		ring[i]->res_base = dma_alloc_coherent(cryp->dev,
ring              461 drivers/crypto/mediatek/mtk-platform.c 						       &ring[i]->res_dma,
ring              463 drivers/crypto/mediatek/mtk-platform.c 		if (!ring[i]->res_base)
ring              466 drivers/crypto/mediatek/mtk-platform.c 		ring[i]->cmd_next = ring[i]->cmd_base;
ring              467 drivers/crypto/mediatek/mtk-platform.c 		ring[i]->res_next = ring[i]->res_base;
ring              474 drivers/crypto/mediatek/mtk-platform.c 				  ring[i]->res_base, ring[i]->res_dma);
ring              476 drivers/crypto/mediatek/mtk-platform.c 				  ring[i]->cmd_base, ring[i]->cmd_dma);
ring              477 drivers/crypto/mediatek/mtk-platform.c 		kfree(ring[i]);
ring              216 drivers/crypto/mediatek/mtk-platform.h 	struct mtk_ring *ring[MTK_RING_MAX];
ring              138 drivers/crypto/mediatek/mtk-sha.c static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
ring              143 drivers/crypto/mediatek/mtk-sha.c 	*cmd_curr = ring->cmd_next++;
ring              144 drivers/crypto/mediatek/mtk-sha.c 	*res_curr = ring->res_next++;
ring              147 drivers/crypto/mediatek/mtk-sha.c 	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
ring              148 drivers/crypto/mediatek/mtk-sha.c 		ring->cmd_next = ring->cmd_base;
ring              149 drivers/crypto/mediatek/mtk-sha.c 		ring->res_next = ring->res_base;
ring              426 drivers/crypto/mediatek/mtk-sha.c 	struct mtk_ring *ring = cryp->ring[sha->id];
ring              435 drivers/crypto/mediatek/mtk-sha.c 	mtk_sha_ring_shift(ring, &cmd, &res, &count);
ring              446 drivers/crypto/mediatek/mtk-sha.c 		mtk_sha_ring_shift(ring, &cmd, &res, &count);
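The MediaTek crypto entries above keep a command ring and a result ring of equal depth and advance both next-pointers in lockstep, wrapping with a pointer comparison against base + MTK_DESC_NUM. A sketch with stub descriptor types; the real MTK_DESC_NUM value is not shown in the excerpt, so 8 is a stand-in:

#include <stdio.h>

#define MTK_DESC_NUM 8	/* stand-in depth */

struct cmd_desc { unsigned int ct; };
struct res_desc { unsigned int ct; };

struct mtk_ring_sketch {
	struct cmd_desc cmd_base[MTK_DESC_NUM], *cmd_next;
	struct res_desc res_base[MTK_DESC_NUM], *res_next;
};

static void ring_shift(struct mtk_ring_sketch *ring,
		       struct cmd_desc **cmd_curr,
		       struct res_desc **res_curr)
{
	*cmd_curr = ring->cmd_next++;
	*res_curr = ring->res_next++;

	/* equal depth, so one wrap test moves both pointers home */
	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
		ring->cmd_next = ring->cmd_base;
		ring->res_next = ring->res_base;
	}
}

int main(void)
{
	struct mtk_ring_sketch r;
	struct cmd_desc *cmd;
	struct res_desc *res;
	int i;

	r.cmd_next = r.cmd_base;
	r.res_next = r.res_base;
	for (i = 0; i < 10; i++) {	/* ten shifts wrap an 8-deep ring */
		ring_shift(&r, &cmd, &res);
		printf("cmd slot %ld, res slot %ld\n",
		       (long)(cmd - r.cmd_base), (long)(res - r.res_base));
	}
	return 0;
}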
ring              154 drivers/crypto/qat/qat_common/adf_common_drv.h void adf_update_ring_arb(struct adf_etr_ring_data *ring);
ring              109 drivers/crypto/qat/qat_common/adf_hw_arbiter.c void adf_update_ring_arb(struct adf_etr_ring_data *ring)
ring              111 drivers/crypto/qat/qat_common/adf_hw_arbiter.c 	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring              112 drivers/crypto/qat/qat_common/adf_hw_arbiter.c 				   ring->bank->bank_number,
ring              113 drivers/crypto/qat/qat_common/adf_hw_arbiter.c 				   ring->bank->ring_mask & 0xFF);
ring               80 drivers/crypto/qat/qat_common/adf_transport.c static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
ring               83 drivers/crypto/qat/qat_common/adf_transport.c 	if (bank->ring_mask & (1 << ring)) {
ring               87 drivers/crypto/qat/qat_common/adf_transport.c 	bank->ring_mask |= (1 << ring);
ring               92 drivers/crypto/qat/qat_common/adf_transport.c static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
ring               95 drivers/crypto/qat/qat_common/adf_transport.c 	bank->ring_mask &= ~(1 << ring);
ring               99 drivers/crypto/qat/qat_common/adf_transport.c static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
ring              102 drivers/crypto/qat/qat_common/adf_transport.c 	bank->irq_mask |= (1 << ring);
ring              109 drivers/crypto/qat/qat_common/adf_transport.c static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
ring              112 drivers/crypto/qat/qat_common/adf_transport.c 	bank->irq_mask &= ~(1 << ring);
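The adf_reserve_ring/adf_unreserve_ring pair and the IRQ helpers above are classic one-bit-per-ring bookkeeping: a bank-wide mask claims rings, and a second mask gates their interrupts with the same set/clear idiom. A compressed sketch; the bank locking and the CSR write that publishes irq_mask to hardware are elided here:

#include <stdint.h>
#include <stdio.h>

struct bank_sketch { uint32_t ring_mask, irq_mask; };

static int reserve_ring(struct bank_sketch *b, uint32_t ring)
{
	if (b->ring_mask & (1u << ring))
		return -1;		/* already taken */
	b->ring_mask |= 1u << ring;
	return 0;
}

static void unreserve_ring(struct bank_sketch *b, uint32_t ring)
{
	b->ring_mask &= ~(1u << ring);
}

static void enable_ring_irq(struct bank_sketch *b, uint32_t ring)
{
	b->irq_mask |= 1u << ring;
	/* the driver then writes irq_mask to the bank's interrupt CSR */
}

int main(void)
{
	struct bank_sketch b = { 0, 0 };

	printf("first reserve: %d\n", reserve_ring(&b, 2));
	printf("double reserve: %d\n", reserve_ring(&b, 2));
	enable_ring_irq(&b, 2);
	unreserve_ring(&b, 2);
	printf("masks: ring=%#x irq=%#x\n", b.ring_mask, b.irq_mask);
	return 0;
}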
ring              117 drivers/crypto/qat/qat_common/adf_transport.c int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
ring              119 drivers/crypto/qat/qat_common/adf_transport.c 	if (atomic_add_return(1, ring->inflights) >
ring              120 drivers/crypto/qat/qat_common/adf_transport.c 	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
ring              121 drivers/crypto/qat/qat_common/adf_transport.c 		atomic_dec(ring->inflights);
ring              124 drivers/crypto/qat/qat_common/adf_transport.c 	spin_lock_bh(&ring->lock);
ring              125 drivers/crypto/qat/qat_common/adf_transport.c 	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
ring              126 drivers/crypto/qat/qat_common/adf_transport.c 	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
ring              128 drivers/crypto/qat/qat_common/adf_transport.c 	ring->tail = adf_modulo(ring->tail +
ring              129 drivers/crypto/qat/qat_common/adf_transport.c 				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ring              130 drivers/crypto/qat/qat_common/adf_transport.c 				ADF_RING_SIZE_MODULO(ring->ring_size));
ring              131 drivers/crypto/qat/qat_common/adf_transport.c 	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
ring              132 drivers/crypto/qat/qat_common/adf_transport.c 			    ring->ring_number, ring->tail);
ring              133 drivers/crypto/qat/qat_common/adf_transport.c 	spin_unlock_bh(&ring->lock);
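adf_send_message above gates admission with an atomic in-flight counter before touching the ring, then copies the message at tail and advances tail modulo the ring size in bytes. A userspace sketch of the same flow; the power-of-two mask inside adf_modulo_sketch, the sizes, and the elided spinlock and doorbell write are assumptions of this model, not code shown in the excerpt:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSG_BYTES	64
#define RING_SHIFT	10			/* 2^10 = 1024 ring bytes */
#define RING_BYTES	(1u << RING_SHIFT)
#define MAX_INFLIGHT	(RING_BYTES / MSG_BYTES - 1)

static uint8_t ring_base[RING_BYTES];
static uint32_t ring_tail;
static atomic_int inflights;

/* assumed shape of adf_modulo: a mask over a power-of-two size */
static uint32_t adf_modulo_sketch(uint32_t data, uint32_t shift)
{
	return data & ((1u << shift) - 1);
}

static int send_message(const uint8_t *msg)
{
	if (atomic_fetch_add(&inflights, 1) + 1 > (int)MAX_INFLIGHT) {
		atomic_fetch_sub(&inflights, 1);
		return -1;		/* ring full, caller retries */
	}

	/* the driver serializes this region with ring->lock */
	memcpy(ring_base + ring_tail, msg, MSG_BYTES);
	ring_tail = adf_modulo_sketch(ring_tail + MSG_BYTES, RING_SHIFT);
	/* ...and then writes the new tail to the tail CSR (the doorbell) */
	return 0;
}

int main(void)
{
	uint8_t msg[MSG_BYTES] = { 0xab };
	int sent = 0;

	while (send_message(msg) == 0)
		sent++;
	printf("accepted %d messages before backpressure\n", sent);
	return 0;
}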
ring              137 drivers/crypto/qat/qat_common/adf_transport.c static int adf_handle_response(struct adf_etr_ring_data *ring)
ring              140 drivers/crypto/qat/qat_common/adf_transport.c 	uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
ring              143 drivers/crypto/qat/qat_common/adf_transport.c 		ring->callback((uint32_t *)msg);
ring              144 drivers/crypto/qat/qat_common/adf_transport.c 		atomic_dec(ring->inflights);
ring              146 drivers/crypto/qat/qat_common/adf_transport.c 		ring->head = adf_modulo(ring->head +
ring              147 drivers/crypto/qat/qat_common/adf_transport.c 					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ring              148 drivers/crypto/qat/qat_common/adf_transport.c 					ADF_RING_SIZE_MODULO(ring->ring_size));
ring              150 drivers/crypto/qat/qat_common/adf_transport.c 		msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
ring              153 drivers/crypto/qat/qat_common/adf_transport.c 		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
ring              154 drivers/crypto/qat/qat_common/adf_transport.c 				    ring->bank->bank_number,
ring              155 drivers/crypto/qat/qat_common/adf_transport.c 				    ring->ring_number, ring->head);
ring              159 drivers/crypto/qat/qat_common/adf_transport.c static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
ring              161 drivers/crypto/qat/qat_common/adf_transport.c 	uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
ring              163 drivers/crypto/qat/qat_common/adf_transport.c 	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
ring              164 drivers/crypto/qat/qat_common/adf_transport.c 			      ring->ring_number, ring_config);
ring              167 drivers/crypto/qat/qat_common/adf_transport.c static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
ring              170 drivers/crypto/qat/qat_common/adf_transport.c 			BUILD_RESP_RING_CONFIG(ring->ring_size,
ring              174 drivers/crypto/qat/qat_common/adf_transport.c 	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
ring              175 drivers/crypto/qat/qat_common/adf_transport.c 			      ring->ring_number, ring_config);
ring              178 drivers/crypto/qat/qat_common/adf_transport.c static int adf_init_ring(struct adf_etr_ring_data *ring)
ring              180 drivers/crypto/qat/qat_common/adf_transport.c 	struct adf_etr_bank_data *bank = ring->bank;
ring              185 drivers/crypto/qat/qat_common/adf_transport.c 			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring              188 drivers/crypto/qat/qat_common/adf_transport.c 	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
ring              189 drivers/crypto/qat/qat_common/adf_transport.c 					     ring_size_bytes, &ring->dma_addr,
ring              191 drivers/crypto/qat/qat_common/adf_transport.c 	if (!ring->base_addr)
ring              194 drivers/crypto/qat/qat_common/adf_transport.c 	memset(ring->base_addr, 0x7F, ring_size_bytes);
ring              196 drivers/crypto/qat/qat_common/adf_transport.c 	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
ring              199 drivers/crypto/qat/qat_common/adf_transport.c 				  ring->base_addr, ring->dma_addr);
ring              203 drivers/crypto/qat/qat_common/adf_transport.c 	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
ring              204 drivers/crypto/qat/qat_common/adf_transport.c 		adf_configure_tx_ring(ring);
ring              207 drivers/crypto/qat/qat_common/adf_transport.c 		adf_configure_rx_ring(ring);
ring              209 drivers/crypto/qat/qat_common/adf_transport.c 	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
ring              210 drivers/crypto/qat/qat_common/adf_transport.c 	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
ring              211 drivers/crypto/qat/qat_common/adf_transport.c 			    ring->ring_number, ring_base);
ring              212 drivers/crypto/qat/qat_common/adf_transport.c 	spin_lock_init(&ring->lock);
ring              216 drivers/crypto/qat/qat_common/adf_transport.c static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
ring              219 drivers/crypto/qat/qat_common/adf_transport.c 			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring              222 drivers/crypto/qat/qat_common/adf_transport.c 	if (ring->base_addr) {
ring              223 drivers/crypto/qat/qat_common/adf_transport.c 		memset(ring->base_addr, 0x7F, ring_size_bytes);
ring              224 drivers/crypto/qat/qat_common/adf_transport.c 		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
ring              225 drivers/crypto/qat/qat_common/adf_transport.c 				  ring_size_bytes, ring->base_addr,
ring              226 drivers/crypto/qat/qat_common/adf_transport.c 				  ring->dma_addr);
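Both adf_init_ring and adf_cleanup_ring memset the ring to 0x7F. That byte pattern reads as an empty-slot signature: with every dword initialized to 0x7F7F7F7F, the response handler above can consume entries until it reads the signature back, rather than comparing head against a producer index. A sketch built on that assumption (the loop condition itself is not visible in the excerpt):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EMPTY_SIG	0x7F7F7F7FU
#define RING_MSGS	16

static uint32_t ring[RING_MSGS];
static uint32_t head;			/* consumer index */

static void handle_responses(void (*callback)(uint32_t))
{
	while (ring[head] != EMPTY_SIG) {
		callback(ring[head]);
		ring[head] = EMPTY_SIG;	/* hand the slot back */
		head = (head + 1) % RING_MSGS;
	}
	/* the driver then writes the new head to the ring-head CSR */
}

static void print_resp(uint32_t v) { printf("resp %#x\n", v); }

int main(void)
{
	memset(ring, 0x7F, sizeof(ring));	/* as adf_init_ring does */
	ring[0] = 0x1;				/* pretend the device responded */
	ring[1] = 0x2;
	handle_responses(print_resp);
	return 0;
}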
ring              238 drivers/crypto/qat/qat_common/adf_transport.c 	struct adf_etr_ring_data *ring;
ring              277 drivers/crypto/qat/qat_common/adf_transport.c 	ring = &bank->rings[ring_num];
ring              278 drivers/crypto/qat/qat_common/adf_transport.c 	ring->ring_number = ring_num;
ring              279 drivers/crypto/qat/qat_common/adf_transport.c 	ring->bank = bank;
ring              280 drivers/crypto/qat/qat_common/adf_transport.c 	ring->callback = callback;
ring              281 drivers/crypto/qat/qat_common/adf_transport.c 	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
ring              282 drivers/crypto/qat/qat_common/adf_transport.c 	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring              283 drivers/crypto/qat/qat_common/adf_transport.c 	ring->head = 0;
ring              284 drivers/crypto/qat/qat_common/adf_transport.c 	ring->tail = 0;
ring              285 drivers/crypto/qat/qat_common/adf_transport.c 	atomic_set(ring->inflights, 0);
ring              286 drivers/crypto/qat/qat_common/adf_transport.c 	ret = adf_init_ring(ring);
ring              291 drivers/crypto/qat/qat_common/adf_transport.c 	adf_update_ring_arb(ring);
ring              293 drivers/crypto/qat/qat_common/adf_transport.c 	if (adf_ring_debugfs_add(ring, ring_name)) {
ring              302 drivers/crypto/qat/qat_common/adf_transport.c 		adf_enable_ring_irq(bank, ring->ring_number);
ring              303 drivers/crypto/qat/qat_common/adf_transport.c 	*ring_ptr = ring;
ring              306 drivers/crypto/qat/qat_common/adf_transport.c 	adf_cleanup_ring(ring);
ring              308 drivers/crypto/qat/qat_common/adf_transport.c 	adf_update_ring_arb(ring);
ring              312 drivers/crypto/qat/qat_common/adf_transport.c void adf_remove_ring(struct adf_etr_ring_data *ring)
ring              314 drivers/crypto/qat/qat_common/adf_transport.c 	struct adf_etr_bank_data *bank = ring->bank;
ring              317 drivers/crypto/qat/qat_common/adf_transport.c 	adf_disable_ring_irq(bank, ring->ring_number);
ring              321 drivers/crypto/qat/qat_common/adf_transport.c 			      ring->ring_number, 0);
ring              323 drivers/crypto/qat/qat_common/adf_transport.c 			    ring->ring_number, 0);
ring              324 drivers/crypto/qat/qat_common/adf_transport.c 	adf_ring_debugfs_rm(ring);
ring              325 drivers/crypto/qat/qat_common/adf_transport.c 	adf_unreserve_ring(bank, ring->ring_number);
ring              327 drivers/crypto/qat/qat_common/adf_transport.c 	adf_update_ring_arb(ring);
ring              328 drivers/crypto/qat/qat_common/adf_transport.c 	adf_cleanup_ring(ring);
ring              390 drivers/crypto/qat/qat_common/adf_transport.c 	struct adf_etr_ring_data *ring;
ring              413 drivers/crypto/qat/qat_common/adf_transport.c 		ring = &bank->rings[i];
ring              415 drivers/crypto/qat/qat_common/adf_transport.c 			ring->inflights =
ring              419 drivers/crypto/qat/qat_common/adf_transport.c 			if (!ring->inflights)
ring              428 drivers/crypto/qat/qat_common/adf_transport.c 			ring->inflights = tx_ring->inflights;
ring              442 drivers/crypto/qat/qat_common/adf_transport.c 		ring = &bank->rings[i];
ring              444 drivers/crypto/qat/qat_common/adf_transport.c 			kfree(ring->inflights);
ring              516 drivers/crypto/qat/qat_common/adf_transport.c 		struct adf_etr_ring_data *ring = &bank->rings[i];
ring              519 drivers/crypto/qat/qat_common/adf_transport.c 			adf_cleanup_ring(ring);
ring              522 drivers/crypto/qat/qat_common/adf_transport.c 			kfree(ring->inflights);
ring               61 drivers/crypto/qat/qat_common/adf_transport.h int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
ring               62 drivers/crypto/qat/qat_common/adf_transport.h void adf_remove_ring(struct adf_etr_ring_data *ring);
ring              121 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
ring              123 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 			ADF_RING_CSR_RING_HEAD + (ring << 2))
ring              124 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
ring              126 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 			ADF_RING_CSR_RING_TAIL + (ring << 2))
ring              130 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
ring              132 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 		ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
ring              133 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
ring              139 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 		ADF_RING_CSR_RING_LBASE + (ring << 2), l_base);	\
ring              141 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 		ADF_RING_CSR_RING_UBASE + (ring << 2), u_base);	\
ring              143 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
ring              145 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 		ADF_RING_CSR_RING_HEAD + (ring << 2), value)
ring              146 drivers/crypto/qat/qat_common/adf_transport_access_macros.h #define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
ring              148 drivers/crypto/qat/qat_common/adf_transport_access_macros.h 		ADF_RING_CSR_RING_TAIL + (ring << 2), value)
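In the CSR macros above, each per-ring register is a single 32-bit word, so the ring number becomes a byte offset via ring << 2 added to a per-register-block base. A toy register file showing the same addressing; the region offsets and sizes are invented for the sketch:

#include <stdint.h>
#include <stdio.h>

#define CSR_RING_HEAD	0x0000	/* illustrative region offsets */
#define CSR_RING_TAIL	0x0100

static uint32_t csr[0x1000 / 4];	/* stand-in for the mapped BAR */

static void write_csr(uint32_t byte_off, uint32_t val)
{
	csr[byte_off / 4] = val;	/* the kernel uses writel() here */
}

#define WRITE_CSR_RING_TAIL_SKETCH(ring, value) \
	write_csr(CSR_RING_TAIL + ((ring) << 2), (value))

int main(void)
{
	WRITE_CSR_RING_TAIL_SKETCH(3, 0x40);	/* ring 3's tail register */
	printf("tail[3] = %#x\n", csr[(CSR_RING_TAIL + (3 << 2)) / 4]);
	return 0;
}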
ring               59 drivers/crypto/qat/qat_common/adf_transport_debug.c 	struct adf_etr_ring_data *ring = sfile->private;
ring               65 drivers/crypto/qat/qat_common/adf_transport_debug.c 	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
ring               66 drivers/crypto/qat/qat_common/adf_transport_debug.c 		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
ring               69 drivers/crypto/qat/qat_common/adf_transport_debug.c 	return ring->base_addr +
ring               70 drivers/crypto/qat/qat_common/adf_transport_debug.c 		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
ring               75 drivers/crypto/qat/qat_common/adf_transport_debug.c 	struct adf_etr_ring_data *ring = sfile->private;
ring               77 drivers/crypto/qat/qat_common/adf_transport_debug.c 	if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
ring               78 drivers/crypto/qat/qat_common/adf_transport_debug.c 		     ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
ring               81 drivers/crypto/qat/qat_common/adf_transport_debug.c 	return ring->base_addr +
ring               82 drivers/crypto/qat/qat_common/adf_transport_debug.c 		(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
ring               87 drivers/crypto/qat/qat_common/adf_transport_debug.c 	struct adf_etr_ring_data *ring = sfile->private;
ring               88 drivers/crypto/qat/qat_common/adf_transport_debug.c 	struct adf_etr_bank_data *bank = ring->bank;
ring               89 drivers/crypto/qat/qat_common/adf_transport_debug.c 	void __iomem *csr = ring->bank->csr_addr;
ring               95 drivers/crypto/qat/qat_common/adf_transport_debug.c 					  ring->ring_number);
ring               97 drivers/crypto/qat/qat_common/adf_transport_debug.c 					  ring->ring_number);
ring              102 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ring->ring_debug->ring_name);
ring              104 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ring->ring_number, ring->bank->bank_number);
ring              106 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   head, tail, (empty & 1 << ring->ring_number)
ring              107 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   >> ring->ring_number);
ring              109 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
ring              110 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
ring              115 drivers/crypto/qat/qat_common/adf_transport_debug.c 		     v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
ring              150 drivers/crypto/qat/qat_common/adf_transport_debug.c int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
ring              161 drivers/crypto/qat/qat_common/adf_transport_debug.c 		 ring->ring_number);
ring              164 drivers/crypto/qat/qat_common/adf_transport_debug.c 						ring->bank->bank_debug_dir,
ring              165 drivers/crypto/qat/qat_common/adf_transport_debug.c 						ring, &adf_ring_debug_fops);
ring              166 drivers/crypto/qat/qat_common/adf_transport_debug.c 	ring->ring_debug = ring_debug;
ring              170 drivers/crypto/qat/qat_common/adf_transport_debug.c void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
ring              172 drivers/crypto/qat/qat_common/adf_transport_debug.c 	if (ring->ring_debug) {
ring              173 drivers/crypto/qat/qat_common/adf_transport_debug.c 		debugfs_remove(ring->ring_debug->debug);
ring              174 drivers/crypto/qat/qat_common/adf_transport_debug.c 		kfree(ring->ring_debug);
ring              175 drivers/crypto/qat/qat_common/adf_transport_debug.c 		ring->ring_debug = NULL;
ring              208 drivers/crypto/qat/qat_common/adf_transport_debug.c 		struct adf_etr_ring_data *ring = &bank->rings[ring_id];
ring              216 drivers/crypto/qat/qat_common/adf_transport_debug.c 					  ring->ring_number);
ring              218 drivers/crypto/qat/qat_common/adf_transport_debug.c 					  ring->ring_number);
ring              223 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ring->ring_number, head, tail,
ring              224 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   (empty & 1 << ring->ring_number) >>
ring              225 drivers/crypto/qat/qat_common/adf_transport_debug.c 			   ring->ring_number);
ring               99 drivers/crypto/qat/qat_common/adf_transport_internal.h int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
ring              100 drivers/crypto/qat/qat_common/adf_transport_internal.h void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
ring              109 drivers/crypto/qat/qat_common/adf_transport_internal.h static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
ring              115 drivers/crypto/qat/qat_common/adf_transport_internal.h #define adf_ring_debugfs_rm(ring) do {} while (0)
ring              364 drivers/dma/ioat/dma.c 	struct ioat_ring_ent **ring;
ring              369 drivers/dma/ioat/dma.c 	ring = kcalloc(total_descs, sizeof(*ring), flags);
ring              370 drivers/dma/ioat/dma.c 	if (!ring)
ring              392 drivers/dma/ioat/dma.c 			kfree(ring);
ring              398 drivers/dma/ioat/dma.c 		ring[i] = ioat_alloc_ring_ent(c, i, flags);
ring              399 drivers/dma/ioat/dma.c 		if (!ring[i]) {
ring              403 drivers/dma/ioat/dma.c 				ioat_free_ring_ent(ring[i], c);
ring              415 drivers/dma/ioat/dma.c 			kfree(ring);
ring              418 drivers/dma/ioat/dma.c 		set_desc_id(ring[i], i);
ring              423 drivers/dma/ioat/dma.c 		struct ioat_ring_ent *next = ring[i+1];
ring              424 drivers/dma/ioat/dma.c 		struct ioat_dma_descriptor *hw = ring[i]->hw;
ring              428 drivers/dma/ioat/dma.c 	ring[i]->hw->next = ring[0]->txd.phys;
ring              441 drivers/dma/ioat/dma.c 	return ring;
ring              129 drivers/dma/ioat/dma.h 	struct ioat_ring_ent **ring;
ring              344 drivers/dma/ioat/dma.h 	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
ring              623 drivers/dma/ioat/init.c 	if (!ioat_chan->ring)
ring              662 drivers/dma/ioat/init.c 	kfree(ioat_chan->ring);
ring              663 drivers/dma/ioat/init.c 	ioat_chan->ring = NULL;
ring              681 drivers/dma/ioat/init.c 	struct ioat_ring_ent **ring;
ring              688 drivers/dma/ioat/init.c 	if (ioat_chan->ring)
ring              708 drivers/dma/ioat/init.c 	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
ring              709 drivers/dma/ioat/init.c 	if (!ring)
ring              714 drivers/dma/ioat/init.c 	ioat_chan->ring = ring;
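The ioat allocation above builds a power-of-two array of software descriptors and chains their hardware halves into a physical circle, letting the engine follow next pointers while software wraps indices with a single mask, as the dma.h lookup above shows. A sketch with fake bus addresses; the failure unwinding the driver performs with ioat_free_ring_ent is elided:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_ORDER	4
#define RING_SIZE	(1u << RING_ORDER)

struct hw_desc { uint64_t next; /* bus address of next descriptor */ };
struct ring_ent { struct hw_desc *hw; uint64_t phys; int id; };

static struct ring_ent **alloc_ring(void)
{
	struct ring_ent **ring = calloc(RING_SIZE, sizeof(*ring));
	uint32_t i;

	if (!ring)
		return NULL;

	for (i = 0; i < RING_SIZE; i++) {
		/* allocation-failure cleanup elided in this sketch */
		ring[i] = calloc(1, sizeof(*ring[i]));
		ring[i]->hw = calloc(1, sizeof(*ring[i]->hw));
		ring[i]->phys = i * 0x40;	/* pretend bus address */
		ring[i]->id = i;
	}

	/* chain the hardware descriptors into a circle */
	for (i = 0; i < RING_SIZE - 1; i++)
		ring[i]->hw->next = ring[i + 1]->phys;
	ring[i]->hw->next = ring[0]->phys;

	return ring;
}

static struct ring_ent *get_ring_ent(struct ring_ent **ring, uint64_t idx)
{
	/* power-of-two size makes the wrap a single AND */
	return ring[idx & (RING_SIZE - 1)];
}

int main(void)
{
	struct ring_ent **ring = alloc_ring();

	printf("ent for issue #%d is slot %d\n", 37,
	       get_ring_ent(ring, 37)->id);	/* 37 & 15 == 5 */
	return 0;
}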
ring              186 drivers/dma/mediatek/mtk-hsdma.c 	struct mtk_hsdma_ring ring;
ring              318 drivers/dma/mediatek/mtk-hsdma.c 	struct mtk_hsdma_ring *ring = &pc->ring;
ring              327 drivers/dma/mediatek/mtk-hsdma.c 	pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
ring              328 drivers/dma/mediatek/mtk-hsdma.c 	ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
ring              329 drivers/dma/mediatek/mtk-hsdma.c 				       &ring->tphys, GFP_NOWAIT);
ring              330 drivers/dma/mediatek/mtk-hsdma.c 	if (!ring->txd)
ring              333 drivers/dma/mediatek/mtk-hsdma.c 	ring->rxd = &ring->txd[MTK_DMA_SIZE];
ring              334 drivers/dma/mediatek/mtk-hsdma.c 	ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
ring              335 drivers/dma/mediatek/mtk-hsdma.c 	ring->cur_tptr = 0;
ring              336 drivers/dma/mediatek/mtk-hsdma.c 	ring->cur_rptr = MTK_DMA_SIZE - 1;
ring              338 drivers/dma/mediatek/mtk-hsdma.c 	ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
ring              339 drivers/dma/mediatek/mtk-hsdma.c 	if (!ring->cb) {
ring              359 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
ring              361 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
ring              363 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
ring              365 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
ring              380 drivers/dma/mediatek/mtk-hsdma.c 	kfree(ring->cb);
ring              384 drivers/dma/mediatek/mtk-hsdma.c 			  pc->sz_ring, ring->txd, ring->tphys);
ring              391 drivers/dma/mediatek/mtk-hsdma.c 	struct mtk_hsdma_ring *ring = &pc->ring;
ring              406 drivers/dma/mediatek/mtk-hsdma.c 	kfree(ring->cb);
ring              409 drivers/dma/mediatek/mtk-hsdma.c 			  pc->sz_ring, ring->txd, ring->tphys);
ring              416 drivers/dma/mediatek/mtk-hsdma.c 	struct mtk_hsdma_ring *ring = &pc->ring;
ring              450 drivers/dma/mediatek/mtk-hsdma.c 		txd = &ring->txd[ring->cur_tptr];
ring              455 drivers/dma/mediatek/mtk-hsdma.c 		rxd = &ring->rxd[ring->cur_tptr];
ring              460 drivers/dma/mediatek/mtk-hsdma.c 		ring->cb[ring->cur_tptr].vd = &hvd->vd;
ring              463 drivers/dma/mediatek/mtk-hsdma.c 		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
ring              477 drivers/dma/mediatek/mtk-hsdma.c 		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
ring              478 drivers/dma/mediatek/mtk-hsdma.c 		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
ring              488 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
ring              560 drivers/dma/mediatek/mtk-hsdma.c 		next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
ring              562 drivers/dma/mediatek/mtk-hsdma.c 		rxd = &pc->ring.rxd[next];
ring              572 drivers/dma/mediatek/mtk-hsdma.c 		cb = &pc->ring.cb[next];
ring              612 drivers/dma/mediatek/mtk-hsdma.c 		pc->ring.cur_rptr = next;
ring              622 drivers/dma/mediatek/mtk-hsdma.c 	mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
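The mtk-hsdma setup above makes one coherent allocation of 2 * MTK_DMA_SIZE descriptors and carves it in half: rxd is simply &txd[MTK_DMA_SIZE], rphys trails tphys by the same byte offset, and cur_rptr starts at MTK_DMA_SIZE - 1 so the RX CPU pointer sits one slot behind the first descriptor the hardware will complete. A sketch of the arithmetic; the descriptor layout is a stand-in and calloc stands in for dma_alloc_coherent:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MTK_DMA_SIZE 8	/* stand-in depth */

struct hsdma_desc { uint32_t desc1, desc2, desc3, desc4; };

struct hsdma_ring_sketch {
	struct hsdma_desc *txd, *rxd;
	uint64_t tphys, rphys;		/* stand-ins for DMA addresses */
	uint16_t cur_tptr, cur_rptr;
};

int main(void)
{
	struct hsdma_ring_sketch ring;
	size_t sz = 2 * MTK_DMA_SIZE * sizeof(*ring.txd);

	ring.txd = calloc(1, sz);
	ring.tphys = (uint64_t)(uintptr_t)ring.txd;

	ring.rxd = &ring.txd[MTK_DMA_SIZE];
	ring.rphys = ring.tphys + MTK_DMA_SIZE * sizeof(*ring.txd);
	ring.cur_tptr = 0;
	ring.cur_rptr = MTK_DMA_SIZE - 1;

	printf("tx at %#llx, rx at %#llx (offset %zu bytes)\n",
	       (unsigned long long)ring.tphys,
	       (unsigned long long)ring.rphys,
	       (size_t)(ring.rphys - ring.tphys));
	free(ring.txd);
	return 0;
}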
ring              594 drivers/dma/xgene-dma.c 	struct xgene_dma_ring *ring = &chan->tx_ring;
ring              598 drivers/dma/xgene-dma.c 	desc_hw = &ring->desc_hw[ring->head];
ring              604 drivers/dma/xgene-dma.c 	if (++ring->head == ring->slots)
ring              605 drivers/dma/xgene-dma.c 		ring->head = 0;
ring              615 drivers/dma/xgene-dma.c 		desc_hw = &ring->desc_hw[ring->head];
ring              617 drivers/dma/xgene-dma.c 		if (++ring->head == ring->slots)
ring              618 drivers/dma/xgene-dma.c 			ring->head = 0;
ring              629 drivers/dma/xgene-dma.c 		  2 : 1, ring->cmd);
ring              686 drivers/dma/xgene-dma.c 	struct xgene_dma_ring *ring = &chan->rx_ring;
ring              702 drivers/dma/xgene-dma.c 		desc_hw = &ring->desc_hw[ring->head];
ring              709 drivers/dma/xgene-dma.c 		if (++ring->head == ring->slots)
ring              710 drivers/dma/xgene-dma.c 			ring->head = 0;
ring              737 drivers/dma/xgene-dma.c 		iowrite32(-1, ring->cmd);
ring             1029 drivers/dma/xgene-dma.c static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
ring             1033 drivers/dma/xgene-dma.c 	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
ring             1036 drivers/dma/xgene-dma.c 		iowrite32(ring->state[i], ring->pdma->csr_ring +
ring             1040 drivers/dma/xgene-dma.c static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
ring             1042 drivers/dma/xgene-dma.c 	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
ring             1043 drivers/dma/xgene-dma.c 	xgene_dma_wr_ring_state(ring);
ring             1046 drivers/dma/xgene-dma.c static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
ring             1048 drivers/dma/xgene-dma.c 	void *ring_cfg = ring->state;
ring             1049 drivers/dma/xgene-dma.c 	u64 addr = ring->desc_paddr;
ring             1052 drivers/dma/xgene-dma.c 	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
ring             1055 drivers/dma/xgene-dma.c 	xgene_dma_clr_ring_state(ring);
ring             1060 drivers/dma/xgene-dma.c 	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
ring             1073 drivers/dma/xgene-dma.c 	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
ring             1076 drivers/dma/xgene-dma.c 	xgene_dma_wr_ring_state(ring);
ring             1079 drivers/dma/xgene-dma.c 	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
ring             1080 drivers/dma/xgene-dma.c 		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);
ring             1083 drivers/dma/xgene-dma.c 	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
ring             1084 drivers/dma/xgene-dma.c 		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
ring             1086 drivers/dma/xgene-dma.c 	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
ring             1090 drivers/dma/xgene-dma.c 	for (i = 0; i < ring->slots; i++) {
ring             1093 drivers/dma/xgene-dma.c 		desc = &ring->desc_hw[i];
ring             1098 drivers/dma/xgene-dma.c 	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
ring             1099 drivers/dma/xgene-dma.c 	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
ring             1100 drivers/dma/xgene-dma.c 	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
ring             1103 drivers/dma/xgene-dma.c static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
ring             1107 drivers/dma/xgene-dma.c 	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
ring             1109 drivers/dma/xgene-dma.c 		val = ioread32(ring->pdma->csr_ring +
ring             1111 drivers/dma/xgene-dma.c 		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
ring             1112 drivers/dma/xgene-dma.c 		iowrite32(val, ring->pdma->csr_ring +
ring             1117 drivers/dma/xgene-dma.c 	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
ring             1118 drivers/dma/xgene-dma.c 	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
ring             1120 drivers/dma/xgene-dma.c 	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
ring             1121 drivers/dma/xgene-dma.c 	xgene_dma_clr_ring_state(ring);
ring             1124 drivers/dma/xgene-dma.c static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
ring             1126 drivers/dma/xgene-dma.c 	ring->cmd_base = ring->pdma->csr_ring_cmd +
ring             1127 drivers/dma/xgene-dma.c 				XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
ring             1130 drivers/dma/xgene-dma.c 	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
ring             1162 drivers/dma/xgene-dma.c static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
ring             1165 drivers/dma/xgene-dma.c 	xgene_dma_clear_ring(ring);
ring             1168 drivers/dma/xgene-dma.c 	if (ring->desc_vaddr) {
ring             1169 drivers/dma/xgene-dma.c 		dma_free_coherent(ring->pdma->dev, ring->size,
ring             1170 drivers/dma/xgene-dma.c 				  ring->desc_vaddr, ring->desc_paddr);
ring             1171 drivers/dma/xgene-dma.c 		ring->desc_vaddr = NULL;
ring             1182 drivers/dma/xgene-dma.c 				     struct xgene_dma_ring *ring,
ring             1188 drivers/dma/xgene-dma.c 	ring->pdma = chan->pdma;
ring             1189 drivers/dma/xgene-dma.c 	ring->cfgsize = cfgsize;
ring             1190 drivers/dma/xgene-dma.c 	ring->num = chan->pdma->ring_num++;
ring             1191 drivers/dma/xgene-dma.c 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
ring             1196 drivers/dma/xgene-dma.c 	ring->size = ret;
ring             1199 drivers/dma/xgene-dma.c 	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
ring             1200 drivers/dma/xgene-dma.c 					      &ring->desc_paddr, GFP_KERNEL);
ring             1201 drivers/dma/xgene-dma.c 	if (!ring->desc_vaddr) {
ring             1207 drivers/dma/xgene-dma.c 	xgene_dma_set_ring_cmd(ring);
ring             1208 drivers/dma/xgene-dma.c 	xgene_dma_setup_ring(ring);
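The X-Gene submit path above claims the slot at head, wraps head explicitly against slots, and finally writes the number of descriptors just queued (1, or 2 for long transfers) to the ring command register. A software model with the MMIO write simulated by a plain variable:

#include <stdint.h>
#include <stdio.h>

#define RING_SLOTS 32	/* stand-in for ring->slots */

struct dma_desc { uint64_t w[4]; };

static struct dma_desc desc_hw[RING_SLOTS];
static uint32_t head;
static uint32_t cmd_reg;	/* stand-in for the ring->cmd MMIO register */

static struct dma_desc *next_slot(void)
{
	struct dma_desc *d = &desc_hw[head];

	if (++head == RING_SLOTS)
		head = 0;
	return d;
}

static void submit(int need_two_slots)
{
	struct dma_desc *d = next_slot();

	d->w[0] = 0x1;			/* fill the first descriptor */
	if (need_two_slots) {		/* long transfers take a 2nd slot */
		d = next_slot();
		d->w[0] = 0x2;
	}
	cmd_reg = need_two_slots ? 2 : 1; /* kernel: iowrite32(..., ring->cmd) */
}

int main(void)
{
	submit(0);
	submit(1);
	printf("head now %u, last doorbell %u\n", head, cmd_reg);
	return 0;
}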
ring              434 drivers/gpu/drm/amd/amdgpu/amdgpu.h int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
ring              568 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
ring              571 drivers/gpu/drm/amd/amdgpu/amdgpu.h 			       struct amdgpu_ring *ring);
ring              686 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
ring              169 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 		if (adev->gfx.kiq.ring.sched.ready)
ring              171 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 							  adev->gfx.kiq.ring.me - 1,
ring              172 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 							  adev->gfx.kiq.ring.pipe,
ring              173 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 							  adev->gfx.kiq.ring.queue),
ring              607 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 	struct amdgpu_ring *ring;
ring              613 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 		ring = &adev->gfx.compute_ring[0];
ring              616 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 		ring = &adev->sdma.instance[0].ring;
ring              619 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 		ring = &adev->sdma.instance[1].ring;
ring              640 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
ring              808 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
ring              811 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
ring              812 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
ring              813 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	amdgpu_ring_write(ring,
ring              816 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	amdgpu_fence_emit_polling(ring, &seq);
ring              817 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	amdgpu_ring_commit(ring);
ring              820 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
ring              833 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
ring              835 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 	if (amdgpu_emu_mode == 0 && ring->sched.ready)
ring              647 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
ring              650 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/
ring              651 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
ring              652 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	amdgpu_ring_write(ring,
ring              657 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	amdgpu_fence_emit_polling(ring, &seq);
ring              658 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	amdgpu_ring_commit(ring);
ring              661 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
ring              674 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
ring              683 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 	if (ring->sched.ready)
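Both KIQ invalidate_tlbs paths above follow one shape: reserve ring space, write the packet, emit a fence with a fresh sequence number, commit the ring, then poll until the engine writes that sequence back to memory. A minimal model of the emit-then-poll handshake; the packet contents are omitted, and the signed sequence comparison is an assumption of this sketch, not code shown in the excerpt:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fence_cpu_addr;	/* engine writes seq here */
static uint32_t sync_seq;

static uint32_t fence_emit_polling(void)
{
	return ++sync_seq;	/* the driver also writes a fence packet */
}

static int fence_wait_polling(uint32_t seq, unsigned timeout)
{
	while (timeout--) {
		/* signed distance tolerates 32-bit sequence wraparound */
		if ((int32_t)(fence_cpu_addr - seq) >= 0)
			return 0;
		/* the kernel delays a few usecs between reads */
	}
	return -1;	/* usec_timeout expired */
}

int main(void)
{
	uint32_t seq = fence_emit_polling();

	fence_cpu_addr = seq;	/* pretend the KIQ executed the fence */
	printf("wait -> %d\n", fence_wait_polling(seq, 1000));
	return 0;
}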
ring               41 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring               42 drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
ring              786 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
ring              796 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
ring              837 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			if (ring->funcs->parse_cs) {
ring              841 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				r = amdgpu_ring_parse_cs(ring, p, j);
ring              846 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
ring              946 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring;
ring              977 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					  chunk_ib->ip_instance, chunk_ib->ring,
ring              991 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		ring = to_amdgpu_ring(entity->rq->sched);
ring              992 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
ring             1007 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(parser->entity->rq->sched);
ring             1008 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	if (parser->job->uf_addr && ring->funcs->no_user_fence)
ring             1037 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 					  deps[i].ring, &entity);
ring             1277 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring;
ring             1331 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(entity->rq->sched);
ring             1332 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	amdgpu_ring_priority_get(ring, priority);
ring             1430 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				  wait->in.ring, &entity);
ring             1478 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 				  user->ring, &entity);
ring              141 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 				rings[j] = &adev->sdma.instance[j].ring;
ring              145 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 			rings[0] = &adev->uvd.inst[0].ring;
ring              149 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 			rings[0] = &adev->vce.ring[0];
ring              229 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 			  u32 ring, struct drm_sched_entity **entity)
ring              242 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
ring              243 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
ring              247 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 	*entity = &ctx->entities[hw_ip][ring].entity;
ring               69 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h 			  u32 ring, struct drm_sched_entity **entity);
ring              867 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              869 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		if (!ring || !ring->sched.thread)
ring              871 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		kthread_park(ring->sched.thread);
ring              883 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              885 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		if (!ring || !ring->sched.thread)
ring              887 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		kthread_unpark(ring->sched.thread);
ring              932 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
ring              935 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
ring              938 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	last_seq = atomic_read(&ring->fence_drv.last_seq);
ring              939 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	sync_seq = ring->fence_drv.sync_seq;
ring              990 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
ring              996 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
ring              997 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct drm_gpu_scheduler *sched = &ring->sched;
ring              999 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
ring             1023 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	struct amdgpu_ring *ring;
ring             1030 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	ring = adev->rings[val];
ring             1032 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
ring             1036 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
ring             1039 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	length = ring->fence_drv.num_fences_mask + 1;
ring             1048 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	kthread_park(ring->sched.thread);
ring             1053 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	r = amdgpu_ring_preempt_ib(ring);
ring             1055 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		DRM_WARN("failed to preempt ring %d\n", ring->idx);
ring             1059 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	amdgpu_fence_process(ring);
ring             1061 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	if (atomic_read(&ring->fence_drv.last_seq) !=
ring             1062 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	    ring->fence_drv.sync_seq) {
ring             1063 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		DRM_INFO("ring %d was preempted\n", ring->idx);
ring             1065 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_ib_preempt_mark_partial_job(ring);
ring             1068 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_ib_preempt_fences_swap(ring, fences);
ring             1070 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_fence_driver_force_completion(ring);
ring             1073 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_ib_preempt_job_recovery(&ring->sched);
ring             1076 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 		amdgpu_fence_wait_empty(ring);
ring             1084 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	kthread_unpark(ring->sched.thread);
ring             3560 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		struct amdgpu_ring *ring = adev->rings[i];
ring             3562 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		if (!ring || !ring->sched.thread)
ring             3566 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 		amdgpu_fence_driver_force_completion(ring);
ring             3845 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			struct amdgpu_ring *ring = tmp_adev->rings[i];
ring             3847 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (!ring || !ring->sched.thread)
ring             3850 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
ring             3918 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			struct amdgpu_ring *ring = tmp_adev->rings[i];
ring             3920 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			if (!ring || !ring->sched.thread)
ring             3925 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 				drm_sched_resubmit_jobs(&ring->sched);
ring             3927 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
ring               56 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_ring		*ring;
ring               98 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
ring              100 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
ring              114 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
ring              116 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
ring              136 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
ring              139 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_device *adev = ring->adev;
ring              149 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	seq = ++ring->fence_drv.sync_seq;
ring              150 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	fence->ring = ring;
ring              152 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		       &ring->fence_drv.lock,
ring              153 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		       adev->fence_context + ring->idx,
ring              155 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
ring              158 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
ring              194 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
ring              201 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	seq = ++ring->fence_drv.sync_seq;
ring              202 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
ring              217 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
ring              219 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	mod_timer(&ring->fence_drv.fallback_timer,
ring              234 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c bool amdgpu_fence_process(struct amdgpu_ring *ring)
ring              236 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
ring              241 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		last_seq = atomic_read(&ring->fence_drv.last_seq);
ring              242 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		seq = amdgpu_fence_read(ring);
ring              246 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (del_timer(&ring->fence_drv.fallback_timer) &&
ring              247 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	    seq != ring->fence_drv.sync_seq)
ring              248 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_fence_schedule_fallback(ring);
ring              291 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_ring *ring = from_timer(ring, t,
ring              294 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (amdgpu_fence_process(ring))
ring              295 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
ring              307 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
ring              309 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
ring              316 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
ring              340 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
ring              347 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		seq = amdgpu_fence_read(ring);
ring              363 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
ring              370 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_fence_process(ring);
ring              372 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	emitted -= atomic_read(&ring->fence_drv.last_seq);
ring              373 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	emitted += READ_ONCE(ring->fence_drv.sync_seq);
ring              390 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring              394 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_device *adev = ring->adev;
ring              397 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
ring              398 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
ring              399 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
ring              403 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
ring              404 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
ring              406 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
ring              409 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.irq_src = irq_src;
ring              410 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.irq_type = irq_type;
ring              411 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.initialized = true;
ring              414 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		      "0x%016llx, cpu addr 0x%p\n", ring->name,
ring              415 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
ring              429 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
ring              432 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_device *adev = ring->adev;
ring              443 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.cpu_addr = NULL;
ring              444 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.gpu_addr = 0;
ring              445 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.sync_seq = 0;
ring              446 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	atomic_set(&ring->fence_drv.last_seq, 0);
ring              447 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.initialized = false;
ring              449 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring              451 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
ring              452 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	spin_lock_init(&ring->fence_drv.lock);
ring              453 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
ring              455 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (!ring->fence_drv.fences)
ring              459 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
ring              460 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		switch (ring->funcs->type) {
ring              473 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			if (!amdgpu_sriov_vf(ring->adev))
ring              486 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
ring              488 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 				   timeout, ring->name);
ring              491 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 				  ring->name);
ring              533 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              535 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (!ring || !ring->fence_drv.initialized)
ring              537 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		r = amdgpu_fence_wait_empty(ring);
ring              540 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			amdgpu_fence_driver_force_completion(ring);
ring              542 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring              543 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			       ring->fence_drv.irq_type);
ring              544 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		drm_sched_fini(&ring->sched);
ring              545 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		del_timer_sync(&ring->fence_drv.fallback_timer);
ring              546 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
ring              547 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			dma_fence_put(ring->fence_drv.fences[j]);
ring              548 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		kfree(ring->fence_drv.fences);
ring              549 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.fences = NULL;
ring              550 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		ring->fence_drv.initialized = false;
ring              567 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              568 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (!ring || !ring->fence_drv.initialized)
ring              572 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		r = amdgpu_fence_wait_empty(ring);
ring              575 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			amdgpu_fence_driver_force_completion(ring);
ring              579 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring              580 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			       ring->fence_drv.irq_type);
ring              601 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              602 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (!ring || !ring->fence_drv.initialized)
ring              606 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring              607 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			       ring->fence_drv.irq_type);
ring              617 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
ring              619 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
ring              620 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	amdgpu_fence_process(ring);
ring              635 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	return (const char *)fence->ring->name;
ring              649 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	struct amdgpu_ring *ring = fence->ring;
ring              651 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	if (!timer_pending(&ring->fence_drv.fallback_timer))
ring              652 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_fence_schedule_fallback(ring);
ring              654 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
ring              705 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              706 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (!ring || !ring->fence_drv.initialized)
ring              709 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		amdgpu_fence_process(ring);
ring              711 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
ring              713 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			   atomic_read(&ring->fence_drv.last_seq));
ring              715 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			   ring->fence_drv.sync_seq);
ring              717 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
ring              718 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
ring              720 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 				   le32_to_cpu(*ring->trail_fence_cpu_addr));
ring              722 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 				   ring->trail_seq);
ring              725 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
ring              730 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
ring              733 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
ring              736 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
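
The amdgpu_fence.c excerpts above revolve around one idea: the driver tracks the last sequence number it emitted (sync_seq) and the last one the hardware reported back (last_seq), and forcing completion simply writes sync_seq back as if the hardware had finished everything. Below is a minimal, runnable userspace model of that bookkeeping; the toy_* names are illustrative stand-ins, not the driver's structures.

	/*
	 * Toy model of sequence-number fencing (illustrative only).
	 * A fence is done once last_seq has caught up with its number;
	 * "force completion" jumps last_seq straight to sync_seq, which
	 * is what amdgpu_fence_driver_force_completion() effectively does.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_fence_drv {
		uint32_t sync_seq;	/* last seqno emitted to the ring */
		uint32_t last_seq;	/* last seqno completed by "hardware" */
	};

	static uint32_t toy_fence_emit(struct toy_fence_drv *drv)
	{
		return ++drv->sync_seq;
	}

	static bool toy_fence_signaled(const struct toy_fence_drv *drv,
				       uint32_t seq)
	{
		/* signed distance tolerates 32-bit wraparound */
		return (int32_t)(drv->last_seq - seq) >= 0;
	}

	static void toy_fence_force_completion(struct toy_fence_drv *drv)
	{
		drv->last_seq = drv->sync_seq;	/* pretend all work finished */
	}

	int main(void)
	{
		struct toy_fence_drv drv = { 0 };
		uint32_t f = toy_fence_emit(&drv);

		printf("signaled before: %d\n", toy_fence_signaled(&drv, f));
		toy_fence_force_completion(&drv);
		printf("signaled after:  %d\n", toy_fence_signaled(&drv, f));
		return 0;
	}
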
ring              257 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				  struct amdgpu_ring *ring)
ring              280 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring->me = mec + 1;
ring              281 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring->pipe = pipe;
ring              282 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring->queue = queue;
ring              292 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			     struct amdgpu_ring *ring,
ring              304 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring->adev = NULL;
ring              305 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring->ring_obj = NULL;
ring              306 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring->use_doorbell = true;
ring              307 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring->doorbell_index = adev->doorbell_index.kiq;
ring              309 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	r = amdgpu_gfx_kiq_acquire(adev, ring);
ring              313 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring              314 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring              315 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring              323 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
ring              326 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
ring              327 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	amdgpu_ring_fini(ring);
ring              367 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	struct amdgpu_ring *ring = NULL;
ring              371 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring = &adev->gfx.kiq.ring;
ring              372 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	if (!ring->mqd_obj) {
ring              379 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
ring              380 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
ring              389 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
ring              395 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			ring = &adev->gfx.gfx_ring[i];
ring              396 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			if (!ring->mqd_obj) {
ring              398 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
ring              399 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
ring              408 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
ring              415 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring = &adev->gfx.compute_ring[i];
ring              416 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		if (!ring->mqd_obj) {
ring              418 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
ring              419 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
ring              428 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
ring              437 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	struct amdgpu_ring *ring = NULL;
ring              442 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			ring = &adev->gfx.gfx_ring[i];
ring              444 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			amdgpu_bo_free_kernel(&ring->mqd_obj,
ring              445 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 					      &ring->mqd_gpu_addr,
ring              446 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 					      &ring->mqd_ptr);
ring              451 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		ring = &adev->gfx.compute_ring[i];
ring              453 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		amdgpu_bo_free_kernel(&ring->mqd_obj,
ring              454 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				      &ring->mqd_gpu_addr,
ring              455 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				      &ring->mqd_ptr);
ring              458 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	ring = &adev->gfx.kiq.ring;
ring              462 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	amdgpu_bo_free_kernel(&ring->mqd_obj,
ring              463 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			      &ring->mqd_gpu_addr,
ring              464 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			      &ring->mqd_ptr);
ring              470 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	struct amdgpu_ring *kiq_ring = &kiq->ring;
ring              490 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring               70 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 					struct amdgpu_ring *ring);
ring               72 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 				 struct amdgpu_ring *ring,
ring               76 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 					struct amdgpu_ring *ring,
ring               90 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 	struct amdgpu_ring	ring;
ring              356 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h 			     struct amdgpu_ring *ring,
ring              359 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
ring               96 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
ring               99 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h 	void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
ring              122 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
ring              126 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	struct amdgpu_device *adev = ring->adev;
ring              153 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (!ring->sched.ready) {
ring              154 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
ring              163 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring              164 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		ring->funcs->emit_ib_size;
ring              166 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	r = amdgpu_ring_alloc(ring, alloc_size);
ring              172 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	need_ctx_switch = ring->current_ctx != fence_ctx;
ring              173 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (ring->funcs->emit_pipeline_sync && job &&
ring              176 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
ring              185 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (ring->funcs->insert_start)
ring              186 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		ring->funcs->insert_start(ring);
ring              189 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		r = amdgpu_vm_flush(ring, job, need_pipe_sync);
ring              191 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			amdgpu_ring_undo(ring);
ring              196 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (job && ring->funcs->init_cond_exec)
ring              197 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		patch_offset = amdgpu_ring_init_cond_exec(ring);
ring              203 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		if (ring->funcs->emit_hdp_flush)
ring              204 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			amdgpu_ring_emit_hdp_flush(ring);
ring              206 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			amdgpu_asic_flush_hdp(adev, ring);
ring              212 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	skip_preamble = ring->current_ctx == fence_ctx;
ring              213 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (job && ring->funcs->emit_cntxcntl) {
ring              216 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_emit_cntxcntl(ring, status);
ring              230 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_emit_ib(ring, job, ib, status);
ring              234 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (ring->funcs->emit_tmz)
ring              235 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_emit_tmz(ring, false);
ring              240 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_asic_invalidate_hdp(adev, ring);
ring              247 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
ring              251 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	r = amdgpu_fence_emit(ring, f, fence_flags);
ring              255 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
ring              256 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_undo(ring);
ring              260 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (ring->funcs->insert_end)
ring              261 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		ring->funcs->insert_end(ring);
ring              263 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
ring              264 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring              266 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	ring->current_ctx = fence_ctx;
ring              267 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	if (vm && ring->funcs->emit_switch_buffer)
ring              268 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		amdgpu_ring_emit_switch_buffer(ring);
ring              269 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 	amdgpu_ring_commit(ring);
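
The amdgpu_ib_schedule() excerpts above treat the ring as a transaction: reserve space up front (amdgpu_ring_alloc, sized from emit_frame_size), emit packets, then either publish the new write pointer (amdgpu_ring_commit, which also pads to the ring's alignment) or roll back to the pointer saved at reservation time (amdgpu_ring_undo) when a step fails. A small runnable sketch of that discipline follows, assuming a power-of-two alignment; the toy_* names are illustrative.

	/* Toy reserve / emit / commit-or-undo transaction (illustrative). */
	#include <stdint.h>
	#include <stdio.h>

	struct toy_ring {
		uint64_t wptr;
		uint64_t wptr_old;
		int count_dw;		/* dwords still reserved */
	};

	static void toy_ring_alloc(struct toy_ring *r, int ndw)
	{
		r->count_dw = ndw;
		r->wptr_old = r->wptr;	/* remember the rollback point */
	}

	static void toy_ring_undo(struct toy_ring *r)
	{
		r->wptr = r->wptr_old;	/* drop everything since alloc */
	}

	static void toy_ring_commit(struct toy_ring *r, uint32_t align_mask)
	{
		/* pad so the next packet starts aligned, then publish wptr;
		 * the real code emits ->nop dwords for the padding */
		r->wptr += (align_mask + 1 - (r->wptr & align_mask)) & align_mask;
	}

	int main(void)
	{
		struct toy_ring r = { .wptr = 16 };

		toy_ring_alloc(&r, 8);
		r.wptr += 5;			/* "emit" five dwords */
		toy_ring_commit(&r, 7);		/* pads 21 up to 24 */
		printf("wptr after commit: %llu\n", (unsigned long long)r.wptr);

		toy_ring_alloc(&r, 8);
		r.wptr += 3;
		toy_ring_undo(&r);		/* failure path: back to 24 */
		printf("wptr after undo:   %llu\n", (unsigned long long)r.wptr);
		return 0;
	}
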
ring              358 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              364 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		if (!ring->sched.ready || !ring->funcs->test_ib)
ring              368 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
ring              369 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
ring              370 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
ring              371 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
ring              372 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
ring              373 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
ring              378 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		r = amdgpu_ring_test_ib(ring, tmo);
ring              381 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 				      ring->name);
ring              385 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		ring->sched.ready = false;
ring              387 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 			  ring->name, r);
ring              389 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 		if (ring == &adev->gfx.gfx_ring[0]) {
ring              197 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 				 struct amdgpu_ring *ring,
ring              201 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	struct amdgpu_device *adev = ring->adev;
ring              202 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	unsigned vmhub = ring->funcs->vmhub;
ring              208 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
ring              209 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
ring              218 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
ring              226 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
ring              227 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
ring              245 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		dma_fence_put(ring->vmid_wait);
ring              246 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		ring->vmid_wait = &array->base;
ring              266 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 				     struct amdgpu_ring *ring,
ring              272 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	struct amdgpu_device *adev = ring->adev;
ring              273 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	unsigned vmhub = ring->funcs->vmhub;
ring              274 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	uint64_t fence_context = adev->fence_context + ring->idx;
ring              294 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
ring              306 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
ring              331 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 				 struct amdgpu_ring *ring,
ring              337 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	struct amdgpu_device *adev = ring->adev;
ring              338 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	unsigned vmhub = ring->funcs->vmhub;
ring              340 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	uint64_t fence_context = adev->fence_context + ring->idx;
ring              378 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
ring              406 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
ring              410 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	struct amdgpu_device *adev = ring->adev;
ring              411 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	unsigned vmhub = ring->funcs->vmhub;
ring              418 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
ring              423 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
ring              427 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
ring              438 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 			r = amdgpu_sync_fence(ring->adev, &id->active,
ring              460 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 	trace_amdgpu_vm_grab_id(vm, ring, job);
ring               86 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
ring               58 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		if (ih->ring)
ring               64 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
ring               66 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		if (ih->ring == NULL)
ring               69 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		memset((void *)ih->ring, 0, ih->ring_size + 8);
ring               72 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
ring               74 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
ring               91 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 					    (void **)&ih->ring);
ring              118 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		if (!ih->ring)
ring              125 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 				  (void *)ih->ring, ih->gpu_addr);
ring              126 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 		ih->ring = NULL;
ring              129 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c 				      (void **)&ih->ring);
ring               44 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h 	volatile uint32_t	*ring;
ring              386 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
ring               33 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
ring               39 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
ring               45 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
ring               47 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring               48 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		  ring->fence_drv.sync_seq);
ring               52 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	if (amdgpu_device_should_recover_gpu(ring->adev))
ring               53 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		amdgpu_device_gpu_recover(ring->adev, job);
ring               55 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		drm_sched_suspend_timeout(&ring->sched);
ring              107 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
ring              115 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
ring              120 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
ring              125 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	amdgpu_ring_priority_put(ring, s_job->s_priority);
ring              146 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring;
ring              162 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	ring = to_amdgpu_ring(entity->rq->sched);
ring              163 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	amdgpu_ring_priority_get(ring, priority);
ring              168 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
ring              173 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	job->base.sched = &ring->sched;
ring              174 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
ring              186 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
ring              196 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
ring              204 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		r = amdgpu_vmid_grab(vm, ring, &job->sync,
ring              218 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
ring              230 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
ring              236 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
ring               77 drivers/gpu/drm/amd/amdgpu/amdgpu_job.h int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
ring              335 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->sdma.instance[i].ring.sched.ready)
ring              346 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->uvd.inst[i].ring.sched.ready)
ring              355 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			if (adev->vce.ring[i].sched.ready)
ring              744 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring              750 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
ring             3003 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 		struct amdgpu_ring *ring = adev->rings[i];
ring             3004 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 		if (ring && ring->sched.ready)
ring             3005 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 			amdgpu_fence_wait_empty(ring);
ring             1012 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 		memcpy(&entry, &data->ring[data->rptr],
ring             1063 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	memcpy(&data->ring[data->wptr], info->entry,
ring             1090 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	kfree(data->ring);
ring             1126 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
ring             1127 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 	if (!data->ring) {
ring              378 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h 	unsigned char *ring;
ring               52 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 				    struct amdgpu_ring *ring);
ring               53 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
ring               65 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
ring               69 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
ring               74 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (WARN_ON_ONCE(ndw > ring->max_dw))
ring               77 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->count_dw = ndw;
ring               78 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->wptr_old = ring->wptr;
ring               80 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->funcs->begin_use)
ring               81 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->funcs->begin_use(ring);
ring               93 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring               98 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		amdgpu_ring_write(ring, ring->funcs->nop);
ring              108 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring              110 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	while (ib->length_dw & ring->funcs->align_mask)
ring              111 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ib->ptr[ib->length_dw++] = ring->funcs->nop;
ring              124 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_commit(struct amdgpu_ring *ring)
ring              129 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	count = ring->funcs->align_mask + 1 -
ring              130 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		(ring->wptr & ring->funcs->align_mask);
ring              131 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	count %= ring->funcs->align_mask + 1;
ring              132 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->funcs->insert_nop(ring, count);
ring              135 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_ring_set_wptr(ring);
ring              137 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->funcs->end_use)
ring              138 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->funcs->end_use(ring);
ring              148 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_undo(struct amdgpu_ring *ring)
ring              150 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->wptr = ring->wptr_old;
ring              152 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->funcs->end_use)
ring              153 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->funcs->end_use(ring);
ring              164 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
ring              169 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (!ring->funcs->set_priority)
ring              172 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
ring              179 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	mutex_lock(&ring->priority_mutex);
ring              181 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->priority > priority)
ring              187 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 				|| atomic_read(&ring->num_jobs[i])) {
ring              188 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			ring->priority = i;
ring              189 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			ring->funcs->set_priority(ring, i);
ring              195 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	mutex_unlock(&ring->priority_mutex);
ring              206 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
ring              209 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (!ring->funcs->set_priority)
ring              212 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
ring              215 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	mutex_lock(&ring->priority_mutex);
ring              216 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (priority <= ring->priority)
ring              219 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->priority = priority;
ring              220 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->funcs->set_priority(ring, priority);
ring              223 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	mutex_unlock(&ring->priority_mutex);
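
The priority_get/put pair above refcounts pending jobs per priority level: taking a job may elevate the ring to that job's level, and dropping the last job at a level lets the ring fall back to the highest level that still has work. A hedged sketch of that scheme is below; locking and the hardware set_priority hook are elided, and the enum values are placeholders rather than the DRM scheduler's.

	/* Toy refcounted priority elevation (illustrative only). */
	#include <stdio.h>

	enum { TOY_PRIO_LOW, TOY_PRIO_NORMAL, TOY_PRIO_HIGH, TOY_PRIO_MAX };

	struct toy_prio_ring {
		int num_jobs[TOY_PRIO_MAX];	/* pending jobs per level */
		int priority;			/* level the ring runs at */
	};

	static void toy_priority_get(struct toy_prio_ring *r, int prio)
	{
		r->num_jobs[prio]++;
		if (prio > r->priority)
			r->priority = prio;	/* elevate to the busier level */
	}

	static void toy_priority_put(struct toy_prio_ring *r, int prio)
	{
		int i;

		if (--r->num_jobs[prio] > 0)
			return;
		/* last job at this level gone: fall back to highest busy level */
		for (i = TOY_PRIO_MAX - 1; i >= TOY_PRIO_LOW; i--) {
			if (r->num_jobs[i] || i == TOY_PRIO_LOW) {
				r->priority = i;
				break;
			}
		}
	}

	int main(void)
	{
		struct toy_prio_ring r = { .priority = TOY_PRIO_LOW };

		toy_priority_get(&r, TOY_PRIO_HIGH);	/* ring runs high */
		toy_priority_get(&r, TOY_PRIO_NORMAL);
		toy_priority_put(&r, TOY_PRIO_HIGH);	/* back to normal */
		printf("priority now %d\n", r.priority);
		return 0;
	}
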
ring              237 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring              250 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
ring              252 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	else if (ring == &adev->sdma.instance[0].page)
ring              255 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->adev == NULL) {
ring              259 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->adev = adev;
ring              260 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->idx = adev->num_rings++;
ring              261 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		adev->rings[ring->idx] = ring;
ring              262 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
ring              267 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
ring              273 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
ring              279 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_device_wb_get(adev, &ring->fence_offs);
ring              285 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
ring              291 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->trail_fence_gpu_addr =
ring              292 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
ring              293 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];
ring              295 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
ring              300 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring              301 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
ring              303 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	*ring->cond_exe_cpu_addr = 1;
ring              305 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
ring              311 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring              313 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->buf_mask = (ring->ring_size / 4) - 1;
ring              314 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
ring              315 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		0xffffffffffffffff : ring->buf_mask;
ring              317 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (ring->ring_obj == NULL) {
ring              318 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
ring              320 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 					    &ring->ring_obj,
ring              321 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 					    &ring->gpu_addr,
ring              322 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 					    (void **)&ring->ring);
ring              327 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		amdgpu_ring_clear_ring(ring);
ring              330 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->max_dw = max_dw;
ring              331 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
ring              332 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	mutex_init(&ring->priority_mutex);
ring              335 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		atomic_set(&ring->num_jobs[i], 0);
ring              337 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (amdgpu_debugfs_ring_init(adev, ring)) {
ring              352 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_fini(struct amdgpu_ring *ring)
ring              354 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->sched.ready = false;
ring              357 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
ring              360 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
ring              361 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
ring              363 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
ring              364 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_device_wb_free(ring->adev, ring->fence_offs);
ring              366 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_bo_free_kernel(&ring->ring_obj,
ring              367 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			      &ring->gpu_addr,
ring              368 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			      (void **)&ring->ring);
ring              370 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_debugfs_ring_fini(ring);
ring              372 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	dma_fence_put(ring->vmid_wait);
ring              373 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->vmid_wait = NULL;
ring              374 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->me = 0;
ring              376 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->adev->rings[ring->idx] = NULL;
ring              391 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
ring              395 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_ring_emit_wreg(ring, reg0, ref);
ring              396 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
ring              408 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
ring              413 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
ring              416 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	atomic_inc(&ring->adev->gpu_reset_counter);
ring              419 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		ring->funcs->soft_recovery(ring, vmid);
ring              439 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	struct amdgpu_ring *ring = file_inode(f)->i_private;
ring              449 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
ring              450 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
ring              451 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		early[2] = ring->wptr & ring->buf_mask;
ring              464 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		if (*pos >= (ring->ring_size + 12))
ring              467 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 		value = ring->ring[(*pos - 12)/4];
ring              489 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 				    struct amdgpu_ring *ring)
ring              496 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	sprintf(name, "amdgpu_ring_%s", ring->name);
ring              500 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 				  ring, &amdgpu_debugfs_ring_fops);
ring              504 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	i_size_write(ent->d_inode, ring->ring_size + 12);
ring              505 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->ent = ent;
ring              510 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
ring              513 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	debugfs_remove(ring->ent);
ring              526 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring              528 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	struct amdgpu_device *adev = ring->adev;
ring              531 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	r = amdgpu_ring_test_ring(ring);
ring              534 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			      ring->name, r);
ring              537 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 			      ring->name);
ring              539 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c 	ring->sched.ready = !r;
ring               88 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
ring               90 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
ring               92 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring               97 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
ring               99 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
ring              100 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h bool amdgpu_fence_process(struct amdgpu_ring *ring);
ring              101 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
ring              102 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
ring              105 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
ring              122 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	u64 (*get_rptr)(struct amdgpu_ring *ring);
ring              123 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	u64 (*get_wptr)(struct amdgpu_ring *ring);
ring              124 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*set_wptr)(struct amdgpu_ring *ring);
ring              132 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_ib)(struct amdgpu_ring *ring,
ring              136 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
ring              138 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
ring              139 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
ring              141 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
ring              142 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
ring              147 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	int (*test_ring)(struct amdgpu_ring *ring);
ring              148 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
ring              150 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
ring              151 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*insert_start)(struct amdgpu_ring *ring);
ring              152 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*insert_end)(struct amdgpu_ring *ring);
ring              154 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
ring              155 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
ring              156 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
ring              158 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*begin_use)(struct amdgpu_ring *ring);
ring              159 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*end_use)(struct amdgpu_ring *ring);
ring              160 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
ring              161 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
ring              162 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
ring              163 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
ring              164 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
ring              166 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
ring              169 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
ring              171 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*set_priority) (struct amdgpu_ring *ring,
ring              174 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
ring              175 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	int (*preempt_ib)(struct amdgpu_ring *ring);
ring              185 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	volatile uint32_t	*ring;
ring              256 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
ring              257 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
ring              258 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
ring              259 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_commit(struct amdgpu_ring *ring);
ring              260 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_undo(struct amdgpu_ring *ring);
ring              261 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
ring              263 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
ring              265 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring              268 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_fini(struct amdgpu_ring *ring);
ring              269 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
ring              272 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
ring              275 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
ring              278 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	*ring->cond_exe_cpu_addr = cond_exec;
ring              281 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
ring              284 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	while (i <= ring->buf_mask)
ring              285 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 		ring->ring[i++] = ring->funcs->nop;
ring              289 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
ring              291 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	if (ring->count_dw <= 0)
ring              293 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring              294 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->wptr &= ring->ptr_mask;
ring              295 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->count_dw--;
ring              298 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring              304 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	if (unlikely(ring->count_dw < count_dw))
ring              307 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	occupied = ring->wptr & ring->buf_mask;
ring              308 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	dst = (void *)&ring->ring[occupied];
ring              309 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	chunk1 = ring->buf_mask + 1 - occupied;
ring              320 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 		dst = (void *)ring->ring;
ring              324 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->wptr += count_dw;
ring              325 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->wptr &= ring->ptr_mask;
ring              326 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h 	ring->count_dw -= count_dw;
ring              329 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
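
The amdgpu_ring.h inlines above (amdgpu_ring_write and amdgpu_ring_write_multiple) show the core ring-buffer trick: the size is a power of two, so "index = wptr & buf_mask" wraps for free, and a bulk copy needs at most two memcpy() chunks when it crosses the end of the buffer. Here is a runnable model of just that arithmetic; the size and toy_* names are illustrative, not the driver's.

	/* Toy power-of-two ring with single and bulk writes (illustrative). */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define TOY_RING_DW	8			/* must be a power of two */
	#define TOY_BUF_MASK	(TOY_RING_DW - 1)

	struct toy_ring {
		uint32_t ring[TOY_RING_DW];
		uint64_t wptr;
	};

	static void toy_ring_write(struct toy_ring *r, uint32_t v)
	{
		r->ring[r->wptr++ & TOY_BUF_MASK] = v;	/* mask == wraparound */
	}

	static void toy_ring_write_multiple(struct toy_ring *r,
					    const uint32_t *src,
					    unsigned count_dw)
	{
		unsigned occupied = r->wptr & TOY_BUF_MASK;
		unsigned chunk1 = TOY_RING_DW - occupied; /* dwords before wrap */

		if (chunk1 > count_dw)
			chunk1 = count_dw;
		memcpy(&r->ring[occupied], src, chunk1 * sizeof(uint32_t));
		/* whatever is left wraps to the start of the buffer */
		memcpy(r->ring, src + chunk1,
		       (count_dw - chunk1) * sizeof(uint32_t));
		r->wptr += count_dw;
	}

	int main(void)
	{
		struct toy_ring r = { .wptr = 6 };	/* two dwords from the end */
		uint32_t pkt[4] = { 1, 2, 3, 4 };

		toy_ring_write(&r, 0);			/* lands in ring[6] slot */
		r.wptr = 6;				/* rewind for the bulk demo */
		toy_ring_write_multiple(&r, pkt, 4);	/* splits into 2 + 2 */
		printf("ring[6]=%u ring[7]=%u ring[0]=%u ring[1]=%u\n",
		       r.ring[6], r.ring[7], r.ring[0], r.ring[1]);
		return 0;
	}
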
ring               35 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
ring               37 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring               41 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 		if (ring == &adev->sdma.instance[i].ring ||
ring               42 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 		    ring == &adev->sdma.instance[i].page)
ring               48 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
ring               50 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring               54 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 		if (ring == &adev->sdma.instance[i].ring ||
ring               55 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 			ring == &adev->sdma.instance[i].page) {
ring               64 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
ring               67 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring               75 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c 	r = amdgpu_sdma_get_index_from_ring(ring, &index);
ring               48 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h 	struct amdgpu_ring	ring;
ring              104 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
ring              105 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
ring              106 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
ring               70 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		struct amdgpu_ring *ring;
ring               72 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
ring               73 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		return ring->adev == adev;
ring              259 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 					 struct amdgpu_ring *ring)
ring              275 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 		if (ring && s_fence) {
ring              279 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c 			if (s_fence->sched == &ring->sched) {
ring               51 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h 				     struct amdgpu_ring *ring);
ring               34 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring              126 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c 		r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
ring              172 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c 		r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
ring              147 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __field(u32, ring)
ring              154 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
ring              160 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __entry->bo_list, __entry->ring, __entry->dw,
ring              173 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
ring              182 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
ring              187 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __entry->seqno, __get_str(ring), __entry->num_ibs)
ring              198 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, to_amdgpu_ring(job->base.sched)->name)
ring              207 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
ring              212 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __entry->seqno, __get_str(ring), __entry->num_ibs)
ring              217 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
ring              219 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_ARGS(vm, ring, job),
ring              222 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, ring->name)
ring              223 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __field(u32, ring)
ring              232 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, ring->name)
ring              234 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->vm_hub = ring->funcs->vmhub,
ring              239 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __entry->pasid, __get_str(ring), __entry->vmid,
ring              367 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
ring              369 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 	    TP_ARGS(ring, vmid, pd_addr),
ring              371 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, ring->name)
ring              378 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, ring->name)
ring              380 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->vm_hub = ring->funcs->vmhub;
ring              384 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __get_str(ring), __entry->vmid,
ring              471 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			     __string(ring, sched_job->base.sched->name);
ring              479 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __assign_str(ring, sched_job->base.sched->name)
ring              486 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 		      __get_str(ring), __entry->id,
ring               63 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			     struct amdgpu_ring *ring,
ring              310 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring              358 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 					src_node_start, 0, ring,
ring              371 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 					dst_node_start, 1, ring,
ring              378 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		r = amdgpu_copy_buffer(ring, from, to, cur_size,
ring             1860 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct amdgpu_ring *ring;
ring             1863 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		ring = adev->mman.buffer_funcs_ring;
ring             1864 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
ring             1900 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			     struct amdgpu_ring *ring,
ring             1904 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = ring->adev;
ring             1939 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
ring             1963 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
ring             1969 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = ring->adev;
ring             1977 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (direct_submit && !ring->sched.ready) {
ring             2019 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
ring             2022 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		r = amdgpu_job_submit_direct(job, ring, fence);
ring             2044 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring             2115 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
ring               86 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
ring              311 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
ring              329 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_ring *ring;
ring              333 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	ring = &adev->uvd.inst[0].ring;
ring              334 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
ring              413 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
ring              421 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
ring              430 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 			r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
ring             1019 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ring             1022 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_device *adev = ring->adev;
ring             1036 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	if (!ring->adev->uvd.address_64_bit) {
ring             1051 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 		offset_idx = 1 + ring->me;
ring             1084 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 		r = amdgpu_job_submit_direct(job, ring, &f);
ring             1121 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring             1124 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_device *adev = ring->adev;
ring             1150 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
ring             1153 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring             1156 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_device *adev = ring->adev;
ring             1175 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
ring             1187 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
ring             1209 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
ring             1211 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct amdgpu_device *adev = ring->adev;
ring             1231 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
ring             1233 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	if (!amdgpu_sriov_vf(ring->adev))
ring             1234 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
ring             1244 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring             1249 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
ring             1253 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
ring               45 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h 	struct amdgpu_ring	ring;
ring               77 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring               79 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring               84 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
ring               85 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
ring               86 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
ring              220 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		amdgpu_ring_fini(&adev->vce.ring[i]);
ring              236 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_ring *ring;
ring              240 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	ring = &adev->vce.ring[0];
ring              241 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
ring              332 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
ring              356 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
ring              358 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_device *adev = ring->adev;
ring              388 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
ring              390 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	if (!amdgpu_sriov_vf(ring->adev))
ring              391 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
ring              404 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_ring *ring = &adev->vce.ring[0];
ring              412 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
ring              431 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              442 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              456 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	if ((ring->adev->vce.fw_version >> 24) >= 52)
ring              471 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	if ((ring->adev->vce.fw_version >> 24) >= 52) {
ring              487 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              511 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              520 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              548 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		r = amdgpu_job_submit_direct(job, ring, &f);
ring              550 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
ring             1036 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
ring             1041 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, VCE_CMD_IB);
ring             1042 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1043 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1044 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1054 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1059 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, VCE_CMD_FENCE);
ring             1060 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, addr);
ring             1061 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1062 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, seq);
ring             1063 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, VCE_CMD_TRAP);
ring             1064 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, VCE_CMD_END);
ring             1073 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
ring             1075 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct amdgpu_device *adev = ring->adev;
ring             1084 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_ring_alloc(ring, 16);
ring             1088 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	rptr = amdgpu_ring_get_rptr(ring);
ring             1090 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_write(ring, VCE_CMD_END);
ring             1091 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	amdgpu_ring_commit(ring);
ring             1094 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 		if (amdgpu_ring_get_rptr(ring) != rptr)
ring             1111 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring             1118 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	if (ring != &ring->adev->vce.ring[0])
ring             1121 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
ring             1127 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
ring             1131 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
ring               48 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
ring               61 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring               64 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring               69 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
ring               71 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring               73 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
ring               74 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
ring               75 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
ring               76 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
ring               77 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
ring               78 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
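
A condensed sketch (not the in-tree body) of the rptr-polling ring test that the amdgpu_vce.c hits above implement: reserve ring space, snapshot the read pointer, emit a harmless VCE_CMD_END, commit, then poll until the firmware consumes it. The ring helpers and VCE_CMD_END are taken from the hits above; the udelay() cadence, adev->usec_timeout bound and -ETIMEDOUT return are assumptions modelled on the usual amdgpu tests, and in-tree headers are assumed.

static int vce_ring_test_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);	/* reserve 16 dwords */
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);	/* snapshot before submit */

	amdgpu_ring_write(ring, VCE_CMD_END);	/* no-op the firmware will consume */
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			return 0;		/* rptr advanced: ring is alive */
		udelay(1);			/* assumed polling cadence */
	}
	return -ETIMEDOUT;
}
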
ring              334 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
ring              336 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              354 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 			fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
ring              361 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg))
ring              366 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
ring              368 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
ring              375 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
ring              377 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
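
The begin_use/end_use pair indexed above is the VCN idle power-management handshake: end_use re-arms a delayed work item, and begin_use has to disarm it before queueing new work. A minimal sketch of just that arm/disarm skeleton, assuming the standard cancel_delayed_work_sync() API; the in-tree begin_use additionally powers the block up and picks a power profile from the fence counts seen at amdgpu_vcn.c:354/361 above.

static void vcn_ring_begin_use_sketch(struct amdgpu_ring *ring)
{
	/* disarm any pending power-down before queueing new work;
	 * power-up and profile selection are elided here */
	cancel_delayed_work_sync(&ring->adev->vcn.idle_work);
}

static void vcn_ring_end_use_sketch(struct amdgpu_ring *ring)
{
	/* re-arm: power down if the ring stays idle for VCN_IDLE_TIMEOUT */
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
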
ring              380 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
ring              382 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              387 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
ring              388 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_ring_alloc(ring, 3);
ring              391 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
ring              392 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              393 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_commit(ring);
ring              395 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
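
A sketch of the scratch-register handshake used by amdgpu_vcn_dec_ring_test_ring() above: seed a readable register with a poison value, ask the ring to overwrite it, and poll until the new value lands. PACKET0() and the scratch9 offsets come straight from the hits above; the udelay()/usec_timeout loop is an assumption in the style of the other amdgpu ring tests.

static int vcn_dec_ring_test_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp;
	unsigned i;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			return 0;	/* ring executed the register write */
		udelay(1);
	}
	return -ETIMEDOUT;
}
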
ring              407 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
ring              411 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              436 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              459 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              462 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              490 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
ring              493 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              496 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              516 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
ring              519 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              524 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
ring              528 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
ring              543 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
ring              545 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              550 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_ring_alloc(ring, 16);
ring              554 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	rptr = amdgpu_ring_get_rptr(ring);
ring              556 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
ring              557 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_commit(ring);
ring              560 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		if (amdgpu_ring_get_rptr(ring) != rptr)
ring              571 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              582 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              609 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              624 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              635 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              662 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              677 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              683 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
ring              689 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
ring              693 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
ring              710 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
ring              712 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              717 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD);
ring              718 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_ring_alloc(ring, 3);
ring              722 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.jpeg_pitch, 0));
ring              723 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              724 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	amdgpu_ring_commit(ring);
ring              727 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
ring              739 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
ring              742 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              749 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              763 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              778 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              780 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	struct amdgpu_device *adev = ring->adev;
ring              786 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
ring              801 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 		tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch);
ring              203 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
ring              204 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
ring              206 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
ring              207 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
ring              209 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
ring              210 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
ring              212 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
ring              213 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
ring               54 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	struct amdgpu_ring *ring = &kiq->ring;
ring               56 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	BUG_ON(!ring->funcs->emit_rreg);
ring               59 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_alloc(ring, 32);
ring               60 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_emit_rreg(ring, reg);
ring               61 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_fence_emit_polling(ring, &seq);
ring               62 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_commit(ring);
ring               65 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
ring               81 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
ring              100 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	struct amdgpu_ring *ring = &kiq->ring;
ring              102 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	BUG_ON(!ring->funcs->emit_wreg);
ring              105 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_alloc(ring, 32);
ring              106 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_emit_wreg(ring, reg, v);
ring              107 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_fence_emit_polling(ring, &seq);
ring              108 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_commit(ring);
ring              111 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
ring              128 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
ring              145 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	struct amdgpu_ring *ring = &kiq->ring;
ring              151 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_alloc(ring, 32);
ring              152 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
ring              154 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_fence_emit_polling(ring, &seq);
ring              155 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	amdgpu_ring_commit(ring);
ring              158 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
ring              168 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
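
Under SR-IOV the guest cannot always touch registers directly, so amdgpu_virt.c proxies accesses through the KIQ ring and waits on a polled fence, as the hits above show. A condensed sketch of the read path; the ring lock, the timeout retry visible at lines 81/168 above, and the writeback slot name (adev->virt.reg_val_offs here) are simplifications or assumptions, not the exact in-tree code.

static uint32_t kiq_rreg_sketch(struct amdgpu_device *adev, uint32_t reg)
{
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	uint32_t seq;
	signed long r;

	BUG_ON(!ring->funcs->emit_rreg);

	amdgpu_ring_alloc(ring, 32);		/* in-tree: under the kiq ring lock */
	amdgpu_ring_emit_rreg(ring, reg);	/* CP copies the reg into a wb slot */
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		return ~0;			/* timed out; in-tree retries instead */
	return adev->wb.wb[adev->virt.reg_val_offs];	/* assumed slot name */
}
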
ring              949 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	struct amdgpu_ring *ring;
ring              966 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		ring = adev->rings[i];
ring              967 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
ring              969 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			ring->has_compute_vm_bug = has_compute_vm_bug;
ring              971 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 			ring->has_compute_vm_bug = false;
ring              984 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
ring              987 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	struct amdgpu_device *adev = ring->adev;
ring              988 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	unsigned vmhub = ring->funcs->vmhub;
ring              992 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
ring              997 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	gds_switch_needed = ring->funcs->emit_gds_switch && (
ring             1023 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
ring             1025 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	struct amdgpu_device *adev = ring->adev;
ring             1026 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	unsigned vmhub = ring->funcs->vmhub;
ring             1029 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
ring             1054 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
ring             1055 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
ring             1058 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		ring->funcs->emit_wreg;
ring             1063 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (ring->funcs->init_cond_exec)
ring             1064 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		patch_offset = amdgpu_ring_init_cond_exec(ring);
ring             1067 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_emit_pipeline_sync(ring);
ring             1070 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
ring             1071 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
ring             1075 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
ring             1078 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		r = amdgpu_fence_emit(ring, &fence, 0);
ring             1101 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
ring             1108 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
ring             1114 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (ring->funcs->patch_cond_exec)
ring             1115 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring             1118 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (ring->funcs->emit_switch_buffer) {
ring             1119 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_emit_switch_buffer(ring);
ring             1120 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		amdgpu_ring_emit_switch_buffer(ring);
ring              359 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
ring              401 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
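
The amdgpu_vm_flush() hits above follow a fixed emit order: optional cond-exec preamble, pipeline sync, TLB flush plus PASID mapping, a fence to track VMID reuse, the GDS switch, and finally the buffer switch. A heavily condensed skeleton of that ordering; the *_needed flags stand in for the in-tree predicates built at lines 1029-1058 above, and the fence bookkeeping is reduced to a put.

static int vm_flush_sketch(struct amdgpu_ring *ring, struct amdgpu_job *job,
			   bool sync_needed, bool flush_needed, bool gds_needed)
{
	struct dma_fence *fence = NULL;
	unsigned patch_offset = 0;
	int r;

	if (ring->funcs->init_cond_exec)	/* preamble the CP may skip */
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (sync_needed)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (flush_needed) {
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
		r = amdgpu_fence_emit(ring, &fence, 0);
		if (r)
			return r;
		dma_fence_put(fence);	/* in-tree: stored to gate VMID reuse */
	}

	if (ring->funcs->emit_gds_switch && gds_needed)
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	if (ring->funcs->emit_switch_buffer) {
		/* swapped twice on purpose, as at lines 1119/1120 above */
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	return 0;
}
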
ring               98 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	struct amdgpu_ring *ring;
ring              102 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
ring              105 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	amdgpu_ring_pad_ib(ring, ib);
ring             1724 drivers/gpu/drm/amd/amdgpu/cik.c static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring             1726 drivers/gpu/drm/amd/amdgpu/cik.c 	if (!ring || !ring->funcs->emit_wreg) {
ring             1730 drivers/gpu/drm/amd/amdgpu/cik.c 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
ring             1735 drivers/gpu/drm/amd/amdgpu/cik.c 			       struct amdgpu_ring *ring)
ring             1737 drivers/gpu/drm/amd/amdgpu/cik.c 	if (!ring || !ring->funcs->emit_wreg) {
ring             1741 drivers/gpu/drm/amd/amdgpu/cik.c 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
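
The cik_flush_hdp()/cik_invalidate_hdp() hits above share one pattern: when a ring that implements emit_wreg is available, the HDP register write is queued on the ring so it stays ordered with the surrounding packets; otherwise it falls back to an immediate MMIO write. A condensed sketch (adev is required because the WREG32 macro references it):

static void hdp_write_sketch(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring, u32 reg, u32 val)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32(reg, val);			/* immediate MMIO */
	else
		amdgpu_ring_emit_wreg(ring, reg, val);	/* ordered on the ring */
}
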
ring              249 drivers/gpu/drm/amd/amdgpu/cik_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              250 drivers/gpu/drm/amd/amdgpu/cik_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              251 drivers/gpu/drm/amd/amdgpu/cik_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              252 drivers/gpu/drm/amd/amdgpu/cik_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring              163 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
ring              167 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	rptr = ring->adev->wb.wb[ring->rptr_offs];
ring              179 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
ring              181 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring              183 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
ring              193 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
ring              195 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring              197 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
ring              198 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
ring              201 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              203 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              208 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 			amdgpu_ring_write(ring, ring->funcs->nop |
ring              211 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 			amdgpu_ring_write(ring, ring->funcs->nop);
ring              222 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
ring              231 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
ring              233 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
ring              234 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
ring              235 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
ring              236 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              247 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              253 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	if (ring->me == 0)
ring              258 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
ring              259 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
ring              260 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
ring              261 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
ring              262 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
ring              263 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
ring              276 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              281 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
ring              282 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              283 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              284 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring              289 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
ring              290 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		amdgpu_ring_write(ring, lower_32_bits(addr));
ring              291 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		amdgpu_ring_write(ring, upper_32_bits(addr));
ring              292 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              296 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
ring              308 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
ring              309 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
ring              434 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_ring *ring;
ring              441 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring = &adev->sdma.instance[i].ring;
ring              442 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		wb_offset = (ring->rptr_offs * 4);
ring              462 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              484 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
ring              485 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring              487 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring->wptr = 0;
ring              488 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
ring              501 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring->sched.ready = true;
ring              507 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring = &adev->sdma.instance[i].ring;
ring              508 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		r = amdgpu_ring_test_helper(ring);
ring              512 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              613 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
ring              615 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring              630 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	r = amdgpu_ring_alloc(ring, 5);
ring              634 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
ring              635 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring              636 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
ring              637 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
ring              638 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              639 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_commit(ring);
ring              664 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              666 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_device *adev = ring->adev;
ring              693 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring              808 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring              810 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              832 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring              834 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring              835 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring              838 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
ring              842 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring              843 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring              844 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, seq); /* reference */
ring              845 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring              846 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
ring              858 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring              864 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring              866 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
ring              867 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
ring              868 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 0);
ring              869 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 0); /* reference */
ring              870 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, 0); /* mask */
ring              871 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
ring              874 drivers/gpu/drm/amd/amdgpu/cik_sdma.c static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
ring              877 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              878 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, reg);
ring              879 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	amdgpu_ring_write(ring, val);
ring              947 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	struct amdgpu_ring *ring;
ring              976 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring = &adev->sdma.instance[i].ring;
ring              977 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		ring->ring_obj = NULL;
ring              978 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		sprintf(ring->name, "sdma%d", i);
ring              979 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring              997 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring             1170 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 			amdgpu_fence_process(&adev->sdma.instance[0].ring);
ring             1183 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 			amdgpu_fence_process(&adev->sdma.instance[1].ring);
ring             1206 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
ring             1280 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
ring             1281 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		adev->sdma.instance[i].ring.me = i;
ring             1362 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring             1380 drivers/gpu/drm/amd/amdgpu/cik_sdma.c 		sched = &adev->sdma.instance[i].ring.sched;
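
A sketch of the cik_sdma ring test indexed above: the SDMA engine DMA-writes 0xDEADBEEF into a writeback slot and the CPU polls for it. The packet layout comes from the hits at lines 634-639 above; the amdgpu_device_wb_get/free helpers and the poll loop are assumptions based on the standard amdgpu writeback API, and error-path cleanup is abbreviated.

static int cik_sdma_test_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned index, i;
	uint64_t gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &index);	/* grab a writeback slot */
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);	/* poison value */

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto out;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE,
					    SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1);		/* one dword follows */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	r = -ETIMEDOUT;
	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(adev->wb.wb[index]) == 0xDEADBEEF) {
			r = 0;			/* SDMA wrote the slot */
			break;
		}
		udelay(1);
	}
out:
	amdgpu_device_wb_free(adev, index);
	return r;
}
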
ring              228 drivers/gpu/drm/amd/amdgpu/cz_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              229 drivers/gpu/drm/amd/amdgpu/cz_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              230 drivers/gpu/drm/amd/amdgpu/cz_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              231 drivers/gpu/drm/amd/amdgpu/cz_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
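
Both cik_ih.c and cz_ih.c above decode an interrupt-handler ring entry the same way: four little-endian dwords copied out of the IH ring at the current index. A condensed sketch, assuming struct amdgpu_ih_ring's ring[] member holds raw little-endian dwords and the caller computed ring_index from the read pointer:

static void ih_decode_sketch(struct amdgpu_ih_ring *ih, u32 ring_index,
			     u32 dw[4])
{
	/* one IH entry = 4 consecutive dwords, stored little-endian */
	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
}
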
ring              253 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
ring              254 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
ring              255 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
ring              271 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				 struct amdgpu_ring *ring)
ring              274 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
ring              275 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring              276 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
ring              283 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
ring              284 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
ring              285 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
ring              290 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
ring              298 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				   struct amdgpu_ring *ring,
ring              302 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
ring              311 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
ring              325 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				   struct amdgpu_ring *ring,
ring              329 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
ring              337 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
ring              400 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
ring              403 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring              404 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
ring              406 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, reg);
ring              407 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring              408 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, val);
ring              411 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
ring              416 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring              417 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring,
ring              426 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, addr0);
ring              427 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, addr1);
ring              428 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, ref);
ring              429 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, mask);
ring              430 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, inv); /* poll interval */
ring              433 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
ring              435 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring              449 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring              452 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  ring->idx, r);
ring              457 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring              458 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
ring              459 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              460 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_commit(ring);
ring              474 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				 ring->idx, i);
ring              477 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				 ring->idx, i);
ring              480 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			  ring->idx, scratch, tmp);
ring              488 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              490 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring              517 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring              533 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
ring             1243 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             1246 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.gfx_ring[ring_id];
ring             1248 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->me = me;
ring             1249 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->pipe = pipe;
ring             1250 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->queue = queue;
ring             1252 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->ring_obj = NULL;
ring             1253 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->use_doorbell = true;
ring             1256 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring             1258 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
ring             1259 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             1261 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
ring             1262 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring             1274 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
ring             1276 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.compute_ring[ring_id];
ring             1279 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->me = mec + 1;
ring             1280 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->pipe = pipe;
ring             1281 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->queue = queue;
ring             1283 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->ring_obj = NULL;
ring             1284 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->use_doorbell = true;
ring             1285 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring             1286 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
ring             1288 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             1291 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
ring             1292 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		+ ring->pipe;
ring             1295 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring             1418 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
ring             1472 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
ring             2700 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             2713 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             2714 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4);
ring             2720 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2721 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             2723 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             2724 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             2725 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             2730 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				amdgpu_ring_write(ring,
ring             2733 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				amdgpu_ring_write(ring, ext->reg_index -
ring             2736 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 					amdgpu_ring_write(ring, ext->extent[i]);
ring             2743 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
ring             2744 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, ctx_reg_offset);
ring             2745 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
ring             2747 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2748 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             2750 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             2751 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             2753 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             2754 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             2755 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             2756 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             2758 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_commit(ring);
ring             2761 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.gfx_ring[1];
ring             2762 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_ring_alloc(ring, 2);
ring             2768 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             2769 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             2771 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_commit(ring);
ring             2788 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 					  struct amdgpu_ring *ring)
ring             2793 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             2795 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				    DOORBELL_OFFSET, ring->doorbell_index);
ring             2804 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
ring             2813 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             2830 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             2831 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2840 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->wptr = 0;
ring             2841 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             2842 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
ring             2845 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2850 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             2859 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rb_addr = ring->gpu_addr >> 8;
ring             2865 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
ring             2871 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.gfx_ring[1];
ring             2872 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2877 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->wptr = 0;
ring             2878 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
ring             2879 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
ring             2881 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2885 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             2894 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rb_addr = ring->gpu_addr >> 8;
ring             2899 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
ring             2910 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             2911 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
ring             2929 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		adev->gfx.kiq.ring.sched.ready = false;
ring             3004 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
ring             3007 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3012 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
ring             3018 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
ring             3020 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3021 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
ring             3031 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
ring             3032 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
ring             3058 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	hqd_gpu_addr = ring->gpu_addr >> 8;
ring             3063 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             3069 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             3074 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
ring             3085 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             3087 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				    DOORBELL_OFFSET, ring->doorbell_index);
ring             3096 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->wptr = 0;
ring             3106 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
ring             3108 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3109 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
ring             3154 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
ring             3156 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3157 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_gfx_mqd *mqd = ring->mqd_ptr;
ring             3162 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3163 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_gfx_mqd_init(ring);
ring             3165 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_gfx_queue_init_register(ring);
ring             3176 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->wptr = 0;
ring             3177 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_clear_ring(ring);
ring             3180 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3181 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_gfx_queue_init_register(ring);
ring             3186 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_clear_ring(ring);
ring             3196 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring             3224 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             3227 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             3229 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             3233 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
ring             3235 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			r = gfx_v10_0_gfx_init_queue(ring);
ring             3236 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			amdgpu_bo_kunmap(ring->mqd_obj);
ring             3237 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->mqd_ptr = NULL;
ring             3239 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_bo_unreserve(ring->mqd_obj);
ring             3253 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             3254 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
ring             3260 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
ring             3262 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3263 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
ring             3275 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	eop_base_addr = ring->eop_gpu_addr >> 8;
ring             3289 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             3291 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				    DOORBELL_OFFSET, ring->doorbell_index);
ring             3306 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->wptr = 0;
ring             3313 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
ring             3314 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
ring             3322 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	hqd_gpu_addr = ring->gpu_addr >> 8;
ring             3329 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			    (order_base_2(ring->ring_size / 4) - 1));
ring             3342 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             3348 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             3354 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             3357 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				DOORBELL_OFFSET, ring->doorbell_index);
ring             3370 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->wptr = 0;
ring             3391 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
ring             3393 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3394 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
ring             3465 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             3491 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell)
ring             3497 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
ring             3499 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3500 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
ring             3503 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_kiq_setting(ring);
ring             3511 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->wptr = 0;
ring             3512 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_clear_ring(ring);
ring             3515 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3516 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_kiq_init_register(ring);
ring             3522 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3523 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_compute_mqd_init(ring);
ring             3524 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_kiq_init_register(ring);
ring             3535 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
ring             3537 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3538 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct v10_compute_mqd *mqd = ring->mqd_ptr;
ring             3539 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
ring             3544 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3545 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_compute_mqd_init(ring);
ring             3557 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->wptr = 0;
ring             3558 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
ring             3559 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_clear_ring(ring);
ring             3561 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_clear_ring(ring);
ring             3569 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             3572 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring = &adev->gfx.kiq.ring;
ring             3574 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             3578 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
ring             3582 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_kiq_init_queue(ring);
ring             3583 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_bo_kunmap(ring->mqd_obj);
ring             3584 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->mqd_ptr = NULL;
ring             3585 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_bo_unreserve(ring->mqd_obj);
ring             3586 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ring->sched.ready = true;
ring             3592 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring = NULL;
ring             3598 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3600 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             3603 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
ring             3605 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			r = gfx_v10_0_kcq_init_queue(ring);
ring             3606 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			amdgpu_bo_kunmap(ring->mqd_obj);
ring             3607 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->mqd_ptr = NULL;
ring             3609 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_bo_unreserve(ring->mqd_obj);
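
The gfx/KIQ/KCQ init paths above share one buffer-object idiom: reserve the ring's MQD object, map it, initialize the queue descriptor through the mapping, then unmap and unreserve. A condensed sketch of that access pattern, with the per-queue init passed in as a callback (the callback parameter is an illustrative generalization, not an in-tree signature):

static int mqd_access_sketch(struct amdgpu_ring *ring,
			     int (*init)(struct amdgpu_ring *ring))
{
	int r;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (!r) {
		r = init(ring);			/* fill the MQD in place */
		amdgpu_bo_kunmap(ring->mqd_obj);
		ring->mqd_ptr = NULL;		/* mapping is gone */
	}
	amdgpu_bo_unreserve(ring->mqd_obj);
	return r;
}
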
ring             3622 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             3657 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             3659 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			 i, ring->me, ring->pipe, ring->queue);
ring             3660 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_ring_test_ring(ring);
ring             3662 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->sched.ready = false;
ring             3668 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3669 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->sched.ready = true;
ring             3671 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			 i, ring->me, ring->pipe, ring->queue);
ring             3672 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		r = amdgpu_ring_test_ring(ring);
ring             3674 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring->sched.ready = false;
ring             3828 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *kiq_ring = &kiq->ring;
ring             3991 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
ring             3997 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4000 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_write_data_to_reg(ring, 0, false,
ring             4005 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_write_data_to_reg(ring, 0, false,
ring             4010 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_write_data_to_reg(ring, 0, false,
ring             4015 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_write_data_to_reg(ring, 0, false,
ring             4358 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
ring             4360 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr*/
ring             4363 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
ring             4365 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4369 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             4370 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
ring             4379 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
ring             4381 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4383 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             4385 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
ring             4386 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr);
ring             4388 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             4389 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
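
gfx_v10_0_ring_set_wptr_gfx() above shows the doorbell-versus-MMIO split used when publishing a new write pointer: with a doorbell, the wptr is mirrored into the writeback page and then the 64-bit doorbell is rung; without one, the CP ring-buffer registers are programmed directly. A sketch mirroring that logic, using only the calls visible in the hits above:

static void set_wptr_gfx_sketch(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* publish wptr in the writeback page, then ring the doorbell */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs],
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		/* no doorbell: program the CP ring-buffer registers directly */
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}
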
ring             4393 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static u64 gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
ring             4395 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */
ring             4398 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
ring             4403 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell)
ring             4404 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
ring             4410 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
ring             4412 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4415 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->use_doorbell) {
ring             4416 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
ring             4417 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr);
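The gfx_v10_0 wptr entries above (gfx and compute) share one publish pattern. A minimal sketch assembled only from the calls visible in this listing — sketch_set_wptr is a hypothetical name, not a symbol in the driver:

static void sketch_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* mirror wptr into the writeback page the CP snoops, then ring the doorbell */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		/* no doorbell: program the CP ring-buffer wptr registers directly */
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}

The matching get_wptr entries read the same writeback slot back with atomic64_read, which is why both sides treat adev->wb.wb[ring->wptr_offs] as an atomic64_t.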
ring             4423 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring             4425 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4429 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
ring             4430 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		switch (ring->me) {
ring             4432 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
ring             4435 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
ring             4446 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
ring             4452 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
ring             4474 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			gfx_v10_0_ring_emit_de_meta(ring,
ring             4478 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, header);
ring             4480 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring,
ring             4485 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             4486 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, control);
ring             4489 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
ring             4508 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             4509 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
ring             4510 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
ring             4513 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             4515 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring,
ring             4520 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             4521 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, control);
ring             4524 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             4527 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4536 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
ring             4537 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
ring             4544 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
ring             4555 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             4556 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             4557 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             4558 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             4559 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             4562 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             4564 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             4565 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             4566 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             4568 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
ring             4572 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             4575 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             4578 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
ring             4580 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             4581 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, 0x0);
ring             4585 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
ring             4588 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4594 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4595 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4597 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             4598 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             4599 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             4603 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4604 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4606 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
ring             4607 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, 0);
ring             4608 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
ring             4612 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
ring             4614 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             4615 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             4618 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
ring             4623 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_ring_emit_ce_meta(ring,
ring             4626 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_ring_emit_tmz(ring, true);
ring             4648 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             4649 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, dw2);
ring             4650 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             4653 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
ring             4657 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
ring             4658 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
ring             4659 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
ring             4660 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
ring             4661 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	ret = ring->wptr & ring->buf_mask;
ring             4662 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
ring             4667 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
ring             4670 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	BUG_ON(offset > ring->buf_mask);
ring             4671 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
ring             4673 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	cur = (ring->wptr - 1) & ring->buf_mask;
ring             4675 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->ring[offset] = cur - offset;
ring             4677 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
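The COND_EXEC pair above implements patchable conditional execution: init emits the packet with a 0x55aa55aa placeholder dword count and returns that slot's offset; patch later rewrites the slot with the distance from it to the current wptr, adding buf_mask + 1 when the ring has wrapped. A minimal sketch under those assumptions (hypothetical sketch_* names):

static unsigned sketch_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
	ret = ring->wptr & ring->buf_mask; /* offset of the placeholder */
	amdgpu_ring_write(ring, 0x55aa55aa); /* dummy count, patched below */
	return ret;
}

static void sketch_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur = (ring->wptr - 1) & ring->buf_mask;

	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else /* wrapped: count the dwords through the end of the buffer */
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}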
ring             4680 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
ring             4683 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4685 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *kiq_ring = &kiq->ring;
ring             4694 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_set_preempt_cond_exec(ring, false);
ring             4697 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
ring             4698 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				   ring->trail_fence_gpu_addr,
ring             4699 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				   ++ring->trail_seq);
ring             4704 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		if (ring->trail_seq ==
ring             4705 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
ring             4712 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
ring             4716 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_set_preempt_cond_exec(ring, true);
ring             4720 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
ring             4722 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4728 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	csa_addr = amdgpu_csa_vaddr(ring->adev);
ring             4730 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
ring             4731 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
ring             4735 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
ring             4737 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
ring             4741 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
ring             4746 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
ring             4750 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
ring             4752 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4757 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	csa_addr = amdgpu_csa_vaddr(ring->adev);
ring             4764 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
ring             4765 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             4769 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(csa_addr +
ring             4771 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(csa_addr +
ring             4775 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr +
ring             4780 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
ring             4784 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
ring             4786 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
ring             4787 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
ring             4790 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
ring             4792 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4794 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
ring             4795 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0 |	/* src: register*/
ring             4798 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, reg);
ring             4799 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             4800 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
ring             4802 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
ring             4806 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
ring             4811 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	switch (ring->funcs->type) {
ring             4822 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4823 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, cmd);
ring             4824 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, reg);
ring             4825 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, 0);
ring             4826 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_ring_write(ring, val);
ring             4829 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             4832 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
ring             4835 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ring             4839 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             4840 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4846 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
ring             4849 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
ring             4996 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             5013 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring = &adev->gfx.compute_ring[i];
ring             5017 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
ring             5018 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				amdgpu_fence_process(ring);
ring             5066 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring;
ring             5076 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring = &adev->gfx.gfx_ring[i];
ring             5078 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			if (ring->me == me_id && ring->pipe == pipe_id)
ring             5079 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				drm_sched_fault(&ring->sched);
ring             5085 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			ring = &adev->gfx.compute_ring[i];
ring             5086 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			if (ring->me == me_id && ring->pipe == pipe_id &&
ring             5087 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 			    ring->queue == queue_id)
ring             5088 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 				drm_sched_fault(&ring->sched);
ring             5120 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
ring             5122 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	if (ring->me == 1)
ring             5126 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	target += ring->pipe;
ring             5164 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
ring             5172 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	amdgpu_fence_process(ring);
ring             5315 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c 	adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
ring             1790 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
ring             1792 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1804 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring             1808 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             1809 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
ring             1810 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring             1811 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_commit(ring);
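The gfx_v6_0 ring test above reserves three dwords, emits a SET_CONFIG_REG write of 0xDEADBEEF to a scratch register, and commits; the listing elides the readback loop that decides pass/fail. A minimal sketch of the full flow — the polling loop is an assumption based on the usual amdgpu pattern, and scratch allocation/cleanup is omitted:

static int sketch_ring_test(struct amdgpu_ring *ring, uint32_t scratch)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	amdgpu_ring_write(ring, scratch - PACKET3_SET_CONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	/* spin until the CP has executed the write, or give up */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(scratch) == 0xDEADBEEF)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}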
ring             1828 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
ring             1830 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             1831 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
ring             1835 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             1841 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             1842 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (mmCP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
ring             1843 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             1844 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             1845 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
ring             1849 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0xFFFFFFFF);
ring             1850 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             1851 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 10); /* poll interval */
ring             1853 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             1854 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
ring             1855 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             1856 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             1859 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             1860 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             1863 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
ring             1873 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             1874 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             1884 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, header);
ring             1885 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring,
ring             1890 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             1891 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, control);
ring             1903 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring             1905 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1927 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             2029 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
ring             2032 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	r = amdgpu_ring_alloc(ring, 7 + 4);
ring             2037 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
ring             2038 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x1);
ring             2039 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x0);
ring             2040 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
ring             2041 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
ring             2042 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             2043 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             2045 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             2046 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             2047 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0xc000);
ring             2048 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0xe000);
ring             2049 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_commit(ring);
ring             2053 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
ring             2059 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2060 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             2065 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 				amdgpu_ring_write(ring,
ring             2067 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
ring             2069 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 					amdgpu_ring_write(ring, ext->extent[i]);
ring             2074 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2075 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             2077 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             2078 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             2080 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             2081 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x00000316);
ring             2082 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x0000000e);
ring             2083 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x00000010);
ring             2085 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_commit(ring);
ring             2092 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_ring *ring;
ring             2109 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             2110 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2120 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring->wptr = 0;
ring             2121 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB0_WPTR, ring->wptr);
ring             2124 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2133 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB0_BASE, ring->gpu_addr >> 8);
ring             2137 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	r = amdgpu_ring_test_helper(ring);
ring             2144 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
ring             2146 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	return ring->adev->wb.wb[ring->rptr_offs];
ring             2149 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
ring             2151 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2153 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	if (ring == &adev->gfx.gfx_ring[0])
ring             2155 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	else if (ring == &adev->gfx.compute_ring[0])
ring             2157 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	else if (ring == &adev->gfx.compute_ring[1])
ring             2163 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
ring             2165 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2167 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             2171 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
ring             2173 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2175 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	if (ring == &adev->gfx.compute_ring[0]) {
ring             2176 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		WREG32(mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
ring             2178 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	} else if (ring == &adev->gfx.compute_ring[1]) {
ring             2179 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		WREG32(mmCP_RB2_WPTR, lower_32_bits(ring->wptr));
ring             2189 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_ring *ring;
ring             2198 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring = &adev->gfx.compute_ring[0];
ring             2199 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2207 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring->wptr = 0;
ring             2208 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB1_WPTR, ring->wptr);
ring             2210 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2216 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB1_BASE, ring->gpu_addr >> 8);
ring             2218 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring = &adev->gfx.compute_ring[1];
ring             2219 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2227 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	ring->wptr = 0;
ring             2228 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB2_WPTR, ring->wptr);
ring             2229 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2235 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
ring             2307 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             2309 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             2310 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             2311 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             2313 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             2314 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
ring             2317 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             2318 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring             2319 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, seq);
ring             2320 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0xffffffff);
ring             2321 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 4); /* poll interval */
ring             2325 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             2326 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             2327 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             2328 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             2332 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             2335 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             2337 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             2340 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             2341 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
ring             2343 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
ring             2344 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             2345 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0); /* ref */
ring             2346 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0); /* mask */
ring             2347 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x20); /* poll interval */
ring             2351 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             2352 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0x0);
ring             2355 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             2356 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             2357 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             2358 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             2362 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             2365 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             2367 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             2368 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             2370 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, reg);
ring             2371 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             2372 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, val);
ring             2977 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
ring             2980 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		gfx_v6_0_ring_emit_vgt_flush(ring);
ring             2981 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             2982 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             2983 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             3083 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_ring *ring;
ring             3114 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             3115 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->ring_obj = NULL;
ring             3116 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		sprintf(ring->name, "gfx");
ring             3117 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             3130 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3131 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->ring_obj = NULL;
ring             3132 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->use_doorbell = false;
ring             3133 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->doorbell_index = 0;
ring             3134 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->me = 1;
ring             3135 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->pipe = i;
ring             3136 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring->queue = i;
ring             3137 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             3138 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
ring             3139 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             3258 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 						     int ring,
ring             3264 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		if (ring == 0) {
ring             3277 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		if (ring == 0) {
ring             3389 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	struct amdgpu_ring *ring;
ring             3393 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring = &adev->gfx.gfx_ring[0];
ring             3397 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		ring = &adev->gfx.compute_ring[entry->ring_id - 1];
ring             3402 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	drm_sched_fault(&ring->sched);
ring             2087 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
ring             2089 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2100 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring             2104 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring             2105 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
ring             2106 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring             2107 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_commit(ring);
ring             2131 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring             2134 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;
ring             2136 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
ring             2137 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		switch (ring->me) {
ring             2139 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
ring             2142 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
ring             2151 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             2152 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
ring             2155 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
ring             2156 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
ring             2157 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, ref_and_mask);
ring             2158 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, ref_and_mask);
ring             2159 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x20); /* poll interval */
ring             2162 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
ring             2164 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             2165 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
ring             2168 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             2169 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
ring             2182 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
ring             2190 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             2191 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             2195 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             2196 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             2198 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
ring             2199 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
ring             2202 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             2203 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             2207 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             2208 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             2210 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             2211 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             2223 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
ring             2231 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
ring             2232 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             2236 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
ring             2237 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             2238 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             2239 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             2240 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             2258 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
ring             2268 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             2269 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0);
ring             2279 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, header);
ring             2280 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring,
ring             2285 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             2286 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, control);
ring             2289 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
ring             2308 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2309 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
ring             2310 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
ring             2313 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             2314 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring,
ring             2319 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             2320 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, control);
ring             2323 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
ring             2329 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		gfx_v7_0_ring_emit_vgt_flush(ring);
ring             2338 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             2339 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, dw2);
ring             2340 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             2352 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring             2354 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2376 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             2525 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
ring             2537 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
ring             2544 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             2545 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             2546 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             2547 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             2550 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2551 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             2553 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             2554 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             2555 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             2560 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				amdgpu_ring_write(ring,
ring             2562 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
ring             2564 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 					amdgpu_ring_write(ring, ext->extent[i]);
ring             2569 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             2570 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
ring             2571 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
ring             2572 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
ring             2574 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             2575 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             2577 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             2578 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             2580 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             2581 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x00000316);
ring             2582 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
ring             2583 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
ring             2585 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_commit(ring);
ring             2601 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring;
ring             2621 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             2622 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2631 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->wptr = 0;
ring             2632 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             2635 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             2645 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	rb_addr = ring->gpu_addr >> 8;
ring             2651 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	r = amdgpu_ring_test_helper(ring);
ring             2658 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
ring             2660 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	return ring->adev->wb.wb[ring->rptr_offs];
ring             2663 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
ring             2665 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2670 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
ring             2672 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2674 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             2678 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
ring             2681 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	return ring->adev->wb.wb[ring->wptr_offs];
ring             2684 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
ring             2686 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2689 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             2690 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             2789 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             2791 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
ring             2931 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			     struct amdgpu_ring *ring)
ring             2948 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	if (ring->use_doorbell)
ring             2962 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	hqd_gpu_addr = ring->gpu_addr >> 8;
ring             2973 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		order_base_2(ring->ring_size / 8);
ring             2989 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             2994 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             3000 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	if (ring->use_doorbell) {
ring             3006 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			(ring->doorbell_index <<
ring             3019 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->wptr = 0;
ring             3020 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
ring             3078 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
ring             3081 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
ring             3089 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3091 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
ring             3098 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_bo_kunmap(ring->mqd_obj);
ring             3099 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_bo_unreserve(ring->mqd_obj);
ring             3116 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring;
ring             3140 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3141 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_test_helper(ring);
ring             3211 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             3213 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             3214 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             3215 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             3217 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             3218 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
ring             3221 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             3222 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring             3223 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, seq);
ring             3224 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0xffffffff);
ring             3225 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 4); /* poll interval */
ring             3229 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3230 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0);
ring             3231 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3232 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0);
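gfx_v7_0's pipeline sync above is a single WAIT_REG_MEM packet: the CP stalls until the fence writeback location holds a value >= the latest sync sequence (function 3), with SWITCH_BUFFER packets following on gfx rings. A minimal sketch of the wait itself, assuming the WAIT_REG_MEM_* field macros used throughout this listing (WAIT_REG_MEM_ENGINE is not shown above but follows the same convention):

static void sketch_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, WAIT_REG_MEM_MEM_SPACE(1) |	/* memory */
				WAIT_REG_MEM_FUNCTION(3) |	/* >= */
				WAIT_REG_MEM_ENGINE(usepfp));
	amdgpu_ring_write(ring, addr & 0xfffffffc);	/* dword-aligned low bits */
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);		/* reference value */
	amdgpu_ring_write(ring, 0xffffffff);	/* compare mask */
	amdgpu_ring_write(ring, 4);		/* poll interval */
}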
ring             3250 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             3253 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             3255 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             3258 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             3259 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
ring             3262 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
ring             3263 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             3264 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0); /* ref */
ring             3265 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0); /* mask */
ring             3266 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0x20); /* poll interval */
ring             3271 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             3272 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0x0);
ring             3275 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3276 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0);
ring             3277 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3278 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		amdgpu_ring_write(ring, 0);
ring             3282 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             3285 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             3287 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             3288 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             3290 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, reg);
ring             3291 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             3292 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, val);
ring             4090 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
ring             4097 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4098 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4100 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
ring             4101 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             4102 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, gds_base);
ring             4105 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4106 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4108 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
ring             4109 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             4110 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, gds_size);
ring             4113 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4114 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4116 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
ring             4117 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             4118 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
ring             4121 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             4122 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             4124 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
ring             4125 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             4126 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
ring             4129 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
ring             4131 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4403 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
ring             4406 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->me = mec + 1;
ring             4407 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->pipe = pipe;
ring             4408 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->queue = queue;
ring             4410 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->ring_obj = NULL;
ring             4411 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->use_doorbell = true;
ring             4412 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring             4413 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             4416 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
ring             4417 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		+ ring->pipe;
ring             4420 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring             4431 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring;
ring             4489 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             4490 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		ring->ring_obj = NULL;
ring             4491 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		sprintf(ring->name, "gfx");
ring             4492 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             4868 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring;
ring             4881 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			ring = &adev->gfx.compute_ring[i];
ring             4882 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
ring             4883 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				amdgpu_fence_process(ring);
ring             4893 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	struct amdgpu_ring *ring;
ring             4906 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			ring = &adev->gfx.compute_ring[i];
ring             4907 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id))
ring             4908 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				drm_sched_fault(&ring->sched);
ring              729 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
ring              730 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
ring              836 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
ring              838 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring              849 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring              853 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring              854 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
ring              855 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              856 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_commit(ring);
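Once gfx_v8_0_ring_test_ring commits the SET_UCONFIG_REG / 0xDEADBEEF packet above, the driver polls the scratch register until the CP has consumed it. A minimal sketch of that poll loop, assuming amdgpu kernel context (RREG32(), udelay() and adev->usec_timeout are the driver's existing primitives; the error code here is illustrative):

	uint32_t tmp;
	int i, r;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;		/* CP wrote the test value back */
		udelay(1);
	}
	r = (i < adev->usec_timeout) ? 0 : -ETIMEDOUT;

gfx_v9_0_ring_test_ring below follows the same write-then-poll shape.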
ring              873 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              875 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring              902 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             1549 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
ring             1562 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (!ring->sched.ready)
ring             1677 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             1919 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
ring             1921 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring = &adev->gfx.compute_ring[ring_id];
ring             1924 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->me = mec + 1;
ring             1925 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->pipe = pipe;
ring             1926 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->queue = queue;
ring             1928 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->ring_obj = NULL;
ring             1929 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->use_doorbell = true;
ring             1930 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring             1931 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
ring             1933 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             1936 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
ring             1937 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		+ ring->pipe;
ring             1940 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring             1954 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             2035 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             2036 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring->ring_obj = NULL;
ring             2037 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		sprintf(ring->name, "gfx");
ring             2040 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ring->use_doorbell = true;
ring             2041 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
ring             2044 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
ring             2077 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
ring             2106 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
ring             4196 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
ring             4208 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
ring             4215 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             4216 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             4218 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             4219 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             4220 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             4225 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				amdgpu_ring_write(ring,
ring             4228 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				amdgpu_ring_write(ring,
ring             4231 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 					amdgpu_ring_write(ring, ext->extent[i]);
ring             4236 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             4237 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
ring             4238 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
ring             4239 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
ring             4241 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             4242 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             4244 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             4245 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             4248 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             4249 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             4250 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             4251 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             4253 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_commit(ring);
ring             4257 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring             4266 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->use_doorbell) {
ring             4268 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				DOORBELL_OFFSET, ring->doorbell_index);
ring             4293 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             4305 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             4306 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             4318 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->wptr = 0;
ring             4319 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             4322 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             4326 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             4332 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	rb_addr = ring->gpu_addr >> 8;
ring             4336 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	gfx_v8_0_set_cpg_door_bell(adev, ring);
ring             4338 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_clear_ring(ring);
ring             4340 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->sched.ready = true;
ring             4355 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		adev->gfx.kiq.ring.sched.ready = false;
ring             4361 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
ring             4364 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4369 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
ring             4377 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring             4411 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             4412 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
ring             4413 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             4421 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
ring             4422 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
ring             4423 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
ring             4424 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
ring             4457 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
ring             4459 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4460 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct vi_mqd *mqd = ring->mqd_ptr;
ring             4471 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
ring             4473 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
ring             4475 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	eop_base_addr = ring->eop_gpu_addr >> 8;
ring             4490 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			    ring->use_doorbell ? 1 : 0);
ring             4495 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
ring             4496 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
ring             4504 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	hqd_gpu_addr = ring->gpu_addr >> 8;
ring             4511 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			    (order_base_2(ring->ring_size / 4) - 1));
ring             4524 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             4530 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             4536 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->use_doorbell) {
ring             4539 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				DOORBELL_OFFSET, ring->doorbell_index);
ring             4552 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->wptr = 0;
ring             4553 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	mqd->cp_hqd_pq_wptr = ring->wptr;
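The queue-size field programmed in gfx_v8_0_mqd_init above, order_base_2(ring->ring_size / 4) - 1, stores log2 of the ring's dword count minus one, so the hardware sees a queue of 2^(field + 1) dwords. A standalone check with an assumed 4 KiB ring (order_base_2() reimplemented locally as the kernel's ceil-log2 helper):

	#include <stdio.h>

	static unsigned order_base_2(unsigned n)	/* local stand-in */
	{
		unsigned order = 0;

		while ((1u << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned ring_size = 4096;	/* bytes, assumed */
		unsigned field = order_base_2(ring_size / 4) - 1;

		/* prints "QUEUE_SIZE=9 -> 1024 dwords" */
		printf("QUEUE_SIZE=%u -> %u dwords\n", field, 1u << (field + 1));
		return 0;
	}

gfx_v9_0_mqd_init below uses the identical encoding.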
ring             4637 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
ring             4639 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4640 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct vi_mqd *mqd = ring->mqd_ptr;
ring             4643 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	gfx_v8_0_kiq_setting(ring);
ring             4651 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring->wptr = 0;
ring             4652 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_clear_ring(ring);
ring             4654 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             4663 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             4664 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		gfx_v8_0_mqd_init(ring);
ring             4676 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
ring             4678 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4679 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct vi_mqd *mqd = ring->mqd_ptr;
ring             4680 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
ring             4687 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             4688 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		gfx_v8_0_mqd_init(ring);
ring             4699 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring->wptr = 0;
ring             4700 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_clear_ring(ring);
ring             4702 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_clear_ring(ring);
ring             4719 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             4722 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring = &adev->gfx.kiq.ring;
ring             4724 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             4728 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
ring             4732 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	gfx_v8_0_kiq_init_queue(ring);
ring             4733 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_bo_kunmap(ring->mqd_obj);
ring             4734 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->mqd_ptr = NULL;
ring             4735 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_bo_unreserve(ring->mqd_obj);
ring             4736 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring->sched.ready = true;
ring             4742 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring = NULL;
ring             4748 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring = &adev->gfx.compute_ring[i];
ring             4750 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             4753 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
ring             4755 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			r = gfx_v8_0_kcq_init_queue(ring);
ring             4756 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			amdgpu_bo_kunmap(ring->mqd_obj);
ring             4757 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ring->mqd_ptr = NULL;
ring             4759 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_bo_unreserve(ring->mqd_obj);
ring             4777 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             4780 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             4781 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ring_test_helper(ring);
ring             4785 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ring = &adev->gfx.kiq.ring;
ring             4786 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	r = amdgpu_ring_test_helper(ring);
ring             4791 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring = &adev->gfx.compute_ring[i];
ring             4792 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_test_helper(ring);
ring             4856 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring             4863 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             4871 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
ring             5064 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             5067 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             5159 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             5162 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             5202 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
ring             5209 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5210 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5212 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
ring             5213 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             5214 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, gds_base);
ring             5217 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5218 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5220 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
ring             5221 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             5222 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, gds_size);
ring             5225 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5226 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5228 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
ring             5229 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             5230 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
ring             5233 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5234 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5236 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
ring             5237 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             5238 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
ring             6041 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
ring             6043 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	return ring->adev->wb.wb[ring->rptr_offs];
ring             6046 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
ring             6048 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6050 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->use_doorbell)
ring             6052 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		return ring->adev->wb.wb[ring->wptr_offs];
ring             6057 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
ring             6059 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6061 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->use_doorbell) {
ring             6063 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             6064 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             6066 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             6071 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring             6075 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
ring             6076 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
ring             6077 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		switch (ring->me) {
ring             6079 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
ring             6082 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
ring             6093 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             6094 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
ring             6097 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
ring             6098 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
ring             6099 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, ref_and_mask);
ring             6100 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, ref_and_mask);
ring             6101 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x20); /* poll interval */
ring             6104 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
ring             6106 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             6107 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
ring             6110 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             6111 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
ring             6115 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
ring             6130 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
ring             6134 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			gfx_v8_0_ring_emit_de_meta(ring);
ring             6137 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, header);
ring             6138 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring,
ring             6143 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             6144 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, control);
ring             6147 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
ring             6166 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             6167 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
ring             6168 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
ring             6171 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             6172 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring,
ring             6177 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             6178 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, control);
ring             6181 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
ring             6190 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             6191 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             6196 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             6197 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             6199 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
ring             6200 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
ring             6204 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             6205 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             6210 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             6211 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             6213 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             6214 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
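gfx_v8_0_ring_emit_fence_gfx emits two EVENT_WRITE_EOP packets: the first carries seq - 1 (a cache-flush workaround write that must not complete the fence), the second the real seq. That is safe because the waiter compares the stored value against the sequence it is waiting for rather than reacting to the write itself; a hypothetical wrap-safe predicate of that shape (illustrative only, not amdgpu's fence code):

	#include <stdint.h>

	/* Signalled once the written value has reached wait_seq; signed
	 * subtraction keeps the test correct across 32-bit wrap-around,
	 * and the dummy seq - 1 write never satisfies it. */
	static int fence_signaled(uint32_t written_seq, uint32_t wait_seq)
	{
		return (int32_t)(written_seq - wait_seq) >= 0;
	}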
ring             6218 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             6220 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             6221 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             6222 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             6224 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             6225 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
ring             6228 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             6229 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring             6230 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, seq);
ring             6231 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0xffffffff);
ring             6232 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 4); /* poll interval */
ring             6235 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             6238 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             6240 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             6243 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             6244 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
ring             6247 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
ring             6248 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             6249 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0); /* ref */
ring             6250 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0); /* mask */
ring             6251 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x20); /* poll interval */
ring             6256 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             6257 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, 0x0);
ring             6261 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
ring             6263 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	return ring->adev->wb.wb[ring->wptr_offs];
ring             6266 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
ring             6268 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6271 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             6272 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             6275 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
ring             6278 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6282 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
ring             6285 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->me > 0)
ring             6295 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 					    struct amdgpu_ring *ring,
ring             6303 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
ring             6345 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				      struct amdgpu_ring *ring,
ring             6352 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             6360 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
ring             6363 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6366 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
ring             6369 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	gfx_v8_0_hqd_set_priority(adev, ring, acquire);
ring             6370 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
ring             6373 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
ring             6381 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
ring             6382 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             6387 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
ring             6388 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             6389 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             6390 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             6391 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             6394 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
ring             6401 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             6402 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             6404 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             6405 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             6406 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             6410 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             6411 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             6413 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
ring             6414 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, 0);
ring             6415 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
ring             6419 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
ring             6421 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             6422 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             6425 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
ring             6429 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (amdgpu_sriov_vf(ring->adev))
ring             6430 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		gfx_v8_0_ring_emit_ce_meta(ring);
ring             6434 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		gfx_v8_0_ring_emit_vgt_flush(ring);
ring             6453 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             6454 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, dw2);
ring             6455 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             6458 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
ring             6462 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
ring             6463 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
ring             6464 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
ring             6465 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
ring             6466 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	ret = ring->wptr & ring->buf_mask;
ring             6467 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
ring             6471 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
ring             6475 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	BUG_ON(offset > ring->buf_mask);
ring             6476 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
ring             6478 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	cur = (ring->wptr & ring->buf_mask) - 1;
ring             6480 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring->ring[offset] = cur - offset;
ring             6482 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
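gfx_v8_0_ring_emit_init_cond_exec reserves a dummy dword (0x55aa55aa) as the COND_EXEC packet's skip count and returns its ring offset; gfx_v8_0_ring_emit_patch_cond_exec back-fills it with the dword distance from that slot to the current write pointer, adding a full ring of dwords (ring_size >> 2 converts bytes to dwords) when wptr has wrapped past the end. A standalone check of the patch arithmetic, with example positions assumed:

	#include <stdio.h>

	static unsigned patch_count(unsigned offset, unsigned wptr,
				    unsigned buf_mask, unsigned ring_dwords)
	{
		unsigned cur = (wptr & buf_mask) - 1;

		return (cur > offset) ? cur - offset
				      : ring_dwords - offset + cur;
	}

	int main(void)
	{
		/* 256-dword ring, mask 0xff */
		printf("%u\n", patch_count(10, 31, 0xff, 256));	/* 20, no wrap */
		printf("%u\n", patch_count(250, 5, 0xff, 256));	/* 10, wrapped */
		return 0;
	}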
ring             6485 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
ring             6487 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6489 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
ring             6490 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0 |	/* src: register*/
ring             6493 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, reg);
ring             6494 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             6495 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
ring             6497 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
ring             6501 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
ring             6506 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	switch (ring->funcs->type) {
ring             6518 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             6519 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, cmd);
ring             6520 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, reg);
ring             6521 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, 0);
ring             6522 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, val);
ring             6525 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
ring             6527 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_device *adev = ring->adev;
ring             6733 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             6747 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ring = &adev->gfx.compute_ring[i];
ring             6751 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
ring             6752 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				amdgpu_fence_process(ring);
ring             6763 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	struct amdgpu_ring *ring;
ring             6777 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			ring = &adev->gfx.compute_ring[i];
ring             6778 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			if (ring->me == me_id && ring->pipe == pipe_id &&
ring             6779 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			    ring->queue == queue_id)
ring             6780 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				drm_sched_fault(&ring->sched);
ring             7036 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
ring             7202 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
ring             7211 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->adev->virt.chained_ib_support) {
ring             7212 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
ring             7216 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
ring             7221 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
ring             7222 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
ring             7226 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
ring             7227 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
ring             7228 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
ring             7231 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
ring             7240 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	csa_addr = amdgpu_csa_vaddr(ring->adev);
ring             7242 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	if (ring->adev->virt.chained_ib_support) {
ring             7254 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
ring             7255 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             7259 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
ring             7260 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
ring             7261 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
ring              733 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
ring              734 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
ring              805 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
ring              808 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring              809 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
ring              812 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, reg);
ring              813 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring              814 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, val);
ring              817 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
ring              822 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring              823 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring,
ring              832 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, addr0);
ring              833 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, addr1);
ring              834 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, ref);
ring              835 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, mask);
ring              836 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, inv); /* poll interval */
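gfx_v9_0_wait_reg_mem centralizes the WAIT_REG_MEM emission that gfx_v8 open-codes (compare gfx_v8_0_ring_emit_pipeline_sync above): eng_sel selects PFP vs ME, the remaining flag parameters choose register vs memory space and wait vs write-wait-write, and addr0/addr1, ref, mask and the poll interval follow. A plausible pipeline-sync call built on it; the parameters between eng_sel and addr0 are assumed from context, not visible in this listing:

	/* A minimal sketch, not a verbatim copy of gfx_v9_0.c: wait on the
	 * fence address (memory space) until *addr == seq, polling every
	 * 4 cycles, matching the open-coded v8 sequence above. */
	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);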
ring              839 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
ring              841 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring              852 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring              856 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring              857 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
ring              858 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              859 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_commit(ring);
ring              876 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              878 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring              905 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             2079 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
ring             2125 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ring_alloc(ring, 17);
ring             2128 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			  ring->name, r);
ring             2132 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             2137 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
ring             2138 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
ring             2141 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             2142 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             2143 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
ring             2144 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             2145 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
ring             2148 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             2151 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_commit(ring);
ring             2161 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
ring             2163 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring = &adev->gfx.compute_ring[ring_id];
ring             2166 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->me = mec + 1;
ring             2167 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->pipe = pipe;
ring             2168 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->queue = queue;
ring             2170 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->ring_obj = NULL;
ring             2171 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->use_doorbell = true;
ring             2172 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring             2173 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
ring             2175 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
ring             2178 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
ring             2179 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		+ ring->pipe;
ring             2182 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ring_init(adev, ring, 1024,
ring             2194 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             2268 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring = &adev->gfx.gfx_ring[i];
ring             2269 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->ring_obj = NULL;
ring             2271 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			sprintf(ring->name, "gfx");
ring             2273 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			sprintf(ring->name, "gfx_%d", i);
ring             2274 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->use_doorbell = true;
ring             2275 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring             2276 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             2308 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
ring             2356 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
ring             3139 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
ring             3150 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
ring             3156 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3157 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             3159 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             3160 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             3161 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0x80000000);
ring             3166 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				amdgpu_ring_write(ring,
ring             3169 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				amdgpu_ring_write(ring,
ring             3172 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 					amdgpu_ring_write(ring, ext->extent[i]);
ring             3177 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3178 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             3180 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             3181 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             3183 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             3184 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             3185 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             3186 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0x8000);
ring             3188 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1));
ring             3191 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, tmp);
ring             3192 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             3194 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_commit(ring);
ring             3201 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             3213 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring = &adev->gfx.gfx_ring[0];
ring             3214 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             3223 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->wptr = 0;
ring             3224 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             3225 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
ring             3228 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             3232 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             3239 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	rb_addr = ring->gpu_addr >> 8;
ring             3244 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             3246 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				    DOORBELL_OFFSET, ring->doorbell_index);
ring             3255 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			DOORBELL_RANGE_LOWER, ring->doorbell_index);
ring             3264 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->sched.ready = true;
ring             3280 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		adev->gfx.kiq.ring.sched.ready = false;
ring             3328 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
ring             3331 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3336 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
ring             3344 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring             3380 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             3381 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
ring             3382 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             3389 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
ring             3390 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
ring             3391 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
ring             3396 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
ring             3410 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
ring             3412 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3413 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct v9_mqd *mqd = ring->mqd_ptr;
ring             3430 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		lower_32_bits(ring->mqd_gpu_addr
ring             3433 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		upper_32_bits(ring->mqd_gpu_addr
ring             3436 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	eop_base_addr = ring->eop_gpu_addr >> 8;
ring             3450 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             3452 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				    DOORBELL_OFFSET, ring->doorbell_index);
ring             3467 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->wptr = 0;
ring             3474 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
ring             3475 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
ring             3483 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	hqd_gpu_addr = ring->gpu_addr >> 8;
ring             3490 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			    (order_base_2(ring->ring_size / 4) - 1));
ring             3503 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring             3509 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             3515 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             3518 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				DOORBELL_OFFSET, ring->doorbell_index);
ring             3531 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->wptr = 0;
ring             3552 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
ring             3554 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3555 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct v9_mqd *mqd = ring->mqd_ptr;
ring             3625 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             3651 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell)
ring             3657 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
ring             3659 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3696 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
ring             3698 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3699 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct v9_mqd *mqd = ring->mqd_ptr;
ring             3702 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_kiq_setting(ring);
ring             3710 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->wptr = 0;
ring             3711 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_clear_ring(ring);
ring             3714 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3715 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_kiq_init_register(ring);
ring             3723 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3724 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_mqd_init(ring);
ring             3725 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_kiq_init_register(ring);
ring             3736 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
ring             3738 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             3739 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct v9_mqd *mqd = ring->mqd_ptr;
ring             3740 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
ring             3747 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             3748 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_mqd_init(ring);
ring             3760 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->wptr = 0;
ring             3761 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
ring             3762 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_clear_ring(ring);
ring             3764 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_clear_ring(ring);
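
Both kiq_init_queue and kcq_init_queue above branch on whether the device is recovering from a GPU reset: the reset path restores a previously saved MQD image and clears the ring, while the first-init path builds the MQD under grbm_select and then snapshots it for later resets. A toy model of that save/restore shape, with hypothetical types standing in for v9_mqd and the per-queue backup array:

    #include <string.h>
    #include <stdbool.h>

    struct toy_mqd { unsigned char raw[512]; };   /* stands in for v9_mqd */

    struct toy_queue {
        struct toy_mqd *mqd;      /* live, GPU-visible MQD */
        struct toy_mqd  backup;   /* snapshot taken after first init */
        bool            initialized;
    };

    static void toy_mqd_hw_init(struct toy_mqd *mqd) { memset(mqd, 0, sizeof(*mqd)); }

    static void toy_init_queue(struct toy_queue *q, bool in_gpu_reset)
    {
        if (in_gpu_reset && q->initialized) {
            /* reset path: restore the saved image; rptr/wptr start over */
            memcpy(q->mqd, &q->backup, sizeof(*q->mqd));
        } else {
            /* first init: build the MQD, then snapshot it for later resets */
            toy_mqd_hw_init(q->mqd);
            memcpy(&q->backup, q->mqd, sizeof(*q->mqd));
            q->initialized = true;
        }
    }
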
ring             3772 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             3775 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring = &adev->gfx.kiq.ring;
ring             3777 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             3781 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
ring             3785 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_kiq_init_queue(ring);
ring             3786 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_bo_kunmap(ring->mqd_obj);
ring             3787 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->mqd_ptr = NULL;
ring             3788 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_bo_unreserve(ring->mqd_obj);
ring             3789 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ring->sched.ready = true;
ring             3795 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = NULL;
ring             3801 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3803 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		r = amdgpu_bo_reserve(ring->mqd_obj, false);
ring             3806 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
ring             3808 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			r = gfx_v9_0_kcq_init_queue(ring);
ring             3809 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			amdgpu_bo_kunmap(ring->mqd_obj);
ring             3810 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ring->mqd_ptr = NULL;
ring             3812 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_bo_unreserve(ring->mqd_obj);
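
The two resume paths above follow the same buffer-object discipline: reserve the MQD BO, kmap it to get a CPU pointer, run the queue init, then kunmap, drop the cached pointer, and unreserve, unwinding in reverse order if a step fails. A compilable stand-in for that shape (the toy_* helpers are hypothetical; the real calls are the amdgpu_bo_* functions in the listing):

    static int  toy_reserve(void) { return 0; }
    static void toy_unreserve(void) { }
    static int  toy_kmap(void **ptr) { static char mqd[64]; *ptr = mqd; return 0; }
    static void toy_kunmap(void) { }
    static int  toy_queue_init(void *mqd) { (void)mqd; return 0; }

    static int toy_resume_queue(void)
    {
        void *mqd_ptr;
        int r;

        r = toy_reserve();               /* amdgpu_bo_reserve() */
        if (r)
            return r;
        r = toy_kmap(&mqd_ptr);          /* amdgpu_bo_kmap() */
        if (r)
            goto out_unreserve;
        r = toy_queue_init(mqd_ptr);     /* gfx_v9_0_k{i,c}q_init_queue() */
        toy_kunmap();                    /* amdgpu_bo_kunmap() */
    out_unreserve:
        toy_unreserve();                 /* amdgpu_bo_unreserve() */
        return r;
    }
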
ring             3825 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             3858 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring = &adev->gfx.gfx_ring[0];
ring             3859 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		r = amdgpu_ring_test_helper(ring);
ring             3865 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring = &adev->gfx.compute_ring[i];
ring             3866 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_test_helper(ring);
ring             3915 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
ring             3922 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
ring             3930 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
ring             3969 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
ring             3970 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				adev->gfx.kiq.ring.pipe,
ring             3971 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				adev->gfx.kiq.ring.queue, 0);
ring             3972 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
ring             4096 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
ring             4102 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4105 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             4110 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             4115 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             4120 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_write_data_to_reg(ring, 0, false,
ring             4222 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
ring             4225 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ring_alloc(ring, 7);
ring             4228 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ring->name, r);
ring             4235 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
ring             4236 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
ring             4240 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             4241 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             4242 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             4243 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             4244 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
ring             4247 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_commit(ring);
ring             4250 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
ring             4265 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
ring             4277 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (!ring->sched.ready)
ring             4365 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             4947 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
ring             4949 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32-bit rptr */
ring             4952 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
ring             4954 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4958 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             4959 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
ring             4968 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
ring             4970 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4972 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             4974 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
ring             4975 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr);
ring             4977 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
ring             4978 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
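
set_wptr_gfx above shows the doorbell contract: publish the new write pointer to the write-back slot with a 64-bit atomic store first, then ring the doorbell; without a doorbell it falls back to two 32-bit MMIO writes (WPTR and WPTR_HI). A minimal userspace model of that ordering, assuming C11 atomics (the real code uses atomic64_set and WDOORBELL64):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct toy_ring {
        _Atomic uint64_t *wptr_wb;     /* write-back slot the CP samples */
        volatile uint64_t *doorbell;   /* mapped doorbell page */
        uint64_t wptr;
        bool use_doorbell;
    };

    static void toy_set_wptr(struct toy_ring *r)
    {
        if (r->use_doorbell) {
            /* write-back first, so the GPU reads a current value
             * when the doorbell wakes it */
            atomic_store(r->wptr_wb, r->wptr);
            *r->doorbell = r->wptr;
        } else {
            /* register path: split into lo/hi 32-bit writes,
             * as mmCP_RB0_WPTR / mmCP_RB0_WPTR_HI above */
        }
    }
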
ring             4982 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring             4984 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             4988 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
ring             4989 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		switch (ring->me) {
ring             4991 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
ring             4994 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
ring             5005 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
ring             5011 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
ring             5026 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
ring             5030 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			gfx_v9_0_ring_emit_de_meta(ring);
ring             5033 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, header);
ring             5035 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring,
ring             5040 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             5041 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, control);
ring             5044 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
ring             5063 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             5064 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
ring             5065 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
ring             5068 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             5070 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring,
ring             5075 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             5076 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, control);
ring             5079 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             5087 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
ring             5088 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
ring             5096 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
ring             5106 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             5107 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             5108 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             5109 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(seq));
ring             5110 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
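
The fence above is a RELEASE_MEM packet: cache-flush action bits, then a dword combining DATA_SEL (whether a 32- or 64-bit value is written) and INT_SEL (whether an interrupt fires), followed by the fence address and sequence split into lo/hi dwords. A sketch of the DATA_SEL/INT_SEL combination used on the line above; the bit positions here are placeholders for illustration, the real macros live in the gfx9 packet headers:

    #include <stdint.h>

    /* hypothetical field positions; the kernel's DATA_SEL()/INT_SEL()
     * macros encode the same choices */
    #define TOY_DATA_SEL(x)  ((uint32_t)(x) << 29)
    #define TOY_INT_SEL(x)   ((uint32_t)(x) << 24)

    static uint32_t fence_sel_dword(int write64bit, int int_sel)
    {
        /* DATA_SEL: 1 = write 32-bit data, 2 = write 64-bit data;
         * INT_SEL:  0 = no interrupt,      2 = interrupt + data */
        return TOY_DATA_SEL(write64bit ? 2 : 1) |
               TOY_INT_SEL(int_sel ? 2 : 0);
    }
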
ring             5113 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             5115 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             5116 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             5117 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             5119 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
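
emit_pipeline_sync above stalls the ring until the fence memory reaches sync_seq before later packets execute; the WAIT_REG_MEM it emits compares a 32-bit memory word against a reference under a mask. A CPU-side model of the comparison the packet performs, simplified to a greater-or-equal test and ignoring sequence wraparound:

    #include <stdint.h>

    /* what WAIT_REG_MEM(mem, ref=seq, mask, func=GE) waits for */
    static int fence_signaled(const volatile uint32_t *fence_mem,
                              uint32_t seq, uint32_t mask)
    {
        return (*fence_mem & mask) >= seq;
    }
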
ring             5124 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             5127 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             5130 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
ring             5132 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             5133 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, 0x0);
ring             5137 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
ring             5139 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32-bit rptr */
ring             5142 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
ring             5147 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell)
ring             5148 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
ring             5154 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
ring             5157 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5161 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
ring             5164 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->me > 0)
ring             5174 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 					    struct amdgpu_ring *ring,
ring             5182 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
ring             5224 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				      struct amdgpu_ring *ring,
ring             5231 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
ring             5240 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
ring             5243 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5246 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
ring             5249 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
ring             5250 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
ring             5253 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
ring             5255 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5258 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (ring->use_doorbell) {
ring             5259 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
ring             5260 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr);
ring             5266 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
ring             5269 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5275 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5276 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5278 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             5279 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             5280 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring             5284 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5285 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
ring             5287 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
ring             5288 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, 0);
ring             5289 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
ring             5293 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
ring             5295 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             5296 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             5299 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
ring             5306 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	csa_addr = amdgpu_csa_vaddr(ring->adev);
ring             5308 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
ring             5309 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
ring             5313 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
ring             5314 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
ring             5315 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
ring             5318 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
ring             5324 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	csa_addr = amdgpu_csa_vaddr(ring->adev);
ring             5330 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
ring             5331 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             5335 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
ring             5336 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
ring             5337 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
ring             5340 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
ring             5342 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
ring             5343 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_start, 1: frame_end */
ring             5346 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
ring             5350 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	if (amdgpu_sriov_vf(ring->adev))
ring             5351 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_ring_emit_ce_meta(ring);
ring             5353 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_ring_emit_tmz(ring, true);
ring             5375 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             5376 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, dw2);
ring             5377 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             5380 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
ring             5383 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
ring             5384 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
ring             5385 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
ring             5386 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
ring             5387 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	ret = ring->wptr & ring->buf_mask;
ring             5388 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
ring             5392 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
ring             5395 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	BUG_ON(offset > ring->buf_mask);
ring             5396 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
ring             5398 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	cur = (ring->wptr & ring->buf_mask) - 1;
ring             5400 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->ring[offset] = cur - offset;
ring             5402 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
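
init_cond_exec reserves a COND_EXEC packet whose final dword (the number of dwords to conditionally skip) is patched later; patch_cond_exec computes that count from the saved offset up to the dword just before the current wptr, handling the wrap around the ring. The same arithmetic as the patch function above, extracted into a standalone helper:

    #include <stdint.h>

    /* dwords to skip, from the patched slot at 'offset' to just before
     * the current wptr, wrapping at ring_size_dw (= ring_size >> 2) */
    static uint32_t cond_exec_skip(uint32_t offset, uint32_t wptr,
                                   uint32_t buf_mask, uint32_t ring_size_dw)
    {
        uint32_t cur = (wptr & buf_mask) - 1;

        if (cur > offset)
            return cur - offset;
        return ring_size_dw - offset + cur;   /* wrapped case */
    }
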
ring             5405 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
ring             5407 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5409 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
ring             5410 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0 |	/* src: register*/
ring             5413 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, reg);
ring             5414 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             5415 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
ring             5417 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
ring             5421 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
ring             5426 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	switch (ring->funcs->type) {
ring             5437 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5438 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, cmd);
ring             5439 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, reg);
ring             5440 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, 0);
ring             5441 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	amdgpu_ring_write(ring, val);
ring             5444 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             5447 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
ring             5450 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ring             5454 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
ring             5455 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5456 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
ring             5460 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
ring             5463 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
ring             5467 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
ring             5469 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring             5668 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             5682 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ring = &adev->gfx.compute_ring[i];
ring             5686 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
ring             5687 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				amdgpu_fence_process(ring);
ring             5698 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	struct amdgpu_ring *ring;
ring             5712 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			ring = &adev->gfx.compute_ring[i];
ring             5713 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			if (ring->me == me_id && ring->pipe == pipe_id &&
ring             5714 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 			    ring->queue == queue_id)
ring             5715 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 				drm_sched_fault(&ring->sched);
ring             6315 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
ring              309 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
ring              347 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
ring              348 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
ring              369 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
ring              372 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring              374 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	unsigned eng = ring->vm_inv_eng;
ring              384 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
ring              385 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
ring              387 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 		amdgpu_ring_emit_reg_wait(ring,
ring              390 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
ring              393 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
ring              396 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
ring              401 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
ring              402 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
ring              407 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
ring              412 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
ring              415 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	struct amdgpu_device *adev = ring->adev;
ring              418 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
ring              423 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 	amdgpu_ring_emit_wreg(ring, reg, pasid);
ring              558 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 		struct amdgpu_ring *ring = adev->rings[i];
ring              559 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 		unsigned vmhub = ring->funcs->vmhub;
ring              561 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
ring              563 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 			 ring->idx, ring->name, ring->vm_inv_eng,
ring              564 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 			 ring->funcs->vmhub);
ring              371 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
ring              381 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c 	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
ring              384 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c 	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
ring              443 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
ring              452 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
ring              455 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
ring              460 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
ring              463 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c 	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
ring              645 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
ring              654 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
ring              657 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
ring              662 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
ring              665 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
ring              501 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (adev->gfx.kiq.ring.sched.ready &&
ring              567 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
ring              570 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
ring              571 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring              572 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
ring              574 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	unsigned eng = ring->vm_inv_eng;
ring              586 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		amdgpu_ring_emit_reg_wait(ring,
ring              589 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
ring              592 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
ring              595 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
ring              605 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
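
Both the gmc_v9 and gmc_v10 flush paths above emit the same invalidation dance: optionally acquire the invalidation semaphore (used on the MMHUBs), write the page-table base lo/hi for the vmid, write the invalidate request and wait for that vmid's ack bit, then release the semaphore. A sketch of the ordering with stub emitters; the toy_emit_* names are stand-ins for the amdgpu_ring_emit_* calls in the listing, and the per-engine register offsets are elided:

    #include <stdint.h>
    #include <stdbool.h>

    static void toy_emit_wreg(uint32_t reg, uint32_t val) { (void)reg; (void)val; }
    static void toy_emit_reg_wait(uint32_t reg, uint32_t val, uint32_t mask)
    { (void)reg; (void)val; (void)mask; }
    static void toy_emit_write_and_wait(uint32_t wreg, uint32_t wval,
                                        uint32_t rreg, uint32_t mask)
    { (void)wreg; (void)wval; (void)rreg; (void)mask; }

    struct toy_hub {
        uint32_t ctx0_ptb_lo32, ctx0_ptb_hi32;
        uint32_t inv_req, inv_ack, inv_sem;
    };

    static void toy_flush_gpu_tlb(struct toy_hub *hub, unsigned vmid,
                                  uint64_t pd_addr, uint32_t req,
                                  bool use_semaphore)
    {
        if (use_semaphore)       /* MMHUB only: take the invalidate semaphore */
            toy_emit_reg_wait(hub->inv_sem, 0x1, 0x1);

        toy_emit_wreg(hub->ctx0_ptb_lo32 + 2 * vmid, (uint32_t)pd_addr);
        toy_emit_wreg(hub->ctx0_ptb_hi32 + 2 * vmid, (uint32_t)(pd_addr >> 32));

        /* kick the invalidation and wait for this vmid's ack bit */
        toy_emit_write_and_wait(hub->inv_req, req, hub->inv_ack, 1u << vmid);

        if (use_semaphore)       /* release */
            toy_emit_wreg(hub->inv_sem, 0);
    }
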
ring              610 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
ring              613 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	struct amdgpu_device *adev = ring->adev;
ring              617 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
ring              620 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
ring              625 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	amdgpu_ring_emit_wreg(ring, reg, pasid);
ring              810 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 	struct amdgpu_ring *ring;
ring              818 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		ring = adev->rings[i];
ring              819 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		vmhub = ring->funcs->vmhub;
ring              824 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 				ring->name);
ring              828 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		ring->vm_inv_eng = inv_eng - 1;
ring              829 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
ring              832 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
ring              228 drivers/gpu/drm/amd/amdgpu/iceland_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              229 drivers/gpu/drm/amd/amdgpu/iceland_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              230 drivers/gpu/drm/amd/amdgpu/iceland_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              231 drivers/gpu/drm/amd/amdgpu/iceland_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring              260 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              261 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              262 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              263 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring              264 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
ring              265 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
ring              266 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
ring              267 drivers/gpu/drm/amd/amdgpu/navi10_ih.c 	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
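
The iceland and navi10 IH handlers above copy 4 and 8 dwords per interrupt vector out of the ring, converting each with le32_to_cpu so the decode also works on big-endian hosts. A host-independent model of that read, decoding straight from a byte buffer:

    #include <stdint.h>

    /* decode a little-endian u32 regardless of host endianness;
     * this is the guarantee le32_to_cpu provides for the reads above */
    static uint32_t le32dec(const uint8_t *p)
    {
        return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    /* pull one n-dword IH vector starting at ring_index (in dwords) */
    static void ih_decode(const uint8_t *ring, uint32_t ring_index,
                          uint32_t *dw, int n)
    {
        for (int i = 0; i < n; i++)
            dw[i] = le32dec(ring + 4u * (ring_index + i));
    }
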
ring               56 drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c 				struct amdgpu_ring *ring)
ring               58 drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c 	if (!ring || !ring->funcs->emit_wreg)
ring               61 drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
ring               54 drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c 				struct amdgpu_ring *ring)
ring               56 drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c 	if (!ring || !ring->funcs->emit_wreg)
ring               61 drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
ring               64 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c 				struct amdgpu_ring *ring)
ring               66 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c 	if (!ring || !ring->funcs->emit_wreg)
ring               69 drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
ring               81 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c 				struct amdgpu_ring *ring)
ring               83 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c 	if (!ring || !ring->funcs->emit_wreg)
ring               86 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
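
Every nbio variant above guards its HDP flush the same way: if there is no ring, or the ring cannot emit register writes, fall back to a direct MMIO write of the flush register; otherwise emit the write on the ring so it stays ordered with the surrounding packets. The shape, with stand-in helpers:

    #include <stdint.h>

    struct toy_ring;
    struct toy_ring_funcs {
        void (*emit_wreg)(struct toy_ring *ring, uint32_t reg, uint32_t val);
    };
    struct toy_ring { const struct toy_ring_funcs *funcs; };

    static void toy_mmio_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

    static void toy_hdp_flush(struct toy_ring *ring, uint32_t flush_reg)
    {
        if (!ring || !ring->funcs->emit_wreg)
            toy_mmio_write(flush_reg, 0);               /* direct register write */
        else
            ring->funcs->emit_wreg(ring, flush_reg, 0); /* ordered on the ring */
    }
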
ring              488 drivers/gpu/drm/amd/amdgpu/nv.c static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring              490 drivers/gpu/drm/amd/amdgpu/nv.c 	adev->nbio_funcs->hdp_flush(adev, ring);
ring              494 drivers/gpu/drm/amd/amdgpu/nv.c 				struct amdgpu_ring *ring)
ring              496 drivers/gpu/drm/amd/amdgpu/nv.c 	if (!ring || !ring->funcs->emit_wreg) {
ring              499 drivers/gpu/drm/amd/amdgpu/nv.c 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
ring               99 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	struct psp_ring *ring;
ring              102 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	ring = &psp->km_ring;
ring              104 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	ring->ring_type = ring_type;
ring              107 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	ring->ring_size = 0x1000;
ring              108 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
ring              111 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 				      &ring->ring_mem_mc_addr,
ring              112 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 				      (void **)&ring->ring_mem);
ring              114 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 		ring->ring_size = 0;
ring              126 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              130 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              133 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              136 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	psp_ring_reg = ring->ring_size;
ring              178 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              186 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 			      &ring->ring_mem_mc_addr,
ring              187 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 			      (void **)&ring->ring_mem);
ring              198 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              199 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
ring              201 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
ring              203 drivers/gpu/drm/amd/amdgpu/psp_v10_0.c 	uint32_t ring_size_dw = ring->ring_size / 4;
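
All four PSP variants that follow allocate a 0x1000-byte kernel BO for the command ring and derive two numbers from it: the index of the last rb frame (ring bytes divided by the frame size, minus one), used to find the ring end, and the ring size in dwords that is programmed into the mailbox register. The arithmetic, standalone; TOY_FRAME_SIZE is a placeholder, the real divisor is sizeof(struct psp_gfx_rb_frame):

    #include <stdint.h>

    #define TOY_RING_SIZE   0x1000u   /* matches ring->ring_size above */
    #define TOY_FRAME_SIZE  64u       /* placeholder for sizeof(psp_gfx_rb_frame) */

    /* index of the last frame slot in the ring */
    static uint32_t psp_last_frame_index(void)
    {
        return TOY_RING_SIZE / TOY_FRAME_SIZE - 1;
    }

    /* ring size in dwords, as written to the size register */
    static uint32_t psp_ring_size_dw(void)
    {
        return TOY_RING_SIZE / 4;     /* 0x400 dwords */
    }
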
ring              370 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	struct psp_ring *ring;
ring              375 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	ring = &psp->km_ring;
ring              377 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	ring->ring_type = ring_type;
ring              380 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	ring->ring_size = 0x1000;
ring              381 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
ring              384 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 				      &ring->ring_mem_mc_addr,
ring              385 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 				      (void **)&ring->ring_mem);
ring              387 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		ring->ring_size = 0;
ring              434 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              445 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              448 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              464 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              467 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              470 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		psp_ring_reg = ring->ring_size;
ring              493 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              501 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 			      &ring->ring_mem_mc_addr,
ring              502 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 			      (void **)&ring->ring_mem);
ring              513 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              514 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
ring              516 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
ring              518 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 	uint32_t ring_size_dw = ring->ring_size / 4;
ring              207 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	struct psp_ring *ring;
ring              212 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	ring = &psp->km_ring;
ring              214 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	ring->ring_type = ring_type;
ring              217 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	ring->ring_size = 0x1000;
ring              218 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
ring              221 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 				      &ring->ring_mem_mc_addr,
ring              222 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 				      (void **)&ring->ring_mem);
ring              224 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		ring->ring_size = 0;
ring              243 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              248 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              251 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              267 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              270 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              273 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		psp_ring_reg = ring->ring_size;
ring              323 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              331 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 			      &ring->ring_mem_mc_addr,
ring              332 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 			      (void **)&ring->ring_mem);
ring              343 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	struct psp_ring *ring = &psp->km_ring;
ring              344 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
ring              346 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
ring              348 drivers/gpu/drm/amd/amdgpu/psp_v12_0.c 	uint32_t ring_size_dw = ring->ring_size / 4;
ring              242 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	struct psp_ring *ring;
ring              245 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	ring = &psp->km_ring;
ring              247 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	ring->ring_type = ring_type;
ring              250 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	ring->ring_size = 0x1000;
ring              251 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
ring              254 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 				      &ring->ring_mem_mc_addr,
ring              255 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 				      (void **)&ring->ring_mem);
ring              257 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		ring->ring_size = 0;
ring              300 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	struct psp_ring *ring = &psp->km_ring;
ring              313 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              316 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              334 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
ring              337 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
ring              340 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		psp_ring_reg = ring->ring_size;
ring              399 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	struct psp_ring *ring = &psp->km_ring;
ring              407 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 			      &ring->ring_mem_mc_addr,
ring              408 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 			      (void **)&ring->ring_mem);
ring              419 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	struct psp_ring *ring = &psp->km_ring;
ring              420 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
ring              422 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
ring              424 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c 	uint32_t ring_size_dw = ring->ring_size / 4;
ring              194 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
ring              197 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
ring              207 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
ring              209 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_device *adev = ring->adev;
ring              210 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
ring              222 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
ring              224 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_device *adev = ring->adev;
ring              226 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
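
The sdma_v2_4 pointer accessors above expose the unit conversion the hardware expects: the rptr/wptr registers and write-back slots hold byte offsets, while the driver tracks pointers in dwords, so reads shift right by two and writes shift left by two. Standalone:

    #include <stdint.h>

    /* hardware registers hold byte offsets; the driver counts in dwords */
    static uint64_t sdma_rptr_dw(uint32_t wb_value)  { return wb_value >> 2; }
    static uint32_t sdma_wptr_reg(uint64_t wptr_dw)  { return (uint32_t)wptr_dw << 2; }
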
ring              229 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              231 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              236 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 			amdgpu_ring_write(ring, ring->funcs->nop |
ring              239 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 			amdgpu_ring_write(ring, ring->funcs->nop);
ring              250 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
ring              258 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
ring              260 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
ring              263 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
ring              264 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              265 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              266 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0);
ring              267 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0);
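
emit_ib above pads with nops before the INDIRECT_BUFFER packet: the packet is 6 dwords, and inserting (2 - wptr) & 7 nops makes it start at wptr % 8 == 2 and therefore end on an 8-dword boundary; the IB base address itself is masked to 32 bytes (the 0xffffffe0 above). The two alignment rules as checkable helpers:

    #include <stdint.h>
    #include <assert.h>

    /* nops needed so the 6-dword INDIRECT packet ends on an 8-dword boundary */
    static uint32_t sdma_ib_pad(uint32_t wptr_dw)
    {
        uint32_t pad = (2u - wptr_dw) & 7u;

        assert(((wptr_dw + pad) & 7u) == 2u);       /* packet start offset */
        assert(((wptr_dw + pad + 6u) & 7u) == 0u);  /* packet end aligned  */
        return pad;
    }

    /* IB base must be 32-byte aligned; the low bits are simply masked off */
    static uint32_t sdma_ib_base_lo(uint64_t gpu_addr)
    {
        return (uint32_t)gpu_addr & 0xffffffe0u;
    }
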
ring              278 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              282 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	if (ring->me == 0)
ring              287 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              290 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
ring              291 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
ring              292 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
ring              293 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
ring              294 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring              308 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              313 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              314 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              315 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              316 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring              321 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              322 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		amdgpu_ring_write(ring, lower_32_bits(addr));
ring              323 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		amdgpu_ring_write(ring, upper_32_bits(addr));
ring              324 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              328 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
ring              329 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
ring              341 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
ring              342 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
ring              412 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_ring *ring;
ring              419 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring = &adev->sdma.instance[i].ring;
ring              420 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		wb_offset = (ring->rptr_offs * 4);
ring              438 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              462 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
ring              463 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring              465 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring->wptr = 0;
ring              466 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
ring              480 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring->sched.ready = true;
ring              485 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring = &adev->sdma.instance[i].ring;
ring              486 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		r = amdgpu_ring_test_helper(ring);
ring              490 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              547 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
ring              549 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_device *adev = ring->adev;
ring              564 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	r = amdgpu_ring_alloc(ring, 5);
ring              568 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
ring              570 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring              571 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
ring              572 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
ring              573 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              574 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_commit(ring);
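
ring_test_ring above pushes a magic value (0xDEADBEEF) through the engine into a write-back slot seeded with a different value, then polls that slot from the CPU under a timeout. The CPU half of the handshake, modeled standalone; TOY_TIMEOUT is a placeholder for the adev->usec_timeout loop bound:

    #include <stdint.h>
    #include <stdbool.h>

    #define TOY_MAGIC    0xDEADBEEFu
    #define TOY_TIMEOUT  100000u   /* poll iterations; stands in for usec_timeout */

    static bool ring_test_poll(const volatile uint32_t *wb_slot)
    {
        for (uint32_t i = 0; i < TOY_TIMEOUT; i++) {
            if (*wb_slot == TOY_MAGIC)
                return true;       /* engine executed the write packet */
            /* real code delays ~1us between reads */
        }
        return false;              /* timeout: the ring did not advance */
    }
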
ring              599 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              601 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_device *adev = ring->adev;
ring              632 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring              747 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring              749 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              771 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring              773 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring              774 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring              777 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              781 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring              782 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring              783 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, seq); /* reference */
ring              784 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring              785 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring              798 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring              801 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring              804 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              807 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
ring              808 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0);
ring              809 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0); /* reference */
ring              810 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, 0); /* mask */
ring              811 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring              815 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
ring              818 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
ring              820 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, reg);
ring              821 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	amdgpu_ring_write(ring, val);
ring              840 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	struct amdgpu_ring *ring;
ring              869 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring = &adev->sdma.instance[i].ring;
ring              870 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring->ring_obj = NULL;
ring              871 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		ring->use_doorbell = false;
ring              872 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		sprintf(ring->name, "sdma%d", i);
ring              873 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring              891 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring             1063 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 			amdgpu_fence_process(&adev->sdma.instance[0].ring);
ring             1076 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 			amdgpu_fence_process(&adev->sdma.instance[1].ring);
ring             1101 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
ring             1167 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
ring             1168 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		adev->sdma.instance[i].ring.me = i;
ring             1250 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring             1268 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c 		sched = &adev->sdma.instance[i].ring.sched;
ring              350 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
ring              353 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
ring              363 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              365 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              368 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	if (ring->use_doorbell || ring->use_pollmem) {
ring              370 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
ring              372 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
ring              385 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              387 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              389 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	if (ring->use_doorbell) {
ring              390 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
ring              392 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
ring              393 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
ring              394 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	} else if (ring->use_pollmem) {
ring              395 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
ring              397 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
ring              399 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
ring              403 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              405 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              410 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			amdgpu_ring_write(ring, ring->funcs->nop |
ring              413 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			amdgpu_ring_write(ring, ring->funcs->nop);
ring              424 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              432 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
ring              434 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
ring              437 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
ring              438 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              439 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              440 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0);
ring              441 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0);
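Before the INDIRECT_BUFFER packet, sdma_v3_0_ring_emit_ib() pads with (2 - lower_32_bits(ring->wptr)) & 7 NOPs; the 6-dword packet then starts at dword index 2 mod 8 and therefore ends on an 8-dword boundary, which the engine requires. A standalone check of that arithmetic:

    #include <stdint.h>
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (uint32_t wptr = 0; wptr < 64; wptr++) {
            uint32_t pad = (2 - wptr) & 7;    /* NOPs emitted first */
            /* the 6-dword INDIRECT_BUFFER packet then ends on an
             * 8-dword boundary */
            assert((wptr + pad + 6) % 8 == 0);
        }
        printf("padding rule holds for every wptr phase\n");
        return 0;
    }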
ring              452 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              456 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	if (ring->me == 0)
ring              461 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              464 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
ring              465 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
ring              466 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
ring              467 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
ring              468 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring              482 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              487 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              488 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              489 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              490 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring              495 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              496 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_write(ring, lower_32_bits(addr));
ring              497 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_write(ring, upper_32_bits(addr));
ring              498 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              502 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
ring              503 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
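sdma_v3_0_ring_emit_fence() writes the sequence number as one FENCE packet for the low dword and, for 64-bit fences, a second packet for the high dword; the intervening addr += 4 line does not contain the word "ring" and so is not listed above. The closing TRAP packet raises the interrupt whose handler ends up in amdgpu_fence_process(). A little model of the two-packet write (the readback assumes a little-endian host):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static void fence_write(uint8_t *mem, uint64_t addr, uint64_t seq, int f64)
    {
        uint32_t lo = (uint32_t)seq, hi = (uint32_t)(seq >> 32);

        memcpy(mem + addr, &lo, 4);          /* first FENCE packet */
        if (f64)
            memcpy(mem + addr + 4, &hi, 4);  /* second FENCE packet */
    }

    int main(void)
    {
        uint8_t mem[16] = {0};
        uint64_t seq;

        fence_write(mem, 0, 0x123456789abcdef0ull, 1);
        memcpy(&seq, mem, 8);                /* little-endian readback */
        printf("fence readback: 0x%llx\n", (unsigned long long)seq);
        return 0;
    }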
ring              515 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
ring              516 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
ring              647 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_ring *ring;
ring              656 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring = &adev->sdma.instance[i].ring;
ring              657 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_clear_ring(ring);
ring              658 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		wb_offset = (ring->rptr_offs * 4);
ring              676 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              687 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring->wptr = 0;
ring              689 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		sdma_v3_0_ring_set_wptr(ring);
ring              701 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
ring              702 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring              706 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		if (ring->use_doorbell) {
ring              708 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 						 OFFSET, ring->doorbell_index);
ring              716 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring              723 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		if (ring->use_pollmem) {
ring              748 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring->sched.ready = true;
ring              757 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring = &adev->sdma.instance[i].ring;
ring              758 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		r = amdgpu_ring_test_helper(ring);
ring              762 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              819 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
ring              821 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              836 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	r = amdgpu_ring_alloc(ring, 5);
ring              840 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
ring              842 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring              843 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
ring              844 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
ring              845 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              846 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_commit(ring);
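sdma_v3_0_ring_test_ring() follows the usual smoke-test shape: seed a write-back slot, emit a WRITE_UNTILED packet that stores 0xDEADBEEF into it, commit the ring, then poll until the value lands or the timeout expires. A user-space stand-in for that poll loop; the direct store below replaces the real engine write:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        volatile uint32_t wb_slot = 0xCAFEDEAD;  /* seed value */
        unsigned i;

        wb_slot = 0xDEADBEEF;        /* stands in for the SDMA write */

        for (i = 0; i < 1000; i++)   /* adev->usec_timeout analogue */
            if (wb_slot == 0xDEADBEEF)
                break;

        printf(i < 1000 ? "ring test passed after %u polls\n"
                        : "ring test timed out (%u)\n", i);
        return 0;
    }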
ring              871 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              873 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              904 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             1018 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring             1020 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring             1042 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             1044 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             1045 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             1048 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring             1052 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             1053 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring             1054 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, seq); /* reference */
ring             1055 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring             1056 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring             1069 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1072 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1075 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring             1078 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
ring             1079 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0);
ring             1080 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0); /* reference */
ring             1081 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, 0); /* mask */
ring             1082 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
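Both emit_pipeline_sync() and emit_vm_flush() above are built on the POLL_REGMEM packet: the engine repeatedly reads a memory location or register, masks the value, compares it against a reference with a compare function (equal here, per the reference/mask comments), and retries up to the 0xfff count. A sketch of the equal-compare case only; the function encoding itself is not shown in the lines above:

    #include <stdint.h>
    #include <stdio.h>

    static int poll_ok(uint32_t value, uint32_t mask, uint32_t ref)
    {
        return (value & mask) == ref;   /* func(equal) case only */
    }

    int main(void)
    {
        /* pipeline sync: wait for fence memory to reach seq */
        printf("seq 41 vs ref 42: %d\n", poll_ok(41, 0xffffffff, 42));
        printf("seq 42 vs ref 42: %d\n", poll_ok(42, 0xffffffff, 42));
        return 0;
    }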
ring             1086 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1089 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
ring             1091 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, reg);
ring             1092 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	amdgpu_ring_write(ring, val);
ring             1118 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	struct amdgpu_ring *ring;
ring             1147 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring = &adev->sdma.instance[i].ring;
ring             1148 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		ring->ring_obj = NULL;
ring             1150 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			ring->use_doorbell = true;
ring             1151 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
ring             1153 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			ring->use_pollmem = true;
ring             1156 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		sprintf(ring->name, "sdma%d", i);
ring             1157 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             1175 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring             1397 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			amdgpu_fence_process(&adev->sdma.instance[0].ring);
ring             1410 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 			amdgpu_fence_process(&adev->sdma.instance[1].ring);
ring             1435 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
ring             1605 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
ring             1606 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		adev->sdma.instance[i].ring.me = i;
ring             1688 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring             1706 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c 		sched = &adev->sdma.instance[i].ring.sched;
ring              541 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
ring              546 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
ring              559 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              561 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              564 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	if (ring->use_doorbell) {
ring              566 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
ring              569 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
ring              571 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
ring              573 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->me, wptr);
ring              586 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              588 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              591 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	if (ring->use_doorbell) {
ring              592 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
ring              598 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->wptr_offs,
ring              599 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				lower_32_bits(ring->wptr << 2),
ring              600 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				upper_32_bits(ring->wptr << 2));
ring              602 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WRITE_ONCE(*wb, (ring->wptr << 2));
ring              604 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->doorbell_index, ring->wptr << 2);
ring              605 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
ring              610 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->me,
ring              611 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				lower_32_bits(ring->wptr << 2),
ring              612 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->me,
ring              613 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				upper_32_bits(ring->wptr << 2));
ring              614 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
ring              615 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			    lower_32_bits(ring->wptr << 2));
ring              616 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
ring              617 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			    upper_32_bits(ring->wptr << 2));
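On sdma_v4_0 the write pointer is a full 64-bit quantity: the doorbell path publishes the whole qword (WRITE_ONCE plus WDOORBELL64), while the MMIO fallback splits the byte offset across RB_WPTR and RB_WPTR_HI. The split, in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t wptr = 0x123456789ull;   /* dword count */
        uint64_t bytes = wptr << 2;       /* dwords -> bytes */
        uint32_t lo = (uint32_t)bytes;
        uint32_t hi = (uint32_t)(bytes >> 32);

        printf("RB_WPTR=0x%08x RB_WPTR_HI=0x%08x\n", lo, hi);
        return 0;
    }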
ring              628 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
ring              630 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              633 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	if (ring->use_doorbell) {
ring              635 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
ring              637 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
ring              639 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
ring              652 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
ring              654 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              656 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	if (ring->use_doorbell) {
ring              657 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
ring              660 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WRITE_ONCE(*wb, (ring->wptr << 2));
ring              661 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
ring              663 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		uint64_t wptr = ring->wptr << 2;
ring              665 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
ring              667 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
ring              672 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              674 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              679 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			amdgpu_ring_write(ring, ring->funcs->nop |
ring              682 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			amdgpu_ring_write(ring, ring->funcs->nop);
ring              693 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              701 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
ring              703 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
ring              706 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
ring              707 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              708 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              709 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, 0);
ring              710 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, 0);
ring              714 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
ring              720 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              726 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, addr0);
ring              727 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, addr1);
ring              730 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, addr0 << 2);
ring              731 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, addr1 << 2);
ring              733 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, ref); /* reference */
ring              734 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, mask); /* mask */
ring              735 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
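sdma_v4_0 factors the poll emission into sdma_v4_0_wait_reg_mem(); per the lines above, memory polls pass the split 64-bit address through unchanged, while register polls shift the dword offsets left by 2 into byte offsets. A small model of just that address handling (the register offset used below is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static void emit_poll_addrs(int mem_space, uint32_t addr0, uint32_t addr1,
                                uint32_t out[2])
    {
        if (mem_space) {            /* addr0/addr1 = low/high address */
            out[0] = addr0;
            out[1] = addr1;
        } else {                    /* addr0/addr1 = register offsets */
            out[0] = addr0 << 2;
            out[1] = addr1 << 2;
        }
    }

    int main(void)
    {
        uint32_t dw[2];
        emit_poll_addrs(0, 0x54a, 0, dw);   /* hypothetical reg offset */
        printf("register byte offset: 0x%x\n", dw[0]);
        return 0;
    }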
ring              746 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              748 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              752 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
ring              754 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_wait_reg_mem(ring, 0, 1,
ring              770 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              775 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              778 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              779 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              780 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring              785 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
ring              788 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, lower_32_bits(addr));
ring              789 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, upper_32_bits(addr));
ring              790 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              794 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
ring              795 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
ring              813 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		sdma[i] = &adev->sdma.instance[i].ring;
ring              960 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
ring              963 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
ring              985 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
ring              992 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	wb_offset = (ring->rptr_offs * 4);
ring              995 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
ring             1013 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
ring             1014 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
ring             1016 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->wptr = 0;
ring             1025 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				 ring->use_doorbell);
ring             1028 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 					OFFSET, ring->doorbell_index);
ring             1032 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_ring_set_wptr(ring);
ring             1038 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             1061 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->sched.ready = true;
ring             1075 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
ring             1082 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	wb_offset = (ring->rptr_offs * 4);
ring             1085 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
ring             1103 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
ring             1104 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
ring             1106 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->wptr = 0;
ring             1115 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				 ring->use_doorbell);
ring             1118 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 					OFFSET, ring->doorbell_index);
ring             1123 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_page_ring_set_wptr(ring);
ring             1129 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring             1152 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	ring->sched.ready = true;
ring             1287 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_ring *ring;
ring             1339 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		ring = &adev->sdma.instance[i].ring;
ring             1341 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		r = amdgpu_ring_test_helper(ring);
ring             1356 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		if (adev->mman.buffer_funcs_ring == ring)
ring             1372 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
ring             1374 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1389 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	r = amdgpu_ring_alloc(ring, 5);
ring             1393 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
ring             1395 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring             1396 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
ring             1397 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
ring             1398 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring             1399 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_commit(ring);
ring             1424 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring             1426 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1457 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             1576 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring             1578 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring             1601 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             1603 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             1604 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             1607 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_wait_reg_mem(ring, 1, 0,
ring             1623 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1626 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1629 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1632 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
ring             1634 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, reg);
ring             1635 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	amdgpu_ring_write(ring, val);
ring             1638 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1641 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
ring             1794 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 	struct amdgpu_ring *ring;
ring             1817 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		ring = &adev->sdma.instance[i].ring;
ring             1818 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		ring->ring_obj = NULL;
ring             1819 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		ring->use_doorbell = true;
ring             1822 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 				ring->use_doorbell?"true":"false");
ring             1825 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
ring             1827 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		sprintf(ring->name, "sdma%d", i);
ring             1828 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
ring             1834 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring = &adev->sdma.instance[i].page;
ring             1835 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring->ring_obj = NULL;
ring             1836 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring->use_doorbell = true;
ring             1841 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
ring             1842 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			ring->doorbell_index += 0x400;
ring             1844 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			sprintf(ring->name, "page%d", i);
ring             1845 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			r = amdgpu_ring_init(adev, ring, 1024,
ring             1878 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring             2010 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
ring             2088 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
ring             2392 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			adev->sdma.instance[i].ring.funcs =
ring             2395 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			adev->sdma.instance[i].ring.funcs =
ring             2397 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		adev->sdma.instance[i].ring.me = i;
ring             2513 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring             2534 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c 			sched = &adev->sdma.instance[i].ring.sched;
ring              232 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
ring              236 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
ring              237 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
ring              238 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
ring              239 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 1);
ring              240 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
ring              241 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
ring              246 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
ring              251 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	BUG_ON(offset > ring->buf_mask);
ring              252 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	BUG_ON(ring->ring[offset] != 0x55aa55aa);
ring              254 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	cur = (ring->wptr - 1) & ring->buf_mask;
ring              256 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->ring[offset] = cur - offset;
ring              258 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
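sdma_v5_0's conditional-execute support writes a dummy dword (0x55aa55aa) after the COND_EXE packet and records its ring offset; patch_cond_exec() later replaces it with the dword distance, modulo the ring size, from that slot to the current wptr, so a false condition lets the engine skip the patched-over span. The wrap arithmetic from the lines above, checked standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t buf_mask = 0x3ff;     /* 1024-dword ring */
        uint32_t offset = 0x3fe;       /* dummy slot near the wrap point */
        uint32_t wptr = 5;             /* writes continued past the wrap */
        uint32_t cur = (wptr - 1) & buf_mask;
        uint32_t patch = (cur > offset) ? cur - offset
                                        : (buf_mask + 1) - offset + cur;

        printf("skip count patched in: %u dwords\n", patch);  /* 6 */
        return 0;
    }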
ring              268 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
ring              273 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
ring              286 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              288 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              292 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	if (ring->use_doorbell) {
ring              294 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
ring              302 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
ring              303 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
ring              306 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->me, highbit, lowbit);
ring              322 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              324 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              327 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	if (ring->use_doorbell) {
ring              332 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->wptr_offs,
ring              333 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				lower_32_bits(ring->wptr << 2),
ring              334 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				upper_32_bits(ring->wptr << 2));
ring              336 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
ring              337 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
ring              339 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->doorbell_index, ring->wptr << 2);
ring              340 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
ring              345 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->me,
ring              346 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				lower_32_bits(ring->wptr << 2),
ring              347 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->me,
ring              348 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				upper_32_bits(ring->wptr << 2));
ring              349 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
ring              350 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			lower_32_bits(ring->wptr << 2));
ring              351 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
ring              352 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			upper_32_bits(ring->wptr << 2));
ring              356 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              358 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring              363 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			amdgpu_ring_write(ring, ring->funcs->nop |
ring              366 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			amdgpu_ring_write(ring, ring->funcs->nop);
ring              377 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              383 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
ring              388 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
ring              389 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0);
ring              390 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
ring              394 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0xffffff80);
ring              395 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0xffff);
ring              405 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
ring              407 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
ring              410 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
ring              411 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              412 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              413 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
ring              414 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
ring              424 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              426 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              430 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	if (ring->me == 0)
ring              435 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring              438 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
ring              439 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
ring              440 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
ring              441 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
ring              442 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring              456 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              459 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              462 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
ring              466 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              467 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              468 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(seq));
ring              473 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
ring              477 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, lower_32_bits(addr));
ring              478 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, upper_32_bits(addr));
ring              479 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              485 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
ring              486 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
ring              500 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
ring              501 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
ring              623 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_ring *ring;
ring              635 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring = &adev->sdma.instance[i].ring;
ring              636 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		wb_offset = (ring->rptr_offs * 4);
ring              641 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              658 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
ring              679 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
ring              680 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
ring              682 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->wptr = 0;
ring              688 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
ring              689 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
ring              695 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		if (ring->use_doorbell) {
ring              698 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 					OFFSET, ring->doorbell_index);
ring              705 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring              706 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 						      ring->doorbell_index, 20);
ring              709 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			sdma_v5_0_ring_set_wptr(ring);
ring              754 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->sched.ready = true;
ring              761 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		r = amdgpu_ring_test_ring(ring);
ring              763 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			ring->sched.ready = false;
ring              767 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              885 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
ring              887 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              904 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	r = amdgpu_ring_alloc(ring, 5);
ring              906 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
ring              911 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
ring              913 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring              914 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
ring              915 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
ring              916 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              917 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_commit(ring);
ring              931 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i);
ring              933 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
ring              936 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			  ring->idx, tmp);
ring              952 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              954 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              989 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring             1004 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
ring             1113 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring             1115 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
ring             1138 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             1140 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             1141 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             1144 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring             1148 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring             1149 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
ring             1150 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, seq); /* reference */
ring             1151 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring             1152 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring             1166 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1169 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1172 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1175 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
ring             1177 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, reg);
ring             1178 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, val);
ring             1181 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1184 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
ring             1187 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1188 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, 0);
ring             1189 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, val); /* reference */
ring             1190 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, mask); /* mask */
ring             1191 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
ring             1195 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ring             1199 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_emit_wreg(ring, reg0, ref);
ring             1201 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
ring             1202 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
ring             1222 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_ring *ring;
ring             1247 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring = &adev->sdma.instance[i].ring;
ring             1248 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->ring_obj = NULL;
ring             1249 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->use_doorbell = true;
ring             1252 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				ring->use_doorbell?"true":"false");
ring             1254 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		ring->doorbell_index = (i == 0) ?
ring             1258 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		sprintf(ring->name, "sdma%d", i);
ring             1259 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring             1277 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring             1360 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
ring             1363 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1367 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_sdma_get_index_from_ring(ring, &index);
ring             1374 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_set_preempt_cond_exec(ring, false);
ring             1377 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	ring->trail_seq += 1;
ring             1378 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_alloc(ring, 10);
ring             1379 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring             1380 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 				  ring->trail_seq, 0);
ring             1381 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_commit(ring);
ring             1388 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		if (ring->trail_seq ==
ring             1389 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
ring             1396 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
ring             1403 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 	amdgpu_ring_set_preempt_cond_exec(ring, true);
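sdma_v5_0_ring_preempt_ib() preempts by disabling the cond-exec preamble, bumping ring->trail_seq, emitting a fence to trail_fence_gpu_addr, and polling the CPU-visible copy until it matches (or timing out and logging the failure) before re-arming. A mock of that handshake; the mock_* names are invented for illustration and the direct store replaces the engine's fence write:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct mock_ring {
        bool preempt_cond_exec;       /* gates the COND_EXE preamble */
        uint32_t trail_seq;
        volatile uint32_t trail_fence;
    };

    static bool mock_preempt(struct mock_ring *r)
    {
        unsigned i, timeout = 1000;

        r->preempt_cond_exec = false;  /* stop new preambles executing */
        r->trail_seq++;                /* fresh handshake value */
        r->trail_fence = r->trail_seq; /* stands in for the HW fence */

        for (i = 0; i < timeout; i++)
            if (r->trail_fence == r->trail_seq)
                break;

        r->preempt_cond_exec = true;   /* re-arm for later submissions */
        return i < timeout;
    }

    int main(void)
    {
        struct mock_ring r = { .preempt_cond_exec = true };
        printf("preempt %s\n", mock_preempt(&r) ? "ok" : "timed out");
        return 0;
    }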
ring             1435 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			amdgpu_fence_process(&adev->sdma.instance[0].ring);
ring             1451 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			amdgpu_fence_process(&adev->sdma.instance[1].ring);
ring             1645 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
ring             1646 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->sdma.instance[i].ring.me = i;
ring             1730 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring             1749 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 			sched = &adev->sdma.instance[i].ring.sched;
ring             1242 drivers/gpu/drm/amd/amdgpu/si.c static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring             1244 drivers/gpu/drm/amd/amdgpu/si.c 	if (!ring || !ring->funcs->emit_wreg) {
ring             1248 drivers/gpu/drm/amd/amdgpu/si.c 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
ring             1253 drivers/gpu/drm/amd/amdgpu/si.c 			      struct amdgpu_ring *ring)
ring             1255 drivers/gpu/drm/amd/amdgpu/si.c 	if (!ring || !ring->funcs->emit_wreg) {
ring             1259 drivers/gpu/drm/amd/amdgpu/si.c 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
ring               41 drivers/gpu/drm/amd/amdgpu/si_dma.c static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
ring               43 drivers/gpu/drm/amd/amdgpu/si_dma.c 	return ring->adev->wb.wb[ring->rptr_offs>>2];
ring               46 drivers/gpu/drm/amd/amdgpu/si_dma.c static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
ring               48 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_device *adev = ring->adev;
ring               49 drivers/gpu/drm/amd/amdgpu/si_dma.c 	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
ring               54 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
ring               56 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_device *adev = ring->adev;
ring               57 drivers/gpu/drm/amd/amdgpu/si_dma.c 	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
ring               60 drivers/gpu/drm/amd/amdgpu/si_dma.c 	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
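si_dma_ring_set_wptr() masks the register value with 0x3fffc, which clears the low two bits (keeping the byte offset dword-aligned) and wraps it inside an at-most 256 KiB ring window; the exact ring size is set elsewhere via DMA_RB_CNTL, so the window claim is an inference. A quick standalone check:

    #include <stdint.h>
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wptr = 0x12345;                  /* dword index */
        uint32_t reg = (wptr << 2) & 0x3fffc;     /* masked byte offset */

        assert((reg & 3) == 0);    /* dword aligned */
        assert(reg < 0x40000);     /* stays below 256 KiB */
        printf("DMA_RB_WPTR value: 0x%05x\n", reg);
        return 0;
    }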
ring               63 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
ring               72 drivers/gpu/drm/amd/amdgpu/si_dma.c 	while ((lower_32_bits(ring->wptr) & 7) != 5)
ring               73 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
ring               74 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
ring               75 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
ring               76 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
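si_dma_ring_emit_ib() pads with NOPs until the wptr phase is 5 mod 8 so the 3-dword INDIRECT_BUFFER packet ends on an 8-dword boundary, then packs the IB size in dwords above bit 12 of the final dword, with address bits 39:32 in its low byte. The packing, in isolation, with an illustrative 40-bit address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gpu_addr = 0x00000023fedcba40ull; /* 32-byte aligned VA */
        uint32_t length_dw = 256;

        uint32_t dw1 = (uint32_t)gpu_addr & 0xFFFFFFE0;
        uint32_t dw2 = (length_dw << 12) |
                       (uint32_t)((gpu_addr >> 32) & 0xFF);

        printf("base_lo=0x%08x size|base_hi=0x%08x\n", dw1, dw2);
        return 0;
    }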
ring               90 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring               96 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
ring               97 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, addr & 0xfffffffc);
ring               98 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
ring               99 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, seq);
ring              103 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
ring              104 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_write(ring, addr & 0xfffffffc);
ring              105 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
ring              106 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_write(ring, upper_32_bits(seq));
ring              109 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
ring              114 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_ring *ring;
ring              119 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring = &adev->sdma.instance[i].ring;
ring              125 drivers/gpu/drm/amd/amdgpu/si_dma.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              127 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->sched.ready = false;
ring              133 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_ring *ring;
ring              139 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring = &adev->sdma.instance[i].ring;
ring              145 drivers/gpu/drm/amd/amdgpu/si_dma.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              156 drivers/gpu/drm/amd/amdgpu/si_dma.c 		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
ring              163 drivers/gpu/drm/amd/amdgpu/si_dma.c 		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
ring              176 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->wptr = 0;
ring              177 drivers/gpu/drm/amd/amdgpu/si_dma.c 		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
ring              180 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->sched.ready = true;
ring              182 drivers/gpu/drm/amd/amdgpu/si_dma.c 		r = amdgpu_ring_test_helper(ring);
ring              186 drivers/gpu/drm/amd/amdgpu/si_dma.c 		if (adev->mman.buffer_funcs_ring == ring)
ring              202 drivers/gpu/drm/amd/amdgpu/si_dma.c static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
ring              204 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_device *adev = ring->adev;
ring              219 drivers/gpu/drm/amd/amdgpu/si_dma.c 	r = amdgpu_ring_alloc(ring, 4);
ring              223 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
ring              224 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
ring              225 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
ring              226 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              227 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_commit(ring);
ring              252 drivers/gpu/drm/amd/amdgpu/si_dma.c static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              254 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_device *adev = ring->adev;
ring              279 drivers/gpu/drm/amd/amdgpu/si_dma.c 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
ring              407 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
ring              420 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring              422 drivers/gpu/drm/amd/amdgpu/si_dma.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring              423 drivers/gpu/drm/amd/amdgpu/si_dma.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring              426 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
ring              428 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              429 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
ring              430 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring              431 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, seq); /* value */
ring              432 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
ring              444 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring              447 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring              450 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
ring              451 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
ring              452 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, 0xff << 16); /* retry */
ring              453 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, 1 << vmid); /* mask */
ring              454 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, 0); /* value */
ring              455 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
ring              458 drivers/gpu/drm/amd/amdgpu/si_dma.c static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
ring              461 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
ring              462 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, (0xf << 16) | reg);
ring              463 drivers/gpu/drm/amd/amdgpu/si_dma.c 	amdgpu_ring_write(ring, val);
ring              482 drivers/gpu/drm/amd/amdgpu/si_dma.c 	struct amdgpu_ring *ring;
ring              499 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring = &adev->sdma.instance[i].ring;
ring              500 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->ring_obj = NULL;
ring              501 drivers/gpu/drm/amd/amdgpu/si_dma.c 		ring->use_doorbell = false;
ring              502 drivers/gpu/drm/amd/amdgpu/si_dma.c 		sprintf(ring->name, "sdma%d", i);
ring              503 drivers/gpu/drm/amd/amdgpu/si_dma.c 		r = amdgpu_ring_init(adev, ring, 1024,
ring              521 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
ring              637 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_fence_process(&adev->sdma.instance[0].ring);
ring              639 drivers/gpu/drm/amd/amdgpu/si_dma.c 		amdgpu_fence_process(&adev->sdma.instance[1].ring);
ring              749 drivers/gpu/drm/amd/amdgpu/si_dma.c 		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
ring              824 drivers/gpu/drm/amd/amdgpu/si_dma.c 	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
ring              842 drivers/gpu/drm/amd/amdgpu/si_dma.c 		sched = &adev->sdma.instance[i].ring.sched;
ring              131 drivers/gpu/drm/amd/amdgpu/si_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              132 drivers/gpu/drm/amd/amdgpu/si_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              133 drivers/gpu/drm/amd/amdgpu/si_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              134 drivers/gpu/drm/amd/amdgpu/si_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring              812 drivers/gpu/drm/amd/amdgpu/soc15.c static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring              814 drivers/gpu/drm/amd/amdgpu/soc15.c 	adev->nbio_funcs->hdp_flush(adev, ring);
ring              818 drivers/gpu/drm/amd/amdgpu/soc15.c 				 struct amdgpu_ring *ring)
ring              820 drivers/gpu/drm/amd/amdgpu/soc15.c 	if (!ring || !ring->funcs->emit_wreg)
ring              823 drivers/gpu/drm/amd/amdgpu/soc15.c 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
ring             1259 drivers/gpu/drm/amd/amdgpu/soc15.c 	struct amdgpu_ring *ring;
ring             1264 drivers/gpu/drm/amd/amdgpu/soc15.c 			ring = &adev->sdma.instance[i].ring;
ring             1266 drivers/gpu/drm/amd/amdgpu/soc15.c 				ring->use_doorbell, ring->doorbell_index,
ring              230 drivers/gpu/drm/amd/amdgpu/tonga_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              231 drivers/gpu/drm/amd/amdgpu/tonga_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              232 drivers/gpu/drm/amd/amdgpu/tonga_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              233 drivers/gpu/drm/amd/amdgpu/tonga_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring               58 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring)
ring               60 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_device *adev = ring->adev;
ring               72 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring)
ring               74 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_device *adev = ring->adev;
ring               86 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
ring               88 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_device *adev = ring->adev;
ring               90 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              106 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_ring *ring;
ring              119 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	ring = &adev->uvd.inst->ring;
ring              120 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	sprintf(ring->name, "uvd");
ring              121 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
ring              158 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              165 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	r = amdgpu_ring_test_helper(ring);
ring              169 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	r = amdgpu_ring_alloc(ring, 10);
ring              176 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, tmp);
ring              177 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              180 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, tmp);
ring              181 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              184 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, tmp);
ring              185 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              188 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
ring              189 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0x8);
ring              191 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
ring              192 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 3);
ring              194 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_commit(ring);
ring              213 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              218 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	ring->sched.ready = false;
ring              256 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              354 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
ring              360 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
ring              361 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              364 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);
ring              367 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	rb_bufsz = order_base_2(ring->ring_size);
ring              446 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              451 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              452 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, seq);
ring              453 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              454 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring              455 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              456 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring              457 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              458 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0);
ring              460 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              461 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0);
ring              462 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              463 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0);
ring              464 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              465 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 2);
ring              475 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
ring              477 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	struct amdgpu_device *adev = ring->adev;
ring              483 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	r = amdgpu_ring_alloc(ring, 3);
ring              487 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              488 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              489 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_commit(ring);
ring              511 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
ring              516 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
ring              517 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, ib->gpu_addr);
ring              518 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
ring              519 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              522 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              526 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring              529 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
ring              530 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 		amdgpu_ring_write(ring, 0);
ring              675 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	amdgpu_fence_process(&adev->uvd.inst->ring);
ring              764 drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c 	adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
ring               56 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               58 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring               70 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
ring               72 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring               84 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
ring               86 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring               88 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              104 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_ring *ring;
ring              117 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	ring = &adev->uvd.inst->ring;
ring              118 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	sprintf(ring->name, "uvd");
ring              119 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
ring              154 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              162 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	r = amdgpu_ring_test_helper(ring);
ring              166 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	r = amdgpu_ring_alloc(ring, 10);
ring              173 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, tmp);
ring              174 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              177 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, tmp);
ring              178 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              181 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, tmp);
ring              182 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              185 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
ring              186 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0x8);
ring              188 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
ring              189 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 3);
ring              191 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_commit(ring);
ring              211 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              216 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	ring->sched.ready = false;
ring              294 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              392 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring              407 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
ring              411 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 			lower_32_bits(ring->gpu_addr));
ring              413 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 			upper_32_bits(ring->gpu_addr));
ring              418 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
ring              419 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              463 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              468 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              469 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, seq);
ring              470 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              471 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring              472 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              473 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring              474 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              475 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0);
ring              477 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              478 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0);
ring              479 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              480 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0);
ring              481 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              482 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 2);
ring              492 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
ring              494 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	struct amdgpu_device *adev = ring->adev;
ring              500 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring              503 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              504 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              505 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_commit(ring);
ring              527 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              532 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
ring              533 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring              534 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
ring              535 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              536 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
ring              537 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              540 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring              544 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring              547 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
ring              548 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 		amdgpu_ring_write(ring, 0);
ring              598 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	amdgpu_fence_process(&adev->uvd.inst->ring);
ring              873 drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c 	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
ring               77 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               79 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring               91 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
ring               93 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring               95 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	if (ring == &adev->uvd.inst->ring_enc[0])
ring              107 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              109 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              121 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
ring              123 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              125 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	if (ring == &adev->uvd.inst->ring_enc[0])
ring              138 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              140 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              142 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              152 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
ring              154 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              156 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	if (ring == &adev->uvd.inst->ring_enc[0])
ring              158 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			lower_32_bits(ring->wptr));
ring              161 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			lower_32_bits(ring->wptr));
ring              170 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
ring              172 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              177 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_ring_alloc(ring, 16);
ring              181 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	rptr = amdgpu_ring_get_rptr(ring);
ring              183 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
ring              184 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_commit(ring);
ring              187 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		if (amdgpu_ring_get_rptr(ring) != rptr)
ring              208 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              219 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              246 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              270 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
ring              282 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              309 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              329 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              335 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
ring              341 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
ring              345 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
ring              385 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_ring *ring;
ring              417 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	ring = &adev->uvd.inst->ring;
ring              418 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	sprintf(ring->name, "uvd");
ring              419 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
ring              429 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			ring = &adev->uvd.inst->ring_enc[i];
ring              430 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			sprintf(ring->name, "uvd_enc%d", i);
ring              431 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
ring              469 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              477 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_ring_test_helper(ring);
ring              481 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_ring_alloc(ring, 10);
ring              488 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, tmp);
ring              489 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              492 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, tmp);
ring              493 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              496 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, tmp);
ring              497 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xFFFFF);
ring              500 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
ring              501 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0x8);
ring              503 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
ring              504 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 3);
ring              506 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_commit(ring);
ring              510 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			ring = &adev->uvd.inst->ring_enc[i];
ring              511 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			r = amdgpu_ring_test_helper(ring);
ring              538 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              543 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	ring->sched.ready = false;
ring              701 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
ring              811 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring              824 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
ring              828 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			lower_32_bits(ring->gpu_addr));
ring              830 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 			upper_32_bits(ring->gpu_addr));
ring              835 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
ring              836 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              841 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		ring = &adev->uvd.inst->ring_enc[0];
ring              842 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring              843 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring              844 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
ring              845 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              846 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
ring              848 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		ring = &adev->uvd.inst->ring_enc[1];
ring              849 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring              850 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring              851 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring              852 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              853 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
ring              896 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring              901 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              902 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, seq);
ring              903 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              904 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring              905 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              906 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring              907 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              908 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0);
ring              910 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring              911 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0);
ring              912 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring              913 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0);
ring              914 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring              915 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 2);
ring              926 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring              931 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
ring              932 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, addr);
ring              933 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              934 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, seq);
ring              935 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
ring              943 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring              955 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
ring              957 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	struct amdgpu_device *adev = ring->adev;
ring              963 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring              967 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
ring              968 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring              969 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_commit(ring);
ring              991 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              998 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
ring              999 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, vmid);
ring             1001 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
ring             1002 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1003 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
ring             1004 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1005 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
ring             1006 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1017 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
ring             1024 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
ring             1025 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, vmid);
ring             1026 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1027 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1028 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1031 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1034 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring             1035 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1036 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring             1037 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, val);
ring             1038 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring             1039 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0x8);
ring             1042 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1045 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1047 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring             1048 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
ring             1049 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring             1050 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0);
ring             1051 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
ring             1052 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 1 << vmid); /* mask */
ring             1053 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring             1054 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xC);
ring             1057 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             1059 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             1060 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             1062 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
ring             1063 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             1064 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
ring             1065 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1066 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
ring             1067 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
ring             1068 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
ring             1069 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, seq);
ring             1070 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
ring             1071 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, 0xE);
ring             1074 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring             1078 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             1081 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
ring             1082 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		amdgpu_ring_write(ring, 0);
ring             1086 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
ring             1088 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring             1089 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring             1091 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
ring             1092 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             1093 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1094 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, seq);
ring             1097 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
ring             1099 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
ring             1102 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1105 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
ring             1106 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, vmid);
ring             1107 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, pd_addr >> 12);
ring             1109 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
ring             1110 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 	amdgpu_ring_write(ring, vmid);
ring             1225 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		amdgpu_fence_process(&adev->uvd.inst->ring);
ring             1600 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
ring             1603 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c 		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
ring               71 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               73 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring               75 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
ring               85 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
ring               87 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring               89 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
ring               90 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
ring               92 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
ring              102 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              104 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring              106 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
ring              116 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
ring              118 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring              120 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (ring->use_doorbell)
ring              121 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		return adev->wb.wb[ring->wptr_offs];
ring              123 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
ring              124 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
ring              126 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
ring              136 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              138 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring              140 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              150 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
ring              152 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring              154 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (ring->use_doorbell) {
ring              156 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring              157 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring              161 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
ring              162 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
ring              163 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			lower_32_bits(ring->wptr));
ring              165 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
ring              166 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			lower_32_bits(ring->wptr));
ring              175 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
ring              177 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring              185 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_ring_alloc(ring, 16);
ring              189 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	rptr = amdgpu_ring_get_rptr(ring);
ring              191 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
ring              192 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_commit(ring);
ring              195 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		if (amdgpu_ring_get_rptr(ring) != rptr)
ring              216 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              227 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              254 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              278 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ring              289 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
ring              316 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_job_submit_direct(job, ring, &f);
ring              336 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ring              342 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
ring              348 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
ring              352 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
ring              405 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring;
ring              451 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring = &adev->uvd.inst[j].ring;
ring              452 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			sprintf(ring->name, "uvd_%d", ring->me);
ring              453 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
ring              459 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring = &adev->uvd.inst[j].ring_enc[i];
ring              460 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
ring              462 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 				ring->use_doorbell = true;
ring              468 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
ring              470 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
ring              472 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
ring              523 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring;
ring              537 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		ring = &adev->uvd.inst[j].ring;
ring              540 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			r = amdgpu_ring_test_helper(ring);
ring              544 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			r = amdgpu_ring_alloc(ring, 10);
ring              552 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, tmp);
ring              553 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, 0xFFFFF);
ring              557 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, tmp);
ring              558 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, 0xFFFFF);
ring              562 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, tmp);
ring              563 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, 0xFFFFF);
ring              566 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
ring              568 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, 0x8);
ring              570 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
ring              572 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_write(ring, 3);
ring              574 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			amdgpu_ring_commit(ring);
ring              578 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring = &adev->uvd.inst[j].ring_enc[i];
ring              579 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			r = amdgpu_ring_test_helper(ring);
ring              613 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[i].ring.sched.ready = false;
ring              766 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring;
ring              796 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring = &adev->uvd.inst[i].ring;
ring              797 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring->wptr = 0;
ring              894 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			size = order_base_2(ring->ring_size);
ring              899 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring = &adev->uvd.inst[i].ring_enc[0];
ring              900 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			ring->wptr = 0;
ring              901 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
ring              902 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
ring              903 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
ring              932 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring;
ring              955 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		ring = &adev->uvd.inst[k].ring;
ring             1061 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		rb_bufsz = order_base_2(ring->ring_size);
ring             1075 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 				(upper_32_bits(ring->gpu_addr) >> 2));
ring             1079 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 				lower_32_bits(ring->gpu_addr));
ring             1081 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 				upper_32_bits(ring->gpu_addr));
ring             1086 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
ring             1088 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 				lower_32_bits(ring->wptr));
ring             1093 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		ring = &adev->uvd.inst[k].ring_enc[0];
ring             1094 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring             1095 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1096 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring             1097 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring             1098 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
ring             1100 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		ring = &adev->uvd.inst[k].ring_enc[1];
ring             1101 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring             1102 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1103 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring             1104 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring             1105 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
ring             1155 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1158 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1162 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1163 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
ring             1164 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, seq);
ring             1165 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1166 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
ring             1167 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring             1168 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1169 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
ring             1170 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring             1171 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1172 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
ring             1173 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             1175 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1176 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
ring             1177 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             1178 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1179 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
ring             1180 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 0);
ring             1181 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1182 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
ring             1183 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 2);
ring             1194 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             1200 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
ring             1201 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, addr);
ring             1202 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1203 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, seq);
ring             1204 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
ring             1212 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
ring             1224 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
ring             1226 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1231 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
ring             1232 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	r = amdgpu_ring_alloc(ring, 3);
ring             1236 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1237 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
ring             1238 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring             1239 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_commit(ring);
ring             1241 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
ring             1263 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
ring             1268 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (!ring->me)
ring             1290 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
ring             1295 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1298 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1299 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
ring             1300 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, vmid);
ring             1302 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1303 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
ring             1304 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1305 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1306 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
ring             1307 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1308 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1309 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
ring             1310 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1321 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
ring             1328 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
ring             1329 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, vmid);
ring             1330 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1331 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1332 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1335 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1338 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1340 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1341 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
ring             1342 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1343 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1344 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
ring             1345 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, val);
ring             1346 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1347 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
ring             1348 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 8);
ring             1351 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1354 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1356 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1357 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
ring             1358 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1359 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1360 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
ring             1361 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, val);
ring             1362 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1363 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
ring             1364 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, mask);
ring             1365 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring,
ring             1366 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
ring             1367 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, 12);
ring             1370 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1373 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1376 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1382 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
ring             1385 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring             1387 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1390 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             1393 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
ring             1394 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		amdgpu_ring_write(ring, 0);
ring             1398 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
ring             1400 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
ring             1403 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
ring             1407 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
ring             1408 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1409 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, mask);
ring             1410 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, val);
ring             1413 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1416 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1418 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1421 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
ring             1425 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1428 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
ring             1429 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1430 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	amdgpu_ring_write(ring, val);
ring             1462 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
ring             1468 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
ring             1471 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
ring             1480 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
ring             1492 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
ring             1494 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
ring             1522 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
ring             1562 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
ring             1585 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
ring             1586 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
ring             1587 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
ring             1631 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
ring             1632 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
ring             1633 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
ring             1634 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
ring             1641 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
ring             1642 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
ring             1674 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
ring             1675 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
ring             1734 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
ring             1844 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
ring             1845 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 		adev->uvd.inst[i].ring.me = i;
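
The uvd_v7_0.c entries above all reduce to one emission pattern: the register offset is staged through GPCOM_VCPU_DATA0, the value through GPCOM_VCPU_DATA1, and an opcode through GPCOM_VCPU_CMD (8 for a plain write, 12 for the wait variant, which also stages a mask in GP_SCRATCH8). A minimal sketch distilled from those lines; the helper name is hypothetical:

static void uvd_v7_0_wreg_sketch(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;	/* SOC15_REG_OFFSET expands against adev */

	/* register offset (dword index, shifted to a byte address) */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	/* value to write */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	/* opcode: 8 = register write, 12 = register wait */
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, 8);
}
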
ring               55 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               57 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring               59 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	if (ring->me == 0)
ring               72 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
ring               74 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring               76 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	if (ring->me == 0)
ring               89 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
ring               91 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring               93 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	if (ring->me == 0)
ring               94 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
ring               96 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
ring              232 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	struct amdgpu_ring *ring;
ring              243 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	ring = &adev->vce.ring[0];
ring              244 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
ring              245 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
ring              246 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
ring              247 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              248 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring              250 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	ring = &adev->vce.ring[1];
ring              251 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
ring              252 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
ring              253 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
ring              254 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              255 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
ring              415 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 	struct amdgpu_ring *ring;
ring              434 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		ring = &adev->vce.ring[i];
ring              435 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		sprintf(ring->name, "vce%d", i);
ring              436 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		r = amdgpu_ring_init(adev, ring, 512,
ring              468 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
ring              539 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
ring              630 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
ring              631 drivers/gpu/drm/amd/amdgpu/vce_v2_0.c 		adev->vce.ring[i].me = i;
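
vce_v2_0 drives two rings out of a single register block, with ring->me selecting between the plain and the "2"-suffixed RB registers. A sketch of the rptr read under that convention; the branch bodies, elided in the listing, are assumed to mirror the WPTR pair shown above:

static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;	/* RREG32 expands against adev */

	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}
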
ring               77 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               79 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring               89 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	if (ring->me == 0)
ring               91 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	else if (ring->me == 1)
ring              109 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
ring              111 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              121 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	if (ring->me == 0)
ring              123 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	else if (ring->me == 1)
ring              141 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              143 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	struct amdgpu_device *adev = ring->adev;
ring              152 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	if (ring->me == 0)
ring              153 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
ring              154 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	else if (ring->me == 1)
ring              155 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
ring              157 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
ring              267 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	struct amdgpu_ring *ring;
ring              280 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			ring = &adev->vce.ring[0];
ring              281 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
ring              282 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
ring              283 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
ring              284 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              285 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring              287 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			ring = &adev->vce.ring[1];
ring              288 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
ring              289 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
ring              290 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
ring              291 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              292 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
ring              294 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			ring = &adev->vce.ring[2];
ring              295 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
ring              296 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
ring              297 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
ring              298 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
ring              299 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
ring              421 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	struct amdgpu_ring *ring;
ring              443 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		ring = &adev->vce.ring[i];
ring              444 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		sprintf(ring->name, "vce%d", i);
ring              445 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
ring              477 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
ring              727 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
ring              834 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
ring              841 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
ring              842 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, vmid);
ring              843 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring              844 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              845 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              848 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
ring              851 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
ring              852 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, vmid);
ring              853 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, pd_addr >> 12);
ring              855 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
ring              856 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, vmid);
ring              857 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, VCE_CMD_END);
ring              860 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
ring              862 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	uint32_t seq = ring->fence_drv.sync_seq;
ring              863 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	uint64_t addr = ring->fence_drv.gpu_addr;
ring              865 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
ring              866 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring              867 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              868 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 	amdgpu_ring_write(ring, seq);
ring              949 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
ring              950 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			adev->vce.ring[i].me = i;
ring              955 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
ring              956 drivers/gpu/drm/amd/amdgpu/vce_v3_0.c 			adev->vce.ring[i].me = i;
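
The vce_v3_0 VM flush indexed above is short enough to restate with comments; the parameter line, elided in the listing, is reconstructed as a VMID plus page-directory address:

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned vmid, uint64_t pd_addr)
{
	/* repoint the page table base; pd_addr is page aligned, hence >> 12 */
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* then drop any stale translations for that VMID */
	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
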
ring               61 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
ring               63 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring               65 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	if (ring->me == 0)
ring               67 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	else if (ring->me == 1)
ring               80 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
ring               82 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring               84 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	if (ring->use_doorbell)
ring               85 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		return adev->wb.wb[ring->wptr_offs];
ring               87 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	if (ring->me == 0)
ring               89 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	else if (ring->me == 1)
ring              102 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
ring              104 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_device *adev = ring->adev;
ring              106 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	if (ring->use_doorbell) {
ring              108 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring              109 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring              113 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	if (ring->me == 0)
ring              115 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 			lower_32_bits(ring->wptr));
ring              116 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	else if (ring->me == 1)
ring              118 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 			lower_32_bits(ring->wptr));
ring              121 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 			lower_32_bits(ring->wptr));
ring              178 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
ring              179 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
ring              180 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	adev->vce.ring[0].wptr = 0;
ring              181 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	adev->vce.ring[0].wptr_old = 0;
ring              206 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_ring *ring;
ring              232 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		ring = &adev->vce.ring[0];
ring              234 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 					    lower_32_bits(ring->gpu_addr));
ring              236 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 					    upper_32_bits(ring->gpu_addr));
ring              238 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 					    ring->ring_size / 4);
ring              337 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_ring *ring;
ring              340 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	ring = &adev->vce.ring[0];
ring              342 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
ring              343 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
ring              344 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
ring              345 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
ring              346 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
ring              348 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	ring = &adev->vce.ring[1];
ring              350 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
ring              351 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
ring              352 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
ring              353 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
ring              354 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
ring              356 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	ring = &adev->vce.ring[2];
ring              358 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
ring              359 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
ring              360 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
ring              361 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
ring              362 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
ring              427 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_ring *ring;
ring              465 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		ring = &adev->vce.ring[i];
ring              466 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		sprintf(ring->name, "vce%d", i);
ring              469 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 			ring->use_doorbell = true;
ring              475 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
ring              477 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
ring              479 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
ring              529 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
ring              553 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		adev->vce.ring[i].sched.ready = false;
ring              951 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
ring              956 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
ring              957 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, vmid);
ring              958 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring              959 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              960 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring              963 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring              968 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_FENCE);
ring              969 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, addr);
ring              970 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring              971 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, seq);
ring              972 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_TRAP);
ring              975 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
ring              977 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_END);
ring              980 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring              983 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
ring              984 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, reg << 2);
ring              985 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, mask);
ring              986 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, val);
ring              989 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
ring              992 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring              994 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring              997 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
ring             1001 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
ring             1004 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
ring             1005 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1006 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 	amdgpu_ring_write(ring, val);
ring             1036 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
ring             1105 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
ring             1106 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c 		adev->vce.ring[i].me = i;
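
vce_v4_0 is the first VCE block in this listing with doorbell support, and its wptr update shows the split: doorbell rings publish the pointer through the writeback page, while MMIO rings fall back to the RB_WPTR registers. A sketch assembled from the lines above; the SOC15_REG_OFFSET forms of the fallback writes are inferred from the RB setup entries:

static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* mirror wptr into the writeback page, then ring the doorbell */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	/* MMIO fallback, one RB_WPTR register per ring */
	if (ring->me == 0)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
		       lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
		       lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
		       lower_32_bits(ring->wptr));
}
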
ring               50 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
ring               86 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring;
ring              127 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_dec;
ring              128 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	sprintf(ring->name, "vcn_dec");
ring              129 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              145 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring = &adev->vcn.inst->ring_enc[i];
ring              146 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		sprintf(ring->name, "vcn_enc%d", i);
ring              147 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              152 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              153 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	sprintf(ring->name, "vcn_jpeg");
ring              154 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              196 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring              199 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	r = amdgpu_ring_test_helper(ring);
ring              204 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring = &adev->vcn.inst->ring_enc[i];
ring              205 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->sched.ready = true;
ring              206 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		r = amdgpu_ring_test_helper(ring);
ring              211 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              212 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	r = amdgpu_ring_test_helper(ring);
ring              234 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring              240 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->sched.ready = false;
ring              784 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring              904 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring              917 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			(upper_32_bits(ring->gpu_addr) >> 2));
ring              921 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			lower_32_bits(ring->gpu_addr));
ring              923 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			upper_32_bits(ring->gpu_addr));
ring              930 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
ring              932 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			lower_32_bits(ring->wptr));
ring              937 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_enc[0];
ring              938 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring              939 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring              940 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring              941 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              942 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
ring              944 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_enc[1];
ring              945 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring              946 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring              947 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring              948 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              949 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
ring              951 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              955 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
ring              956 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
ring              962 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
ring              965 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
ring              966 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
ring              973 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring             1077 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring             1090 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 								(upper_32_bits(ring->gpu_addr) >> 2));
ring             1094 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 								lower_32_bits(ring->gpu_addr));
ring             1096 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 								upper_32_bits(ring->gpu_addr));
ring             1103 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
ring             1105 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 								lower_32_bits(ring->wptr));
ring             1111 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring             1112 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
ring             1115 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_set_patch_ring(ring,
ring             1116 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
ring             1232 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_ring *ring;
ring             1260 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				ring = &adev->vcn.inst->ring_enc[0];
ring             1261 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring             1262 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring             1263 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
ring             1264 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring             1265 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1267 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				ring = &adev->vcn.inst->ring_enc[1];
ring             1268 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring             1269 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring             1270 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
ring             1271 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring             1272 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1274 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				ring = &adev->vcn.inst->ring_dec;
ring             1320 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				ring = &adev->vcn.inst->ring_jpeg;
ring             1326 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 							lower_32_bits(ring->gpu_addr));
ring             1328 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 							upper_32_bits(ring->gpu_addr));
ring             1329 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
ring             1330 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
ring             1334 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 				ring = &adev->vcn.inst->ring_dec;
ring             1395 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
ring             1397 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1409 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
ring             1411 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1423 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
ring             1425 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1429 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			lower_32_bits(ring->wptr) | 0x80000000);
ring             1431 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring             1441 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
ring             1443 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1445 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1447 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1448 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1450 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
ring             1460 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
ring             1462 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1464 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1466 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
ring             1477 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1480 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1484 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1486 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, seq);
ring             1487 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1489 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring             1490 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1492 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring             1493 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1495 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);
ring             1497 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1499 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1500 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1502 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1503 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1505 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
ring             1516 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
ring             1521 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1524 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1526 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, vmid);
ring             1528 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1530 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1531 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1533 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1534 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1536 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1539 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
ring             1543 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1545 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1547 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1548 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1550 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             1551 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1553 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, mask);
ring             1554 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1556 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
ring             1559 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1562 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1565 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1571 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
ring             1574 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1577 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1579 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1581 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1582 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1584 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             1585 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1587 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
ring             1597 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
ring             1599 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1601 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	if (ring == &adev->vcn.inst->ring_enc[0])
ring             1614 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
ring             1616 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1618 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	if (ring == &adev->vcn.inst->ring_enc[0])
ring             1631 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
ring             1633 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1635 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	if (ring == &adev->vcn.inst->ring_enc[0])
ring             1637 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			lower_32_bits(ring->wptr));
ring             1640 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 			lower_32_bits(ring->wptr));
ring             1651 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             1656 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
ring             1657 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, addr);
ring             1658 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1659 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, seq);
ring             1660 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
ring             1663 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
ring             1665 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
ring             1676 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
ring             1683 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
ring             1684 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, vmid);
ring             1685 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1686 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1687 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1690 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
ring             1694 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
ring             1695 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1696 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, mask);
ring             1697 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             1700 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1703 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1705 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1708 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
ring             1712 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1715 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
ring             1716 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1717 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             1728 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
ring             1730 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1742 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
ring             1744 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1756 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
ring             1758 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1760 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
ring             1770 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
ring             1772 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1774 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1776 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x68e04);
ring             1778 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
ring             1779 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x80010000);
ring             1789 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
ring             1791 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1793 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1795 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x68e04);
ring             1797 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
ring             1798 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x00010000);
ring             1809 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1812 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1816 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1818 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, seq);
ring             1820 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1822 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, seq);
ring             1824 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1826 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             1828 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1830 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1832 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1834 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x8);
ring             1836 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1838 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1840 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1842 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x01400200);
ring             1844 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1846 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, seq);
ring             1848 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1850 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             1852 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1854 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1856 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1858 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0xffffffff);
ring             1860 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1862 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x3fbc);
ring             1864 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1866 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x1);
ring             1869 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
ring             1870 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1881 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
ring             1886 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1889 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1891 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
ring             1893 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1895 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
ring             1897 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1899 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1901 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1903 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1905 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1907 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1909 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1911 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
ring             1913 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1915 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
ring             1917 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1919 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0);
ring             1921 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1923 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x01400200);
ring             1925 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1927 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x2);
ring             1929 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1931 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x2);
ring             1934 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
ring             1938 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1941 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1943 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, 0x01400200);
ring             1945 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1947 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             1949 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1953 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, 0);
ring             1954 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring,
ring             1957 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, reg_offset);
ring             1958 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring,
ring             1961 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, mask);
ring             1964 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1967 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1970 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1976 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
ring             1979 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1982 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1985 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring,
ring             1989 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, 0);
ring             1990 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring,
ring             1993 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, reg_offset);
ring             1994 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring,
ring             1997 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	amdgpu_ring_write(ring, val);
ring             2000 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
ring             2004 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             2007 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
ring             2008 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, 0);
ring             2012 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
ring             2014 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2015 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
ring             2018 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[(*ptr)++] = 0;
ring             2019 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
ring             2021 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[(*ptr)++] = reg_offset;
ring             2022 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
ring             2024 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[(*ptr)++] = val;
ring             2027 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
ring             2029 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2036 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	val = lower_32_bits(ring->gpu_addr);
ring             2037 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2042 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	val = upper_32_bits(ring->gpu_addr);
ring             2043 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2047 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
ring             2048 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = 0;
ring             2055 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2061 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2069 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
ring             2070 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = 0x01400200;
ring             2071 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
ring             2072 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = val;
ring             2073 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
ring             2076 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = 0;
ring             2077 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
ring             2079 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = reg_offset;
ring             2080 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
ring             2082 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	ring->ring[ptr++] = mask;
ring             2086 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
ring             2087 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		ring->ring[ptr++] = 0;
ring             2094 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2100 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
ring             2139 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring             2141 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2144 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             2147 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
ring             2148 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 		amdgpu_ring_write(ring, 0);
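
Both nop helpers in vcn_v1_0.c pad the ring in two-dword units, which is why they assert an even wptr and count. A sketch of the JPEG variant; the loop header is an assumption, since the indexed lines only show its body:

static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	/* NOPs travel as header+body pairs, so wptr and count must stay even */
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {	/* assumed loop header */
		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
		amdgpu_ring_write(ring, 0);
	}
}
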
ring              115 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring;
ring              159 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_dec;
ring              161 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->use_doorbell = true;
ring              162 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
ring              164 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	sprintf(ring->name, "vcn_dec");
ring              165 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              188 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring = &adev->vcn.inst->ring_enc[i];
ring              189 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->use_doorbell = true;
ring              190 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
ring              191 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		sprintf(ring->name, "vcn_enc%d", i);
ring              192 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              197 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              198 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->use_doorbell = true;
ring              199 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
ring              200 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	sprintf(ring->name, "vcn_jpeg");
ring              201 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
ring              244 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring              247 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring              248 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 					     ring->doorbell_index, 0);
ring              250 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = true;
ring              251 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	r = amdgpu_ring_test_ring(ring);
ring              253 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
ring              258 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring = &adev->vcn.inst->ring_enc[i];
ring              259 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = true;
ring              260 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		r = amdgpu_ring_test_ring(ring);
ring              262 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			ring->sched.ready = false;
ring              267 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              268 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = true;
ring              269 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	r = amdgpu_ring_test_ring(ring);
ring              271 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
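
The vcn_v2_0 hw_init lines above repeat one bring-up idiom per ring: mark it schedulable, smoke-test it, and clear readiness on failure so the GPU scheduler never submits to a dead ring. A hedged sketch of that idiom (error propagation elided):

	static int example_bringup_ring(struct amdgpu_ring *ring)
	{
		int r;

		ring->sched.ready = true;		/* open the ring for submission */
		r = amdgpu_ring_test_ring(ring);	/* exercise the rptr/wptr path */
		if (r)
			ring->sched.ready = false;	/* keep the scheduler away */
		return r;
	}
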
ring              293 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring              301 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = false;
ring              304 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring = &adev->vcn.inst->ring_enc[i];
ring              305 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		ring->sched.ready = false;
ring              308 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_jpeg;
ring              309 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->sched.ready = false;
ring              668 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg;
ring              716 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		lower_32_bits(ring->gpu_addr));
ring              718 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		upper_32_bits(ring->gpu_addr));
ring              722 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring              723 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
ring              930 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring             1021 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring             1034 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		(upper_32_bits(ring->gpu_addr) >> 2));
ring             1038 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		lower_32_bits(ring->gpu_addr));
ring             1040 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		upper_32_bits(ring->gpu_addr));
ring             1047 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
ring             1049 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		lower_32_bits(ring->wptr));
ring             1056 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
ring             1186 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring             1196 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		lower_32_bits(ring->gpu_addr));
ring             1198 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		upper_32_bits(ring->gpu_addr));
ring             1203 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
ring             1205 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			lower_32_bits(ring->wptr));
ring             1207 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_enc[0];
ring             1208 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring             1209 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1210 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring             1211 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring             1212 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
ring             1214 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	ring = &adev->vcn.inst->ring_enc[1];
ring             1215 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring             1216 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1217 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring             1218 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring             1219 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
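
The encode-ring programming above follows a fixed recipe: base address split into LO/HI halves, size in dwords (ring_size is kept in bytes, hence the /4), and rptr seeded equal to wptr so the ring starts empty. A sketch under those assumptions, reusing the SOC15 write macro from the listing:

	static void example_program_enc_ring(struct amdgpu_device *adev,
					     struct amdgpu_ring *ring)
	{
		WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);	/* dwords */
		WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));	/* empty */
	}
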
ring             1334 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_ring *ring;
ring             1361 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				ring = &adev->vcn.inst->ring_enc[0];
ring             1362 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring             1363 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring             1364 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
ring             1365 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring             1366 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1368 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				ring = &adev->vcn.inst->ring_enc[1];
ring             1369 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring             1370 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring             1371 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
ring             1372 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring             1373 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1436 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
ring             1438 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1450 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
ring             1452 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1454 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring->use_doorbell)
ring             1455 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		return adev->wb.wb[ring->wptr_offs];
ring             1467 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
ring             1469 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1473 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			lower_32_bits(ring->wptr) | 0x80000000);
ring             1475 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring->use_doorbell) {
ring             1476 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1477 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1479 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
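
vcn_v2_0_dec_ring_set_wptr above shows the two ways a write pointer reaches the hardware: through a doorbell (publish the value in the writeback page first so the engine can fetch it coherently, then hit the doorbell) or through a plain MMIO register write. A sketch of the same split:

	static void example_set_wptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;

		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
		}
	}
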
ring             1490 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
ring             1492 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1494 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
ring             1495 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
ring             1496 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1497 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
ring             1507 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
ring             1509 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1511 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1512 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
ring             1522 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
ring             1524 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1527 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             1530 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0));
ring             1531 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, 0);
ring             1543 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1546 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1549 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0));
ring             1550 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, seq);
ring             1552 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
ring             1553 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, addr & 0xffffffff);
ring             1555 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
ring             1556 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
ring             1558 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1559 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
ring             1561 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
ring             1562 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
ring             1564 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
ring             1565 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
ring             1567 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1569 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
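
The decode fence above is built entirely from PACKET0 register writes: the operand halves go into the engine's DATA0/DATA1 registers, then a KMD command dword selects the operation. A hedged helper capturing that three-register idiom (the helper name is illustrative, not the driver's):

	static void example_emit_kmd_cmd(struct amdgpu_ring *ring,
					 u32 data0, u32 data1, u32 cmd)
	{
		struct amdgpu_device *adev = ring->adev;

		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
		amdgpu_ring_write(ring, data0);
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
		amdgpu_ring_write(ring, data1);
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
		amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (cmd << 1));
	}
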
ring             1580 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
ring             1585 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1588 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0));
ring             1589 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, vmid);
ring             1591 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_low, 0));
ring             1592 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1593 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_bar_high, 0));
ring             1594 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1595 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKET0(adev->vcn.internal.ib_size, 0));
ring             1596 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1599 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1602 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1604 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
ring             1605 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1607 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
ring             1608 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             1610 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0));
ring             1611 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, mask);
ring             1613 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1615 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
ring             1618 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1621 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1624 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1630 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
ring             1633 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
ring             1636 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1638 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0));
ring             1639 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1641 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0));
ring             1642 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             1644 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             1646 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
ring             1656 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
ring             1658 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1660 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring == &adev->vcn.inst->ring_enc[0])
ring             1673 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
ring             1675 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1677 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring == &adev->vcn.inst->ring_enc[0]) {
ring             1678 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		if (ring->use_doorbell)
ring             1679 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			return adev->wb.wb[ring->wptr_offs];
ring             1683 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		if (ring->use_doorbell)
ring             1684 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			return adev->wb.wb[ring->wptr_offs];
ring             1697 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
ring             1699 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1701 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring == &adev->vcn.inst->ring_enc[0]) {
ring             1702 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		if (ring->use_doorbell) {
ring             1703 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1704 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1706 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1709 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		if (ring->use_doorbell) {
ring             1710 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1711 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1713 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1726 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring             1731 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
ring             1732 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, addr);
ring             1733 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1734 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, seq);
ring             1735 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
ring             1738 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
ring             1740 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
ring             1751 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
ring             1758 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
ring             1759 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, vmid);
ring             1760 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1761 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1762 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, ib->length_dw);
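
Unlike the decode ring, the encode ring above speaks a flat stream: one opcode dword (VCN_ENC_CMD_*) followed directly by its operands, with no PACKET0 framing. The IB submission reads as:

	static void example_enc_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_ib *ib, u32 vmid)
	{
		amdgpu_ring_write(ring, VCN_ENC_CMD_IB);		/* opcode */
		amdgpu_ring_write(ring, vmid);
		amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
		amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
		amdgpu_ring_write(ring, ib->length_dw);			/* IB size in dwords */
	}
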
ring             1765 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1768 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
ring             1769 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, reg << 2);
ring             1770 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, mask);
ring             1771 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             1774 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             1777 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             1779 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             1782 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
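
The enc vm_flush above encodes the required ordering: emit the GMC TLB flush, then spin on the hub's per-VMID page-table-base register until the new value reads back, so no later command can run against a stale translation. A sketch of that sequence; the (reg, val, mask) argument order and the exact-match 0xffffffff mask are assumptions inferred from the reg_wait emitter above:

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
	/* wait for the low half of the PD address to land before proceeding */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
					lower_32_bits(pd_addr), 0xffffffff);
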
ring             1786 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
ring             1788 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
ring             1789 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	reg << 2);
ring             1790 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             1800 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
ring             1802 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1814 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static uint64_t vcn_v2_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
ring             1816 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1818 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring->use_doorbell)
ring             1819 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		return adev->wb.wb[ring->wptr_offs];
ring             1831 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
ring             1833 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             1835 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	if (ring->use_doorbell) {
ring             1836 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1837 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1839 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
ring             1850 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
ring             1852 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
ring             1854 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x68e04);
ring             1856 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
ring             1858 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x80010000);
ring             1868 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
ring             1870 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
ring             1872 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x68e04);
ring             1874 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
ring             1876 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x00010000);
ring             1887 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring             1892 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
ring             1894 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, seq);
ring             1896 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
ring             1898 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, seq);
ring             1900 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
ring             1902 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, lower_32_bits(addr));
ring             1904 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
ring             1906 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(addr));
ring             1908 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
ring             1910 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x8);
ring             1912 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
ring             1914 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
ring             1916 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
ring             1918 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x3fbc);
ring             1920 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
ring             1922 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x1);
ring             1924 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
ring             1925 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
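
Every JPEG-ring operation above is a PACKETJ pair: a header carrying an internal register offset plus a packet type, then one payload dword. A TYPE0 (register write) helper in that shape, name hypothetical:

	static void example_jpeg_wreg(struct amdgpu_ring *ring,
				      u32 internal_offset, u32 val)
	{
		/* TYPE0 packet: write 'val' into the internal register */
		amdgpu_ring_write(ring, PACKETJ(internal_offset, 0, 0, PACKETJ_TYPE0));
		amdgpu_ring_write(ring, val);
	}
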
ring             1936 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
ring             1943 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
ring             1945 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
ring             1947 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
ring             1949 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, (vmid | (vmid << 4)));
ring             1951 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
ring             1953 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
ring             1955 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
ring             1957 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring             1959 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
ring             1961 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, ib->length_dw);
ring             1963 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
ring             1965 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
ring             1967 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
ring             1969 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
ring             1971 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
ring             1972 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0);
ring             1974 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
ring             1976 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x01400200);
ring             1978 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
ring             1980 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x2);
ring             1982 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
ring             1984 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x2);
ring             1987 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring             1992 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
ring             1994 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0x01400200);
ring             1996 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
ring             1998 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             2000 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
ring             2003 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, 0);
ring             2004 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring,
ring             2007 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, reg_offset);
ring             2008 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
ring             2011 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, mask);
ring             2014 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring             2017 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
ring             2020 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
ring             2026 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
ring             2029 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
ring             2033 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring,	PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
ring             2036 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, 0);
ring             2037 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring,
ring             2040 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, reg_offset);
ring             2041 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring,	PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
ring             2044 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, val);
ring             2047 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
ring             2051 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WARN_ON(ring->wptr % 2 || count % 2);
ring             2054 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
ring             2055 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		amdgpu_ring_write(ring, 0);
ring             2095 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
ring             2097 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	struct amdgpu_device *adev = ring->adev;
ring             2102 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
ring             2103 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	r = amdgpu_ring_alloc(ring, 4);
ring             2106 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
ring             2107 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
ring             2108 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
ring             2109 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_write(ring, 0xDEADBEEF);
ring             2110 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 	amdgpu_ring_commit(ring);
ring             2112 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
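
The decode ring test above closes a loop through the hardware: seed a scratch register with 0xCAFEDEAD, submit a packet that rewrites it to 0xDEADBEEF, and poll until the value flips. A sketch with an assumed usec-granularity timeout loop:

	static int example_dec_ring_test(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		unsigned int i;
		uint32_t tmp;
		int r;

		WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
		r = amdgpu_ring_alloc(ring, 4);
		if (r)
			return r;
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
		amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
		amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
		amdgpu_ring_write(ring, 0xDEADBEEF);
		amdgpu_ring_commit(ring);
		for (i = 0; i < adev->usec_timeout; i++) {
			tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
			if (tmp == 0xDEADBEEF)
				return 0;	/* the packet made it through */
			udelay(1);
		}
		return -ETIMEDOUT;
	}
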
ring               27 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
ring               28 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
ring               29 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
ring               30 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring               32 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
ring               34 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring               36 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring               38 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
ring               41 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);
ring               42 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
ring               44 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
ring               46 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring               48 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring               50 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
ring               52 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring);
ring               53 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring);
ring               54 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
ring               56 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
ring               58 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
ring               60 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
ring               62 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
ring               63 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count);
ring              112 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_ring *ring;
ring              189 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[j].ring_dec;
ring              190 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->use_doorbell = true;
ring              191 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j;
ring              192 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		sprintf(ring->name, "vcn_dec_%d", j);
ring              193 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
ring              198 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring = &adev->vcn.inst[j].ring_enc[i];
ring              199 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->use_doorbell = true;
ring              200 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j;
ring              201 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
ring              202 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
ring              207 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[j].ring_jpeg;
ring              208 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->use_doorbell = true;
ring              209 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j;
ring              210 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		sprintf(ring->name, "vcn_jpeg_%d", j);
ring              211 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
ring              250 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_ring *ring;
ring              256 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[j].ring_dec;
ring              258 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring              259 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 						     ring->doorbell_index, j);
ring              261 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		r = amdgpu_ring_test_ring(ring);
ring              263 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
ring              268 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring = &adev->vcn.inst[j].ring_enc[i];
ring              269 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
ring              271 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			r = amdgpu_ring_test_ring(ring);
ring              273 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 				ring->sched.ready = false;
ring              278 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[j].ring_jpeg;
ring              279 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		r = amdgpu_ring_test_ring(ring);
ring              281 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
ring              302 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_ring *ring;
ring              308 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_dec;
ring              313 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->sched.ready = false;
ring              316 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring = &adev->vcn.inst[i].ring_enc[i];
ring              317 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			ring->sched.ready = false;
ring              320 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_jpeg;
ring              321 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->sched.ready = false;
ring              611 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_ring *ring;
ring              618 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_jpeg;
ring              662 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			lower_32_bits(ring->gpu_addr));
ring              664 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			upper_32_bits(ring->gpu_addr));
ring              668 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring              669 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR);
ring              713 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_ring *ring;
ring              845 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_dec;
ring              847 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		rb_bufsz = order_base_2(ring->ring_size);
ring              857 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			lower_32_bits(ring->gpu_addr));
ring              859 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			upper_32_bits(ring->gpu_addr));
ring              864 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
ring              866 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 				lower_32_bits(ring->wptr));
ring              867 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_enc[0];
ring              868 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
ring              869 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring              870 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
ring              871 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              872 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
ring              874 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		ring = &adev->vcn.inst[i].ring_enc[1];
ring              875 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
ring              876 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring              877 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
ring              878 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              879 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
ring              957 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
ring              959 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring              961 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
ring              971 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
ring              973 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring              975 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring->use_doorbell)
ring              976 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return adev->wb.wb[ring->wptr_offs];
ring              978 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
ring              988 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
ring              990 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring              992 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring->use_doorbell) {
ring              993 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring              994 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring              996 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
ring             1037 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
ring             1039 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1041 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
ring             1042 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
ring             1044 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
ring             1054 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
ring             1056 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1058 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
ring             1059 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		if (ring->use_doorbell)
ring             1060 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			return adev->wb.wb[ring->wptr_offs];
ring             1062 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
ring             1064 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		if (ring->use_doorbell)
ring             1065 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			return adev->wb.wb[ring->wptr_offs];
ring             1067 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
ring             1078 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
ring             1080 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1082 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
ring             1083 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		if (ring->use_doorbell) {
ring             1084 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1085 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1087 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
ring             1090 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		if (ring->use_doorbell) {
ring             1091 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1092 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1094 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
ring             1136 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
ring             1138 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1140 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR);
ring             1150 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
ring             1152 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1154 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring->use_doorbell)
ring             1155 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return adev->wb.wb[ring->wptr_offs];
ring             1157 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR);
ring             1167 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
ring             1169 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	struct amdgpu_device *adev = ring->adev;
ring             1171 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 	if (ring->use_doorbell) {
ring             1172 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
ring             1173 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
ring             1175 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 		WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
ring              443 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
ring              444 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
ring              445 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
ring              446 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
ring              447 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
ring              448 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
ring              449 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
ring              450 drivers/gpu/drm/amd/amdgpu/vega10_ih.c 	dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
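
The vega10 IH lines above unroll an eight-dword copy; the interrupt ring is little-endian in memory, so each dword passes through le32_to_cpu() before field decode. The equivalent loop:

	u32 dw[8];
	unsigned int n;

	for (n = 0; n < 8; n++)	/* one interrupt vector = 8 dwords */
		dw[n] = le32_to_cpu(ih->ring[ring_index + n]);
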
ring              909 drivers/gpu/drm/amd/amdgpu/vi.c static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
ring              911 drivers/gpu/drm/amd/amdgpu/vi.c 	if (!ring || !ring->funcs->emit_wreg) {
ring              915 drivers/gpu/drm/amd/amdgpu/vi.c 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
ring              920 drivers/gpu/drm/amd/amdgpu/vi.c 			      struct amdgpu_ring *ring)
ring              922 drivers/gpu/drm/amd/amdgpu/vi.c 	if (!ring || !ring->funcs->emit_wreg) {
ring              926 drivers/gpu/drm/amd/amdgpu/vi.c 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
ring              220 drivers/gpu/drm/i810/i810_dma.c 		if (dev_priv->ring.virtual_start)
ring              221 drivers/gpu/drm/i810/i810_dma.c 			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
ring              244 drivers/gpu/drm/i810/i810_dma.c 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
ring              250 drivers/gpu/drm/i810/i810_dma.c 	while (ring->space < n) {
ring              251 drivers/gpu/drm/i810/i810_dma.c 		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring              252 drivers/gpu/drm/i810/i810_dma.c 		ring->space = ring->head - (ring->tail + 8);
ring              253 drivers/gpu/drm/i810/i810_dma.c 		if (ring->space < 0)
ring              254 drivers/gpu/drm/i810/i810_dma.c 			ring->space += ring->Size;
ring              256 drivers/gpu/drm/i810/i810_dma.c 		if (ring->head != last_head) {
ring              258 drivers/gpu/drm/i810/i810_dma.c 			last_head = ring->head;
ring              263 drivers/gpu/drm/i810/i810_dma.c 			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
ring              277 drivers/gpu/drm/i810/i810_dma.c 	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
ring              279 drivers/gpu/drm/i810/i810_dma.c 	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring              280 drivers/gpu/drm/i810/i810_dma.c 	ring->tail = I810_READ(LP_RING + RING_TAIL);
ring              281 drivers/gpu/drm/i810/i810_dma.c 	ring->space = ring->head - (ring->tail + 8);
ring              282 drivers/gpu/drm/i810/i810_dma.c 	if (ring->space < 0)
ring              283 drivers/gpu/drm/i810/i810_dma.c 		ring->space += ring->Size;
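
The i810 lines above carry the classic ring free-space formula: space is head minus (tail + 8), wrapped by adding the ring size when the head sits behind the tail; the 8-byte slack keeps the tail from ever catching the head exactly, which would be indistinguishable from an empty ring. As a standalone function:

	static int example_ring_space(unsigned int head, unsigned int tail,
				      unsigned int size)
	{
		int space = head - (tail + 8);	/* 8-byte gap: full != empty */

		if (space < 0)
			space += size;		/* head wrapped behind tail */
		return space;
	}
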
ring              361 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.Start = init->ring_start;
ring              362 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.End = init->ring_end;
ring              363 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.Size = init->ring_size;
ring              365 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
ring              366 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.map.size = init->ring_size;
ring              367 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.map.type = _DRM_AGP;
ring              368 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.map.flags = 0;
ring              369 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.map.mtrr = 0;
ring              371 drivers/gpu/drm/i810/i810_dma.c 	drm_legacy_ioremap(&dev_priv->ring.map, dev);
ring              373 drivers/gpu/drm/i810/i810_dma.c 	if (dev_priv->ring.map.handle == NULL) {
ring              381 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
ring              383 drivers/gpu/drm/i810/i810_dma.c 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
ring              853 drivers/gpu/drm/i810/i810_dma.c 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
ring              870 drivers/gpu/drm/i810/i810_dma.c 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
ring               88 drivers/gpu/drm/i810/i810_drv.h 	drm_i810_ring_buffer_t ring;
ring              151 drivers/gpu/drm/i810/i810_drv.h 	if (dev_priv->ring.space < n*4)				\
ring              153 drivers/gpu/drm/i810/i810_drv.h 	dev_priv->ring.space -= n*4;				\
ring              154 drivers/gpu/drm/i810/i810_drv.h 	outring = dev_priv->ring.tail;				\
ring              155 drivers/gpu/drm/i810/i810_drv.h 	ringmask = dev_priv->ring.tail_mask;			\
ring              156 drivers/gpu/drm/i810/i810_drv.h 	virt = dev_priv->ring.virtual_start;			\
ring              162 drivers/gpu/drm/i810/i810_drv.h 	dev_priv->ring.tail = outring;				\
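
The i810_drv.h fragment above belongs to the legacy BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING macro family: reserve n dwords up front, cache tail/mask/base in locals, store each dword while advancing the tail modulo the power-of-two ring size, then write the tail back. A sketch of the store step under those assumptions (locals as named in the excerpt):

	#define EXAMPLE_OUT_RING(val) do {				\
		*(volatile unsigned int *)(virt + outring) = (val);	\
		outring = (outring + 4) & ringmask;	/* wrap by mask */	\
	} while (0)
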
ring             2142 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct intel_ring *ring = ce->ring;
ring             2150 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (intel_ring_update_space(ring) >= PAGE_SIZE)
ring             2161 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (rq->ring != ring)
ring             2165 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 				       ring->emit, ring->size) > ring->size / 2)
ring               72 drivers/gpu/drm/i915/gt/intel_context.c 			  ce->ring->head, ce->ring->tail);
ring              152 drivers/gpu/drm/i915/gt/intel_context.c 	intel_ring_unpin(ce->ring);
ring              163 drivers/gpu/drm/i915/gt/intel_context.c 	err = intel_ring_pin(ce->ring);
ring              183 drivers/gpu/drm/i915/gt/intel_context.c 	intel_ring_unpin(ce->ring);
ring              234 drivers/gpu/drm/i915/gt/intel_context.c 	ce->ring = __intel_context_ring_size(SZ_16K);
ring               53 drivers/gpu/drm/i915/gt/intel_context_types.h 	struct intel_ring *ring;
ring              211 drivers/gpu/drm/i915/gt/intel_engine.h int intel_ring_pin(struct intel_ring *ring);
ring              212 drivers/gpu/drm/i915/gt/intel_engine.h void intel_ring_reset(struct intel_ring *ring, u32 tail);
ring              213 drivers/gpu/drm/i915/gt/intel_engine.h unsigned int intel_ring_update_space(struct intel_ring *ring);
ring              214 drivers/gpu/drm/i915/gt/intel_engine.h void intel_ring_unpin(struct intel_ring *ring);
ring              217 drivers/gpu/drm/i915/gt/intel_engine.h static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
ring              219 drivers/gpu/drm/i915/gt/intel_engine.h 	kref_get(&ring->ref);
ring              220 drivers/gpu/drm/i915/gt/intel_engine.h 	return ring;
ring              223 drivers/gpu/drm/i915/gt/intel_engine.h static inline void intel_ring_put(struct intel_ring *ring)
ring              225 drivers/gpu/drm/i915/gt/intel_engine.h 	kref_put(&ring->ref, intel_ring_free);
ring              245 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
ring              248 drivers/gpu/drm/i915/gt/intel_engine.h static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
ring              250 drivers/gpu/drm/i915/gt/intel_engine.h 	return pos & (ring->size - 1);
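
intel_ring_wrap() above relies on the ring size being a power of two, which turns the modulo into a single mask:

	/* assumes size is a power of two, as i915 rings are */
	static inline u32 example_wrap(u32 pos, u32 size)
	{
		return pos & (size - 1);
	}
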
ring              253 drivers/gpu/drm/i915/gt/intel_engine.h static inline int intel_ring_direction(const struct intel_ring *ring,
ring              256 drivers/gpu/drm/i915/gt/intel_engine.h 	typecheck(typeof(ring->size), next);
ring              257 drivers/gpu/drm/i915/gt/intel_engine.h 	typecheck(typeof(ring->size), prev);
ring              258 drivers/gpu/drm/i915/gt/intel_engine.h 	return (next - prev) << ring->wrap;
ring              262 drivers/gpu/drm/i915/gt/intel_engine.h intel_ring_offset_valid(const struct intel_ring *ring,
ring              265 drivers/gpu/drm/i915/gt/intel_engine.h 	if (pos & -ring->size) /* must be strictly within the ring */
ring              277 drivers/gpu/drm/i915/gt/intel_engine.h 	u32 offset = addr - rq->ring->vaddr;
ring              278 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON(offset > rq->ring->size);
ring              279 drivers/gpu/drm/i915/gt/intel_engine.h 	return intel_ring_wrap(rq->ring, offset);
ring              283 drivers/gpu/drm/i915/gt/intel_engine.h assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
ring              285 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
ring              302 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
ring              303 drivers/gpu/drm/i915/gt/intel_engine.h 		   tail < ring->head);
ring              308 drivers/gpu/drm/i915/gt/intel_engine.h intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
ring              316 drivers/gpu/drm/i915/gt/intel_engine.h 	assert_ring_tail_valid(ring, tail);
ring              317 drivers/gpu/drm/i915/gt/intel_engine.h 	ring->tail = tail;
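
intel_ring_wrap() reduces any offset with pos & (size - 1), and intel_ring_direction() orders two wrapped offsets by shifting their difference so the ring's top bit lands in the machine sign bit (the typecheck() lines pin both operands to the type of ring->size; ring->wrap itself is precomputed at ring creation, visible further down in intel_ringbuffer.c). A standalone sketch of that trick, assuming a power-of-two ring size; not the i915 code itself:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t ring_wrap(uint32_t size, uint32_t pos)
	{
		return pos & (size - 1);
	}

	static int ring_direction(uint32_t size, uint32_t next, uint32_t prev)
	{
		/* gcc/clang builtin; equals ilog2() for powers of two */
		unsigned int wrap = 32 - __builtin_ctz(size);

		/* the sign of the shifted difference orders the two offsets */
		return (int)((next - prev) << wrap);
	}

	int main(void)
	{
		uint32_t size = 4096;

		printf("%u\n", ring_wrap(size, 4100));			/* 4 */
		printf("%d\n", ring_direction(size, 64, 4032) > 0);	/* 1: 64 is ahead of 4032 */
		printf("%d\n", ring_direction(size, 4032, 64) > 0);	/* 0: 4032 is behind 64 */
		return 0;
	}
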
ring              663 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	struct intel_ring ring;
ring              683 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->ring.vaddr = frame->cs;
ring              684 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->ring.size = sizeof(frame->cs);
ring              685 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->ring.effective_size = frame->ring.size;
ring              686 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	intel_ring_update_space(&frame->ring);
ring              690 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->rq.ring = &frame->ring;
ring              740 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	ce->ring = __intel_context_ring_size(SZ_4K);
ring             1293 drivers/gpu/drm/i915/gt/intel_engine_cs.c 						i915_ggtt_offset(rq->ring->vma),
ring             1305 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				 i915_ggtt_offset(rq->ring->vma),
ring             1323 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	void *ring;
ring             1334 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		size += rq->ring->size;
ring             1336 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	ring = kmalloc(size, GFP_ATOMIC);
ring             1337 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (ring) {
ring             1338 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		const void *vaddr = rq->ring->vaddr;
ring             1343 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			len = rq->ring->size - head;
ring             1344 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			memcpy(ring, vaddr + head, len);
ring             1347 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		memcpy(ring + len, vaddr + head, size - len);
ring             1349 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		hexdump(m, ring, size);
ring             1350 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		kfree(ring);
ring             1389 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   i915_ggtt_offset(rq->ring->vma));
ring             1391 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->head);
ring             1393 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->tail);
ring             1395 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->emit);
ring             1397 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->space);
ring             1534 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	u32 ring = ENGINE_READ(rq->engine, RING_START);
ring             1536 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	return ring == i915_ggtt_offset(rq->ring->vma);
ring              335 drivers/gpu/drm/i915/gt/intel_engine_types.h 		struct intel_ring *ring;
ring              151 drivers/gpu/drm/i915/gt/intel_engine_user.c static int legacy_ring_idx(const struct legacy_ring *ring)
ring              162 drivers/gpu/drm/i915/gt/intel_engine_user.c 	if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
ring              165 drivers/gpu/drm/i915/gt/intel_engine_user.c 	if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
ring              168 drivers/gpu/drm/i915/gt/intel_engine_user.c 	return map[ring->class].base + ring->instance;
ring              171 drivers/gpu/drm/i915/gt/intel_engine_user.c static void add_legacy_ring(struct legacy_ring *ring,
ring              176 drivers/gpu/drm/i915/gt/intel_engine_user.c 	if (engine->gt != ring->gt || engine->class != ring->class) {
ring              177 drivers/gpu/drm/i915/gt/intel_engine_user.c 		ring->gt = engine->gt;
ring              178 drivers/gpu/drm/i915/gt/intel_engine_user.c 		ring->class = engine->class;
ring              179 drivers/gpu/drm/i915/gt/intel_engine_user.c 		ring->instance = 0;
ring              182 drivers/gpu/drm/i915/gt/intel_engine_user.c 	idx = legacy_ring_idx(ring);
ring              186 drivers/gpu/drm/i915/gt/intel_engine_user.c 	GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
ring              187 drivers/gpu/drm/i915/gt/intel_engine_user.c 	ring->gt->engine[idx] = engine;
ring              188 drivers/gpu/drm/i915/gt/intel_engine_user.c 	ring->instance++;
ring              195 drivers/gpu/drm/i915/gt/intel_engine_user.c 	struct legacy_ring ring = {};
ring              232 drivers/gpu/drm/i915/gt/intel_engine_user.c 		add_legacy_ring(&ring, engine);
ring               32 drivers/gpu/drm/i915/gt/intel_hangcheck.c 	u32 ring;
ring              135 drivers/gpu/drm/i915/gt/intel_hangcheck.c 	hc->ring = ENGINE_READ(engine, RING_START);
ring              143 drivers/gpu/drm/i915/gt/intel_hangcheck.c 	engine->hangcheck.last_ring = hc->ring;
ring              154 drivers/gpu/drm/i915/gt/intel_hangcheck.c 	if (engine->hangcheck.last_ring != hc->ring)
ring              235 drivers/gpu/drm/i915/gt/intel_lrc.c 				     struct intel_ring *ring);
ring              669 drivers/gpu/drm/i915/gt/intel_lrc.c 	tail = intel_ring_set_tail(rq->ring, rq->tail);
ring              671 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
ring             1707 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_put(ce->ring);
ring             1759 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_reset(ce->ring, ce->ring->tail);
ring             1766 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_ring *ring = ce->ring;
ring             1769 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
ring             1770 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
ring             1772 drivers/gpu/drm/i915/gt/intel_lrc.c 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
ring             1773 drivers/gpu/drm/i915/gt/intel_lrc.c 	regs[CTX_RING_HEAD + 1] = ring->head;
ring             1774 drivers/gpu/drm/i915/gt/intel_lrc.c 	regs[CTX_RING_TAIL + 1] = ring->tail;
ring             1853 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_reset(ce->ring, 0);
ring             2485 drivers/gpu/drm/i915/gt/intel_lrc.c 		ce->ring->head = ce->ring->tail;
ring             2489 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
ring             2535 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists_init_reg_state(regs, ce, engine, ce->ring);
ring             2539 drivers/gpu/drm/i915/gt/intel_lrc.c 		  engine->name, ce->ring->head, ce->ring->tail);
ring             2540 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_update_space(ce->ring);
ring             2937 drivers/gpu/drm/i915/gt/intel_lrc.c 	assert_ring_tail_valid(request->ring, request->tail);
ring             3191 drivers/gpu/drm/i915/gt/intel_lrc.c 				     struct intel_ring *ring)
ring             3222 drivers/gpu/drm/i915/gt/intel_lrc.c 		RING_CTL_SIZE(ring->size) | RING_VALID);
ring             3295 drivers/gpu/drm/i915/gt/intel_lrc.c 		    struct intel_ring *ring)
ring             3333 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists_init_reg_state(regs, ce, engine, ring);
ring             3351 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_ring *ring;
ring             3389 drivers/gpu/drm/i915/gt/intel_lrc.c 	ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
ring             3390 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (IS_ERR(ring)) {
ring             3391 drivers/gpu/drm/i915/gt/intel_lrc.c 		ret = PTR_ERR(ring);
ring             3395 drivers/gpu/drm/i915/gt/intel_lrc.c 	ret = populate_lr_context(ce, ctx_obj, engine, ring);
ring             3401 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce->ring = ring;
ring             3407 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_put(ring);
ring             4018 drivers/gpu/drm/i915/gt/intel_lrc.c 		execlists_init_reg_state(regs, ce, engine, ce->ring);
ring             4022 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce->ring->head = head;
ring             4023 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_update_space(ce->ring);
ring               50 drivers/gpu/drm/i915/gt/intel_ringbuffer.c unsigned int intel_ring_update_space(struct intel_ring *ring)
ring               54 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	space = __intel_ring_space(ring->head, ring->emit, ring->size);
ring               56 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->space = space;
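
intel_ring_update_space() caches the writer-visible free space from head, emit and size. The __intel_ring_space() helper it calls is not part of this listing, but the usual form of such masked accounting reserves a little slack so that a completely full ring never aliases an empty one; a sketch under that assumption (the slack value here is illustrative, not the i915 constant):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size,
				   uint32_t slack)
	{
		/* power-of-two size assumed; subtraction wraps through the mask */
		return (head - tail - slack) & (size - 1);
	}

	int main(void)
	{
		/* empty ring (head == tail) reports size - slack, never zero-ambiguous */
		printf("%u\n", ring_space(0, 0, 4096, 64));	/* 4032 */
		/* writer 256 bytes ahead of the reader */
		printf("%u\n", ring_space(512, 768, 4096, 64));	/* 3776 */
		return 0;
	}
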
ring              332 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring              435 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring              452 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring              483 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring              641 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring = engine->legacy.ring;
ring              645 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		  engine->name, ring->head, ring->tail);
ring              689 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
ring              692 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
ring              693 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
ring              694 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_update_space(ring);
ring              697 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ENGINE_WRITE(engine, RING_HEAD, ring->head);
ring              698 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ENGINE_WRITE(engine, RING_TAIL, ring->head);
ring              701 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
ring              713 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			  ENGINE_READ(engine, RING_HEAD), ring->head,
ring              714 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			  ENGINE_READ(engine, RING_TAIL), ring->tail,
ring              716 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			  i915_ggtt_offset(ring->vma));
ring              726 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (ring->tail != ring->head) {
ring              727 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
ring              837 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		GEM_BUG_ON(rq->ring != engine->legacy.ring);
ring              840 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		head = engine->legacy.ring->tail;
ring              842 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
ring              935 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		     intel_ring_set_tail(request->ring, request->tail));
ring              953 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring              978 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
ring             1187 drivers/gpu/drm/i915/gt/intel_ringbuffer.c int intel_ring_pin(struct intel_ring *ring)
ring             1189 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma = ring->vma;
ring             1194 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (atomic_fetch_inc(&ring->pin_count))
ring             1223 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(ring->vaddr);
ring             1224 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->vaddr = addr;
ring             1231 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	atomic_dec(&ring->pin_count);
ring             1235 drivers/gpu/drm/i915/gt/intel_ringbuffer.c void intel_ring_reset(struct intel_ring *ring, u32 tail)
ring             1237 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	tail = intel_ring_wrap(ring, tail);
ring             1238 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->tail = tail;
ring             1239 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->head = tail;
ring             1240 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->emit = tail;
ring             1241 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_update_space(ring);
ring             1244 drivers/gpu/drm/i915/gt/intel_ringbuffer.c void intel_ring_unpin(struct intel_ring *ring)
ring             1246 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_vma *vma = ring->vma;
ring             1248 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (!atomic_dec_and_test(&ring->pin_count))
ring             1252 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_reset(ring, ring->emit);
ring             1260 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(!ring->vaddr);
ring             1261 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->vaddr = NULL;
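
intel_ring_pin()/intel_ring_unpin() follow the first-pin/last-unpin idiom: atomic_fetch_inc() returning non-zero means the mapping already exists and the call degenerates to a refcount bump, while atomic_dec_and_test() gates the teardown (including the intel_ring_reset() back to ring->emit) to the final unpin. A minimal C11 model of the same idiom, not the i915 code:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pin_count;
	static void *vaddr;

	static void pin(void)
	{
		if (atomic_fetch_add(&pin_count, 1))
			return;			/* already mapped: just a refcount bump */
		vaddr = (void *)0x1000;		/* stand-in for the real mapping step */
		printf("mapped\n");
	}

	static void unpin(void)
	{
		if (atomic_fetch_sub(&pin_count, 1) != 1)
			return;			/* still pinned by someone else */
		vaddr = NULL;			/* stand-in for unmapping */
		printf("unmapped\n");
	}

	int main(void)
	{
		pin(); pin();		/* maps exactly once */
		unpin(); unpin();	/* unmaps only on the last unpin */
		return 0;
	}
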
ring             1302 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring;
ring             1308 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring             1309 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (!ring)
ring             1312 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	kref_init(&ring->ref);
ring             1314 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->size = size;
ring             1315 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
ring             1321 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->effective_size = size;
ring             1323 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ring->effective_size -= 2 * CACHELINE_BYTES;
ring             1325 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_update_space(ring);
ring             1329 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		kfree(ring);
ring             1332 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->vma = vma;
ring             1334 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return ring;
ring             1339 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
ring             1341 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_close(ring->vma);
ring             1342 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	i915_vma_put(ring->vma);
ring             1344 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	kfree(ring);
ring             1475 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(!engine->legacy.ring);
ring             1476 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ce->ring = engine->legacy.ring;
ring             1514 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_reset(ce->ring, 0);
ring             1869 drivers/gpu/drm/i915/gt/intel_ringbuffer.c wait_for_space(struct intel_ring *ring,
ring             1876 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (intel_ring_update_space(ring) >= bytes)
ring             1881 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		if (target->ring != ring)
ring             1886 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 						ring->emit, ring->size))
ring             1901 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_update_space(ring);
ring             1902 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(ring->space < bytes);
ring             1908 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring = rq->ring;
ring             1909 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	const unsigned int remain_usable = ring->effective_size - ring->emit;
ring             1919 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(total_bytes > ring->effective_size);
ring             1922 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		const int remain_actual = ring->size - ring->emit;
ring             1943 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (unlikely(total_bytes > ring->space)) {
ring             1957 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = wait_for_space(ring, rq->timeline, total_bytes);
ring             1964 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		GEM_BUG_ON(need_wrap > ring->space);
ring             1965 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
ring             1969 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
ring             1970 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ring->space -= need_wrap;
ring             1971 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ring->emit = 0;
ring             1974 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(ring->emit > ring->size - bytes);
ring             1975 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(ring->space < bytes);
ring             1976 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = ring->vaddr + ring->emit;
ring             1978 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->emit += bytes;
ring             1979 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring->space -= bytes;
ring             1990 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
ring             2004 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
ring             2162 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_unpin(engine->legacy.ring);
ring             2163 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_put(engine->legacy.ring);
ring             2340 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring;
ring             2354 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ring = intel_engine_create_ring(engine, SZ_16K);
ring             2355 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (IS_ERR(ring)) {
ring             2356 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		err = PTR_ERR(ring);
ring             2360 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	err = intel_ring_pin(ring);
ring             2364 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(engine->legacy.ring);
ring             2365 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	engine->legacy.ring = ring;
ring             2377 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_unpin(ring);
ring             2379 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_put(ring);
ring               49 drivers/gpu/drm/i915/gt/mock_engine.c 	struct intel_ring *ring;
ring               51 drivers/gpu/drm/i915/gt/mock_engine.c 	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
ring               52 drivers/gpu/drm/i915/gt/mock_engine.c 	if (!ring)
ring               55 drivers/gpu/drm/i915/gt/mock_engine.c 	kref_init(&ring->ref);
ring               56 drivers/gpu/drm/i915/gt/mock_engine.c 	ring->size = sz;
ring               57 drivers/gpu/drm/i915/gt/mock_engine.c 	ring->effective_size = sz;
ring               58 drivers/gpu/drm/i915/gt/mock_engine.c 	ring->vaddr = (void *)(ring + 1);
ring               59 drivers/gpu/drm/i915/gt/mock_engine.c 	atomic_set(&ring->pin_count, 1);
ring               61 drivers/gpu/drm/i915/gt/mock_engine.c 	intel_ring_update_space(ring);
ring               63 drivers/gpu/drm/i915/gt/mock_engine.c 	return ring;
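
mock_ring() allocates the struct and its backing store in a single kzalloc(sizeof(*ring) + sz, ...) and points vaddr at (ring + 1), so one kfree() of the struct releases both, as the teardown below it shows. The same trailing-storage trick in standalone form (illustrative names, not the selftest code):

	#include <stdio.h>
	#include <stdlib.h>

	struct buf {
		size_t size;
		void *vaddr;
	};

	static struct buf *buf_create(size_t sz)
	{
		struct buf *b = calloc(1, sizeof(*b) + sz);
		if (!b)
			return NULL;
		b->size = sz;
		b->vaddr = (void *)(b + 1);	/* storage immediately follows the header */
		return b;
	}

	int main(void)
	{
		struct buf *b = buf_create(4096);
		if (!b)
			return 1;
		printf("header %p, payload %p\n", (void *)b, b->vaddr);
		free(b);	/* a single free releases header and payload together */
		return 0;
	}
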
ring              123 drivers/gpu/drm/i915/gt/mock_engine.c 		kfree(ce->ring);
ring              133 drivers/gpu/drm/i915/gt/mock_engine.c 	ce->ring = mock_ring(ce->engine);
ring              134 drivers/gpu/drm/i915/gt/mock_engine.c 	if (!ce->ring)
ring             1191 drivers/gpu/drm/i915/gt/selftest_lrc.c 			ring_size += rq->ring->size;
ring             1192 drivers/gpu/drm/i915/gt/selftest_lrc.c 		ring_size = rq->ring->size / ring_size;
ring              468 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
ring              502 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		flush_ggtt_writes(rq->ring->vma);
ring             3034 drivers/gpu/drm/i915/gvt/cmd_parser.c 	unsigned int ring;
ring             3036 drivers/gpu/drm/i915/gvt/cmd_parser.c 	for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
ring             3037 drivers/gpu/drm/i915/gvt/cmd_parser.c 		info = find_cmd_entry(gvt, opcode, ring);
ring              624 drivers/gpu/drm/i915/gvt/scheduler.c 	int ring = workload->ring_id;
ring              641 drivers/gpu/drm/i915/gvt/scheduler.c 	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
ring             1270 drivers/gpu/drm/i915/gvt/scheduler.c 			ce->ring = __intel_context_ring_size(ring_size);
ring              325 drivers/gpu/drm/i915/i915_debugfs.c 				per_file_stats(0, ce->ring->vma->obj, &kstats);
ring             1559 drivers/gpu/drm/i915/i915_debugfs.c static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
ring             1562 drivers/gpu/drm/i915/i915_debugfs.c 		   ring->space, ring->head, ring->tail, ring->emit);
ring             1609 drivers/gpu/drm/i915/i915_debugfs.c 				describe_ctx_ring(m, ce->ring);
ring             1179 drivers/gpu/drm/i915/i915_gpu_error.c 	erq->start = i915_ggtt_offset(request->ring->vma);
ring             1411 drivers/gpu/drm/i915/i915_gpu_error.c 				      request->ring->vma,
ring             1414 drivers/gpu/drm/i915/i915_gpu_error.c 		ee->cpu_ring_head = request->ring->head;
ring             1415 drivers/gpu/drm/i915/i915_gpu_error.c 		ee->cpu_ring_tail = request->ring->tail;
ring              244 drivers/gpu/drm/i915/i915_request.c 	rq->ring->head = rq->postfix;
ring              436 drivers/gpu/drm/i915/i915_request.c 				     request->ring->vaddr + request->postfix);
ring              694 drivers/gpu/drm/i915/i915_request.c 	rq->ring = ce->ring;
ring              741 drivers/gpu/drm/i915/i915_request.c 	rq->head = rq->ring->emit;
ring              747 drivers/gpu/drm/i915/i915_request.c 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
ring              753 drivers/gpu/drm/i915/i915_request.c 	ce->ring->emit = rq->head;
ring             1102 drivers/gpu/drm/i915/i915_request.c 	void *vaddr = rq->ring->vaddr;
ring             1118 drivers/gpu/drm/i915/i915_request.c 		memset(vaddr + head, 0, rq->ring->size - head);
ring             1190 drivers/gpu/drm/i915/i915_request.c 	struct intel_ring *ring = rq->ring;
ring             1201 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(rq->reserved_space > ring->space);
ring              116 drivers/gpu/drm/i915/i915_request.h 	struct intel_ring *ring;
ring             1089 drivers/gpu/drm/i915/selftests/i915_request.c 		ret = rq->ring->size - rq->reserved_space;
ring             1092 drivers/gpu/drm/i915/selftests/i915_request.c 		sz = rq->ring->emit - rq->head;
ring             1094 drivers/gpu/drm/i915/selftests/i915_request.c 			sz += rq->ring->size;
ring               15 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring               17 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_PKT3(ring, CP_ME_INIT, 18);
ring               20 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x000003ff);
ring               22 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               24 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               26 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
ring               27 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
ring               28 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
ring               29 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
ring               30 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
ring               31 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
ring               32 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
ring               33 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
ring               37 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x80000180);
ring               39 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000001);
ring               42 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               44 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               46 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x200001f2);
ring               48 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               50 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               53 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
ring               54 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	OUT_RING(ring, 1);
ring               56 drivers/gpu/drm/msm/adreno/a2xx_gpu.c 	gpu->funcs->flush(gpu, ring);
ring               37 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring               39 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_PKT3(ring, CP_ME_INIT, 17);
ring               40 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x000003f7);
ring               41 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               42 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               43 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               44 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000080);
ring               45 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000100);
ring               46 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000180);
ring               47 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00006600);
ring               48 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000150);
ring               49 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x0000014e);
ring               50 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000154);
ring               51 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000001);
ring               52 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               53 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               54 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               55 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               56 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring               58 drivers/gpu/drm/msm/adreno/a3xx_gpu.c 	gpu->funcs->flush(gpu, ring);
ring              111 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring              113 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_PKT3(ring, CP_ME_INIT, 17);
ring              114 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x000003f7);
ring              115 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              116 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              117 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              118 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000080);
ring              119 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000100);
ring              120 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000180);
ring              121 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00006600);
ring              122 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000150);
ring              123 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x0000014e);
ring              124 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000154);
ring              125 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000001);
ring              126 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              127 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              128 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              129 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              130 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              132 drivers/gpu/drm/msm/adreno/a4xx_gpu.c 	gpu->funcs->flush(gpu, ring);
ring               21 drivers/gpu/drm/msm/adreno/a5xx_gpu.c static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring               28 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	spin_lock_irqsave(&ring->lock, flags);
ring               31 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	ring->cur = ring->next;
ring               34 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	wptr = get_wptr(ring);
ring               36 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring               42 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
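
a5xx_flush() commits work staged through ring->next by copying it into ring->cur under the ring lock, derives the new wptr there, and only pokes the hardware doorbell after dropping the lock (and only when no preemption is in flight); a6xx_flush() further down follows the same shape. A minimal pthread model of that publish-then-kick ordering (the driver itself uses spin_lock_irqsave(); all names here are illustrative):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ring {
		pthread_mutex_t lock;
		uint32_t start, next, cur;	/* offsets in dwords */
		uint32_t size;
	};

	static void flush(struct ring *r)
	{
		uint32_t wptr;

		pthread_mutex_lock(&r->lock);
		r->cur = r->next;			/* publish staged commands */
		wptr = (r->cur - r->start) % r->size;	/* doorbell value */
		pthread_mutex_unlock(&r->lock);

		printf("write wptr doorbell = %u\n", wptr);	/* hw poke, outside the lock */
	}

	int main(void)
	{
		struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 37, 0, 8192 };
		flush(&r);	/* prints 37 */
		return 0;
	}
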
ring               50 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
ring               84 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 				adreno_wait_ring(ring, 1);
ring               85 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 				OUT_RING(ring, ptr[i]);
ring               94 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	a5xx_flush(gpu, ring);
ring              101 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	a5xx_idle(gpu, ring);
ring              102 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	ring->memptrs->fence = submit->seqno;
ring              112 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
ring              121 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
ring              122 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x02);
ring              125 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              126 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              129 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
ring              130 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
ring              131 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
ring              134 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              135 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 1);
ring              138 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
ring              139 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x02);
ring              142 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
ring              143 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x02);
ring              155 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
ring              156 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
ring              157 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
ring              158 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
ring              169 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
ring              170 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              171 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              172 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              173 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              174 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              177 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
ring              178 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              181 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
ring              182 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, submit->seqno);
ring              188 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
ring              189 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
ring              190 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
ring              191 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
ring              192 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, submit->seqno);
ring              195 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
ring              201 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00);
ring              202 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00);
ring              204 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              206 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              208 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	a5xx_flush(gpu, ring);
ring              333 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring              335 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_ME_INIT, 8);
ring              337 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x0000002F);
ring              340 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00000003);
ring              343 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x20000000);
ring              346 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              347 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              355 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_RING(ring, 0x0000000B);
ring              358 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		OUT_RING(ring, 0x00000000);
ring              361 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              362 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              364 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	gpu->funcs->flush(gpu, ring);
ring              365 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
ring              372 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring              378 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              379 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0);
ring              382 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
ring              383 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
ring              384 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
ring              387 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              388 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 1);
ring              390 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
ring              391 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00);
ring              393 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
ring              394 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              396 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
ring              397 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              400 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
ring              401 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00);
ring              402 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x00);
ring              403 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              404 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	OUT_RING(ring, 0x01);
ring              406 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	gpu->funcs->flush(gpu, ring);
ring              408 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
ring              810 drivers/gpu/drm/msm/adreno/a5xx_gpu.c bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring              815 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	if (ring != a5xx_gpu->cur_ring) {
ring              821 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	if (!adreno_idle(gpu, ring))
ring              961 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
ring              964 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		ring ? ring->id : -1, ring ? ring->seqno : 0,
ring              143 drivers/gpu/drm/msm/adreno/a5xx_gpu.h bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
ring              224 drivers/gpu/drm/msm/adreno/a5xx_power.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring              230 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              231 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_RING(ring, 0);
ring              234 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
ring              235 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
ring              236 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
ring              237 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_RING(ring, a5xx_gpu->gpmu_dwords);
ring              240 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
ring              241 drivers/gpu/drm/msm/adreno/a5xx_power.c 	OUT_RING(ring, 1);
ring              243 drivers/gpu/drm/msm/adreno/a5xx_power.c 	gpu->funcs->flush(gpu, ring);
ring              245 drivers/gpu/drm/msm/adreno/a5xx_power.c 	if (!a5xx_idle(gpu, ring)) {
ring               40 drivers/gpu/drm/msm/adreno/a5xx_preempt.c static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring               45 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	if (!ring)
ring               48 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	spin_lock_irqsave(&ring->lock, flags);
ring               49 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	wptr = get_wptr(ring);
ring               50 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring               63 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		struct msm_ringbuffer *ring = gpu->rb[i];
ring               65 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		spin_lock_irqsave(&ring->lock, flags);
ring               66 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		empty = (get_wptr(ring) == ring->memptrs->rptr);
ring               67 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring               70 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 			return ring;
ring               96 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	struct msm_ringbuffer *ring;
ring              109 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	ring = get_next_ring(gpu);
ring              115 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	if (!ring || (a5xx_gpu->cur_ring == ring)) {
ring              135 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	spin_lock_irqsave(&ring->lock, flags);
ring              136 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
ring              137 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              142 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		a5xx_gpu->preempt_iova[ring->id]);
ring              144 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->next_ring = ring;
ring              224 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		struct msm_ringbuffer *ring)
ring              241 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->preempt_bo[ring->id] = bo;
ring              242 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->preempt_iova[ring->id] = iova;
ring              243 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->preempt[ring->id] = ptr;
ring              251 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	ptr->rptr_addr = rbmemptr(ring, rptr);
ring               33 drivers/gpu/drm/msm/adreno/a6xx_gpu.c bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring               36 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	if (!adreno_idle(gpu, ring))
ring               52 drivers/gpu/drm/msm/adreno/a6xx_gpu.c static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring               57 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	spin_lock_irqsave(&ring->lock, flags);
ring               60 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	ring->cur = ring->next;
ring               63 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	wptr = get_wptr(ring);
ring               65 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring               73 drivers/gpu/drm/msm/adreno/a6xx_gpu.c static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
ring               76 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
ring               77 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, counter | (1 << 30) | (2 << 18));
ring               78 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, lower_32_bits(iova));
ring               79 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, upper_32_bits(iova));
ring               89 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
ring               92 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
ring               93 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		rbmemptr_stats(ring, index, cpcycles_start));
ring              100 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
ring              101 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		rbmemptr_stats(ring, index, alwayson_start));
ring              104 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
ring              105 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
ring              107 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
ring              108 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
ring              120 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
ring              121 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
ring              122 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
ring              123 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
ring              128 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
ring              129 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		rbmemptr_stats(ring, index, cpcycles_end));
ring              130 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
ring              131 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		rbmemptr_stats(ring, index, alwayson_end));
ring              134 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
ring              135 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, submit->seqno);
ring              141 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
ring              142 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
ring              143 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
ring              144 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
ring              145 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, submit->seqno);
ring              151 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	a6xx_flush(gpu, ring);
ring              294 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	struct msm_ringbuffer *ring = gpu->rb[0];
ring              296 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_PKT7(ring, CP_ME_INIT, 8);
ring              298 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x0000002f);
ring              301 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000003);
ring              304 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x20000000);
ring              307 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              308 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              311 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              314 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              315 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, 0x00000000);
ring              317 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	a6xx_flush(gpu, ring);
ring              318 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
ring              667 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
ring              677 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		ring ? ring->id : -1, ring ? ring->seqno : 0,
ring              344 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
ring              346 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		if (!ring)
ring              349 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		ring->cur = ring->start;
ring              350 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		ring->next = ring->start;
ring              353 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		ring->memptrs->fence = ring->seqno;
ring              354 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		ring->memptrs->rptr = 0;
ring              382 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct msm_ringbuffer *ring)
ring              385 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		return ring->memptrs->rptr = adreno_gpu_read(
ring              388 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		return ring->memptrs->rptr;
ring              419 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
ring              433 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
ring              435 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
ring              436 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_RING(ring, submit->cmd[i].size);
ring              437 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_PKT2(ring);
ring              442 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
ring              443 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	OUT_RING(ring, submit->seqno);
ring              450 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
ring              451 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, HLSQ_FLUSH);
ring              455 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
ring              456 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	OUT_RING(ring, 0x00000000);
ring              460 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
ring              461 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
ring              462 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, rbmemptr(ring, fence));
ring              463 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, submit->seqno);
ring              466 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_PKT3(ring, CP_EVENT_WRITE, 3);
ring              467 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, CACHE_FLUSH_TS);
ring              468 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, rbmemptr(ring, fence));
ring              469 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, submit->seqno);
ring              470 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_PKT3(ring, CP_INTERRUPT, 1);
ring              471 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, 0x80000000);
ring              477 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
ring              478 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
ring              479 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		OUT_RING(ring, 0x00000000);
ring              483 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	gpu->funcs->flush(gpu, ring);
ring              486 drivers/gpu/drm/msm/adreno/adreno_gpu.c void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring              492 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	ring->cur = ring->next;
ring              499 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	wptr = get_wptr(ring);
ring              507 drivers/gpu/drm/msm/adreno/adreno_gpu.c bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
ring              510 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	uint32_t wptr = get_wptr(ring);
ring              513 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
ring              518 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
ring              535 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
ring              536 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].iova = gpu->rb[i]->iova;
ring              537 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].seqno = gpu->rb[i]->seqno;
ring              538 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
ring              539 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].wptr = get_wptr(gpu->rb[i]);
ring              542 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		size = state->ring[i].wptr;
ring              545 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
ring              550 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
ring              551 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			if (state->ring[i].data) {
ring              552 drivers/gpu/drm/msm/adreno/adreno_gpu.c 				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
ring              553 drivers/gpu/drm/msm/adreno/adreno_gpu.c 				state->ring[i].data_size = size << 2;
ring              592 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
ring              593 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		kvfree(state->ring[i].data);
ring              715 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
ring              716 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
ring              717 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
ring              718 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
ring              719 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
ring              722 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		adreno_show_object(p, &state->ring[i].data,
ring              723 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			state->ring[i].data_size, &state->ring[i].encoded);
ring              768 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
ring              771 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			ring->memptrs->fence,
ring              772 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			ring->seqno);
ring              774 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		printk("rptr:     %d\n", get_rptr(adreno_gpu, ring));
ring              775 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		printk("rb wptr:  %d\n", get_wptr(ring));
ring              802 drivers/gpu/drm/msm/adreno/adreno_gpu.c static uint32_t ring_freewords(struct msm_ringbuffer *ring)
ring              804 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
ring              807 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	uint32_t wptr = ring->next - ring->start;
ring              808 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	uint32_t rptr = get_rptr(adreno_gpu, ring);
ring              812 drivers/gpu/drm/msm/adreno/adreno_gpu.c void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
ring              814 drivers/gpu/drm/msm/adreno/adreno_gpu.c 	if (spin_until(ring_freewords(ring) >= ndwords))
ring              815 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		DRM_DEV_ERROR(ring->gpu->dev->dev,
ring              817 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			ring->id);
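
ring_freewords() compares a CPU-side write pointer (ring->next - ring->start, in dwords) against the GPU-reported rptr, and adreno_wait_ring() spins until enough dwords are free. The return expression is not part of this listing, so the exact formula below is an assumption: the customary modular form keeps one slot unused so a full ring is distinguishable from an empty one.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t freewords(uint32_t rptr, uint32_t wptr, uint32_t size_dwords)
	{
		/* one dword of headroom: full (wptr just behind rptr) reports 0 */
		return (rptr + (size_dwords - 1) - wptr) % size_dwords;
	}

	int main(void)
	{
		uint32_t size = 32768 / 4;	/* e.g. a 32 KiB ring holds 8192 dwords */

		printf("%u\n", freewords(0, 0, size));		/* empty: size - 1 free */
		printf("%u\n", freewords(100, 99, size));	/* nearly full: 0 free */
		return 0;
	}
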
ring              228 drivers/gpu/drm/msm/adreno/adreno_gpu.h void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
ring              229 drivers/gpu/drm/msm/adreno/adreno_gpu.h bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
ring              236 drivers/gpu/drm/msm/adreno/adreno_gpu.h void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
ring              259 drivers/gpu/drm/msm/adreno/adreno_gpu.h OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
ring              261 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	adreno_wait_ring(ring, cnt+1);
ring              262 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
ring              267 drivers/gpu/drm/msm/adreno/adreno_gpu.h OUT_PKT2(struct msm_ringbuffer *ring)
ring              269 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	adreno_wait_ring(ring, 1);
ring              270 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	OUT_RING(ring, CP_TYPE2_PKT);
ring              274 drivers/gpu/drm/msm/adreno/adreno_gpu.h OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
ring              276 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	adreno_wait_ring(ring, cnt+1);
ring              277 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
ring              296 drivers/gpu/drm/msm/adreno/adreno_gpu.h OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
ring              298 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	adreno_wait_ring(ring, cnt + 1);
ring              299 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	OUT_RING(ring, PKT4(regindx, cnt));
ring              303 drivers/gpu/drm/msm/adreno/adreno_gpu.h OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
ring              305 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	adreno_wait_ring(ring, cnt + 1);
ring              306 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
ring              366 drivers/gpu/drm/msm/adreno/adreno_gpu.h static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
ring              368 drivers/gpu/drm/msm/adreno/adreno_gpu.h 	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
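
The OUT_PKT* helpers quoted above first reserve space with adreno_wait_ring() and then emit a single header dword that packs the payload count and, for type3/type7 packets, the opcode; type7 additionally protects both fields with parity bits. A standalone sketch of the header packing — the CP_TYPE* constants and the exact type7 field widths are assumptions modeled on the quoted shifts, not verified register-level values:

    #include <stdint.h>

    #define CP_TYPE3_PKT 0xC0000000u   /* assumed header type bits */
    #define CP_TYPE7_PKT 0x70000000u

    /* Single-bit odd parity over a value, as type7 headers require. */
    static uint32_t pm4_parity(uint32_t val)
    {
        uint32_t p = 0;
        while (val) { p ^= val & 1u; val >>= 1; }
        return p;
    }

    /* Type3: count-1 in bits 29:16, opcode in bits 15:8, per the quoted
     * OUT_PKT3 line. */
    static uint32_t pkt3_header(uint8_t opcode, uint16_t cnt)
    {
        return CP_TYPE3_PKT | ((uint32_t)(cnt - 1) << 16) |
               ((uint32_t)opcode << 8);
    }

    /* Type7: count in the low bits with its parity at bit 15, opcode
     * above it with its own parity bit (field widths assumed). */
    static uint32_t pkt7_header(uint8_t opcode, uint16_t cnt)
    {
        return CP_TYPE7_PKT | (cnt & 0x3fffu) | (pm4_parity(cnt) << 15) |
               (((uint32_t)opcode & 0x7fu) << 16) |
               (pm4_parity(opcode) << 23);
    }
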
ring              143 drivers/gpu/drm/msm/msm_gem.h 	struct msm_ringbuffer *ring;
ring               49 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->ring = gpu->rb[queue->prio];
ring              241 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
ring              408 drivers/gpu/drm/msm/msm_gem_submit.c 	struct msm_ringbuffer *ring;
ring              438 drivers/gpu/drm/msm/msm_gem_submit.c 	ring = gpu->rb[queue->prio];
ring              439 drivers/gpu/drm/msm/msm_gem_submit.c 	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
ring              455 drivers/gpu/drm/msm/msm_gem_submit.c 		if (!dma_fence_match_context(in_fence, ring->fctx->context))
ring              565 drivers/gpu/drm/msm/msm_gem_submit.c 	submit->fence = msm_fence_alloc(ring->fctx);
ring              389 drivers/gpu/drm/msm/msm_gpu.c static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
ring              394 drivers/gpu/drm/msm/msm_gpu.c 	list_for_each_entry(submit, &ring->submits, node) {
ring              398 drivers/gpu/drm/msm/msm_gpu.c 		msm_update_fence(submit->ring->fctx,
ring              404 drivers/gpu/drm/msm/msm_gpu.c find_submit(struct msm_ringbuffer *ring, uint32_t fence)
ring              408 drivers/gpu/drm/msm/msm_gpu.c 	WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
ring              410 drivers/gpu/drm/msm/msm_gpu.c 	list_for_each_entry(submit, &ring->submits, node)
ring              472 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
ring              474 drivers/gpu/drm/msm/msm_gpu.c 		uint32_t fence = ring->memptrs->fence;
ring              480 drivers/gpu/drm/msm/msm_gpu.c 		if (ring == cur_ring)
ring              483 drivers/gpu/drm/msm/msm_gpu.c 		update_fences(gpu, ring, fence);
ring              499 drivers/gpu/drm/msm/msm_gpu.c 			struct msm_ringbuffer *ring = gpu->rb[i];
ring              501 drivers/gpu/drm/msm/msm_gpu.c 			list_for_each_entry(submit, &ring->submits, node)
ring              523 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
ring              524 drivers/gpu/drm/msm/msm_gpu.c 	uint32_t fence = ring->memptrs->fence;
ring              526 drivers/gpu/drm/msm/msm_gpu.c 	if (fence != ring->hangcheck_fence) {
ring              528 drivers/gpu/drm/msm/msm_gpu.c 		ring->hangcheck_fence = fence;
ring              529 drivers/gpu/drm/msm/msm_gpu.c 	} else if (fence < ring->seqno) {
ring              531 drivers/gpu/drm/msm/msm_gpu.c 		ring->hangcheck_fence = fence;
ring              533 drivers/gpu/drm/msm/msm_gpu.c 				gpu->name, ring->id);
ring              537 drivers/gpu/drm/msm/msm_gpu.c 				gpu->name, ring->seqno);
ring              543 drivers/gpu/drm/msm/msm_gpu.c 	if (ring->seqno > ring->hangcheck_fence)
ring              652 drivers/gpu/drm/msm/msm_gpu.c static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
ring              660 drivers/gpu/drm/msm/msm_gpu.c 	stats = &ring->memptrs->stats[index];
ring              697 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_ringbuffer *ring = gpu->rb[i];
ring              699 drivers/gpu/drm/msm/msm_gpu.c 		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
ring              701 drivers/gpu/drm/msm/msm_gpu.c 				retire_submit(gpu, ring, submit);
ring              734 drivers/gpu/drm/msm/msm_gpu.c 	struct msm_ringbuffer *ring = submit->ring;
ring              743 drivers/gpu/drm/msm/msm_gpu.c 	submit->seqno = ++ring->seqno;
ring              745 drivers/gpu/drm/msm/msm_gpu.c 	list_add_tail(&submit->node, &ring->submits);
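
The msm_gpu.c excerpts implement hang detection: msm_gpu_submit() stamps each submit with ++ring->seqno, the GPU writes the last retired seqno into ring->memptrs->fence, and the hangcheck timer compares the two between ticks. If the fence stopped moving while a higher seqno is outstanding, the ring is declared hung. A hedged model of that decision (field names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct ring_state {
        uint32_t seqno;            /* last seqno handed to a submit */
        uint32_t completed_fence;  /* last fence the GPU wrote back */
        uint32_t hangcheck_fence;  /* fence seen at the previous tick */
    };

    /* Returns true when no progress was made although work is pending,
     * mirroring the fence/seqno comparison in the quoted handler. */
    static bool ring_is_hung(struct ring_state *r)
    {
        if (r->completed_fence != r->hangcheck_fence) {
            r->hangcheck_fence = r->completed_fence;  /* progress made */
            return false;
        }
        return r->completed_fence < r->seqno;  /* stuck, work pending */
    }
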
ring               50 drivers/gpu/drm/msm/msm_gpu.h 	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
ring              149 drivers/gpu/drm/msm/msm_gpu.h 		struct msm_ringbuffer *ring = gpu->rb[i];
ring              151 drivers/gpu/drm/msm/msm_gpu.h 		if (ring->seqno > ring->memptrs->fence)
ring              200 drivers/gpu/drm/msm/msm_gpu.h 	} ring[MSM_GPU_MAX_RINGS];
ring               46 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->ringid = submit->ring->id;
ring               73 drivers/gpu/drm/msm/msm_gpu_trace.h 		    __entry->ringid = submit->ring->id;
ring               13 drivers/gpu/drm/msm/msm_ringbuffer.c 	struct msm_ringbuffer *ring;
ring               20 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring               21 drivers/gpu/drm/msm/msm_ringbuffer.c 	if (!ring) {
ring               26 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->gpu = gpu;
ring               27 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->id = id;
ring               29 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
ring               30 drivers/gpu/drm/msm/msm_ringbuffer.c 		MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
ring               32 drivers/gpu/drm/msm/msm_ringbuffer.c 	if (IS_ERR(ring->start)) {
ring               33 drivers/gpu/drm/msm/msm_ringbuffer.c 		ret = PTR_ERR(ring->start);
ring               34 drivers/gpu/drm/msm/msm_ringbuffer.c 		ring->start = 0;
ring               38 drivers/gpu/drm/msm/msm_ringbuffer.c 	msm_gem_object_set_name(ring->bo, "ring%d", id);
ring               40 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring               41 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->next  = ring->start;
ring               42 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->cur   = ring->start;
ring               44 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->memptrs = memptrs;
ring               45 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->memptrs_iova = memptrs_iova;
ring               47 drivers/gpu/drm/msm/msm_ringbuffer.c 	INIT_LIST_HEAD(&ring->submits);
ring               48 drivers/gpu/drm/msm/msm_ringbuffer.c 	spin_lock_init(&ring->lock);
ring               50 drivers/gpu/drm/msm/msm_ringbuffer.c 	snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
ring               52 drivers/gpu/drm/msm/msm_ringbuffer.c 	ring->fctx = msm_fence_context_alloc(gpu->dev, name);
ring               54 drivers/gpu/drm/msm/msm_ringbuffer.c 	return ring;
ring               57 drivers/gpu/drm/msm/msm_ringbuffer.c 	msm_ringbuffer_destroy(ring);
ring               61 drivers/gpu/drm/msm/msm_ringbuffer.c void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
ring               63 drivers/gpu/drm/msm/msm_ringbuffer.c 	if (IS_ERR_OR_NULL(ring))
ring               66 drivers/gpu/drm/msm/msm_ringbuffer.c 	msm_fence_context_free(ring->fctx);
ring               68 drivers/gpu/drm/msm/msm_ringbuffer.c 	msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
ring               70 drivers/gpu/drm/msm/msm_ringbuffer.c 	kfree(ring);
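
msm_ringbuffer_new above allocates the bookkeeping struct, maps a GEM buffer for the ring storage, computes end as start plus the size in dwords, and on any failure funnels into msm_ringbuffer_destroy, which tolerates error and NULL pointers (IS_ERR_OR_NULL) so a single unwind path works from every stage of construction. A small sketch of that tolerant-destroy idiom in plain C:

    #include <stdlib.h>

    struct ring {
        unsigned int *start, *end, *next, *cur;
    };

    /* Safe to call with NULL, so the constructor's error path can pass
     * whatever it has; the kernel version also accepts ERR_PTR values. */
    static void ring_destroy(struct ring *r)
    {
        if (!r)
            return;
        free(r->start);   /* stands in for msm_gem_kernel_put() */
        free(r);
    }
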
ring               12 drivers/gpu/drm/msm/msm_ringbuffer.h #define rbmemptr(ring, member)  \
ring               13 drivers/gpu/drm/msm/msm_ringbuffer.h 	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
ring               15 drivers/gpu/drm/msm/msm_ringbuffer.h #define rbmemptr_stats(ring, index, member) \
ring               16 drivers/gpu/drm/msm/msm_ringbuffer.h 	(rbmemptr((ring), stats) + \
ring               53 drivers/gpu/drm/msm/msm_ringbuffer.h void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
ring               58 drivers/gpu/drm/msm/msm_ringbuffer.h OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
ring               64 drivers/gpu/drm/msm/msm_ringbuffer.h 	if (ring->next == ring->end)
ring               65 drivers/gpu/drm/msm/msm_ringbuffer.h 		ring->next = ring->start;
ring               66 drivers/gpu/drm/msm/msm_ringbuffer.h 	*(ring->next++) = data;
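
OUT_RING in msm_ringbuffer.h wraps ring->next back to ring->start when it reaches ring->end before storing the dword, and get_wptr in adreno_gpu.h reduces the pointer difference modulo the ring size in dwords. A tiny standalone model of that write path (names and the size parameter are illustrative):

    #include <stdint.h>

    struct ring {
        uint32_t *start, *end, *next;  /* [start, end) backing store */
    };

    static void out_ring(struct ring *r, uint32_t data)
    {
        if (r->next == r->end)   /* wrap before writing, as quoted */
            r->next = r->start;
        *(r->next++) = data;
    }

    /* Dword offset of the write pointer, as get_wptr() computes it. */
    static uint32_t wptr_dwords(const struct ring *r, uint32_t size_dw)
    {
        return (uint32_t)(r->next - r->start) % size_dw;
    }
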
ring               43 drivers/gpu/drm/qxl/qxl_cmd.c 	struct ring	       *ring;
ring               51 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_ring_free(struct qxl_ring *ring)
ring               53 drivers/gpu/drm/qxl/qxl_cmd.c 	kfree(ring);
ring               56 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_ring_init_hdr(struct qxl_ring *ring)
ring               58 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->ring->header.notify_on_prod = ring->n_elements;
ring               69 drivers/gpu/drm/qxl/qxl_cmd.c 	struct qxl_ring *ring;
ring               71 drivers/gpu/drm/qxl/qxl_cmd.c 	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
ring               72 drivers/gpu/drm/qxl/qxl_cmd.c 	if (!ring)
ring               75 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->ring = (struct ring *)header;
ring               76 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->element_size = element_size;
ring               77 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->n_elements = n_elements;
ring               78 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->prod_notify = prod_notify;
ring               79 drivers/gpu/drm/qxl/qxl_cmd.c 	ring->push_event = push_event;
ring               81 drivers/gpu/drm/qxl/qxl_cmd.c 		qxl_ring_init_hdr(ring);
ring               82 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock_init(&ring->lock);
ring               83 drivers/gpu/drm/qxl/qxl_cmd.c 	return ring;
ring               86 drivers/gpu/drm/qxl/qxl_cmd.c static int qxl_check_header(struct qxl_ring *ring)
ring               89 drivers/gpu/drm/qxl/qxl_cmd.c 	struct qxl_ring_header *header = &(ring->ring->header);
ring               92 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock_irqsave(&ring->lock, flags);
ring               96 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              100 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_check_idle(struct qxl_ring *ring)
ring              103 drivers/gpu/drm/qxl/qxl_cmd.c 	struct qxl_ring_header *header = &(ring->ring->header);
ring              106 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock_irqsave(&ring->lock, flags);
ring              108 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              112 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_ring_push(struct qxl_ring *ring,
ring              115 drivers/gpu/drm/qxl/qxl_cmd.c 	struct qxl_ring_header *header = &(ring->ring->header);
ring              120 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock_irqsave(&ring->lock, flags);
ring              124 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring              126 drivers/gpu/drm/qxl/qxl_cmd.c 			while (!qxl_check_header(ring))
ring              130 drivers/gpu/drm/qxl/qxl_cmd.c 				ret = wait_event_interruptible(*ring->push_event,
ring              131 drivers/gpu/drm/qxl/qxl_cmd.c 							       qxl_check_header(ring));
ring              135 drivers/gpu/drm/qxl/qxl_cmd.c 				wait_event(*ring->push_event,
ring              136 drivers/gpu/drm/qxl/qxl_cmd.c 					   qxl_check_header(ring));
ring              140 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_lock_irqsave(&ring->lock, flags);
ring              143 drivers/gpu/drm/qxl/qxl_cmd.c 	idx = header->prod & (ring->n_elements - 1);
ring              144 drivers/gpu/drm/qxl/qxl_cmd.c 	elt = ring->ring->elements + idx * ring->element_size;
ring              146 drivers/gpu/drm/qxl/qxl_cmd.c 	memcpy((void *)elt, new_elt, ring->element_size);
ring              153 drivers/gpu/drm/qxl/qxl_cmd.c 		outb(0, ring->prod_notify);
ring              155 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              159 drivers/gpu/drm/qxl/qxl_cmd.c static bool qxl_ring_pop(struct qxl_ring *ring,
ring              162 drivers/gpu/drm/qxl/qxl_cmd.c 	volatile struct qxl_ring_header *header = &(ring->ring->header);
ring              167 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock_irqsave(&ring->lock, flags);
ring              170 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring              174 drivers/gpu/drm/qxl/qxl_cmd.c 	idx = header->cons & (ring->n_elements - 1);
ring              175 drivers/gpu/drm/qxl/qxl_cmd.c 	ring_elt = ring->ring->elements + idx * ring->element_size;
ring              177 drivers/gpu/drm/qxl/qxl_cmd.c 	memcpy(element, (void *)ring_elt, ring->element_size);
ring              181 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock_irqrestore(&ring->lock, flags);
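
The qxl ring above is a single-producer/single-consumer ring shared with the device: the header carries free-running prod and cons counters, the slot index comes from masking with n_elements - 1 (so n_elements must be a power of two), and after advancing prod the driver kicks the device through the prod_notify I/O port. A hedged userspace model of push and pop over monotonic counters:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define N_ELEMENTS   64u   /* power of two, so the mask trick works */
    #define ELEMENT_SIZE 16u

    struct ring {
        struct { uint32_t prod, cons; } header;
        uint8_t elements[N_ELEMENTS * ELEMENT_SIZE];
    };

    static bool ring_push(struct ring *r, const void *elt)
    {
        if (r->header.prod - r->header.cons == N_ELEMENTS)
            return false;                         /* full */
        uint32_t idx = r->header.prod & (N_ELEMENTS - 1);
        memcpy(&r->elements[idx * ELEMENT_SIZE], elt, ELEMENT_SIZE);
        r->header.prod++;   /* the driver then outb()s prod_notify */
        return true;
    }

    static bool ring_pop(struct ring *r, void *elt)
    {
        if (r->header.cons == r->header.prod)
            return false;                         /* empty */
        uint32_t idx = r->header.cons & (N_ELEMENTS - 1);
        memcpy(elt, &r->elements[idx * ELEMENT_SIZE], ELEMENT_SIZE);
        r->header.cons++;
        return true;
    }
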
ring              301 drivers/gpu/drm/qxl/qxl_drv.h void qxl_ring_free(struct qxl_ring *ring);
ring              302 drivers/gpu/drm/qxl/qxl_drv.h void qxl_ring_init_hdr(struct qxl_ring *ring);
ring              303 drivers/gpu/drm/qxl/qxl_drv.h int qxl_check_idle(struct qxl_ring *ring);
ring              389 drivers/gpu/drm/qxl/qxl_drv.h int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
ring              213 drivers/gpu/drm/r128/r128_cce.c 		if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
ring              239 drivers/gpu/drm/r128/r128_cce.c 		   dev_priv->cce_mode | dev_priv->ring.size_l2qw
ring              255 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.tail = 0;
ring              546 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
ring              547 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
ring              549 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.size = init->ring_size;
ring              550 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
ring              552 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
ring              554 drivers/gpu/drm/r128/r128_cce.c 	dev_priv->ring.high_mark = 128;
ring              874 drivers/gpu/drm/r128/r128_cce.c 	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring              879 drivers/gpu/drm/r128/r128_cce.c 		if (ring->space >= n)
ring               88 drivers/gpu/drm/r128/r128_drv.h 	drm_r128_ring_buffer_t ring;
ring              428 drivers/gpu/drm/r128/r128_drv.h 	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring              429 drivers/gpu/drm/r128/r128_drv.h 	ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
ring              430 drivers/gpu/drm/r128/r128_drv.h 	if (ring->space <= 0)
ring              431 drivers/gpu/drm/r128/r128_drv.h 		ring->space += ring->size;
ring              448 drivers/gpu/drm/r128/r128_drv.h 	drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;		\
ring              449 drivers/gpu/drm/r128/r128_drv.h 	if (ring->space < ring->high_mark) {				\
ring              452 drivers/gpu/drm/r128/r128_drv.h 			if (ring->space >= ring->high_mark)		\
ring              487 drivers/gpu/drm/r128/r128_drv.h 	int write, _nr; unsigned int tail_mask; volatile u32 *ring;
ring              492 drivers/gpu/drm/r128/r128_drv.h 	if (dev_priv->ring.space <= (n) * sizeof(u32)) {		\
ring              496 drivers/gpu/drm/r128/r128_drv.h 	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
ring              497 drivers/gpu/drm/r128/r128_drv.h 	ring = dev_priv->ring.start;					\
ring              498 drivers/gpu/drm/r128/r128_drv.h 	write = dev_priv->ring.tail;					\
ring              499 drivers/gpu/drm/r128/r128_drv.h 	tail_mask = dev_priv->ring.tail_mask;				\
ring              512 drivers/gpu/drm/r128/r128_drv.h 			 write, dev_priv->ring.tail);			\
ring              514 drivers/gpu/drm/r128/r128_drv.h 		memcpy(dev_priv->ring.end,				\
ring              515 drivers/gpu/drm/r128/r128_drv.h 		       dev_priv->ring.start,				\
ring              517 drivers/gpu/drm/r128/r128_drv.h 	if (((dev_priv->ring.tail + _nr) & tail_mask) != write)		\
ring              520 drivers/gpu/drm/r128/r128_drv.h 			((dev_priv->ring.tail + _nr) & tail_mask),	\
ring              523 drivers/gpu/drm/r128/r128_drv.h 		dev_priv->ring.tail = write;				\
ring              529 drivers/gpu/drm/r128/r128_drv.h 			 dev_priv->ring.tail);				\
ring              531 drivers/gpu/drm/r128/r128_drv.h 	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail);	\
ring              539 drivers/gpu/drm/r128/r128_drv.h 	ring[write++] = cpu_to_le32(x);					\
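
r128_drv.h computes free space as (head - tail) * sizeof(u32) and, because the head may have wrapped behind the tail, adds the ring size whenever the result is non-positive; the BEGIN_RING macro then refuses to emit until the space covers the request. The space calculation in isolation (byte units, as in the macro):

    #include <stdint.h>

    /* head and tail are dword indices into a ring of `size` bytes. */
    static int32_t ring_space_bytes(uint32_t head, uint32_t tail,
                                    uint32_t size)
    {
        int32_t space = (int32_t)(head - tail) * (int32_t)sizeof(uint32_t);
        if (space <= 0)   /* head is behind tail: unwrap */
            space += (int32_t)size;
        return space;
    }
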
ring             3459 drivers/gpu/drm/radeon/cik.c int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             3472 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_lock(rdev, ring, 3);
ring             3474 drivers/gpu/drm/radeon/cik.c 		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
ring             3478 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring             3479 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
ring             3480 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring             3481 drivers/gpu/drm/radeon/cik.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             3490 drivers/gpu/drm/radeon/cik.c 		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
ring             3493 drivers/gpu/drm/radeon/cik.c 			  ring->idx, scratch, tmp);
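
cik_ring_test validates a freshly initialized ring by emitting a SET_UCONFIG_REG packet that stores 0xDEADBEEF into a scratch register, then polling the register until the value appears or a timeout elapses. The poll-with-timeout shape, reduced to a self-contained sketch (the scratch location is modeled as a plain volatile word):

    #include <stdbool.h>
    #include <stdint.h>

    /* Poll until the GPU echoes the magic value back, bounded by
     * usec_timeout iterations (the driver udelay(1)s per iteration). */
    static bool ring_test_poll(volatile uint32_t *scratch,
                               unsigned int usec_timeout)
    {
        for (unsigned int i = 0; i < usec_timeout; i++) {
            if (*scratch == 0xDEADBEEF)
                return true;   /* the ring really executes packets */
        }
        return false;
    }
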
ring             3511 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[ridx];
ring             3514 drivers/gpu/drm/radeon/cik.c 	switch (ring->idx) {
ring             3518 drivers/gpu/drm/radeon/cik.c 		switch (ring->me) {
ring             3520 drivers/gpu/drm/radeon/cik.c 			ref_and_mask = CP2 << ring->pipe;
ring             3523 drivers/gpu/drm/radeon/cik.c 			ref_and_mask = CP6 << ring->pipe;
ring             3534 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             3535 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
ring             3538 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
ring             3539 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
ring             3540 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, ref_and_mask);
ring             3541 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, ref_and_mask);
ring             3542 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x20); /* poll interval */
ring             3557 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring             3558 drivers/gpu/drm/radeon/cik.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring             3563 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             3564 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             3568 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring             3569 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
ring             3571 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, fence->seq - 1);
ring             3572 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             3575 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             3576 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             3580 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring             3581 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
ring             3582 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, fence->seq);
ring             3583 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             3598 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring             3599 drivers/gpu/drm/radeon/cik.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring             3602 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
ring             3603 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
ring             3607 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
ring             3608 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring             3609 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, upper_32_bits(addr));
ring             3610 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, fence->seq);
ring             3611 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
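
Both fence paths above emit an end-of-pipe event that makes the GPU write the fence sequence number to memory once prior work has drained; the gfx variant deliberately emits the EOP twice (first with seq - 1, then with the real seq) as a workaround, and in each packet the 64-bit destination is split across consecutive dwords. A sketch of that address/sequence packing (the payload layout is illustrative):

    #include <stdint.h>

    /* Three payload dwords of an EOP-style fence: dword-aligned low
     * address, the high 16 address bits (as the quoted masks show),
     * then the sequence number the GPU will store. */
    static void pack_fence_payload(uint32_t out[3], uint64_t addr,
                                   uint32_t seq)
    {
        out[0] = (uint32_t)addr & 0xfffffffcu;
        out[1] = (uint32_t)(addr >> 32) & 0xffffu;
        out[2] = seq;
    }
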
ring             3626 drivers/gpu/drm/radeon/cik.c 			     struct radeon_ring *ring,
ring             3633 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
ring             3634 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring             3635 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
ring             3637 drivers/gpu/drm/radeon/cik.c 	if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
ring             3639 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             3640 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, 0x0);
ring             3667 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring             3676 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
ring             3684 drivers/gpu/drm/radeon/cik.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring             3694 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
ring             3695 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, control);
ring             3696 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, lower_32_bits(src_offset));
ring             3697 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, upper_32_bits(src_offset));
ring             3698 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, lower_32_bits(dst_offset));
ring             3699 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, upper_32_bits(dst_offset));
ring             3700 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, cur_size_in_bytes);
ring             3705 drivers/gpu/drm/radeon/cik.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring             3707 drivers/gpu/drm/radeon/cik.c 		radeon_ring_unlock_undo(rdev, ring);
ring             3712 drivers/gpu/drm/radeon/cik.c 	radeon_ring_unlock_commit(rdev, ring, false);
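
cik_copy_cpdma splits a blit into fixed-size pieces, reserving ring space for all of them up front (7 dwords per DMA_DATA packet plus sync/fence overhead, hence num_loops * 7 + 18); each packet carries the split 64-bit source and destination plus the chunk length. A sketch of the chunking loop — the per-packet byte limit and the write_dw callback are assumptions for illustration:

    #include <stdint.h>

    #define MAX_CHUNK 0x1fffffu   /* assumed per-packet byte limit */

    static void emit_copy(uint64_t src, uint64_t dst, uint64_t size,
                          void (*write_dw)(uint32_t))
    {
        while (size) {
            uint32_t cur = size > MAX_CHUNK ? MAX_CHUNK : (uint32_t)size;
            size -= cur;
            write_dw(0);                      /* packet header, elided */
            write_dw(0);                      /* control word, elided */
            write_dw((uint32_t)src);          /* lower_32_bits(src) */
            write_dw((uint32_t)(src >> 32));  /* upper_32_bits(src) */
            write_dw((uint32_t)dst);
            write_dw((uint32_t)(dst >> 32));
            write_dw(cur);                    /* bytes in this piece */
            src += cur;
            dst += cur;
        }
    }
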
ring             3735 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring             3736 drivers/gpu/drm/radeon/cik.c 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
ring             3741 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3742 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, 0);
ring             3747 drivers/gpu/drm/radeon/cik.c 		if (ring->rptr_save_reg) {
ring             3748 drivers/gpu/drm/radeon/cik.c 			next_rptr = ring->wptr + 3 + 4;
ring             3749 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
ring             3750 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, ((ring->rptr_save_reg -
ring             3752 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, next_rptr);
ring             3754 drivers/gpu/drm/radeon/cik.c 			next_rptr = ring->wptr + 5 + 4;
ring             3755 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             3756 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
ring             3757 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring             3758 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
ring             3759 drivers/gpu/drm/radeon/cik.c 			radeon_ring_write(ring, next_rptr);
ring             3767 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, header);
ring             3768 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
ring             3769 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             3770 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, control);
ring             3783 drivers/gpu/drm/radeon/cik.c int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             3797 drivers/gpu/drm/radeon/cik.c 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
ring             3835 drivers/gpu/drm/radeon/cik.c 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
ring             3885 drivers/gpu/drm/radeon/cik.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             3987 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3997 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
ring             4004 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             4005 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             4006 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x8000);
ring             4007 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x8000);
ring             4010 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             4011 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             4013 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
ring             4014 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x80000000);
ring             4015 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x80000000);
ring             4018 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, cik_default_state[i]);
ring             4020 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             4021 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             4024 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             4025 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             4027 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             4028 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x00000316);
ring             4029 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
ring             4030 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
ring             4032 drivers/gpu/drm/radeon/cik.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             4048 drivers/gpu/drm/radeon/cik.c 	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             4062 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring;
ring             4082 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             4083 drivers/gpu/drm/radeon/cik.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             4092 drivers/gpu/drm/radeon/cik.c 	ring->wptr = 0;
ring             4093 drivers/gpu/drm/radeon/cik.c 	WREG32(CP_RB0_WPTR, ring->wptr);
ring             4108 drivers/gpu/drm/radeon/cik.c 	rb_addr = ring->gpu_addr >> 8;
ring             4114 drivers/gpu/drm/radeon/cik.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
ring             4115 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             4117 drivers/gpu/drm/radeon/cik.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
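
cik_cp_gfx_resume encodes the ring size for the RB control register as order_base_2(ring_size / 8), i.e. log2 of the size in qwords, and programs the base address as gpu_addr >> 8, so the buffer must be 256-byte aligned. The two encodings in isolation (a sketch; register field positions are omitted):

    #include <stdint.h>

    /* Stand-in for the kernel's order_base_2(): smallest n with 2^n >= v. */
    static uint32_t order_base_2(uint32_t v)
    {
        uint32_t order = 0;
        while ((1u << order) < v)
            order++;
        return order;
    }

    static void rb_encode(uint32_t ring_size_bytes, uint64_t gpu_addr,
                          uint32_t *bufsz_log2_qwords, uint32_t *base)
    {
        *bufsz_log2_qwords = order_base_2(ring_size_bytes / 8);
        *base = (uint32_t)(gpu_addr >> 8);  /* 256-byte aligned base */
    }
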
ring             4128 drivers/gpu/drm/radeon/cik.c 		     struct radeon_ring *ring)
ring             4133 drivers/gpu/drm/radeon/cik.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring             4141 drivers/gpu/drm/radeon/cik.c 		     struct radeon_ring *ring)
ring             4147 drivers/gpu/drm/radeon/cik.c 		      struct radeon_ring *ring)
ring             4149 drivers/gpu/drm/radeon/cik.c 	WREG32(CP_RB0_WPTR, ring->wptr);
ring             4154 drivers/gpu/drm/radeon/cik.c 			 struct radeon_ring *ring)
ring             4159 drivers/gpu/drm/radeon/cik.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring             4162 drivers/gpu/drm/radeon/cik.c 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
ring             4172 drivers/gpu/drm/radeon/cik.c 			 struct radeon_ring *ring)
ring             4178 drivers/gpu/drm/radeon/cik.c 		wptr = rdev->wb.wb[ring->wptr_offs/4];
ring             4181 drivers/gpu/drm/radeon/cik.c 		cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
ring             4191 drivers/gpu/drm/radeon/cik.c 			  struct radeon_ring *ring)
ring             4194 drivers/gpu/drm/radeon/cik.c 	rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
ring             4195 drivers/gpu/drm/radeon/cik.c 	WDOORBELL32(ring->doorbell_index, ring->wptr);
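
Compute queues publish their write pointer through a doorbell: cik_compute_set_wptr first mirrors the new wptr into the CPU-visible writeback page and then writes it to the doorbell slot so the firmware notices. A sketch of that two-step publish (endianness handling is omitted):

    #include <stdint.h>

    /* wb is the writeback page; doorbell is the MMIO doorbell slot. */
    static void compute_set_wptr(uint32_t *wb, uint32_t wptr_offs_bytes,
                                 volatile uint32_t *doorbell, uint32_t wptr)
    {
        wb[wptr_offs_bytes / 4] = wptr;  /* coherent mirror for readback */
        *doorbell = wptr;                /* WDOORBELL32 equivalent */
    }
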
ring             4199 drivers/gpu/drm/radeon/cik.c 			     struct radeon_ring *ring)
ring             4203 drivers/gpu/drm/radeon/cik.c 	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
ring             4241 drivers/gpu/drm/radeon/cik.c 		cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
ring             4242 drivers/gpu/drm/radeon/cik.c 		cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
ring             4246 drivers/gpu/drm/radeon/cik.c 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             4247 drivers/gpu/drm/radeon/cik.c 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             4358 drivers/gpu/drm/radeon/cik.c 		if (rdev->ring[idx].mqd_obj) {
ring             4359 drivers/gpu/drm/radeon/cik.c 			r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
ring             4363 drivers/gpu/drm/radeon/cik.c 			radeon_bo_unpin(rdev->ring[idx].mqd_obj);
ring             4364 drivers/gpu/drm/radeon/cik.c 			radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
ring             4366 drivers/gpu/drm/radeon/cik.c 			radeon_bo_unref(&rdev->ring[idx].mqd_obj);
ring             4367 drivers/gpu/drm/radeon/cik.c 			rdev->ring[idx].mqd_obj = NULL;
ring             4577 drivers/gpu/drm/radeon/cik.c 		if (rdev->ring[idx].mqd_obj == NULL) {
ring             4582 drivers/gpu/drm/radeon/cik.c 					     NULL, &rdev->ring[idx].mqd_obj);
ring             4589 drivers/gpu/drm/radeon/cik.c 		r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
ring             4594 drivers/gpu/drm/radeon/cik.c 		r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
ring             4601 drivers/gpu/drm/radeon/cik.c 		r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
ring             4619 drivers/gpu/drm/radeon/cik.c 		cik_srbm_select(rdev, rdev->ring[idx].me,
ring             4620 drivers/gpu/drm/radeon/cik.c 				rdev->ring[idx].pipe,
ring             4621 drivers/gpu/drm/radeon/cik.c 				rdev->ring[idx].queue, 0);
ring             4665 drivers/gpu/drm/radeon/cik.c 		hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
ring             4677 drivers/gpu/drm/radeon/cik.c 			order_base_2(rdev->ring[idx].ring_size / 8);
ring             4719 drivers/gpu/drm/radeon/cik.c 				DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
ring             4731 drivers/gpu/drm/radeon/cik.c 		rdev->ring[idx].wptr = 0;
ring             4732 drivers/gpu/drm/radeon/cik.c 		mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
ring             4747 drivers/gpu/drm/radeon/cik.c 		radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
ring             4748 drivers/gpu/drm/radeon/cik.c 		radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
ring             4750 drivers/gpu/drm/radeon/cik.c 		rdev->ring[idx].ready = true;
ring             4751 drivers/gpu/drm/radeon/cik.c 		r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
ring             4753 drivers/gpu/drm/radeon/cik.c 			rdev->ring[idx].ready = false;
ring             5263 drivers/gpu/drm/radeon/cik.c bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             5270 drivers/gpu/drm/radeon/cik.c 		radeon_ring_lockup_update(rdev, ring);
ring             5273 drivers/gpu/drm/radeon/cik.c 	return radeon_ring_test_lockup(rdev, ring);
ring             5692 drivers/gpu/drm/radeon/cik.c void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring             5695 drivers/gpu/drm/radeon/cik.c 	int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
ring             5697 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5698 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             5701 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring,
ring             5704 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring,
ring             5707 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5708 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, pd_addr >> 12);
ring             5711 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5712 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             5714 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
ring             5715 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5716 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, VMID(vm_id));
ring             5718 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
ring             5719 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             5721 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, SH_MEM_BASES >> 2);
ring             5722 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5724 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0); /* SH_MEM_BASES */
ring             5725 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, SH_MEM_CONFIG_GFX_DEFAULT); /* SH_MEM_CONFIG */
ring             5726 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
ring             5727 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
ring             5729 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5730 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             5732 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
ring             5733 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5734 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, VMID(0));
ring             5737 drivers/gpu/drm/radeon/cik.c 	cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
ring             5740 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5741 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
ring             5743 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring             5744 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5745 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 1 << vm_id);
ring             5748 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             5749 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
ring             5752 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring             5753 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0);
ring             5754 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0); /* ref */
ring             5755 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0); /* mask */
ring             5756 drivers/gpu/drm/radeon/cik.c 	radeon_ring_write(ring, 0x20); /* poll interval */
ring             5761 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             5762 drivers/gpu/drm/radeon/cik.c 		radeon_ring_write(ring, 0x0);
ring             7081 drivers/gpu/drm/radeon/cik.c 		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             7083 drivers/gpu/drm/radeon/cik.c 		if (ring->me == 1) {
ring             7084 drivers/gpu/drm/radeon/cik.c 			switch (ring->pipe) {
ring             7098 drivers/gpu/drm/radeon/cik.c 				DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
ring             7101 drivers/gpu/drm/radeon/cik.c 		} else if (ring->me == 2) {
ring             7102 drivers/gpu/drm/radeon/cik.c 			switch (ring->pipe) {
ring             7116 drivers/gpu/drm/radeon/cik.c 				DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
ring             7120 drivers/gpu/drm/radeon/cik.c 			DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
ring             7124 drivers/gpu/drm/radeon/cik.c 		struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             7126 drivers/gpu/drm/radeon/cik.c 		if (ring->me == 1) {
ring             7127 drivers/gpu/drm/radeon/cik.c 			switch (ring->pipe) {
ring             7141 drivers/gpu/drm/radeon/cik.c 				DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
ring             7144 drivers/gpu/drm/radeon/cik.c 		} else if (ring->me == 2) {
ring             7145 drivers/gpu/drm/radeon/cik.c 			switch (ring->pipe) {
ring             7159 drivers/gpu/drm/radeon/cik.c 				DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
ring             7163 drivers/gpu/drm/radeon/cik.c 			DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
ring             7555 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             7556 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             7591 drivers/gpu/drm/radeon/cik.c 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
ring             7592 drivers/gpu/drm/radeon/cik.c 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring             7593 drivers/gpu/drm/radeon/cik.c 		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
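
The interrupt handler decodes each IH ring vector from packed little-endian dwords: source id in the low byte of dword 0, source data in the low 28 bits of dword 1, and the originating ring id in the low byte of dword 2 (field widths taken from the masks in the quoted lines). The unpack as a sketch:

    #include <stdint.h>

    struct ih_vector {
        uint8_t  src_id;
        uint32_t src_data;
        uint8_t  ring_id;
    };

    /* `ih` points at the current vector in the already byte-swapped
     * interrupt ring. */
    static struct ih_vector ih_decode(const uint32_t *ih)
    {
        struct ih_vector v;
        v.src_id   = ih[0] & 0xff;
        v.src_data = ih[1] & 0x0fffffff;
        v.ring_id  = ih[2] & 0xff;
        return v;
    }
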
ring             8147 drivers/gpu/drm/radeon/cik.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             8148 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             8176 drivers/gpu/drm/radeon/cik.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             8181 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring;
ring             8184 drivers/gpu/drm/radeon/cik.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             8187 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             8188 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             8219 drivers/gpu/drm/radeon/cik.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
ring             8220 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
ring             8221 drivers/gpu/drm/radeon/cik.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
ring             8222 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
ring             8255 drivers/gpu/drm/radeon/cik.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
ring             8256 drivers/gpu/drm/radeon/cik.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
ring             8261 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring;
ring             8264 drivers/gpu/drm/radeon/cik.c 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
ring             8267 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring             8268 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
ring             8273 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring             8274 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
ring             8297 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring;
ring             8416 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             8417 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             8424 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             8425 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
ring             8429 drivers/gpu/drm/radeon/cik.c 	ring->me = 1; /* first MEC */
ring             8430 drivers/gpu/drm/radeon/cik.c 	ring->pipe = 0; /* first pipe */
ring             8431 drivers/gpu/drm/radeon/cik.c 	ring->queue = 0; /* first queue */
ring             8432 drivers/gpu/drm/radeon/cik.c 	ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
ring             8435 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             8436 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
ring             8441 drivers/gpu/drm/radeon/cik.c 	ring->me = 1; /* first MEC */
ring             8442 drivers/gpu/drm/radeon/cik.c 	ring->pipe = 0; /* first pipe */
ring             8443 drivers/gpu/drm/radeon/cik.c 	ring->queue = 1; /* second queue */
ring             8444 drivers/gpu/drm/radeon/cik.c 	ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
ring             8446 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             8447 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
ring             8452 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             8453 drivers/gpu/drm/radeon/cik.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
ring             8570 drivers/gpu/drm/radeon/cik.c 	struct radeon_ring *ring;
ring             8643 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             8644 drivers/gpu/drm/radeon/cik.c 	ring->ring_obj = NULL;
ring             8645 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             8647 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             8648 drivers/gpu/drm/radeon/cik.c 	ring->ring_obj = NULL;
ring             8649 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             8650 drivers/gpu/drm/radeon/cik.c 	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
ring             8654 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             8655 drivers/gpu/drm/radeon/cik.c 	ring->ring_obj = NULL;
ring             8656 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             8657 drivers/gpu/drm/radeon/cik.c 	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
ring             8661 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             8662 drivers/gpu/drm/radeon/cik.c 	ring->ring_obj = NULL;
ring             8663 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, ring, 256 * 1024);
ring             8665 drivers/gpu/drm/radeon/cik.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             8666 drivers/gpu/drm/radeon/cik.c 	ring->ring_obj = NULL;
ring             8667 drivers/gpu/drm/radeon/cik.c 	r600_ring_init(rdev, ring, 256 * 1024);
ring               64 drivers/gpu/drm/radeon/cik_sdma.c 			   struct radeon_ring *ring)
ring               69 drivers/gpu/drm/radeon/cik_sdma.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring               71 drivers/gpu/drm/radeon/cik_sdma.c 		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring               91 drivers/gpu/drm/radeon/cik_sdma.c 			   struct radeon_ring *ring)
ring               95 drivers/gpu/drm/radeon/cik_sdma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              112 drivers/gpu/drm/radeon/cik_sdma.c 		       struct radeon_ring *ring)
ring              116 drivers/gpu/drm/radeon/cik_sdma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              121 drivers/gpu/drm/radeon/cik_sdma.c 	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
ring              136 drivers/gpu/drm/radeon/cik_sdma.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              137 drivers/gpu/drm/radeon/cik_sdma.c 	u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
ring              140 drivers/gpu/drm/radeon/cik_sdma.c 		u32 next_rptr = ring->wptr + 5;
ring              144 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
ring              145 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring              146 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
ring              147 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, 1); /* number of DWs to follow */
ring              148 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, next_rptr);
ring              152 drivers/gpu/drm/radeon/cik_sdma.c 	while ((ring->wptr & 7) != 4)
ring              153 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
ring              154 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
ring              155 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
ring              156 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
ring              157 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, ib->length_dw);
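
cik_sdma_ring_ib_execute pads the ring with NOP packets until (wptr & 7) == 4, so that the 4-dword INDIRECT_BUFFER packet that follows ends exactly on an 8-dword boundary; the IB base itself is masked with 0xffffffe0 because it must be 32-byte aligned. The padding rule as a sketch:

    #include <stdint.h>

    /* Emit NOPs until the next 4-dword IB packet will finish on an
     * 8-dword boundary, mirroring the "(wptr & 7) != 4" loop above. */
    static uint32_t pad_for_ib(uint32_t wptr, void (*write_nop)(void))
    {
        while ((wptr & 7) != 4) {
            write_nop();
            wptr++;
        }
        return wptr;
    }
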
ring              172 drivers/gpu/drm/radeon/cik_sdma.c 	struct radeon_ring *ring = &rdev->ring[ridx];
ring              182 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
ring              183 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
ring              184 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
ring              185 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, ref_and_mask); /* reference */
ring              186 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, ref_and_mask); /* mask */
ring              187 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
ring              203 drivers/gpu/drm/radeon/cik_sdma.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring              204 drivers/gpu/drm/radeon/cik_sdma.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring              207 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
ring              208 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring              209 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, upper_32_bits(addr));
ring              210 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, fence->seq);
ring              212 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
ring              214 drivers/gpu/drm/radeon/cik_sdma.c 	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
ring              229 drivers/gpu/drm/radeon/cik_sdma.c 				  struct radeon_ring *ring,
ring              236 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
ring              237 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, addr & 0xfffffff8);
ring              238 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, upper_32_bits(addr));
ring              269 drivers/gpu/drm/radeon/cik_sdma.c 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
ring              270 drivers/gpu/drm/radeon/cik_sdma.c 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
ring              367 drivers/gpu/drm/radeon/cik_sdma.c 	struct radeon_ring *ring;
ring              375 drivers/gpu/drm/radeon/cik_sdma.c 			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring              379 drivers/gpu/drm/radeon/cik_sdma.c 			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring              388 drivers/gpu/drm/radeon/cik_sdma.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              408 drivers/gpu/drm/radeon/cik_sdma.c 		WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
ring              409 drivers/gpu/drm/radeon/cik_sdma.c 		WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
ring              411 drivers/gpu/drm/radeon/cik_sdma.c 		ring->wptr = 0;
ring              412 drivers/gpu/drm/radeon/cik_sdma.c 		WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
ring              424 drivers/gpu/drm/radeon/cik_sdma.c 		ring->ready = true;
ring              426 drivers/gpu/drm/radeon/cik_sdma.c 		r = radeon_ring_test(rdev, ring->idx, ring);
ring              428 drivers/gpu/drm/radeon/cik_sdma.c 			ring->ready = false;
ring              561 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
ring              562 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
ring              587 drivers/gpu/drm/radeon/cik_sdma.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring              596 drivers/gpu/drm/radeon/cik_sdma.c 	r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
ring              604 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring              611 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
ring              612 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, cur_size_in_bytes);
ring              613 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, 0); /* src/dst endian swap */
ring              614 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, lower_32_bits(src_offset));
ring              615 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, upper_32_bits(src_offset));
ring              616 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, lower_32_bits(dst_offset));
ring              617 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, upper_32_bits(dst_offset));
ring              622 drivers/gpu/drm/radeon/cik_sdma.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring              624 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_unlock_undo(rdev, ring);
ring              629 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              646 drivers/gpu/drm/radeon/cik_sdma.c 		       struct radeon_ring *ring)
ring              654 drivers/gpu/drm/radeon/cik_sdma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              664 drivers/gpu/drm/radeon/cik_sdma.c 	r = radeon_ring_lock(rdev, ring, 5);
ring              666 drivers/gpu/drm/radeon/cik_sdma.c 		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
ring              669 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
ring              670 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, lower_32_bits(gpu_addr));
ring              671 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, upper_32_bits(gpu_addr));
ring              672 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 1); /* number of DWs to follow */
ring              673 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring              674 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              684 drivers/gpu/drm/radeon/cik_sdma.c 		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
ring              687 drivers/gpu/drm/radeon/cik_sdma.c 			  ring->idx, tmp);
ring              702 drivers/gpu/drm/radeon/cik_sdma.c int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              711 drivers/gpu/drm/radeon/cik_sdma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              721 drivers/gpu/drm/radeon/cik_sdma.c 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
ring              757 drivers/gpu/drm/radeon/cik_sdma.c 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
ring              775 drivers/gpu/drm/radeon/cik_sdma.c bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring              780 drivers/gpu/drm/radeon/cik_sdma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              786 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_lockup_update(rdev, ring);
ring              789 drivers/gpu/drm/radeon/cik_sdma.c 	return radeon_ring_test_lockup(rdev, ring);
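
The SDMA self-tests push 0xDEADBEEF through the engine into a writeback slot and poll for it, while cik_sdma_is_lockup first checks whether the engine is busy (updating the ring's liveness record if it is idle) and otherwise defers to radeon_ring_test_lockup. A hedged model of progress-based lockup detection — the timestamp bookkeeping is illustrative, not the driver's exact mechanism:

    #include <stdbool.h>
    #include <stdint.h>

    struct lockup_state {
        uint32_t last_rptr;
        uint64_t last_activity_ms;
    };

    /* Declare a lockup only if the read pointer has not advanced for
     * longer than timeout_ms; any movement resets the clock. */
    static bool ring_test_lockup(struct lockup_state *s, uint32_t rptr,
                                 uint64_t now_ms, uint64_t timeout_ms)
    {
        if (rptr != s->last_rptr) {
            s->last_rptr = rptr;
            s->last_activity_ms = now_ms;
            return false;
        }
        return (now_ms - s->last_activity_ms) > timeout_ms;
    }
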
ring              947 drivers/gpu/drm/radeon/cik_sdma.c void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              953 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              955 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
ring              957 drivers/gpu/drm/radeon/cik_sdma.c 		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
ring              959 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, pd_addr >> 12);
ring              962 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              963 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
ring              964 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, VMID(vm_id));
ring              966 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              967 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SH_MEM_BASES >> 2);
ring              968 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0);
ring              970 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              971 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
ring              972 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0);
ring              974 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              975 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
ring              976 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 1);
ring              978 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              979 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
ring              980 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0);
ring              982 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              983 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
ring              984 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, VMID(0));
ring              987 drivers/gpu/drm/radeon/cik_sdma.c 	cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
ring              990 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
ring              991 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring              992 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 1 << vm_id);
ring              994 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
ring              995 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring              996 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0);
ring              997 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0); /* reference */
ring              998 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, 0); /* mask */
ring              999 drivers/gpu/drm/radeon/cik_sdma.c 	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
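
The cik_dma_vm_flush excerpts above show the SDMA idiom for privileged register writes: every VM and SH_MEM register update is a three-dword SRBM_WRITE packet (header, dword offset of the register, value). Below is a minimal userspace sketch of that emit pattern; ring_t, ring_write, the opcode value and the SDMA_PACKET bit layout are illustrative assumptions inferred from the usage above, not quotes from cikd.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy ring: a power-of-two dword buffer with a write pointer. */
    typedef struct { uint32_t buf[256]; uint32_t wptr; uint32_t ptr_mask; } ring_t;

    static void ring_write(ring_t *r, uint32_t v) { r->buf[r->wptr++ & r->ptr_mask] = v; }

    /* Assumed packet header layout: op in [7:0], sub-op in [15:8], extra bits
     * in [31:16].  Treat both the layout and the opcode as illustrative. */
    #define SDMA_PACKET(op, sub, e) ((((e) & 0xFFFF) << 16) | (((sub) & 0xFF) << 8) | ((op) & 0xFF))
    #define SDMA_OPCODE_SRBM_WRITE 0xE /* hypothetical value for the sketch */

    /* Emit one privileged register write, mirroring the three-dword pattern
     * above: header, register offset in dwords, value. */
    static void emit_srbm_write(ring_t *r, uint32_t reg_byte_off, uint32_t val)
    {
            ring_write(r, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); /* byte enables */
            ring_write(r, reg_byte_off >> 2); /* registers are dword-addressed in the packet */
            ring_write(r, val);
    }

    int main(void)
    {
            ring_t r = { .ptr_mask = 255 };
            emit_srbm_write(&r, 0x1430 /* stand-in register offset */, 1 << 3);
            printf("emitted %u dwords, header 0x%08x\n", r.wptr, r.buf[0]);
            return 0;
    }
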
ring              217 drivers/gpu/drm/radeon/evergreen.c 				     int ring, u32 cp_int_cntl);
ring             2932 drivers/gpu/drm/radeon/evergreen.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring             2936 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
ring             2937 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 1);
ring             2939 drivers/gpu/drm/radeon/evergreen.c 	if (ring->rptr_save_reg) {
ring             2940 drivers/gpu/drm/radeon/evergreen.c 		next_rptr = ring->wptr + 3 + 4;
ring             2941 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2942 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, ((ring->rptr_save_reg - 
ring             2944 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, next_rptr);
ring             2946 drivers/gpu/drm/radeon/evergreen.c 		next_rptr = ring->wptr + 5 + 4;
ring             2947 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
ring             2948 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring             2949 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
ring             2950 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, next_rptr);
ring             2951 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, 0);
ring             2954 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             2955 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring,
ring             2960 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
ring             2961 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, ib->length_dw);
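
In the evergreen ib_execute excerpts, next_rptr is the ring position the CP will have consumed once the launch sequence retires, saved either to a scratch register or to write-back memory so a hang dump shows how far the engine got. The offsets follow from the packet sizes visible above: 3 dwords for the SET_CONFIG_REG write, 5 for MEM_WRITE, 4 for INDIRECT_BUFFER. A small sketch of the arithmetic (constant names are mine):

    #include <stdint.h>
    #include <stdio.h>

    /* Packet sizes taken from the excerpts: a SET_CONFIG_REG write is 3
     * dwords, a MEM_WRITE is 5, an INDIRECT_BUFFER is 4. */
    enum { DW_SET_CONFIG = 3, DW_MEM_WRITE = 5, DW_IB = 4 };

    /* Predict where rptr will land once the remaining launch packets have
     * been consumed, before actually emitting them. */
    static uint32_t next_rptr(uint32_t wptr, int have_scratch_reg)
    {
            return have_scratch_reg ? wptr + DW_SET_CONFIG + DW_IB   /* 3 + 4 */
                                    : wptr + DW_MEM_WRITE + DW_IB;   /* 5 + 4 */
    }

    int main(void)
    {
            printf("scratch path: %u, writeback path: %u\n",
                   next_rptr(100, 1), next_rptr(100, 0));
            return 0;
    }
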
ring             2999 drivers/gpu/drm/radeon/evergreen.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3003 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_lock(rdev, ring, 7);
ring             3008 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
ring             3009 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x1);
ring             3010 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x0);
ring             3011 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
ring             3012 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
ring             3013 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0);
ring             3014 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0);
ring             3015 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             3020 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
ring             3027 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3028 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             3031 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_write(ring, evergreen_default_state[i]);
ring             3033 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3034 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             3037 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             3038 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0);
ring             3041 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xc0026f00);
ring             3042 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000000);
ring             3043 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000000);
ring             3044 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000000);
ring             3047 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xc0036f00);
ring             3048 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000bc4);
ring             3049 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xffffffff);
ring             3050 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xffffffff);
ring             3051 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xffffffff);
ring             3053 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0xc0026900);
ring             3054 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000316);
ring             3055 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
ring             3056 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_write(ring, 0x00000010); /*  */
ring             3058 drivers/gpu/drm/radeon/evergreen.c 	radeon_ring_unlock_commit(rdev, ring, false);
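
evergreen_cp_start illustrates the submission discipline used throughout these files: reserve an exact dword count with radeon_ring_lock (7 for ME_INITIALIZE, evergreen_default_size + 19 for the clear-state preamble), emit precisely that many writes, then radeon_ring_unlock_commit. A toy model that enforces the reservation, with hypothetical names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t buf[1024]; uint32_t wptr, reserved; } ring_t;

    /* Reserve space up front so a miscounted packet fails loudly instead of
     * overrunning the ring (radeon_ring_lock plays this role in the driver). */
    static int ring_lock(ring_t *r, uint32_t ndw) { r->reserved = r->wptr + ndw; return 0; }

    static void ring_write(ring_t *r, uint32_t v)
    {
            assert(r->wptr < r->reserved); /* emitted more dwords than reserved */
            r->buf[r->wptr++ % 1024] = v;
    }

    static void ring_commit(ring_t *r) { r->reserved = 0; /* kick wptr to hw here */ }

    int main(void)
    {
            ring_t r = {0};
            ring_lock(&r, 7);            /* matches the 7-dword ME_INITIALIZE above */
            for (int i = 0; i < 7; i++)
                    ring_write(&r, i);
            ring_commit(&r);
            printf("committed %u dwords\n", r.wptr);
            return 0;
    }
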
ring             3065 drivers/gpu/drm/radeon/evergreen.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3083 drivers/gpu/drm/radeon/evergreen.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             3098 drivers/gpu/drm/radeon/evergreen.c 	ring->wptr = 0;
ring             3099 drivers/gpu/drm/radeon/evergreen.c 	WREG32(CP_RB_WPTR, ring->wptr);
ring             3117 drivers/gpu/drm/radeon/evergreen.c 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
ring             3121 drivers/gpu/drm/radeon/evergreen.c 	ring->ready = true;
ring             3122 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
ring             3124 drivers/gpu/drm/radeon/evergreen.c 		ring->ready = false;
ring             4091 drivers/gpu/drm/radeon/evergreen.c bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             4098 drivers/gpu/drm/radeon/evergreen.c 		radeon_ring_lockup_update(rdev, ring);
ring             4101 drivers/gpu/drm/radeon/evergreen.c 	return radeon_ring_test_lockup(rdev, ring);
ring             4740 drivers/gpu/drm/radeon/evergreen.c 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
ring             4741 drivers/gpu/drm/radeon/evergreen.c 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring             4951 drivers/gpu/drm/radeon/evergreen.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             4952 drivers/gpu/drm/radeon/evergreen.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             4975 drivers/gpu/drm/radeon/evergreen.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             4980 drivers/gpu/drm/radeon/evergreen.c 	struct radeon_ring *ring;
ring             4983 drivers/gpu/drm/radeon/evergreen.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             4986 drivers/gpu/drm/radeon/evergreen.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             4987 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             5001 drivers/gpu/drm/radeon/evergreen.c 	struct radeon_ring *ring;
ring             5080 drivers/gpu/drm/radeon/evergreen.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             5081 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             5086 drivers/gpu/drm/radeon/evergreen.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             5087 drivers/gpu/drm/radeon/evergreen.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
ring             5255 drivers/gpu/drm/radeon/evergreen.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
ring             5256 drivers/gpu/drm/radeon/evergreen.c 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
ring             5258 drivers/gpu/drm/radeon/evergreen.c 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
ring             5259 drivers/gpu/drm/radeon/evergreen.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
ring               44 drivers/gpu/drm/radeon/evergreen_dma.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring               45 drivers/gpu/drm/radeon/evergreen_dma.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring               47 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
ring               48 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring               49 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
ring               50 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, fence->seq);
ring               52 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
ring               54 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
ring               55 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
ring               56 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, 1);
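
evergreen_dma_fence_ring_emit stores the fence's sequence number at a per-ring GPU address and raises a trap interrupt; CPU-side completion then reduces to comparing that write-back word against the expected sequence. A userspace model of both halves; the wraparound-safe signed compare is a standard technique offered here as an illustration, not a quote from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* GPU side: a FENCE packet tells the engine "when you get here, store
     * seq at addr".  We model the engine as an immediate store. */
    static void gpu_execute_fence(volatile uint32_t *fence_slot, uint32_t seq)
    {
            *fence_slot = seq;
    }

    /* CPU side: a fence is signaled once the memory word has reached its
     * sequence number.  Signed difference keeps this correct across 32-bit
     * wraparound. */
    static int fence_signaled(volatile uint32_t *fence_slot, uint32_t seq)
    {
            return (int32_t)(*fence_slot - seq) >= 0;
    }

    int main(void)
    {
            volatile uint32_t slot = 0;
            uint32_t seq = 42;
            printf("before: %d\n", fence_signaled(&slot, seq));
            gpu_execute_fence(&slot, seq);
            printf("after:  %d\n", fence_signaled(&slot, seq));
            return 0;
    }
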
ring               70 drivers/gpu/drm/radeon/evergreen_dma.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring               73 drivers/gpu/drm/radeon/evergreen_dma.c 		u32 next_rptr = ring->wptr + 4;
ring               77 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
ring               78 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring               79 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
ring               80 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, next_rptr);
ring               86 drivers/gpu/drm/radeon/evergreen_dma.c 	while ((ring->wptr & 7) != 5)
ring               87 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
ring               88 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
ring               89 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
ring               90 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
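
The while ((ring->wptr & 7) != 5) loop above pads the ring with NOPs until the write pointer sits at slot 5 of an 8-dword group, so the 3-dword INDIRECT_BUFFER packet that follows ends exactly on an 8-dword boundary (5 + 3 = 8), matching the DMA engine's fetch granularity. A self-contained sketch; the IB header value is a stand-in:

    #include <stdint.h>
    #include <stdio.h>

    #define NOP 0u

    typedef struct { uint32_t buf[64]; uint32_t wptr; } ring_t;
    static void ring_write(ring_t *r, uint32_t v) { r->buf[r->wptr++ & 63] = v; }

    /* Pad with NOPs until wptr % 8 == 5, then emit a 3-dword IB packet; the
     * packet then ends exactly on an 8-dword boundary (5 + 3 == 8). */
    static void emit_aligned_ib(ring_t *r, uint64_t ib_addr, uint32_t len_dw)
    {
            while ((r->wptr & 7) != 5)
                    ring_write(r, NOP);
            ring_write(r, 0xCAFE0000);                      /* stand-in IB header */
            ring_write(r, (uint32_t)ib_addr & 0xFFFFFFE0);  /* 32-byte aligned base */
            ring_write(r, (len_dw << 12) | (uint32_t)((ib_addr >> 32) & 0xFF));
    }

    int main(void)
    {
            ring_t r = { .wptr = 2 };
            emit_aligned_ib(&r, 0x100000, 16);
            printf("packet ends at wptr=%u (multiple of 8: %s)\n",
                   r.wptr, (r.wptr & 7) == 0 ? "yes" : "no");
            return 0;
    }
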
ring              116 drivers/gpu/drm/radeon/evergreen_dma.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring              125 drivers/gpu/drm/radeon/evergreen_dma.c 	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
ring              133 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring              140 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
ring              141 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, dst_offset & 0xfffffffc);
ring              142 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, src_offset & 0xfffffffc);
ring              143 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
ring              144 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
ring              149 drivers/gpu/drm/radeon/evergreen_dma.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring              151 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_unlock_undo(rdev, ring);
ring              156 drivers/gpu/drm/radeon/evergreen_dma.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              171 drivers/gpu/drm/radeon/evergreen_dma.c bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring              176 drivers/gpu/drm/radeon/evergreen_dma.c 		radeon_ring_lockup_update(rdev, ring);
ring              179 drivers/gpu/drm/radeon/evergreen_dma.c 	return radeon_ring_test_lockup(rdev, ring);
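
Every *_is_lockup function in this listing follows the same two-step watchdog: if the engine is idle or the read pointer moved, refresh the last-activity snapshot via radeon_ring_lockup_update and report no lockup; otherwise radeon_ring_test_lockup checks whether the pointer has been stuck longer than a timeout. A minimal model, with state layout and timeout chosen for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical watchdog state; the driver keeps the equivalent per ring. */
    struct lockup_state { uint32_t last_rptr; time_t last_activity; };

    #define LOCKUP_TIMEOUT_SEC 10 /* illustrative */

    static void lockup_update(struct lockup_state *s, uint32_t rptr)
    {
            s->last_rptr = rptr;
            s->last_activity = time(NULL);
    }

    static bool test_lockup(struct lockup_state *s, uint32_t rptr)
    {
            if (rptr != s->last_rptr) {     /* forward progress: refresh snapshot */
                    lockup_update(s, rptr);
                    return false;
            }
            return time(NULL) - s->last_activity > LOCKUP_TIMEOUT_SEC;
    }

    int main(void)
    {
            struct lockup_state s;
            lockup_update(&s, 0);
            printf("lockup: %d\n", test_lockup(&s, 0)); /* stuck, but not yet timed out */
            return 0;
    }
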
ring             1393 drivers/gpu/drm/radeon/ni.c 			      int ring, u32 cp_int_cntl)
ring             1395 drivers/gpu/drm/radeon/ni.c 	WREG32(SRBM_GFX_CNTL, RINGID(ring));
ring             1405 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring             1406 drivers/gpu/drm/radeon/ni.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring             1411 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             1412 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
ring             1413 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xFFFFFFFF);
ring             1414 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1415 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 10); /* poll interval */
ring             1417 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             1418 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
ring             1419 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring             1420 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
ring             1421 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, fence->seq);
ring             1422 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1427 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring             1428 drivers/gpu/drm/radeon/ni.c 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
ring             1433 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
ring             1434 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 1);
ring             1436 drivers/gpu/drm/radeon/ni.c 	if (ring->rptr_save_reg) {
ring             1437 drivers/gpu/drm/radeon/ni.c 		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
ring             1438 drivers/gpu/drm/radeon/ni.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             1439 drivers/gpu/drm/radeon/ni.c 		radeon_ring_write(ring, ((ring->rptr_save_reg - 
ring             1441 drivers/gpu/drm/radeon/ni.c 		radeon_ring_write(ring, next_rptr);
ring             1444 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             1445 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring,
ring             1450 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
ring             1451 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
ring             1454 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             1455 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
ring             1456 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xFFFFFFFF);
ring             1457 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1458 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
ring             1470 drivers/gpu/drm/radeon/ni.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             1475 drivers/gpu/drm/radeon/ni.c 			struct radeon_ring *ring)
ring             1480 drivers/gpu/drm/radeon/ni.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring             1482 drivers/gpu/drm/radeon/ni.c 		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
ring             1484 drivers/gpu/drm/radeon/ni.c 		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
ring             1494 drivers/gpu/drm/radeon/ni.c 			struct radeon_ring *ring)
ring             1498 drivers/gpu/drm/radeon/ni.c 	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
ring             1500 drivers/gpu/drm/radeon/ni.c 	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
ring             1509 drivers/gpu/drm/radeon/ni.c 			 struct radeon_ring *ring)
ring             1511 drivers/gpu/drm/radeon/ni.c 	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
ring             1512 drivers/gpu/drm/radeon/ni.c 		WREG32(CP_RB0_WPTR, ring->wptr);
ring             1514 drivers/gpu/drm/radeon/ni.c 	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
ring             1515 drivers/gpu/drm/radeon/ni.c 		WREG32(CP_RB1_WPTR, ring->wptr);
ring             1518 drivers/gpu/drm/radeon/ni.c 		WREG32(CP_RB2_WPTR, ring->wptr);
ring             1552 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             1555 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_lock(rdev, ring, 7);
ring             1560 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
ring             1561 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x1);
ring             1562 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x0);
ring             1563 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
ring             1564 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
ring             1565 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1566 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1567 drivers/gpu/drm/radeon/ni.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             1571 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
ring             1578 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             1579 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             1582 drivers/gpu/drm/radeon/ni.c 		radeon_ring_write(ring, cayman_default_state[i]);
ring             1584 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             1585 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             1588 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             1589 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             1592 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xc0026f00);
ring             1593 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000000);
ring             1594 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000000);
ring             1595 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000000);
ring             1598 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xc0036f00);
ring             1599 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000bc4);
ring             1600 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xffffffff);
ring             1601 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xffffffff);
ring             1602 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xffffffff);
ring             1604 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0xc0026900);
ring             1605 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000316);
ring             1606 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
ring             1607 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x00000010); /*  */
ring             1609 drivers/gpu/drm/radeon/ni.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             1618 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             1620 drivers/gpu/drm/radeon/ni.c 	radeon_ring_fini(rdev, ring);
ring             1621 drivers/gpu/drm/radeon/ni.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             1661 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring;
ring             1693 drivers/gpu/drm/radeon/ni.c 		ring = &rdev->ring[ridx[i]];
ring             1694 drivers/gpu/drm/radeon/ni.c 		rb_cntl = order_base_2(ring->ring_size / 8);
ring             1709 drivers/gpu/drm/radeon/ni.c 		ring = &rdev->ring[ridx[i]];
ring             1710 drivers/gpu/drm/radeon/ni.c 		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
ring             1715 drivers/gpu/drm/radeon/ni.c 		ring = &rdev->ring[ridx[i]];
ring             1718 drivers/gpu/drm/radeon/ni.c 		ring->wptr = 0;
ring             1720 drivers/gpu/drm/radeon/ni.c 		WREG32(cp_rb_wptr[i], ring->wptr);
ring             1728 drivers/gpu/drm/radeon/ni.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
ring             1729 drivers/gpu/drm/radeon/ni.c 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             1730 drivers/gpu/drm/radeon/ni.c 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             1732 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             1734 drivers/gpu/drm/radeon/ni.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             1735 drivers/gpu/drm/radeon/ni.c 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             1736 drivers/gpu/drm/radeon/ni.c 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             1991 drivers/gpu/drm/radeon/ni.c bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             1998 drivers/gpu/drm/radeon/ni.c 		radeon_ring_lockup_update(rdev, ring);
ring             2001 drivers/gpu/drm/radeon/ni.c 	return radeon_ring_test_lockup(rdev, ring);
ring             2023 drivers/gpu/drm/radeon/ni.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             2024 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             2047 drivers/gpu/drm/radeon/ni.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             2052 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring;
ring             2055 drivers/gpu/drm/radeon/ni.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             2058 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             2059 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             2091 drivers/gpu/drm/radeon/ni.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
ring             2092 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
ring             2093 drivers/gpu/drm/radeon/ni.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
ring             2094 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
ring             2127 drivers/gpu/drm/radeon/ni.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
ring             2128 drivers/gpu/drm/radeon/ni.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
ring             2133 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring;
ring             2136 drivers/gpu/drm/radeon/ni.c 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
ring             2139 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring             2140 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
ring             2145 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring             2146 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
ring             2160 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2254 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             2259 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             2260 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
ring             2265 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             2266 drivers/gpu/drm/radeon/ni.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
ring             2356 drivers/gpu/drm/radeon/ni.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2424 drivers/gpu/drm/radeon/ni.c 	ring->ring_obj = NULL;
ring             2425 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             2427 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             2428 drivers/gpu/drm/radeon/ni.c 	ring->ring_obj = NULL;
ring             2429 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, ring, 64 * 1024);
ring             2431 drivers/gpu/drm/radeon/ni.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             2432 drivers/gpu/drm/radeon/ni.c 	ring->ring_obj = NULL;
ring             2433 drivers/gpu/drm/radeon/ni.c 	r600_ring_init(rdev, ring, 64 * 1024);
ring             2693 drivers/gpu/drm/radeon/ni.c void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring             2696 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
ring             2697 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, pd_addr >> 12);
ring             2700 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
ring             2701 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x1);
ring             2704 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
ring             2705 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 1 << vm_id);
ring             2708 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             2709 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
ring             2711 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring             2712 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0);
ring             2713 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0); /* ref */
ring             2714 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0); /* mask */
ring             2715 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x20); /* poll interval */
ring             2718 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             2719 drivers/gpu/drm/radeon/ni.c 	radeon_ring_write(ring, 0x0);
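
cayman_vm_flush ends with a WAIT_REG_MEM packet that makes the CP itself poll VM_INVALIDATE_REQUEST; with function 0 ("always"), reference 0 and mask 0 it acts as a fixed delay, and the following PFP_SYNC_ME keeps the prefetcher from racing ahead of the ME. The per-poll predicate the CP evaluates is roughly the following; the function encodings mirror the comments conventionally found in the radeon packet headers and should be treated as an assumption:

    #include <stdbool.h>
    #include <stdint.h>

    /* One WAIT_REG_MEM evaluation, as parameterized in the packet above:
     * compare (value & mask) against ref using the selected function.
     * Function 0 is "always true", which the flush uses purely as a delay. */
    static bool wait_reg_mem_done(uint32_t value, uint32_t ref, uint32_t mask,
                                  unsigned func)
    {
            uint32_t v = value & mask;
            switch (func) {
            case 0: return true;        /* always */
            case 1: return v < ref;
            case 2: return v <= ref;
            case 3: return v == ref;
            case 4: return v != ref;
            case 5: return v >= ref;
            default: return v > ref;
            }
    }

    int main(void)
    {
            /* func 0 with ref = mask = 0, as emitted by the flush above. */
            return wait_reg_mem_done(0, 0, 0, 0) ? 0 : 1;
    }
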
ring               54 drivers/gpu/drm/radeon/ni_dma.c 			     struct radeon_ring *ring)
ring               59 drivers/gpu/drm/radeon/ni_dma.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring               61 drivers/gpu/drm/radeon/ni_dma.c 		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring               81 drivers/gpu/drm/radeon/ni_dma.c 			   struct radeon_ring *ring)
ring               85 drivers/gpu/drm/radeon/ni_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              102 drivers/gpu/drm/radeon/ni_dma.c 			 struct radeon_ring *ring)
ring              106 drivers/gpu/drm/radeon/ni_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              111 drivers/gpu/drm/radeon/ni_dma.c 	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
ring              125 drivers/gpu/drm/radeon/ni_dma.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              126 drivers/gpu/drm/radeon/ni_dma.c 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
ring              129 drivers/gpu/drm/radeon/ni_dma.c 		u32 next_rptr = ring->wptr + 4;
ring              133 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
ring              134 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring              135 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
ring              136 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_write(ring, next_rptr);
ring              142 drivers/gpu/drm/radeon/ni_dma.c 	while ((ring->wptr & 7) != 5)
ring              143 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
ring              144 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
ring              145 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
ring              146 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
ring              175 drivers/gpu/drm/radeon/ni_dma.c 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
ring              176 drivers/gpu/drm/radeon/ni_dma.c 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
ring              189 drivers/gpu/drm/radeon/ni_dma.c 	struct radeon_ring *ring;
ring              197 drivers/gpu/drm/radeon/ni_dma.c 			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring              201 drivers/gpu/drm/radeon/ni_dma.c 			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring              210 drivers/gpu/drm/radeon/ni_dma.c 		rb_bufsz = order_base_2(ring->ring_size / 4);
ring              230 drivers/gpu/drm/radeon/ni_dma.c 		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
ring              243 drivers/gpu/drm/radeon/ni_dma.c 		ring->wptr = 0;
ring              244 drivers/gpu/drm/radeon/ni_dma.c 		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
ring              248 drivers/gpu/drm/radeon/ni_dma.c 		ring->ready = true;
ring              250 drivers/gpu/drm/radeon/ni_dma.c 		r = radeon_ring_test(rdev, ring->idx, ring);
ring              252 drivers/gpu/drm/radeon/ni_dma.c 			ring->ready = false;
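
Both resume paths size the ring-buffer control register from order_base_2(ring->ring_size / N): the hardware takes the buffer size as a log2 field, and the divisor (8 for the CP at evergreen.c:3083, 4 for the DMA engine at ni_dma.c:210) reflects the unit that field counts in. A sketch with the kernel helper reimplemented; the interpretation of the divisor is an inference from the excerpts:

    #include <stdint.h>
    #include <stdio.h>

    /* order_base_2(n): smallest x with 2^x >= n (kernel helper, reimplemented
     * here so the sketch is self-contained). */
    static unsigned order_base_2(uint32_t n)
    {
            unsigned x = 0;
            while ((1u << x) < n)
                    x++;
            return x;
    }

    int main(void)
    {
            uint32_t ring_size = 64 * 1024; /* bytes; the DMA rings above are 64 KiB */
            printf("rb_bufsz (CP path,  /8): %u\n", order_base_2(ring_size / 8));
            printf("rb_bufsz (DMA path, /4): %u\n", order_base_2(ring_size / 4));
            return 0;
    }
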
ring              274 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
ring              275 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
ring              287 drivers/gpu/drm/radeon/ni_dma.c bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring              292 drivers/gpu/drm/radeon/ni_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              298 drivers/gpu/drm/radeon/ni_dma.c 		radeon_ring_lockup_update(rdev, ring);
ring              301 drivers/gpu/drm/radeon/ni_dma.c 	return radeon_ring_test_lockup(rdev, ring);
ring              449 drivers/gpu/drm/radeon/ni_dma.c void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              452 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
ring              453 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
ring              454 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, pd_addr >> 12);
ring              457 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
ring              458 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
ring              459 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, 1);
ring              462 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
ring              463 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
ring              464 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, 1 << vm_id);
ring              467 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
ring              468 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
ring              469 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, 0); /* mask */
ring              470 drivers/gpu/drm/radeon/ni_dma.c 	radeon_ring_write(ring, 0); /* value */
ring              847 drivers/gpu/drm/radeon/r100.c static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
ring              849 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
ring              850 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
ring              852 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
ring              853 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
ring              861 drivers/gpu/drm/radeon/r100.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring              865 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
ring              866 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
ring              867 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
ring              868 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
ring              870 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              871 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
ring              872 drivers/gpu/drm/radeon/r100.c 	r100_ring_hdp_flush(rdev, ring);
ring              874 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
ring              875 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, fence->seq);
ring              876 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
ring              877 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
ring              881 drivers/gpu/drm/radeon/r100.c 			      struct radeon_ring *ring,
ring              896 drivers/gpu/drm/radeon/r100.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring              915 drivers/gpu/drm/radeon/r100.c 	r = radeon_ring_lock(rdev, ring, ndw);
ring              929 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
ring              930 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring,
ring              942 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
ring              943 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
ring              944 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
ring              945 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, 0);
ring              946 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
ring              947 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, num_gpu_pages);
ring              948 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, num_gpu_pages);
ring              949 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
ring              951 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
ring              952 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
ring              953 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              954 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring,
ring              960 drivers/gpu/drm/radeon/r100.c 		radeon_ring_unlock_undo(rdev, ring);
ring              963 drivers/gpu/drm/radeon/r100.c 	radeon_ring_unlock_commit(rdev, ring, false);
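
r100_copy_blit splits a large copy into bounded BITBLT_MULTI packets and reserves ring space for all of them before emitting any (the DMA variant earlier does the same with num_loops * 5 + 11 dwords). The chunking arithmetic, modeled generically; the 8191-unit cap echoes the 0x1fff fields in the excerpt but is used here purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Reserve for loops * dwords-per-packet once, then emit one packet per
     * chunk of at most max_per_packet units. */
    static void copy_chunks(uint32_t total, uint32_t max_per_packet)
    {
            uint32_t loops = DIV_ROUND_UP(total, max_per_packet);
            printf("reserve ring space for %u packets\n", loops);
            while (total) {
                    uint32_t cur = total > max_per_packet ? max_per_packet : total;
                    printf("  copy %u units\n", cur);
                    total -= cur;
            }
    }

    int main(void)
    {
            copy_chunks(20000, 8191);
            return 0;
    }
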
ring              982 drivers/gpu/drm/radeon/r100.c void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
ring              986 drivers/gpu/drm/radeon/r100.c 	r = radeon_ring_lock(rdev, ring, 2);
ring              990 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
ring              991 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring,
ring              996 drivers/gpu/drm/radeon/r100.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             1063 drivers/gpu/drm/radeon/r100.c 		      struct radeon_ring *ring)
ring             1068 drivers/gpu/drm/radeon/r100.c 		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
ring             1076 drivers/gpu/drm/radeon/r100.c 		      struct radeon_ring *ring)
ring             1082 drivers/gpu/drm/radeon/r100.c 		       struct radeon_ring *ring)
ring             1084 drivers/gpu/drm/radeon/r100.c 	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
ring             1112 drivers/gpu/drm/radeon/r100.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             1138 drivers/gpu/drm/radeon/r100.c 	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             1148 drivers/gpu/drm/radeon/r100.c 	ring->align_mask = 16 - 1;
ring             1178 drivers/gpu/drm/radeon/r100.c 	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
ring             1179 drivers/gpu/drm/radeon/r100.c 	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
ring             1183 drivers/gpu/drm/radeon/r100.c 	ring->wptr = 0;
ring             1184 drivers/gpu/drm/radeon/r100.c 	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
ring             1211 drivers/gpu/drm/radeon/r100.c 	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             1212 drivers/gpu/drm/radeon/r100.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
ring             1217 drivers/gpu/drm/radeon/r100.c 	ring->ready = true;
ring             1220 drivers/gpu/drm/radeon/r100.c 	if (!ring->rptr_save_reg /* not resuming from suspend */
ring             1221 drivers/gpu/drm/radeon/r100.c 	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
ring             1222 drivers/gpu/drm/radeon/r100.c 		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
ring             1225 drivers/gpu/drm/radeon/r100.c 			ring->rptr_save_reg = 0;
ring             1238 drivers/gpu/drm/radeon/r100.c 	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
ring             1239 drivers/gpu/drm/radeon/r100.c 	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             1247 drivers/gpu/drm/radeon/r100.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             2518 drivers/gpu/drm/radeon/r100.c bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             2524 drivers/gpu/drm/radeon/r100.c 		radeon_ring_lockup_update(rdev, ring);
ring             2527 drivers/gpu/drm/radeon/r100.c 	return radeon_ring_test_lockup(rdev, ring);
ring             2949 drivers/gpu/drm/radeon/r100.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2953 drivers/gpu/drm/radeon/r100.c 	radeon_ring_free_size(rdev, ring);
ring             2956 drivers/gpu/drm/radeon/r100.c 	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
ring             2960 drivers/gpu/drm/radeon/r100.c 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
ring             2962 drivers/gpu/drm/radeon/r100.c 	if (ring->ready) {
ring             2964 drivers/gpu/drm/radeon/r100.c 			i = (rdp + j) & ring->ptr_mask;
ring             2965 drivers/gpu/drm/radeon/r100.c 			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
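
The debugfs dump above computes how many dwords to print as (rdp + ring->ring_size - wdp) & ring->ptr_mask: standard power-of-two ring arithmetic, where adding the ring size only keeps the intermediate value non-negative and vanishes under the mask. In isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Power-of-two ring occupancy: pointer difference masked to the ring
     * size.  With unsigned arithmetic the masked plain difference is already
     * equivalent to the "add ring size first" form modulo a power of two. */
    static uint32_t ring_used(uint32_t wptr, uint32_t rptr, uint32_t ptr_mask)
    {
            return (wptr - rptr) & ptr_mask;
    }

    int main(void)
    {
            uint32_t mask = 1024 - 1;                        /* 1024-dword ring */
            printf("used: %u\n", ring_used(10, 1000, mask)); /* wrapped case: 34 */
            printf("free: %u\n", mask + 1 - ring_used(10, 1000, mask));
            return 0;
    }
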
ring             3651 drivers/gpu/drm/radeon/r100.c int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             3664 drivers/gpu/drm/radeon/r100.c 	r = radeon_ring_lock(rdev, ring, 2);
ring             3670 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(scratch, 0));
ring             3671 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring             3672 drivers/gpu/drm/radeon/r100.c 	radeon_ring_unlock_commit(rdev, ring, false);
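
r100_ring_test, like the r600 variant later in this listing, uses a uniform probe: seed a scratch register with a junk value, emit a packet that writes 0xDEADBEEF to it, commit, then busy-poll in 1 microsecond steps up to a timeout and report either the elapsed count or the stale value. A userspace model of the poll half; the register is faked and the timeout is illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define TEST_PATTERN 0xDEADBEEF
    #define TIMEOUT_USEC 100000 /* illustrative; the driver uses usec_timeout */

    /* Stand-in for the scratch register; a real driver reads it with RREG32. */
    static volatile uint32_t scratch = 0xCAFEDEAD;

    static int ring_test_poll(void)
    {
            unsigned i;

            /* In the driver, a committed ring packet performs this store. */
            scratch = TEST_PATTERN;

            for (i = 0; i < TIMEOUT_USEC; i++) {
                    if (scratch == TEST_PATTERN)
                            break;
                    usleep(1); /* the driver delays 1 us per iteration */
            }
            if (i < TIMEOUT_USEC) {
                    printf("ring test succeeded in %u usecs\n", i);
                    return 0;
            }
            printf("ring test failed (scratch=0x%08X)\n", scratch);
            return -22; /* -EINVAL, as the driver returns */
    }

    int main(void) { return ring_test_poll(); }
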
ring             3693 drivers/gpu/drm/radeon/r100.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3695 drivers/gpu/drm/radeon/r100.c 	if (ring->rptr_save_reg) {
ring             3696 drivers/gpu/drm/radeon/r100.c 		u32 next_rptr = ring->wptr + 2 + 3;
ring             3697 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
ring             3698 drivers/gpu/drm/radeon/r100.c 		radeon_ring_write(ring, next_rptr);
ring             3701 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
ring             3702 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, ib->gpu_addr);
ring             3703 drivers/gpu/drm/radeon/r100.c 	radeon_ring_write(ring, ib->length_dw);
ring             3706 drivers/gpu/drm/radeon/r100.c int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             3776 drivers/gpu/drm/radeon/r100.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring               89 drivers/gpu/drm/radeon/r200.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring               99 drivers/gpu/drm/radeon/r200.c 	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
ring              105 drivers/gpu/drm/radeon/r200.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              106 drivers/gpu/drm/radeon/r200.c 	radeon_ring_write(ring, (1 << 16));
ring              113 drivers/gpu/drm/radeon/r200.c 		radeon_ring_write(ring, PACKET0(0x720, 2));
ring              114 drivers/gpu/drm/radeon/r200.c 		radeon_ring_write(ring, src_offset);
ring              115 drivers/gpu/drm/radeon/r200.c 		radeon_ring_write(ring, dst_offset);
ring              116 drivers/gpu/drm/radeon/r200.c 		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
ring              120 drivers/gpu/drm/radeon/r200.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              121 drivers/gpu/drm/radeon/r200.c 	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
ring              124 drivers/gpu/drm/radeon/r200.c 		radeon_ring_unlock_undo(rdev, ring);
ring              127 drivers/gpu/drm/radeon/r200.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              217 drivers/gpu/drm/radeon/r300.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring              222 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
ring              223 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, 0);
ring              224 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
ring              225 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, 0);
ring              227 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
ring              228 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
ring              229 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
ring              230 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_ZC_FLUSH);
ring              232 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              233 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
ring              236 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
ring              237 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
ring              239 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
ring              240 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
ring              242 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
ring              243 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, fence->seq);
ring              244 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
ring              245 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
ring              248 drivers/gpu/drm/radeon/r300.c void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
ring              271 drivers/gpu/drm/radeon/r300.c 	r = radeon_ring_lock(rdev, ring, 64);
ring              275 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
ring              276 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              281 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
ring              282 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, gb_tile_config);
ring              283 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              284 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              287 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
ring              288 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
ring              289 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
ring              290 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, 0);
ring              291 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
ring              292 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, 0);
ring              293 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
ring              294 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
ring              295 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
ring              296 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
ring              297 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
ring              298 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              301 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
ring              302 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, 0);
ring              303 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
ring              304 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
ring              305 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
ring              306 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
ring              307 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
ring              308 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              317 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
ring              318 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              326 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
ring              327 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
ring              328 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
ring              329 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              331 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
ring              332 drivers/gpu/drm/radeon/r300.c 	radeon_ring_write(ring,
ring              335 drivers/gpu/drm/radeon/r300.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              214 drivers/gpu/drm/radeon/r420.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring              223 drivers/gpu/drm/radeon/r420.c 	r = radeon_ring_lock(rdev, ring, 8);
ring              225 drivers/gpu/drm/radeon/r420.c 	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
ring              226 drivers/gpu/drm/radeon/r420.c 	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
ring              227 drivers/gpu/drm/radeon/r420.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring              228 drivers/gpu/drm/radeon/r420.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              234 drivers/gpu/drm/radeon/r420.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring              239 drivers/gpu/drm/radeon/r420.c 	r = radeon_ring_lock(rdev, ring, 8);
ring              241 drivers/gpu/drm/radeon/r420.c 	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
ring              242 drivers/gpu/drm/radeon/r420.c 	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
ring              243 drivers/gpu/drm/radeon/r420.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             1922 drivers/gpu/drm/radeon/r600.c bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             1929 drivers/gpu/drm/radeon/r600.c 		radeon_ring_lockup_update(rdev, ring);
ring             1932 drivers/gpu/drm/radeon/r600.c 	return radeon_ring_test_lockup(rdev, ring);
ring             2430 drivers/gpu/drm/radeon/r600.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             2618 drivers/gpu/drm/radeon/r600.c 		      struct radeon_ring *ring)
ring             2623 drivers/gpu/drm/radeon/r600.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring             2631 drivers/gpu/drm/radeon/r600.c 		      struct radeon_ring *ring)
ring             2637 drivers/gpu/drm/radeon/r600.c 		       struct radeon_ring *ring)
ring             2639 drivers/gpu/drm/radeon/r600.c 	WREG32(R600_CP_RB_WPTR, ring->wptr);
ring             2687 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2691 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_lock(rdev, ring, 7);
ring             2696 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
ring             2697 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, 0x1);
ring             2699 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0x0);
ring             2700 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
ring             2702 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0x3);
ring             2703 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
ring             2705 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
ring             2706 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, 0);
ring             2707 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, 0);
ring             2708 drivers/gpu/drm/radeon/r600.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             2717 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2729 drivers/gpu/drm/radeon/r600.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             2743 drivers/gpu/drm/radeon/r600.c 	ring->wptr = 0;
ring             2744 drivers/gpu/drm/radeon/r600.c 	WREG32(CP_RB_WPTR, ring->wptr);
ring             2762 drivers/gpu/drm/radeon/r600.c 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
ring             2766 drivers/gpu/drm/radeon/r600.c 	ring->ready = true;
ring             2767 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
ring             2769 drivers/gpu/drm/radeon/r600.c 		ring->ready = false;
ring             2779 drivers/gpu/drm/radeon/r600.c void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
ring             2787 drivers/gpu/drm/radeon/r600.c 	ring->ring_size = ring_size;
ring             2788 drivers/gpu/drm/radeon/r600.c 	ring->align_mask = 16 - 1;
ring             2790 drivers/gpu/drm/radeon/r600.c 	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
ring             2791 drivers/gpu/drm/radeon/r600.c 		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
ring             2794 drivers/gpu/drm/radeon/r600.c 			ring->rptr_save_reg = 0;
ring             2801 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             2803 drivers/gpu/drm/radeon/r600.c 	radeon_ring_fini(rdev, ring);
ring             2804 drivers/gpu/drm/radeon/r600.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             2822 drivers/gpu/drm/radeon/r600.c int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             2835 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_lock(rdev, ring, 3);
ring             2837 drivers/gpu/drm/radeon/r600.c 		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
ring             2841 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2842 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
ring             2843 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring             2844 drivers/gpu/drm/radeon/r600.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             2852 drivers/gpu/drm/radeon/r600.c 		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
ring             2855 drivers/gpu/drm/radeon/r600.c 			  ring->idx, scratch, tmp);
ring             2869 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring             2877 drivers/gpu/drm/radeon/r600.c 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring             2879 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             2880 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, cp_coher_cntl);
ring             2881 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0xFFFFFFFF);
ring             2882 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0);
ring             2883 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 10); /* poll interval */
ring             2885 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             2886 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
ring             2887 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, lower_32_bits(addr));
ring             2888 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
ring             2889 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, fence->seq);
ring             2890 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0);
ring             2893 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             2894 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, cp_coher_cntl);
ring             2895 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0xFFFFFFFF);
ring             2896 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0);
ring             2897 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 10); /* poll interval */
ring             2898 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
ring             2899 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
ring             2901 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2902 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ring             2903 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
ring             2905 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2906 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
ring             2907 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, fence->seq);
ring             2909 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
ring             2910 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, RB_INT_STAT);
ring             2926 drivers/gpu/drm/radeon/r600.c 			      struct radeon_ring *ring,
ring             2936 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
ring             2937 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring             2938 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
ring             2943 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             2944 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0x0);
ring             2971 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring             2980 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
ring             2988 drivers/gpu/drm/radeon/r600.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring             2990 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             2991 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ring             2992 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
ring             3001 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
ring             3002 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, lower_32_bits(src_offset));
ring             3003 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, tmp);
ring             3004 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, lower_32_bits(dst_offset));
ring             3005 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
ring             3006 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, cur_size_in_bytes);
ring             3010 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             3011 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ring             3012 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
ring             3014 drivers/gpu/drm/radeon/r600.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring             3016 drivers/gpu/drm/radeon/r600.c 		radeon_ring_unlock_undo(rdev, ring);
ring             3021 drivers/gpu/drm/radeon/r600.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             3059 drivers/gpu/drm/radeon/r600.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             3060 drivers/gpu/drm/radeon/r600.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             3083 drivers/gpu/drm/radeon/r600.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             3088 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring;
ring             3091 drivers/gpu/drm/radeon/r600.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             3094 drivers/gpu/drm/radeon/r600.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             3095 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             3109 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring;
ring             3159 drivers/gpu/drm/radeon/r600.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3160 drivers/gpu/drm/radeon/r600.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             3313 drivers/gpu/drm/radeon/r600.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
ring             3314 drivers/gpu/drm/radeon/r600.c 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
ring             3371 drivers/gpu/drm/radeon/r600.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring             3374 drivers/gpu/drm/radeon/r600.c 	if (ring->rptr_save_reg) {
ring             3375 drivers/gpu/drm/radeon/r600.c 		next_rptr = ring->wptr + 3 + 4;
ring             3376 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             3377 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, ((ring->rptr_save_reg -
ring             3379 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, next_rptr);
ring             3381 drivers/gpu/drm/radeon/r600.c 		next_rptr = ring->wptr + 5 + 4;
ring             3382 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
ring             3383 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring             3384 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
ring             3385 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, next_rptr);
ring             3386 drivers/gpu/drm/radeon/r600.c 		radeon_ring_write(ring, 0);
ring             3389 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
ring             3390 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring,
ring             3395 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
ring             3396 drivers/gpu/drm/radeon/r600.c 	radeon_ring_write(ring, ib->length_dw);
ring             3399 drivers/gpu/drm/radeon/r600.c int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring             3413 drivers/gpu/drm/radeon/r600.c 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
ring             3445 drivers/gpu/drm/radeon/r600.c 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
ring             3507 drivers/gpu/drm/radeon/r600.c 				   (void **)&rdev->ih.ring);
ring             3528 drivers/gpu/drm/radeon/r600.c 		rdev->ih.ring = NULL;
ring             4131 drivers/gpu/drm/radeon/r600.c 		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
ring             4132 drivers/gpu/drm/radeon/r600.c 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
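
The r600.c lines above trace the canonical CP ring-test pattern: reserve a scratch register, push a three-dword PACKET3_SET_CONFIG_REG write of 0xDEADBEEF through the ring, then poll the register until the CP has fetched the packet. A minimal sketch of that flow follows; the 0xCAFEDEAD poison value and the rdev->usec_timeout field are assumptions from the driver's usual conventions, not shown in the excerpt, and error handling is trimmed.

        /* Sketch of the CP ring-test pattern behind r600_ring_test()
         * (r600.c:2822): prove the CP is fetching by making it write a
         * magic value into a scratch register we can read back. */
        static int ring_test_sketch(struct radeon_device *rdev,
                                    struct radeon_ring *ring)
        {
                uint32_t scratch, tmp;
                unsigned i;
                int r;

                r = radeon_scratch_get(rdev, &scratch);
                if (r)
                        return r;
                WREG32(scratch, 0xCAFEDEAD);            /* assumed poison value */

                r = radeon_ring_lock(rdev, ring, 3);    /* header + reg + value */
                if (r)
                        goto out;
                radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
                radeon_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
                radeon_ring_write(ring, 0xDEADBEEF);
                radeon_ring_unlock_commit(rdev, ring, false);

                for (i = 0; i < rdev->usec_timeout; i++) {      /* assumed field */
                        tmp = RREG32(scratch);
                        if (tmp == 0xDEADBEEF)
                                break;
                        udelay(1);
                }
                r = (i < rdev->usec_timeout) ? 0 : -EINVAL;
        out:
                radeon_scratch_free(rdev, scratch);
                return r;
        }
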
ring               52 drivers/gpu/drm/radeon/r600_dma.c 			   struct radeon_ring *ring)
ring               57 drivers/gpu/drm/radeon/r600_dma.c 		rptr = rdev->wb.wb[ring->rptr_offs/4];
ring               73 drivers/gpu/drm/radeon/r600_dma.c 			   struct radeon_ring *ring)
ring               87 drivers/gpu/drm/radeon/r600_dma.c 		       struct radeon_ring *ring)
ring               89 drivers/gpu/drm/radeon/r600_dma.c 	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
ring              109 drivers/gpu/drm/radeon/r600_dma.c 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
ring              122 drivers/gpu/drm/radeon/r600_dma.c 	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring              131 drivers/gpu/drm/radeon/r600_dma.c 	rb_bufsz = order_base_2(ring->ring_size / 4);
ring              151 drivers/gpu/drm/radeon/r600_dma.c 	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
ring              167 drivers/gpu/drm/radeon/r600_dma.c 	ring->wptr = 0;
ring              168 drivers/gpu/drm/radeon/r600_dma.c 	WREG32(DMA_RB_WPTR, ring->wptr << 2);
ring              172 drivers/gpu/drm/radeon/r600_dma.c 	ring->ready = true;
ring              174 drivers/gpu/drm/radeon/r600_dma.c 	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
ring              176 drivers/gpu/drm/radeon/r600_dma.c 		ring->ready = false;
ring              196 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
ring              208 drivers/gpu/drm/radeon/r600_dma.c bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring              213 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_lockup_update(rdev, ring);
ring              216 drivers/gpu/drm/radeon/r600_dma.c 	return radeon_ring_test_lockup(rdev, ring);
ring              231 drivers/gpu/drm/radeon/r600_dma.c 		       struct radeon_ring *ring)
ring              239 drivers/gpu/drm/radeon/r600_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              249 drivers/gpu/drm/radeon/r600_dma.c 	r = radeon_ring_lock(rdev, ring, 4);
ring              251 drivers/gpu/drm/radeon/r600_dma.c 		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
ring              254 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
ring              255 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, lower_32_bits(gpu_addr));
ring              256 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
ring              257 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring              258 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              268 drivers/gpu/drm/radeon/r600_dma.c 		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
ring              271 drivers/gpu/drm/radeon/r600_dma.c 			  ring->idx, tmp);
ring              290 drivers/gpu/drm/radeon/r600_dma.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring              291 drivers/gpu/drm/radeon/r600_dma.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring              294 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
ring              295 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring              296 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
ring              297 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, lower_32_bits(fence->seq));
ring              299 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
ring              314 drivers/gpu/drm/radeon/r600_dma.c 				  struct radeon_ring *ring,
ring              321 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
ring              322 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, addr & 0xfffffffc);
ring              323 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
ring              337 drivers/gpu/drm/radeon/r600_dma.c int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              346 drivers/gpu/drm/radeon/r600_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring              353 drivers/gpu/drm/radeon/r600_dma.c 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
ring              388 drivers/gpu/drm/radeon/r600_dma.c 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
ring              407 drivers/gpu/drm/radeon/r600_dma.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              410 drivers/gpu/drm/radeon/r600_dma.c 		u32 next_rptr = ring->wptr + 4;
ring              414 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
ring              415 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring              416 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
ring              417 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, next_rptr);
ring              423 drivers/gpu/drm/radeon/r600_dma.c 	while ((ring->wptr & 7) != 5)
ring              424 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
ring              425 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
ring              426 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
ring              427 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
ring              452 drivers/gpu/drm/radeon/r600_dma.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring              461 drivers/gpu/drm/radeon/r600_dma.c 	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
ring              469 drivers/gpu/drm/radeon/r600_dma.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring              476 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
ring              477 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, dst_offset & 0xfffffffc);
ring              478 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, src_offset & 0xfffffffc);
ring              479 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
ring              485 drivers/gpu/drm/radeon/r600_dma.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring              487 drivers/gpu/drm/radeon/r600_dma.c 		radeon_ring_unlock_undo(rdev, ring);
ring              492 drivers/gpu/drm/radeon/r600_dma.c 	radeon_ring_unlock_commit(rdev, ring, false);
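
The DMA engine cannot write config registers, so r600_dma.c tests the ring differently: the four-dword DMA_PACKET_WRITE sequence above lands the magic value in a CPU-visible write-back slot instead of a scratch register. A sketch under the assumption that R600_WB_DMA_RING_TEST_OFFSET names the write-back slot reserved for this (the excerpt only shows the ring->idx check at r600_dma.c:239); the poison value and timeout field are likewise assumed.

        /* Sketch of the DMA ring test behind r600_dma_ring_test(): poison a
         * write-back dword, have the DMA engine overwrite it, poll for it. */
        static int dma_ring_test_sketch(struct radeon_device *rdev,
                                        struct radeon_ring *ring)
        {
                unsigned index = R600_WB_DMA_RING_TEST_OFFSET;  /* assumed wb slot */
                u64 gpu_addr = rdev->wb.gpu_addr + index;
                unsigned i;
                int r;

                rdev->wb.wb[index/4] = cpu_to_le32(0xCAFEDEAD); /* assumed poison */
                r = radeon_ring_lock(rdev, ring, 4);
                if (r)
                        return r;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, lower_32_bits(gpu_addr));
                radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
                radeon_ring_write(ring, 0xDEADBEEF);
                radeon_ring_unlock_commit(rdev, ring, false);

                for (i = 0; i < rdev->usec_timeout; i++) {
                        if (le32_to_cpu(rdev->wb.wb[index/4]) == 0xDEADBEEF)
                                break;
                        udelay(1);
                }
                return (i < rdev->usec_timeout) ? 0 : -EINVAL;
        }
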
ring              377 drivers/gpu/drm/radeon/radeon.h 	unsigned		ring;
ring              383 drivers/gpu/drm/radeon/radeon.h int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
ring              386 drivers/gpu/drm/radeon/radeon.h void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
ring              387 drivers/gpu/drm/radeon/radeon.h int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
ring              388 drivers/gpu/drm/radeon/radeon.h void radeon_fence_process(struct radeon_device *rdev, int ring);
ring              392 drivers/gpu/drm/radeon/radeon.h int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
ring              393 drivers/gpu/drm/radeon/radeon.h int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
ring              399 drivers/gpu/drm/radeon/radeon.h unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
ring              400 drivers/gpu/drm/radeon/radeon.h bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
ring              401 drivers/gpu/drm/radeon/radeon.h void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
ring              413 drivers/gpu/drm/radeon/radeon.h 	BUG_ON(a->ring != b->ring);
ring              433 drivers/gpu/drm/radeon/radeon.h 	BUG_ON(a->ring != b->ring);
ring              600 drivers/gpu/drm/radeon/radeon.h bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
ring              602 drivers/gpu/drm/radeon/radeon.h bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
ring              807 drivers/gpu/drm/radeon/radeon.h void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
ring              808 drivers/gpu/drm/radeon/radeon.h bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
ring              809 drivers/gpu/drm/radeon/radeon.h void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
ring              826 drivers/gpu/drm/radeon/radeon.h 	int				ring;
ring              835 drivers/gpu/drm/radeon/radeon.h 	volatile uint32_t	*ring;
ring              971 drivers/gpu/drm/radeon/radeon.h 	volatile uint32_t	*ring;
ring             1005 drivers/gpu/drm/radeon/radeon.h int radeon_ib_get(struct radeon_device *rdev, int ring,
ring             1016 drivers/gpu/drm/radeon/radeon.h 				      struct radeon_ring *ring);
ring             1024 drivers/gpu/drm/radeon/radeon.h void radeon_ring_undo(struct radeon_ring *ring);
ring             1028 drivers/gpu/drm/radeon/radeon.h 			       struct radeon_ring *ring);
ring             1029 drivers/gpu/drm/radeon/radeon.h bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1030 drivers/gpu/drm/radeon/radeon.h unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
ring             1032 drivers/gpu/drm/radeon/radeon.h int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
ring             1084 drivers/gpu/drm/radeon/radeon.h 	u32			ring;
ring             1684 drivers/gpu/drm/radeon/radeon.h int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
ring             1686 drivers/gpu/drm/radeon/radeon.h int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
ring             1727 drivers/gpu/drm/radeon/radeon.h int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
ring             1729 drivers/gpu/drm/radeon/radeon.h int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
ring             1736 drivers/gpu/drm/radeon/radeon.h 			       struct radeon_ring *ring,
ring             1742 drivers/gpu/drm/radeon/radeon.h int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1743 drivers/gpu/drm/radeon/radeon.h int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1812 drivers/gpu/drm/radeon/radeon.h 	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1813 drivers/gpu/drm/radeon/radeon.h 	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1814 drivers/gpu/drm/radeon/radeon.h 	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1823 drivers/gpu/drm/radeon/radeon.h 	void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
ring             1826 drivers/gpu/drm/radeon/radeon.h 	void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
ring             1887 drivers/gpu/drm/radeon/radeon.h 	const struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
ring             2376 drivers/gpu/drm/radeon/radeon.h 	struct radeon_ring		ring[RADEON_NUM_RINGS];
ring             2685 drivers/gpu/drm/radeon/radeon.h static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
ring             2687 drivers/gpu/drm/radeon/radeon.h 	if (ring->count_dw <= 0)
ring             2690 drivers/gpu/drm/radeon/radeon.h 	ring->ring[ring->wptr++] = v;
ring             2691 drivers/gpu/drm/radeon/radeon.h 	ring->wptr &= ring->ptr_mask;
ring             2692 drivers/gpu/drm/radeon/radeon.h 	ring->count_dw--;
ring             2693 drivers/gpu/drm/radeon/radeon.h 	ring->ring_free_dw--;
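
The inline above (radeon.h:2685) is the producer side of every ring: store the dword at wptr, advance, and wrap with ptr_mask, which radeon_ring_init() sets to ring_size/4 - 1 (radeon_ring.c:414). A worked example of the wrap, using the 1 MiB GFX ring created at r600.c:3314:

        /* Illustration only: wrap arithmetic in radeon_ring_write() for a
         * 1 MiB ring.
         *   ring_size = 1024 * 1024 bytes = 262144 dwords
         *   ptr_mask  = 262144 - 1 = 0x3ffff
         *   wptr = 0x3ffff; wptr + 1 = 0x40000; 0x40000 & 0x3ffff = 0
         * so the write pointer silently wraps to the start of the buffer,
         * which is why ring sizes must be powers of two. */
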
ring             2703 drivers/gpu/drm/radeon/radeon.h #define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
ring             2715 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
ring             2716 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
ring             2717 drivers/gpu/drm/radeon/radeon.h #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
ring             2718 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
ring             2719 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
ring             2720 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
ring             2721 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
ring             2722 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
ring             2723 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
ring             2724 drivers/gpu/drm/radeon/radeon.h #define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
ring             2732 drivers/gpu/drm/radeon/radeon.h #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
ring             2733 drivers/gpu/drm/radeon/radeon.h #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
ring             2838 drivers/gpu/drm/radeon/radeon.h 				       struct radeon_vm *vm, int ring);
ring             2841 drivers/gpu/drm/radeon/radeon.h 		     int ring, struct radeon_fence *fence);
ring              214 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              282 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              378 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              446 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              514 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              582 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              650 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              718 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              786 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              854 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring              950 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1035 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1128 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1234 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1354 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1448 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1541 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1686 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1804 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             1942 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             2112 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
ring             2225 drivers/gpu/drm/radeon/radeon_asic.c 	.ring = {
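
Each ".ring = {" initializer above fills one slot of the per-ASIC dispatch table declared at radeon.h:1887, and the radeon_ring_test/radeon_ib_test macros (radeon.h:2716-2717) indirect through it. A hypothetical entry follows; only r600_ring_test and r600_ib_test are confirmed by the listing, while the other handler names are guesses patterned on the r600 functions excerpted earlier.

        static const struct radeon_asic_ring gfx_ring_sketch = {
                .cs_parse       = &r600_cs_parse,       /* assumed name */
                .ring_test      = &r600_ring_test,      /* r600.c:2822 */
                .ib_test        = &r600_ib_test,        /* r600.c:3399 */
                .is_lockup      = &r600_gfx_is_lockup,  /* assumed name */
                .get_rptr       = &r600_gfx_get_rptr,   /* assumed name */
                .get_wptr       = &r600_gfx_get_wptr,   /* assumed name */
                .set_wptr       = &r600_gfx_set_wptr,   /* assumed name */
        };

With such a table in place, radeon_ring_test(rdev, r, cp) expands to rdev->asic->ring[r]->ring_test(rdev, cp), so the same call sites drive GFX, DMA, UVD and VCE rings through different handlers.
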
ring               73 drivers/gpu/drm/radeon/radeon_asic.h void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
ring              113 drivers/gpu/drm/radeon/radeon_asic.h int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              147 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              149 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              151 drivers/gpu/drm/radeon/radeon_asic.h 		       struct radeon_ring *ring);
ring              171 drivers/gpu/drm/radeon/radeon_asic.h extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
ring              287 drivers/gpu/drm/radeon/radeon_asic.h void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
ring              331 drivers/gpu/drm/radeon/radeon_asic.h 				  struct radeon_ring *ring,
ring              335 drivers/gpu/drm/radeon/radeon_asic.h bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
ring              342 drivers/gpu/drm/radeon/radeon_asic.h int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              343 drivers/gpu/drm/radeon/radeon_asic.h int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              382 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              384 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              386 drivers/gpu/drm/radeon/radeon_asic.h 		       struct radeon_ring *ring);
ring              417 drivers/gpu/drm/radeon/radeon_asic.h 			   struct radeon_ring *ring);
ring              419 drivers/gpu/drm/radeon/radeon_asic.h 			   struct radeon_ring *ring);
ring              421 drivers/gpu/drm/radeon/radeon_asic.h 		       struct radeon_ring *ring);
ring              614 drivers/gpu/drm/radeon/radeon_asic.h void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              621 drivers/gpu/drm/radeon/radeon_asic.h bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
ring              622 drivers/gpu/drm/radeon/radeon_asic.h bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
ring              640 drivers/gpu/drm/radeon/radeon_asic.h void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              644 drivers/gpu/drm/radeon/radeon_asic.h 			struct radeon_ring *ring);
ring              646 drivers/gpu/drm/radeon/radeon_asic.h 			struct radeon_ring *ring);
ring              648 drivers/gpu/drm/radeon/radeon_asic.h 			 struct radeon_ring *ring);
ring              650 drivers/gpu/drm/radeon/radeon_asic.h 			     struct radeon_ring *ring);
ring              652 drivers/gpu/drm/radeon/radeon_asic.h 			     struct radeon_ring *ring);
ring              654 drivers/gpu/drm/radeon/radeon_asic.h 			 struct radeon_ring *ring);
ring              722 drivers/gpu/drm/radeon/radeon_asic.h void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              745 drivers/gpu/drm/radeon/radeon_asic.h void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              792 drivers/gpu/drm/radeon/radeon_asic.h 				  struct radeon_ring *ring,
ring              804 drivers/gpu/drm/radeon/radeon_asic.h int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              805 drivers/gpu/drm/radeon/radeon_asic.h int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              806 drivers/gpu/drm/radeon/radeon_asic.h bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
ring              823 drivers/gpu/drm/radeon/radeon_asic.h int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              824 drivers/gpu/drm/radeon/radeon_asic.h int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              829 drivers/gpu/drm/radeon/radeon_asic.h void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              848 drivers/gpu/drm/radeon/radeon_asic.h void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              852 drivers/gpu/drm/radeon/radeon_asic.h 		     struct radeon_ring *ring);
ring              854 drivers/gpu/drm/radeon/radeon_asic.h 		     struct radeon_ring *ring);
ring              856 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              858 drivers/gpu/drm/radeon/radeon_asic.h 			 struct radeon_ring *ring);
ring              860 drivers/gpu/drm/radeon/radeon_asic.h 			 struct radeon_ring *ring);
ring              862 drivers/gpu/drm/radeon/radeon_asic.h 			  struct radeon_ring *ring);
ring              864 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              866 drivers/gpu/drm/radeon/radeon_asic.h 		      struct radeon_ring *ring);
ring              868 drivers/gpu/drm/radeon/radeon_asic.h 		       struct radeon_ring *ring);
ring              929 drivers/gpu/drm/radeon/radeon_asic.h                            struct radeon_ring *ring);
ring              931 drivers/gpu/drm/radeon/radeon_asic.h                            struct radeon_ring *ring);
ring              933 drivers/gpu/drm/radeon/radeon_asic.h                        struct radeon_ring *ring);
ring              941 drivers/gpu/drm/radeon/radeon_asic.h int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              944 drivers/gpu/drm/radeon/radeon_asic.h int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
ring              946 drivers/gpu/drm/radeon/radeon_asic.h 			     struct radeon_ring *ring,
ring              956 drivers/gpu/drm/radeon/radeon_asic.h 			     struct radeon_ring *ring,
ring              962 drivers/gpu/drm/radeon/radeon_asic.h 			     struct radeon_ring *ring,
ring              971 drivers/gpu/drm/radeon/radeon_asic.h 			   struct radeon_ring *ring);
ring              973 drivers/gpu/drm/radeon/radeon_asic.h 			   struct radeon_ring *ring);
ring              975 drivers/gpu/drm/radeon/radeon_asic.h 		       struct radeon_ring *ring);
ring              132 drivers/gpu/drm/radeon/radeon_cs.c 		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
ring              201 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
ring              209 drivers/gpu/drm/radeon/radeon_cs.c static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
ring              213 drivers/gpu/drm/radeon/radeon_cs.c 	switch (ring) {
ring              215 drivers/gpu/drm/radeon/radeon_cs.c 		DRM_ERROR("unknown ring id: %d\n", ring);
ring              218 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = RADEON_RING_TYPE_GFX_INDEX;
ring              223 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
ring              225 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
ring              227 drivers/gpu/drm/radeon/radeon_cs.c 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
ring              232 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = R600_RING_TYPE_DMA_INDEX;
ring              234 drivers/gpu/drm/radeon/radeon_cs.c 				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
ring              236 drivers/gpu/drm/radeon/radeon_cs.c 			p->ring = R600_RING_TYPE_DMA_INDEX;
ring              242 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = R600_RING_TYPE_UVD_INDEX;
ring              246 drivers/gpu/drm/radeon/radeon_cs.c 		p->ring = TN_RING_TYPE_VCE1_INDEX;
ring              275 drivers/gpu/drm/radeon/radeon_cs.c 	u32 ring = RADEON_CS_RING_GFX;
ring              362 drivers/gpu/drm/radeon/radeon_cs.c 				ring = p->chunks[i].kdata[1];
ring              376 drivers/gpu/drm/radeon/radeon_cs.c 		if (radeon_cs_get_ring(p, ring, priority))
ring              381 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
ring              382 drivers/gpu/drm/radeon/radeon_cs.c 				DRM_ERROR("Ring %d requires VM!\n", p->ring);
ring              386 drivers/gpu/drm/radeon/radeon_cs.c 			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
ring              388 drivers/gpu/drm/radeon/radeon_cs.c 					  p->ring);
ring              471 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_cs_parse(rdev, parser->ring, parser);
ring              484 drivers/gpu/drm/radeon/radeon_cs.c 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
ring              486 drivers/gpu/drm/radeon/radeon_cs.c 	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
ring              487 drivers/gpu/drm/radeon/radeon_cs.c 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
ring              555 drivers/gpu/drm/radeon/radeon_cs.c 		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
ring              561 drivers/gpu/drm/radeon/radeon_cs.c 	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
ring              566 drivers/gpu/drm/radeon/radeon_cs.c 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
ring              624 drivers/gpu/drm/radeon/radeon_cs.c 			r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
ring              646 drivers/gpu/drm/radeon/radeon_cs.c 	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
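
radeon_cs_get_ring() above maps the userspace RADEON_CS_RING_* id from the chunk data onto an internal ring index, load-balancing compute and DMA submissions where the ASIC has more than one ring. A simplified sketch of the mapping, with the per-ASIC balancing branches at radeon_cs.c:223-236 collapsed to their pre-Cayman fallbacks:

        static int cs_get_ring_sketch(struct radeon_cs_parser *p, u32 ring)
        {
                switch (ring) {
                case RADEON_CS_RING_GFX:
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                        break;
                case RADEON_CS_RING_COMPUTE:    /* CP1/CP2 on Cayman+, else GFX */
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                        break;
                case RADEON_CS_RING_DMA:        /* DMA1 on Cayman+, else DMA0 */
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                        break;
                case RADEON_CS_RING_UVD:
                        p->ring = R600_RING_TYPE_UVD_INDEX;
                        break;
                case RADEON_CS_RING_VCE:
                        p->ring = TN_RING_TYPE_VCE1_INDEX;
                        break;
                default:
                        DRM_ERROR("unknown ring id: %d\n", ring);
                        return -EINVAL;
                }
                return 0;
        }
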
ring             1308 drivers/gpu/drm/radeon/radeon_device.c 		rdev->ring[i].idx = i;
ring             1806 drivers/gpu/drm/radeon/radeon_device.c 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
ring             1825 drivers/gpu/drm/radeon/radeon_device.c 			radeon_ring_restore(rdev, &rdev->ring[i],
ring               68 drivers/gpu/drm/radeon/radeon_fence.c static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
ring               70 drivers/gpu/drm/radeon/radeon_fence.c 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
ring               89 drivers/gpu/drm/radeon/radeon_fence.c static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
ring               91 drivers/gpu/drm/radeon/radeon_fence.c 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
ring              114 drivers/gpu/drm/radeon/radeon_fence.c static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
ring              121 drivers/gpu/drm/radeon/radeon_fence.c 			   &rdev->fence_drv[ring].lockup_work,
ring              137 drivers/gpu/drm/radeon/radeon_fence.c 		      int ring)
ring              147 drivers/gpu/drm/radeon/radeon_fence.c 	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
ring              148 drivers/gpu/drm/radeon/radeon_fence.c 	(*fence)->ring = ring;
ring              152 drivers/gpu/drm/radeon/radeon_fence.c 		       rdev->fence_context + ring,
ring              154 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_fence_ring_emit(rdev, ring, *fence);
ring              155 drivers/gpu/drm/radeon/radeon_fence.c 	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
ring              156 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_fence_schedule_check(rdev, ring);
ring              178 drivers/gpu/drm/radeon/radeon_fence.c 	seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
ring              187 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
ring              205 drivers/gpu/drm/radeon/radeon_fence.c static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
ring              232 drivers/gpu/drm/radeon/radeon_fence.c 	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
ring              234 drivers/gpu/drm/radeon/radeon_fence.c 		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
ring              235 drivers/gpu/drm/radeon/radeon_fence.c 		seq = radeon_fence_read(rdev, ring);
ring              259 drivers/gpu/drm/radeon/radeon_fence.c 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
ring              262 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_schedule_check(rdev, ring);
ring              279 drivers/gpu/drm/radeon/radeon_fence.c 	int ring;
ring              284 drivers/gpu/drm/radeon/radeon_fence.c 	ring = fence_drv - &rdev->fence_drv[0];
ring              288 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_schedule_check(rdev, ring);
ring              301 drivers/gpu/drm/radeon/radeon_fence.c 	if (radeon_fence_activity(rdev, ring))
ring              304 drivers/gpu/drm/radeon/radeon_fence.c 	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
ring              310 drivers/gpu/drm/radeon/radeon_fence.c 			 fence_drv->sync_seq[ring], ring);
ring              328 drivers/gpu/drm/radeon/radeon_fence.c void radeon_fence_process(struct radeon_device *rdev, int ring)
ring              330 drivers/gpu/drm/radeon/radeon_fence.c 	if (radeon_fence_activity(rdev, ring))
ring              349 drivers/gpu/drm/radeon/radeon_fence.c 				      u64 seq, unsigned ring)
ring              351 drivers/gpu/drm/radeon/radeon_fence.c 	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
ring              355 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_fence_process(rdev, ring);
ring              356 drivers/gpu/drm/radeon/radeon_fence.c 	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
ring              366 drivers/gpu/drm/radeon/radeon_fence.c 	unsigned ring = fence->ring;
ring              369 drivers/gpu/drm/radeon/radeon_fence.c 	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
ring              374 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_process(rdev, ring);
ring              377 drivers/gpu/drm/radeon/radeon_fence.c 		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
ring              397 drivers/gpu/drm/radeon/radeon_fence.c 	if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
ring              401 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
ring              403 drivers/gpu/drm/radeon/radeon_fence.c 		if (radeon_fence_activity(rdev, fence->ring))
ring              407 drivers/gpu/drm/radeon/radeon_fence.c 		if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
ring              408 drivers/gpu/drm/radeon/radeon_fence.c 			radeon_irq_kms_sw_irq_put(rdev, fence->ring);
ring              416 drivers/gpu/drm/radeon/radeon_fence.c 		if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
ring              417 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[fence->ring].delayed_irq = true;
ring              418 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_schedule_check(rdev, fence->ring);
ring              427 drivers/gpu/drm/radeon/radeon_fence.c 	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
ring              444 drivers/gpu/drm/radeon/radeon_fence.c 	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
ring              565 drivers/gpu/drm/radeon/radeon_fence.c 	seq[fence->ring] = fence->seq;
ring              651 drivers/gpu/drm/radeon/radeon_fence.c int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
ring              656 drivers/gpu/drm/radeon/radeon_fence.c 	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
ring              657 drivers/gpu/drm/radeon/radeon_fence.c 	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
ring              678 drivers/gpu/drm/radeon/radeon_fence.c int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
ring              683 drivers/gpu/drm/radeon/radeon_fence.c 	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
ring              684 drivers/gpu/drm/radeon/radeon_fence.c 	if (!seq[ring])
ring              693 drivers/gpu/drm/radeon/radeon_fence.c 			ring, r);
ring              739 drivers/gpu/drm/radeon/radeon_fence.c unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
ring              746 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_fence_process(rdev, ring);
ring              747 drivers/gpu/drm/radeon/radeon_fence.c 	emitted = rdev->fence_drv[ring].sync_seq[ring]
ring              748 drivers/gpu/drm/radeon/radeon_fence.c 		- atomic64_read(&rdev->fence_drv[ring].last_seq);
ring              775 drivers/gpu/drm/radeon/radeon_fence.c 	if (fence->ring == dst_ring) {
ring              781 drivers/gpu/drm/radeon/radeon_fence.c 	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
ring              806 drivers/gpu/drm/radeon/radeon_fence.c 	if (fence->ring == dst_ring) {
ring              811 drivers/gpu/drm/radeon/radeon_fence.c 	src = &fence->rdev->fence_drv[fence->ring];
ring              833 drivers/gpu/drm/radeon/radeon_fence.c int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
ring              838 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
ring              839 drivers/gpu/drm/radeon/radeon_fence.c 	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
ring              840 drivers/gpu/drm/radeon/radeon_fence.c 		rdev->fence_drv[ring].scratch_reg = 0;
ring              841 drivers/gpu/drm/radeon/radeon_fence.c 		if (ring != R600_RING_TYPE_UVD_INDEX) {
ring              842 drivers/gpu/drm/radeon/radeon_fence.c 			index = R600_WB_EVENT_OFFSET + ring * 4;
ring              843 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
ring              844 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
ring              850 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
ring              851 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
ring              855 drivers/gpu/drm/radeon/radeon_fence.c 		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
ring              861 drivers/gpu/drm/radeon/radeon_fence.c 			rdev->fence_drv[ring].scratch_reg -
ring              863 drivers/gpu/drm/radeon/radeon_fence.c 		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
ring              864 drivers/gpu/drm/radeon/radeon_fence.c 		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
ring              866 drivers/gpu/drm/radeon/radeon_fence.c 	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
ring              867 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].initialized = true;
ring              869 drivers/gpu/drm/radeon/radeon_fence.c 		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
ring              883 drivers/gpu/drm/radeon/radeon_fence.c static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
ring              887 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].scratch_reg = -1;
ring              888 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].cpu_addr = NULL;
ring              889 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].gpu_addr = 0;
ring              891 drivers/gpu/drm/radeon/radeon_fence.c 		rdev->fence_drv[ring].sync_seq[i] = 0;
ring              892 drivers/gpu/drm/radeon/radeon_fence.c 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
ring              893 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].initialized = false;
ring              894 drivers/gpu/drm/radeon/radeon_fence.c 	INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
ring              896 drivers/gpu/drm/radeon/radeon_fence.c 	rdev->fence_drv[ring].rdev = rdev;
ring              913 drivers/gpu/drm/radeon/radeon_fence.c 	int ring;
ring              916 drivers/gpu/drm/radeon/radeon_fence.c 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
ring              917 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_driver_init_ring(rdev, ring);
ring              935 drivers/gpu/drm/radeon/radeon_fence.c 	int ring, r;
ring              938 drivers/gpu/drm/radeon/radeon_fence.c 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
ring              939 drivers/gpu/drm/radeon/radeon_fence.c 		if (!rdev->fence_drv[ring].initialized)
ring              941 drivers/gpu/drm/radeon/radeon_fence.c 		r = radeon_fence_wait_empty(rdev, ring);
ring              944 drivers/gpu/drm/radeon/radeon_fence.c 			radeon_fence_driver_force_completion(rdev, ring);
ring              946 drivers/gpu/drm/radeon/radeon_fence.c 		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
ring              948 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
ring              949 drivers/gpu/drm/radeon/radeon_fence.c 		rdev->fence_drv[ring].initialized = false;
ring              963 drivers/gpu/drm/radeon/radeon_fence.c void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
ring              965 drivers/gpu/drm/radeon/radeon_fence.c 	if (rdev->fence_drv[ring].initialized) {
ring              966 drivers/gpu/drm/radeon/radeon_fence.c 		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
ring              967 drivers/gpu/drm/radeon/radeon_fence.c 		cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
ring             1047 drivers/gpu/drm/radeon/radeon_fence.c 	switch (fence->ring) {
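
The radeon_fence.c lines above implement a single handshake: radeon_fence_emit() hands out the next sync_seq[ring] value and has the ring write it back on completion, radeon_fence_activity() folds the hardware-visible value into the atomic last_seq shadow, and a fence counts as signaled once last_seq has caught up with its seq. A sketch mirroring the check excerpted at radeon_fence.c:349-356:

        /* Sketch: a fence is done once the driver-side shadow of the
         * hardware write-back (last_seq) reaches the fence's own seq. */
        static bool fence_seq_signaled_sketch(struct radeon_device *rdev,
                                              u64 seq, unsigned ring)
        {
                if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                        return true;
                /* not there yet: process outstanding activity, re-check */
                radeon_fence_process(rdev, ring);
                return atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq;
        }
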
ring               58 drivers/gpu/drm/radeon/radeon_ib.c int radeon_ib_get(struct radeon_device *rdev, int ring,
ring               72 drivers/gpu/drm/radeon/radeon_ib.c 	ib->ring = ring;
ring              128 drivers/gpu/drm/radeon/radeon_ib.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              131 drivers/gpu/drm/radeon/radeon_ib.c 	if (!ib->length_dw || !ring->ready) {
ring              138 drivers/gpu/drm/radeon/radeon_ib.c 	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
ring              147 drivers/gpu/drm/radeon/radeon_ib.c 		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
ring              152 drivers/gpu/drm/radeon/radeon_ib.c 	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
ring              155 drivers/gpu/drm/radeon/radeon_ib.c 		radeon_ring_unlock_undo(rdev, ring);
ring              160 drivers/gpu/drm/radeon/radeon_ib.c 		radeon_vm_flush(rdev, ib->vm, ib->ring,
ring              164 drivers/gpu/drm/radeon/radeon_ib.c 		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
ring              167 drivers/gpu/drm/radeon/radeon_ib.c 	radeon_ring_ib_execute(rdev, ib->ring, ib);
ring              168 drivers/gpu/drm/radeon/radeon_ib.c 	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
ring              171 drivers/gpu/drm/radeon/radeon_ib.c 		radeon_ring_unlock_undo(rdev, ring);
ring              181 drivers/gpu/drm/radeon/radeon_ib.c 	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
ring              266 drivers/gpu/drm/radeon/radeon_ib.c 		struct radeon_ring *ring = &rdev->ring[i];
ring              268 drivers/gpu/drm/radeon/radeon_ib.c 		if (!ring->ready)
ring              271 drivers/gpu/drm/radeon/radeon_ib.c 		r = radeon_ib_test(rdev, i, ring);
ring              274 drivers/gpu/drm/radeon/radeon_ib.c 			ring->ready = false;
ring              357 drivers/gpu/drm/radeon/radeon_irq_kms.c void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
ring              364 drivers/gpu/drm/radeon/radeon_irq_kms.c 	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
ring              381 drivers/gpu/drm/radeon/radeon_irq_kms.c bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
ring              383 drivers/gpu/drm/radeon/radeon_irq_kms.c 	return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
ring              396 drivers/gpu/drm/radeon/radeon_irq_kms.c void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
ring              403 drivers/gpu/drm/radeon/radeon_irq_kms.c 	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
ring              468 drivers/gpu/drm/radeon/radeon_kms.c 			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
ring              471 drivers/gpu/drm/radeon/radeon_kms.c 			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
ring              472 drivers/gpu/drm/radeon/radeon_kms.c 			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
ring              475 drivers/gpu/drm/radeon/radeon_kms.c 			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
ring              478 drivers/gpu/drm/radeon/radeon_kms.c 			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
ring              535 drivers/gpu/drm/radeon/radeon_object.c 			    struct list_head *head, int ring)
ring              575 drivers/gpu/drm/radeon/radeon_object.c 			if (ring == R600_RING_TYPE_UVD_INDEX)
ring              145 drivers/gpu/drm/radeon/radeon_object.h 				   struct list_head *head, int ring);
ring              267 drivers/gpu/drm/radeon/radeon_pm.c 		struct radeon_ring *ring = &rdev->ring[i];
ring              268 drivers/gpu/drm/radeon/radeon_pm.c 		if (!ring->ready) {
ring             1097 drivers/gpu/drm/radeon/radeon_pm.c 		struct radeon_ring *ring = &rdev->ring[i];
ring             1098 drivers/gpu/drm/radeon/radeon_pm.c 		if (ring->ready)
ring             1812 drivers/gpu/drm/radeon/radeon_pm.c 			struct radeon_ring *ring = &rdev->ring[i];
ring             1814 drivers/gpu/drm/radeon/radeon_pm.c 			if (ring->ready) {
ring               49 drivers/gpu/drm/radeon/radeon_ring.c static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
ring               62 drivers/gpu/drm/radeon/radeon_ring.c 				      struct radeon_ring *ring)
ring               64 drivers/gpu/drm/radeon/radeon_ring.c 	switch (ring->idx) {
ring               82 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
ring               84 drivers/gpu/drm/radeon/radeon_ring.c 	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
ring               87 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_free_dw = rptr + (ring->ring_size / 4);
ring               88 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_free_dw -= ring->wptr;
ring               89 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_free_dw &= ring->ptr_mask;
ring               90 drivers/gpu/drm/radeon/radeon_ring.c 	if (!ring->ring_free_dw) {
ring               92 drivers/gpu/drm/radeon/radeon_ring.c 		ring->ring_free_dw = ring->ring_size / 4;
ring               94 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_ring_lockup_update(rdev, ring);
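
A worked example of the free-size arithmetic above, for a hypothetical 1 KiB ring (256 dwords, ptr_mask = 255): with rptr = 10 and wptr = 200, free = (10 + 256 - 200) & 255 = 66 dwords. The masked result is 0 exactly when rptr == wptr, which is ambiguous between completely empty and completely full; the branch at radeon_ring.c:90 resolves it as empty, resets free_dw to ring_size/4, and refreshes the lockup tracking so an idle ring is not mistaken for a hang.
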
ring              108 drivers/gpu/drm/radeon/radeon_ring.c int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
ring              113 drivers/gpu/drm/radeon/radeon_ring.c 	if (ndw > (ring->ring_size / 4))
ring              117 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_free_size(rdev, ring);
ring              118 drivers/gpu/drm/radeon/radeon_ring.c 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
ring              119 drivers/gpu/drm/radeon/radeon_ring.c 	while (ndw > (ring->ring_free_dw - 1)) {
ring              120 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_ring_free_size(rdev, ring);
ring              121 drivers/gpu/drm/radeon/radeon_ring.c 		if (ndw < ring->ring_free_dw) {
ring              124 drivers/gpu/drm/radeon/radeon_ring.c 		r = radeon_fence_wait_next(rdev, ring->idx);
ring              128 drivers/gpu/drm/radeon/radeon_ring.c 	ring->count_dw = ndw;
ring              129 drivers/gpu/drm/radeon/radeon_ring.c 	ring->wptr_old = ring->wptr;
ring              144 drivers/gpu/drm/radeon/radeon_ring.c int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
ring              149 drivers/gpu/drm/radeon/radeon_ring.c 	r = radeon_ring_alloc(rdev, ring, ndw);
ring              168 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
ring              174 drivers/gpu/drm/radeon/radeon_ring.c 	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
ring              175 drivers/gpu/drm/radeon/radeon_ring.c 		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
ring              177 drivers/gpu/drm/radeon/radeon_ring.c 	while (ring->wptr & ring->align_mask) {
ring              178 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_ring_write(ring, ring->nop);
ring              186 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_set_wptr(rdev, ring);
ring              199 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
ring              202 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_commit(rdev, ring, hdp_flush);
ring              213 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_undo(struct radeon_ring *ring)
ring              215 drivers/gpu/drm/radeon/radeon_ring.c 	ring->wptr = ring->wptr_old;
ring              225 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
ring              227 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_undo(ring);
ring              239 drivers/gpu/drm/radeon/radeon_ring.c 			       struct radeon_ring *ring)
ring              241 drivers/gpu/drm/radeon/radeon_ring.c 	atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
ring              242 drivers/gpu/drm/radeon/radeon_ring.c 	atomic64_set(&ring->last_activity, jiffies_64);
ring              251 drivers/gpu/drm/radeon/radeon_ring.c bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring              253 drivers/gpu/drm/radeon/radeon_ring.c 	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
ring              254 drivers/gpu/drm/radeon/radeon_ring.c 	uint64_t last = atomic64_read(&ring->last_activity);
ring              257 drivers/gpu/drm/radeon/radeon_ring.c 	if (rptr != atomic_read(&ring->last_rptr)) {
ring              259 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_ring_lockup_update(rdev, ring);
ring              266 drivers/gpu/drm/radeon/radeon_ring.c 			ring->idx, elapsed);
ring              281 drivers/gpu/drm/radeon/radeon_ring.c unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
ring              290 drivers/gpu/drm/radeon/radeon_ring.c 	if (ring->ring_obj == NULL) {
ring              296 drivers/gpu/drm/radeon/radeon_ring.c 	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
ring              302 drivers/gpu/drm/radeon/radeon_ring.c 	if (ring->rptr_save_reg)
ring              303 drivers/gpu/drm/radeon/radeon_ring.c 		ptr = RREG32(ring->rptr_save_reg);
ring              305 drivers/gpu/drm/radeon/radeon_ring.c 		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
ring              312 drivers/gpu/drm/radeon/radeon_ring.c 	size = ring->wptr + (ring->ring_size / 4);
ring              314 drivers/gpu/drm/radeon/radeon_ring.c 	size &= ring->ptr_mask;
ring              327 drivers/gpu/drm/radeon/radeon_ring.c 		(*data)[i] = ring->ring[ptr++];
ring              328 drivers/gpu/drm/radeon/radeon_ring.c 		ptr &= ring->ptr_mask;
ring              345 drivers/gpu/drm/radeon/radeon_ring.c int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
ring              354 drivers/gpu/drm/radeon/radeon_ring.c 	r = radeon_ring_lock(rdev, ring, size);
ring              359 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_ring_write(ring, data[i]);
ring              362 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              379 drivers/gpu/drm/radeon/radeon_ring.c int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
ring              384 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_size = ring_size;
ring              385 drivers/gpu/drm/radeon/radeon_ring.c 	ring->rptr_offs = rptr_offs;
ring              386 drivers/gpu/drm/radeon/radeon_ring.c 	ring->nop = nop;
ring              388 drivers/gpu/drm/radeon/radeon_ring.c 	if (ring->ring_obj == NULL) {
ring              389 drivers/gpu/drm/radeon/radeon_ring.c 		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
ring              391 drivers/gpu/drm/radeon/radeon_ring.c 				     NULL, &ring->ring_obj);
ring              396 drivers/gpu/drm/radeon/radeon_ring.c 		r = radeon_bo_reserve(ring->ring_obj, false);
ring              399 drivers/gpu/drm/radeon/radeon_ring.c 		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
ring              400 drivers/gpu/drm/radeon/radeon_ring.c 					&ring->gpu_addr);
ring              402 drivers/gpu/drm/radeon/radeon_ring.c 			radeon_bo_unreserve(ring->ring_obj);
ring              406 drivers/gpu/drm/radeon/radeon_ring.c 		r = radeon_bo_kmap(ring->ring_obj,
ring              407 drivers/gpu/drm/radeon/radeon_ring.c 				       (void **)&ring->ring);
ring              408 drivers/gpu/drm/radeon/radeon_ring.c 		radeon_bo_unreserve(ring->ring_obj);
ring              414 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ptr_mask = (ring->ring_size / 4) - 1;
ring              415 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_free_dw = ring->ring_size / 4;
ring              417 drivers/gpu/drm/radeon/radeon_ring.c 		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
ring              418 drivers/gpu/drm/radeon/radeon_ring.c 		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring              419 drivers/gpu/drm/radeon/radeon_ring.c 		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
ring              421 drivers/gpu/drm/radeon/radeon_ring.c 	if (radeon_debugfs_ring_init(rdev, ring)) {
ring              424 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_lockup_update(rdev, ring);
ring              436 drivers/gpu/drm/radeon/radeon_ring.c void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
ring              442 drivers/gpu/drm/radeon/radeon_ring.c 	ring_obj = ring->ring_obj;
ring              443 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ready = false;
ring              444 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring = NULL;
ring              445 drivers/gpu/drm/radeon/radeon_ring.c 	ring->ring_obj = NULL;
ring              470 drivers/gpu/drm/radeon/radeon_ring.c 	struct radeon_ring *ring = &rdev->ring[ridx];
ring              475 drivers/gpu/drm/radeon/radeon_ring.c 	radeon_ring_free_size(rdev, ring);
ring              476 drivers/gpu/drm/radeon/radeon_ring.c 	count = (ring->ring_size / 4) - ring->ring_free_dw;
ring              478 drivers/gpu/drm/radeon/radeon_ring.c 	wptr = radeon_ring_get_wptr(rdev, ring);
ring              482 drivers/gpu/drm/radeon/radeon_ring.c 	rptr = radeon_ring_get_rptr(rdev, ring);
ring              486 drivers/gpu/drm/radeon/radeon_ring.c 	if (ring->rptr_save_reg) {
ring              487 drivers/gpu/drm/radeon/radeon_ring.c 		rptr_next = RREG32(ring->rptr_save_reg);
ring              489 drivers/gpu/drm/radeon/radeon_ring.c 			   ring->rptr_save_reg, rptr_next, rptr_next);
ring              494 drivers/gpu/drm/radeon/radeon_ring.c 		   ring->wptr, ring->wptr);
ring              496 drivers/gpu/drm/radeon/radeon_ring.c 		   ring->last_semaphore_signal_addr);
ring              498 drivers/gpu/drm/radeon/radeon_ring.c 		   ring->last_semaphore_wait_addr);
ring              499 drivers/gpu/drm/radeon/radeon_ring.c 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
ring              502 drivers/gpu/drm/radeon/radeon_ring.c 	if (!ring->ring)
ring              508 drivers/gpu/drm/radeon/radeon_ring.c 	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
ring              510 drivers/gpu/drm/radeon/radeon_ring.c 		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
ring              516 drivers/gpu/drm/radeon/radeon_ring.c 		i = (i + 1) & ring->ptr_mask;
ring              543 drivers/gpu/drm/radeon/radeon_ring.c static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
ring              552 drivers/gpu/drm/radeon/radeon_ring.c 		if (&rdev->ring[ridx] != ring)
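
Note: the radeon_ring.c lines above rely on one invariant that makes ring indexing cheap: the ring size in dwords is a power of two, so ring->ptr_mask = (ring_size / 4) - 1 and every index is wrapped with a single AND, including the backup walk from the saved read pointer to wptr. Below is a minimal userspace sketch of that arithmetic; toy_ring and toy_used_dw are illustrative names, not kernel API, and the intermediate subtraction of the start pointer is not visible in the listing above only because that source line does not contain the indexed keyword.

	/* Power-of-two ring indexing: "index & ptr_mask" == "index % num_dw". */
	#include <stdint.h>
	#include <stdio.h>

	struct toy_ring {
		uint32_t num_dw;   /* ring_size / 4, must be a power of two */
		uint32_t ptr_mask; /* num_dw - 1 */
		uint32_t rptr;     /* consumer position, in dwords */
		uint32_t wptr;     /* producer position, in dwords */
	};

	/* Dwords pending between rptr and wptr, mirroring the masked
	 * "size = wptr + num_dw - rptr" computation in radeon_ring_backup(). */
	static uint32_t toy_used_dw(const struct toy_ring *r)
	{
		return (r->wptr + r->num_dw - r->rptr) & r->ptr_mask;
	}

	int main(void)
	{
		struct toy_ring r = { .num_dw = 1024, .ptr_mask = 1023,
				      .rptr = 1000, .wptr = 24 };
		/* wptr has wrapped past the end: 24 + 1024 - 1000 = 48 pending */
		printf("pending dwords: %u\n", toy_used_dw(&r));
		return 0;
	}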
ring              301 drivers/gpu/drm/radeon/radeon_sa.c 		++tries[best_bo->fence->ring];
ring              390 drivers/gpu/drm/radeon/radeon_sa.c 			      &sa_manager->flist[fence->ring]);
ring              418 drivers/gpu/drm/radeon/radeon_sa.c 				   i->fence->seq, i->fence->ring);
ring               61 drivers/gpu/drm/radeon/radeon_semaphore.c 	struct radeon_ring *ring = &rdev->ring[ridx];
ring               65 drivers/gpu/drm/radeon/radeon_semaphore.c 	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
ring               69 drivers/gpu/drm/radeon/radeon_semaphore.c 		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
ring               78 drivers/gpu/drm/radeon/radeon_semaphore.c 	struct radeon_ring *ring = &rdev->ring[ridx];
ring               82 drivers/gpu/drm/radeon/radeon_semaphore.c 	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
ring               86 drivers/gpu/drm/radeon/radeon_semaphore.c 		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
ring               70 drivers/gpu/drm/radeon/radeon_sync.c 	other = sync->sync_to[fence->ring];
ring               71 drivers/gpu/drm/radeon/radeon_sync.c 	sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
ring              138 drivers/gpu/drm/radeon/radeon_sync.c 		      int ring)
ring              148 drivers/gpu/drm/radeon/radeon_sync.c 		if (!radeon_fence_need_sync(fence, ring))
ring              152 drivers/gpu/drm/radeon/radeon_sync.c 		if (!rdev->ring[i].ready) {
ring              171 drivers/gpu/drm/radeon/radeon_sync.c 		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
ring              178 drivers/gpu/drm/radeon/radeon_sync.c 			radeon_ring_undo(&rdev->ring[i]);
ring              186 drivers/gpu/drm/radeon/radeon_sync.c 		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
ring              188 drivers/gpu/drm/radeon/radeon_sync.c 			radeon_ring_undo(&rdev->ring[i]);
ring              195 drivers/gpu/drm/radeon/radeon_sync.c 		radeon_ring_commit(rdev, &rdev->ring[i], false);
ring              196 drivers/gpu/drm/radeon/radeon_sync.c 		radeon_fence_note_sync(fence, ring);
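
Note: the radeon_sync.c lines above collapse an arbitrary number of fences into at most one per ring: sync_to[fence->ring] keeps only the later of the stored fence and the incoming one. A toy version follows, assuming fences on a single ring are ordered by sequence number; the real radeon_fence_later() also handles NULL entries and more bookkeeping, so treat this as the shape of the idea only.

	/* Keep only the latest fence per ring. toy_fence/toy_later are
	 * illustrative names, not the kernel's API. */
	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	#define NUM_RINGS 8

	struct toy_fence {
		unsigned ring; /* ring the fence will signal on */
		uint64_t seq;  /* monotonically increasing per ring */
	};

	static struct toy_fence *toy_later(struct toy_fence *a, struct toy_fence *b)
	{
		if (!a) return b;
		if (!b) return a;
		return a->seq > b->seq ? a : b; /* valid for fences on one ring */
	}

	int main(void)
	{
		struct toy_fence *sync_to[NUM_RINGS] = { NULL };
		struct toy_fence f1 = { .ring = 3, .seq = 10 };
		struct toy_fence f2 = { .ring = 3, .seq = 7 };

		sync_to[f1.ring] = toy_later(&f1, sync_to[f1.ring]);
		sync_to[f2.ring] = toy_later(&f2, sync_to[f2.ring]); /* keeps 10 */
		printf("ring 3 waits for seq %llu\n",
		       (unsigned long long)sync_to[3]->seq);
		return 0;
	}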
ring               41 drivers/gpu/drm/radeon/radeon_test.c 	int i, r, ring;
ring               45 drivers/gpu/drm/radeon/radeon_test.c 		ring = radeon_copy_dma_ring_index(rdev);
ring               48 drivers/gpu/drm/radeon/radeon_test.c 		ring = radeon_copy_blit_ring_index(rdev);
ring              120 drivers/gpu/drm/radeon/radeon_test.c 		if (ring == R600_RING_TYPE_DMA_INDEX)
ring              171 drivers/gpu/drm/radeon/radeon_test.c 		if (ring == R600_RING_TYPE_DMA_INDEX)
ring              263 drivers/gpu/drm/radeon/radeon_test.c 					     struct radeon_ring *ring,
ring              266 drivers/gpu/drm/radeon/radeon_test.c 	uint32_t handle = ring->idx ^ 0xdeafbeef;
ring              269 drivers/gpu/drm/radeon/radeon_test.c 	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
ring              270 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
ring              276 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
ring              282 drivers/gpu/drm/radeon/radeon_test.c 	} else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
ring              283 drivers/gpu/drm/radeon/radeon_test.c 		   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
ring              284 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
ring              290 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
ring              297 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_ring_lock(rdev, ring, 64);
ring              299 drivers/gpu/drm/radeon/radeon_test.c 			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
ring              302 drivers/gpu/drm/radeon/radeon_test.c 		r = radeon_fence_emit(rdev, fence, ring->idx);
ring              305 drivers/gpu/drm/radeon/radeon_test.c 			radeon_ring_unlock_undo(rdev, ring);
ring              308 drivers/gpu/drm/radeon/radeon_test.c 		radeon_ring_unlock_commit(rdev, ring, false);
ring              531 drivers/gpu/drm/radeon/radeon_test.c 		struct radeon_ring *ringA = &rdev->ring[i];
ring              536 drivers/gpu/drm/radeon/radeon_test.c 			struct radeon_ring *ringB = &rdev->ring[j];
ring              550 drivers/gpu/drm/radeon/radeon_test.c 				struct radeon_ring *ringC = &rdev->ring[k];
ring               34 drivers/gpu/drm/radeon/radeon_trace.h 			     __field(u32, ring)
ring               40 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = p->ring;
ring               43 drivers/gpu/drm/radeon/radeon_trace.h 				p->rdev, p->ring);
ring               46 drivers/gpu/drm/radeon/radeon_trace.h 		      __entry->ring, __entry->dw,
ring               51 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(unsigned vmid, int ring),
ring               52 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(vmid, ring),
ring               55 drivers/gpu/drm/radeon/radeon_trace.h 			     __field(u32, ring)
ring               60 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = ring;
ring               62 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
ring              108 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
ring              109 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(pd_addr, ring, id),
ring              112 drivers/gpu/drm/radeon/radeon_trace.h 			     __field(u32, ring)
ring              118 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = ring;
ring              122 drivers/gpu/drm/radeon/radeon_trace.h 		      __entry->pd_addr, __entry->ring, __entry->id)
ring              127 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
ring              129 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(dev, ring, seqno),
ring              133 drivers/gpu/drm/radeon/radeon_trace.h 			     __field(int, ring)
ring              139 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = ring;
ring              144 drivers/gpu/drm/radeon/radeon_trace.h 		      __entry->dev, __entry->ring, __entry->seqno)
ring              149 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
ring              151 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(dev, ring, seqno)
ring              156 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
ring              158 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(dev, ring, seqno)
ring              163 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
ring              165 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(dev, ring, seqno)
ring              170 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(int ring, struct radeon_semaphore *sem),
ring              172 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(ring, sem),
ring              175 drivers/gpu/drm/radeon/radeon_trace.h 			     __field(int, ring)
ring              181 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->ring = ring;
ring              186 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
ring              192 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(int ring, struct radeon_semaphore *sem),
ring              194 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(ring, sem)
ring              199 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_PROTO(int ring, struct radeon_semaphore *sem),
ring              201 drivers/gpu/drm/radeon/radeon_trace.h 	    TP_ARGS(ring, sem)
ring              146 drivers/gpu/drm/radeon/radeon_ttm.c 		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
ring              239 drivers/gpu/drm/radeon/radeon_ttm.c 	if (!rdev->ring[ridx].ready) {
ring              370 drivers/gpu/drm/radeon/radeon_ttm.c 	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
ring              244 drivers/gpu/drm/radeon/radeon_uvd.c 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
ring              741 drivers/gpu/drm/radeon/radeon_uvd.c 			       int ring, uint64_t addr,
ring              747 drivers/gpu/drm/radeon/radeon_uvd.c 	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
ring              777 drivers/gpu/drm/radeon/radeon_uvd.c int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
ring              808 drivers/gpu/drm/radeon/radeon_uvd.c 	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
ring              813 drivers/gpu/drm/radeon/radeon_uvd.c int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
ring              837 drivers/gpu/drm/radeon/radeon_uvd.c 	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
ring              346 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
ring              354 drivers/gpu/drm/radeon/radeon_vce.c 	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
ring              413 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
ring              421 drivers/gpu/drm/radeon/radeon_vce.c 	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
ring              696 drivers/gpu/drm/radeon/radeon_vce.c 			       struct radeon_ring *ring,
ring              702 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
ring              703 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
ring              704 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
ring              705 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
ring              707 drivers/gpu/drm/radeon/radeon_vce.c 		radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
ring              721 drivers/gpu/drm/radeon/radeon_vce.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              722 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
ring              723 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
ring              724 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
ring              725 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
ring              738 drivers/gpu/drm/radeon/radeon_vce.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring              739 drivers/gpu/drm/radeon/radeon_vce.c 	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
ring              741 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
ring              742 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(addr));
ring              743 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
ring              744 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(fence->seq));
ring              745 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
ring              746 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
ring              756 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              758 drivers/gpu/drm/radeon/radeon_vce.c 	uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
ring              762 drivers/gpu/drm/radeon/radeon_vce.c 	r = radeon_ring_lock(rdev, ring, 16);
ring              765 drivers/gpu/drm/radeon/radeon_vce.c 			  ring->idx, r);
ring              768 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
ring              769 drivers/gpu/drm/radeon/radeon_vce.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              772 drivers/gpu/drm/radeon/radeon_vce.c 		if (vce_v1_0_get_rptr(rdev, ring) != rptr)
ring              779 drivers/gpu/drm/radeon/radeon_vce.c 			 ring->idx, i);
ring              782 drivers/gpu/drm/radeon/radeon_vce.c 			 ring->idx);
ring              796 drivers/gpu/drm/radeon/radeon_vce.c int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              801 drivers/gpu/drm/radeon/radeon_vce.c 	r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
ring              807 drivers/gpu/drm/radeon/radeon_vce.c 	r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
ring              821 drivers/gpu/drm/radeon/radeon_vce.c 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
ring              178 drivers/gpu/drm/radeon/radeon_vm.c 				       struct radeon_vm *vm, int ring)
ring              181 drivers/gpu/drm/radeon/radeon_vm.c 	struct radeon_vm_id *vm_id = &vm->ids[ring];
ring              201 drivers/gpu/drm/radeon/radeon_vm.c 			trace_radeon_vm_grab_id(i, ring);
ring              205 drivers/gpu/drm/radeon/radeon_vm.c 		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
ring              206 drivers/gpu/drm/radeon/radeon_vm.c 			best[fence->ring] = fence;
ring              207 drivers/gpu/drm/radeon/radeon_vm.c 			choices[fence->ring == ring ? 0 : 1] = i;
ring              214 drivers/gpu/drm/radeon/radeon_vm.c 			trace_radeon_vm_grab_id(choices[i], ring);
ring              238 drivers/gpu/drm/radeon/radeon_vm.c 		     int ring, struct radeon_fence *updates)
ring              241 drivers/gpu/drm/radeon/radeon_vm.c 	struct radeon_vm_id *vm_id = &vm->ids[ring];
ring              246 drivers/gpu/drm/radeon/radeon_vm.c 		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
ring              250 drivers/gpu/drm/radeon/radeon_vm.c 		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
ring              272 drivers/gpu/drm/radeon/radeon_vm.c 	unsigned vm_id = vm->ids[fence->ring].id;
ring              277 drivers/gpu/drm/radeon/radeon_vm.c 	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
ring              278 drivers/gpu/drm/radeon/radeon_vm.c 	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
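
Note: the radeon_vm.c lines above show the VM id selection policy in radeon_vm_grab_id(): an unused id is taken directly; otherwise an id whose last use was on the requesting ring is preferred (no cross-ring wait needed), falling back to the id with the oldest last-use fence. A sketch of that policy, with plain sequence numbers standing in for radeon_fence and radeon_fence_is_earlier():

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_IDS 4

	struct toy_vm_id {
		int      last_ring; /* ring of the last-use fence, -1 if unused */
		uint64_t last_seq;  /* sequence of the last-use fence */
	};

	static int toy_grab_id(const struct toy_vm_id *ids, int ring)
	{
		int choices[2] = { -1, -1 };            /* [0] same ring, [1] any */
		uint64_t best[2] = { UINT64_MAX, UINT64_MAX };
		int i;

		for (i = 0; i < NUM_IDS; i++) {
			int slot;

			if (ids[i].last_ring < 0)
				return i;               /* unused id: take it */
			slot = (ids[i].last_ring == ring) ? 0 : 1;
			if (ids[i].last_seq < best[slot]) {
				best[slot] = ids[i].last_seq;
				choices[slot] = i;
			}
		}
		return choices[0] >= 0 ? choices[0] : choices[1];
	}

	int main(void)
	{
		struct toy_vm_id ids[NUM_IDS] = {
			{ 0, 100 }, { 1, 5 }, { 0, 7 }, { 1, 50 },
		};
		/* ring 0: id 2 has the oldest same-ring fence (seq 7) */
		printf("grabbed id %d\n", toy_grab_id(ids, 0));
		return 0;
	}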
ring               67 drivers/gpu/drm/radeon/rv515.c void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
ring               71 drivers/gpu/drm/radeon/rv515.c 	r = radeon_ring_lock(rdev, ring, 64);
ring               75 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
ring               76 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring,
ring               81 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
ring               82 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
ring               83 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
ring               84 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
ring               85 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
ring               86 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, 0);
ring               87 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
ring               88 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, 0);
ring               89 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
ring               90 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
ring               91 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
ring               92 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, 0);
ring               93 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
ring               94 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
ring               95 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
ring               96 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
ring               97 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
ring               98 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
ring               99 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
ring              100 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, 0);
ring              101 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
ring              102 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
ring              103 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
ring              104 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
ring              105 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
ring              106 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring,
ring              115 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
ring              116 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring,
ring              124 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
ring              125 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
ring              126 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
ring              127 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
ring              128 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
ring              129 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
ring              130 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, PACKET0(0x20C8, 0));
ring              131 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_write(ring, 0);
ring              132 drivers/gpu/drm/radeon/rv515.c 	radeon_ring_unlock_commit(rdev, ring, false);
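
Note: rv515_ring_start() above is a stream of PACKET0 register writes: one header dword naming a register, followed by its payload dword(s). The type-0 header layout sketched below (packet type in bits 31:30, payload-dwords-minus-one in 29:16, register dword offset in 15:0) follows the commonly documented Radeon PM4 format; treat the exact field widths as an assumption rather than a verbatim quote of the kernel's PACKET0 macro.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t toy_packet0(uint32_t reg_byte_offset, uint32_t extra_dwords)
	{
		return (0u << 30)                          /* packet type 0 */
		     | ((extra_dwords & 0x3FFF) << 16)     /* payload dwords - 1 */
		     | ((reg_byte_offset >> 2) & 0xFFFF);  /* register, dword units */
	}

	int main(void)
	{
		/* One payload dword to the register at byte offset 0x20C8,
		 * matching the "PACKET0(0x20C8, 0)" write in rv515_ring_start(). */
		printf("header: 0x%08x\n", toy_packet0(0x20C8, 0));
		return 0;
	}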
ring             1089 drivers/gpu/drm/radeon/rv770.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             1132 drivers/gpu/drm/radeon/rv770.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             1134 drivers/gpu/drm/radeon/rv770.c 	radeon_ring_fini(rdev, ring);
ring             1135 drivers/gpu/drm/radeon/rv770.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             1709 drivers/gpu/drm/radeon/rv770.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             1710 drivers/gpu/drm/radeon/rv770.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             1733 drivers/gpu/drm/radeon/rv770.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             1738 drivers/gpu/drm/radeon/rv770.c 	struct radeon_ring *ring;
ring             1741 drivers/gpu/drm/radeon/rv770.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             1744 drivers/gpu/drm/radeon/rv770.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             1745 drivers/gpu/drm/radeon/rv770.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             1759 drivers/gpu/drm/radeon/rv770.c 	struct radeon_ring *ring;
ring             1816 drivers/gpu/drm/radeon/rv770.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             1817 drivers/gpu/drm/radeon/rv770.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             1822 drivers/gpu/drm/radeon/rv770.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             1823 drivers/gpu/drm/radeon/rv770.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
ring             1971 drivers/gpu/drm/radeon/rv770.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
ring             1972 drivers/gpu/drm/radeon/rv770.c 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
ring             1974 drivers/gpu/drm/radeon/rv770.c 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
ring             1975 drivers/gpu/drm/radeon/rv770.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
ring               50 drivers/gpu/drm/radeon/rv770_dma.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring               59 drivers/gpu/drm/radeon/rv770_dma.c 	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
ring               67 drivers/gpu/drm/radeon/rv770_dma.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring               74 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
ring               75 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_write(ring, dst_offset & 0xfffffffc);
ring               76 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_write(ring, src_offset & 0xfffffffc);
ring               77 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
ring               78 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
ring               83 drivers/gpu/drm/radeon/rv770_dma.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring               85 drivers/gpu/drm/radeon/rv770_dma.c 		radeon_ring_unlock_undo(rdev, ring);
ring               90 drivers/gpu/drm/radeon/rv770_dma.c 	radeon_ring_unlock_commit(rdev, ring, false);
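
Note: the "num_loops * 5 + 8" reservation in rv770_dma.c above sizes the ring for one 5-dword COPY packet per chunk (header plus low/high halves of both addresses) plus fixed overhead for sync and fence emission. The chunk cap used below, 0xFFFF dwords per packet, is an assumption based on a 16-bit size field in the DMA packet; the rest is plain round-up division.

	#include <stdio.h>

	#define MAX_DW_PER_PACKET 0xFFFFul /* assumed 16-bit size field */
	#define DW_PER_COPY_PACKET 5       /* header + dst lo/hi + src lo/hi */

	int main(void)
	{
		unsigned long size_in_dw = 1ul << 20; /* a 4 MiB copy, in dwords */
		unsigned long num_loops =
			(size_in_dw + MAX_DW_PER_PACKET - 1) / MAX_DW_PER_PACKET;

		printf("packets: %lu, ring dwords to reserve: %lu\n",
		       num_loops, num_loops * DW_PER_COPY_PACKET + 8);
		return 0;
	}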
ring             3377 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring             3378 drivers/gpu/drm/radeon/si.c 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
ring             3381 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             3382 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
ring             3383 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3384 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             3385 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
ring             3389 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0xFFFFFFFF);
ring             3390 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3391 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 10); /* poll interval */
ring             3393 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
ring             3394 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
ring             3395 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring             3396 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
ring             3397 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, fence->seq);
ring             3398 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3406 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring             3407 drivers/gpu/drm/radeon/si.c 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
ring             3412 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
ring             3413 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, 0);
ring             3418 drivers/gpu/drm/radeon/si.c 		if (ring->rptr_save_reg) {
ring             3419 drivers/gpu/drm/radeon/si.c 			next_rptr = ring->wptr + 3 + 4 + 8;
ring             3420 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             3421 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, ((ring->rptr_save_reg -
ring             3423 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, next_rptr);
ring             3425 drivers/gpu/drm/radeon/si.c 			next_rptr = ring->wptr + 5 + 4 + 8;
ring             3426 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             3427 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, (1 << 8));
ring             3428 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
ring             3429 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
ring             3430 drivers/gpu/drm/radeon/si.c 			radeon_ring_write(ring, next_rptr);
ring             3436 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, header);
ring             3437 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring,
ring             3442 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
ring             3443 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
ring             3447 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
ring             3448 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
ring             3449 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, vm_id);
ring             3450 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
ring             3451 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
ring             3455 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, 0xFFFFFFFF);
ring             3456 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, 0);
ring             3457 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, 10); /* poll interval */
ring             3473 drivers/gpu/drm/radeon/si.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             3474 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             3475 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             3563 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3566 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_lock(rdev, ring, 7 + 4);
ring             3572 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
ring             3573 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x1);
ring             3574 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x0);
ring             3575 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
ring             3576 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
ring             3577 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3578 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3581 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
ring             3582 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
ring             3583 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0xc000);
ring             3584 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0xe000);
ring             3585 drivers/gpu/drm/radeon/si.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             3589 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
ring             3596 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3597 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
ring             3600 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, si_default_state[i]);
ring             3602 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
ring             3603 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
ring             3606 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
ring             3607 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             3609 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
ring             3610 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x00000316);
ring             3611 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
ring             3612 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
ring             3614 drivers/gpu/drm/radeon/si.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring             3617 drivers/gpu/drm/radeon/si.c 		ring = &rdev->ring[i];
ring             3618 drivers/gpu/drm/radeon/si.c 		r = radeon_ring_lock(rdev, ring, 2);
ring             3621 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
ring             3622 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring, 0);
ring             3624 drivers/gpu/drm/radeon/si.c 		radeon_ring_unlock_commit(rdev, ring, false);
ring             3632 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring;
ring             3635 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3636 drivers/gpu/drm/radeon/si.c 	radeon_ring_fini(rdev, ring);
ring             3637 drivers/gpu/drm/radeon/si.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             3639 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             3640 drivers/gpu/drm/radeon/si.c 	radeon_ring_fini(rdev, ring);
ring             3641 drivers/gpu/drm/radeon/si.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             3643 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             3644 drivers/gpu/drm/radeon/si.c 	radeon_ring_fini(rdev, ring);
ring             3645 drivers/gpu/drm/radeon/si.c 	radeon_scratch_free(rdev, ring->rptr_save_reg);
ring             3650 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring;
ring             3668 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             3669 drivers/gpu/drm/radeon/si.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             3678 drivers/gpu/drm/radeon/si.c 	ring->wptr = 0;
ring             3679 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB0_WPTR, ring->wptr);
ring             3695 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
ring             3699 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             3700 drivers/gpu/drm/radeon/si.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             3709 drivers/gpu/drm/radeon/si.c 	ring->wptr = 0;
ring             3710 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB1_WPTR, ring->wptr);
ring             3719 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
ring             3723 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             3724 drivers/gpu/drm/radeon/si.c 	rb_bufsz = order_base_2(ring->ring_size / 8);
ring             3733 drivers/gpu/drm/radeon/si.c 	ring->wptr = 0;
ring             3734 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB2_WPTR, ring->wptr);
ring             3743 drivers/gpu/drm/radeon/si.c 	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
ring             3747 drivers/gpu/drm/radeon/si.c 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
ring             3748 drivers/gpu/drm/radeon/si.c 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
ring             3749 drivers/gpu/drm/radeon/si.c 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
ring             3750 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
ring             3752 drivers/gpu/drm/radeon/si.c 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
ring             3753 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             3754 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             3757 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
ring             3759 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
ring             3761 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
ring             3763 drivers/gpu/drm/radeon/si.c 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
ring             4127 drivers/gpu/drm/radeon/si.c bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring             4134 drivers/gpu/drm/radeon/si.c 		radeon_ring_lockup_update(rdev, ring);
ring             4137 drivers/gpu/drm/radeon/si.c 	return radeon_ring_test_lockup(rdev, ring);
ring             4760 drivers/gpu/drm/radeon/si.c 				switch (ib->ring) {
ring             4769 drivers/gpu/drm/radeon/si.c 					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
ring             5076 drivers/gpu/drm/radeon/si.c void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring             5080 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5081 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             5085 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring,
ring             5088 drivers/gpu/drm/radeon/si.c 		radeon_ring_write(ring,
ring             5091 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             5092 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, pd_addr >> 12);
ring             5095 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5096 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             5098 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
ring             5099 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             5100 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x1);
ring             5103 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
ring             5104 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
ring             5106 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring             5107 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             5108 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 1 << vm_id);
ring             5111 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
ring             5112 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
ring             5114 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
ring             5115 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0);
ring             5116 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0); /* ref */
ring             5117 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0); /* mask */
ring             5118 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x20); /* poll interval */
ring             5121 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
ring             5122 drivers/gpu/drm/radeon/si.c 	radeon_ring_write(ring, 0x0);
ring             6282 drivers/gpu/drm/radeon/si.c 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
ring             6283 drivers/gpu/drm/radeon/si.c 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring             6284 drivers/gpu/drm/radeon/si.c 		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
ring             6478 drivers/gpu/drm/radeon/si.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
ring             6479 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
ring             6502 drivers/gpu/drm/radeon/si.c 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
ring             6507 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring;
ring             6510 drivers/gpu/drm/radeon/si.c 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
ring             6513 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring             6514 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
ring             6545 drivers/gpu/drm/radeon/si.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
ring             6546 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
ring             6547 drivers/gpu/drm/radeon/si.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
ring             6548 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
ring             6581 drivers/gpu/drm/radeon/si.c 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
ring             6582 drivers/gpu/drm/radeon/si.c 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
ring             6587 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring;
ring             6590 drivers/gpu/drm/radeon/si.c 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
ring             6593 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring             6594 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
ring             6599 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring             6600 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
ring             6614 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring;
ring             6708 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             6709 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
ring             6714 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             6715 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
ring             6720 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             6721 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
ring             6726 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             6727 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
ring             6732 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             6733 drivers/gpu/drm/radeon/si.c 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
ring             6829 drivers/gpu/drm/radeon/si.c 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             6890 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring             6891 drivers/gpu/drm/radeon/si.c 	ring->ring_obj = NULL;
ring             6892 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             6894 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring             6895 drivers/gpu/drm/radeon/si.c 	ring->ring_obj = NULL;
ring             6896 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             6898 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring             6899 drivers/gpu/drm/radeon/si.c 	ring->ring_obj = NULL;
ring             6900 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, ring, 1024 * 1024);
ring             6902 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
ring             6903 drivers/gpu/drm/radeon/si.c 	ring->ring_obj = NULL;
ring             6904 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, ring, 64 * 1024);
ring             6906 drivers/gpu/drm/radeon/si.c 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
ring             6907 drivers/gpu/drm/radeon/si.c 	ring->ring_obj = NULL;
ring             6908 drivers/gpu/drm/radeon/si.c 	r600_ring_init(rdev, ring, 64 * 1024);
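
Note: the CP ring bring-up in si.c above computes rb_bufsz = order_base_2(ring->ring_size / 8) before programming the ring-buffer control registers. order_base_2(n) is the kernel helper from include/linux/log2.h returning the smallest order with (1 << order) >= n; a freestanding equivalent for illustration:

	#include <stdio.h>

	static unsigned int toy_order_base_2(unsigned long n)
	{
		unsigned int order = 0;

		while ((1ul << order) < n)
			order++;
		return order; /* smallest order with (1 << order) >= n */
	}

	int main(void)
	{
		/* A 1 MiB GFX ring: ring_size / 8 = 131072 = 2^17, so order 17. */
		printf("rb_bufsz = %u\n", toy_order_base_2((1024 * 1024) / 8));
		return 0;
	}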
ring               41 drivers/gpu/drm/radeon/si_dma.c bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
ring               46 drivers/gpu/drm/radeon/si_dma.c 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
ring               52 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_lockup_update(rdev, ring);
ring               55 drivers/gpu/drm/radeon/si_dma.c 	return radeon_ring_test_lockup(rdev, ring);
ring              187 drivers/gpu/drm/radeon/si_dma.c void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
ring              191 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
ring              193 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
ring              195 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
ring              197 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, pd_addr >> 12);
ring              200 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
ring              201 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
ring              202 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, 1);
ring              205 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
ring              206 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
ring              207 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, 1 << vm_id);
ring              210 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
ring              211 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
ring              212 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, 0xff << 16); /* retry */
ring              213 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, 1 << vm_id); /* mask */
ring              214 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, 0); /* value */
ring              215 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
ring              239 drivers/gpu/drm/radeon/si_dma.c 	struct radeon_ring *ring = &rdev->ring[ring_index];
ring              248 drivers/gpu/drm/radeon/si_dma.c 	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
ring              256 drivers/gpu/drm/radeon/si_dma.c 	radeon_sync_rings(rdev, &sync, ring->idx);
ring              263 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
ring              264 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, lower_32_bits(dst_offset));
ring              265 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, lower_32_bits(src_offset));
ring              266 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
ring              267 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
ring              272 drivers/gpu/drm/radeon/si_dma.c 	r = radeon_fence_emit(rdev, &fence, ring->idx);
ring              274 drivers/gpu/drm/radeon/si_dma.c 		radeon_ring_unlock_undo(rdev, ring);
ring              279 drivers/gpu/drm/radeon/si_dma.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring               40 drivers/gpu/drm/radeon/uvd_v1_0.c 			   struct radeon_ring *ring)
ring               54 drivers/gpu/drm/radeon/uvd_v1_0.c 			   struct radeon_ring *ring)
ring               68 drivers/gpu/drm/radeon/uvd_v1_0.c 		       struct radeon_ring *ring)
ring               70 drivers/gpu/drm/radeon/uvd_v1_0.c 	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
ring               84 drivers/gpu/drm/radeon/uvd_v1_0.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring               85 drivers/gpu/drm/radeon/uvd_v1_0.c 	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
ring               87 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
ring               88 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, addr & 0xffffffff);
ring               89 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
ring               90 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, fence->seq);
ring               91 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
ring               92 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0);
ring               94 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
ring               95 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0);
ring               96 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
ring               97 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0);
ring               98 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
ring               99 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 2);
ring              159 drivers/gpu/drm/radeon/uvd_v1_0.c 	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring              173 drivers/gpu/drm/radeon/uvd_v1_0.c 	ring->ready = true;
ring              174 drivers/gpu/drm/radeon/uvd_v1_0.c 	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
ring              176 drivers/gpu/drm/radeon/uvd_v1_0.c 		ring->ready = false;
ring              180 drivers/gpu/drm/radeon/uvd_v1_0.c 	r = radeon_ring_lock(rdev, ring, 10);
ring              187 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, tmp);
ring              188 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0xFFFFF);
ring              191 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, tmp);
ring              192 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0xFFFFF);
ring              195 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, tmp);
ring              196 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0xFFFFF);
ring              199 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
ring              200 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0x8);
ring              202 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
ring              203 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 3);
ring              205 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              250 drivers/gpu/drm/radeon/uvd_v1_0.c 	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring              253 drivers/gpu/drm/radeon/uvd_v1_0.c 	ring->ready = false;
ring              265 drivers/gpu/drm/radeon/uvd_v1_0.c 	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring              364 drivers/gpu/drm/radeon/uvd_v1_0.c 	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
ring              370 drivers/gpu/drm/radeon/uvd_v1_0.c 	ring->wptr = RREG32(UVD_RBC_RB_RPTR);
ring              371 drivers/gpu/drm/radeon/uvd_v1_0.c 	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
ring              374 drivers/gpu/drm/radeon/uvd_v1_0.c 	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
ring              377 drivers/gpu/drm/radeon/uvd_v1_0.c 	rb_bufsz = order_base_2(ring->ring_size);
ring              421 drivers/gpu/drm/radeon/uvd_v1_0.c int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              428 drivers/gpu/drm/radeon/uvd_v1_0.c 	r = radeon_ring_lock(rdev, ring, 3);
ring              431 drivers/gpu/drm/radeon/uvd_v1_0.c 			  ring->idx, r);
ring              434 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
ring              435 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, 0xDEADBEEF);
ring              436 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_unlock_commit(rdev, ring, false);
ring              446 drivers/gpu/drm/radeon/uvd_v1_0.c 			 ring->idx, i);
ring              449 drivers/gpu/drm/radeon/uvd_v1_0.c 			  ring->idx, tmp);
ring              466 drivers/gpu/drm/radeon/uvd_v1_0.c 			     struct radeon_ring *ring,
ring              484 drivers/gpu/drm/radeon/uvd_v1_0.c 	struct radeon_ring *ring = &rdev->ring[ib->ring];
ring              486 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
ring              487 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, ib->gpu_addr);
ring              488 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
ring              489 drivers/gpu/drm/radeon/uvd_v1_0.c 	radeon_ring_write(ring, ib->length_dw);
ring              500 drivers/gpu/drm/radeon/uvd_v1_0.c int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ring              514 drivers/gpu/drm/radeon/uvd_v1_0.c 	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
ring              520 drivers/gpu/drm/radeon/uvd_v1_0.c 	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
ring              537 drivers/gpu/drm/radeon/uvd_v1_0.c 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
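
Note: uvd_v1_0_ring_test() above follows the standard scratch-register pattern: push a magic value (0xDEADBEEF) through the ring into a readable register, then poll it with a bounded retry loop. The sketch below reproduces only the control flow; a plain variable stands in for RREG32(UVD_CONTEXT_ID), and the store happens directly instead of via the hardware.

	#include <stdint.h>
	#include <stdio.h>

	static volatile uint32_t fake_context_id_reg; /* stands in for the MMIO reg */

	static int toy_ring_test(unsigned timeout_iters)
	{
		unsigned i;

		fake_context_id_reg = 0xDEADBEEF; /* normally lands via the ring */
		for (i = 0; i < timeout_iters; i++) {
			if (fake_context_id_reg == 0xDEADBEEF)
				return 0;  /* value came back: ring is alive */
			/* real code: udelay(1); */
		}
		return -1; /* timed out: the ring never processed the write */
	}

	int main(void)
	{
		printf("ring test %s\n",
		       toy_ring_test(1000) == 0 ? "succeeded" : "failed");
		return 0;
	}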
ring               42 drivers/gpu/drm/radeon/uvd_v2_2.c 	struct radeon_ring *ring = &rdev->ring[fence->ring];
ring               43 drivers/gpu/drm/radeon/uvd_v2_2.c 	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
ring               45 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
ring               46 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, fence->seq);
ring               47 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
ring               48 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, lower_32_bits(addr));
ring               49 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
ring               50 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
ring               51 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
ring               52 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, 0);
ring               54 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
ring               55 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, 0);
ring               56 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
ring               57 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, 0);
ring               58 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
ring               59 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, 2);
ring               73 drivers/gpu/drm/radeon/uvd_v2_2.c 			     struct radeon_ring *ring,
ring               79 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
ring               80 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
ring               82 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
ring               83 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
ring               85 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
ring               86 drivers/gpu/drm/radeon/uvd_v2_2.c 	radeon_ring_write(ring, emit_wait ? 1 : 0);
ring               40 drivers/gpu/drm/radeon/uvd_v3_1.c 			     struct radeon_ring *ring,
ring               46 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
ring               47 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
ring               49 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
ring               50 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
ring               52 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
ring               53 drivers/gpu/drm/radeon/uvd_v3_1.c 	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
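Both semaphore emitters above (uvd_v2_2.c and uvd_v3_1.c) pack one 8-byte-aligned GPU address into two 20-bit register fields: the ADDR_LOW write carries address bits [22:3] and the ADDR_HIGH write bits [42:23], so the pair round-trips any aligned address below 2^43. A quick standalone check of that packing:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x123456788ULL;                /* 8-byte aligned */
        uint32_t lo = (addr >> 3)  & 0x000FFFFF;       /* bits [22:3]  */
        uint32_t hi = (addr >> 23) & 0x000FFFFF;       /* bits [42:23] */
        uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);

        assert(back == addr);                          /* lossless below 2^43 */
        printf("lo=0x%05x hi=0x%05x\n", lo, hi);
        return 0;
    }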
ring               60 drivers/gpu/drm/radeon/vce_v1_0.c 			   struct radeon_ring *ring)
ring               62 drivers/gpu/drm/radeon/vce_v1_0.c 	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
ring               77 drivers/gpu/drm/radeon/vce_v1_0.c 			   struct radeon_ring *ring)
ring               79 drivers/gpu/drm/radeon/vce_v1_0.c 	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
ring               94 drivers/gpu/drm/radeon/vce_v1_0.c 		       struct radeon_ring *ring)
ring               96 drivers/gpu/drm/radeon/vce_v1_0.c 	if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
ring               97 drivers/gpu/drm/radeon/vce_v1_0.c 		WREG32(VCE_RB_WPTR, ring->wptr);
ring               99 drivers/gpu/drm/radeon/vce_v1_0.c 		WREG32(VCE_RB_WPTR2, ring->wptr);
ring              291 drivers/gpu/drm/radeon/vce_v1_0.c 	struct radeon_ring *ring;
ring              297 drivers/gpu/drm/radeon/vce_v1_0.c 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring              298 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_RPTR, ring->wptr);
ring              299 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_WPTR, ring->wptr);
ring              300 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
ring              301 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
ring              302 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_SIZE, ring->ring_size / 4);
ring              304 drivers/gpu/drm/radeon/vce_v1_0.c 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring              305 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_RPTR2, ring->wptr);
ring              306 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_WPTR2, ring->wptr);
ring              307 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
ring              308 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
ring              309 drivers/gpu/drm/radeon/vce_v1_0.c 	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);
ring              358 drivers/gpu/drm/radeon/vce_v1_0.c 	struct radeon_ring *ring;
ring              365 drivers/gpu/drm/radeon/vce_v1_0.c 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring              366 drivers/gpu/drm/radeon/vce_v1_0.c 	ring->ready = true;
ring              367 drivers/gpu/drm/radeon/vce_v1_0.c 	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
ring              369 drivers/gpu/drm/radeon/vce_v1_0.c 		ring->ready = false;
ring              373 drivers/gpu/drm/radeon/vce_v1_0.c 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring              374 drivers/gpu/drm/radeon/vce_v1_0.c 	ring->ready = true;
ring              375 drivers/gpu/drm/radeon/vce_v1_0.c 	r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
ring              377 drivers/gpu/drm/radeon/vce_v1_0.c 		ring->ready = false;
ring              168 drivers/gpu/drm/via/via_dma.c 		if (dev_priv->ring.virtual_start) {
ring              171 drivers/gpu/drm/via/via_dma.c 			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
ring              172 drivers/gpu/drm/via/via_dma.c 			dev_priv->ring.virtual_start = NULL;
ring              189 drivers/gpu/drm/via/via_dma.c 	if (dev_priv->ring.virtual_start != NULL) {
ring              204 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.map.offset = dev->agp->base + init->offset;
ring              205 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.map.size = init->size;
ring              206 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.map.type = 0;
ring              207 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.map.flags = 0;
ring              208 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.map.mtrr = 0;
ring              210 drivers/gpu/drm/via/via_dma.c 	drm_legacy_ioremap(&dev_priv->ring.map, dev);
ring              212 drivers/gpu/drm/via/via_dma.c 	if (dev_priv->ring.map.handle == NULL) {
ring              219 drivers/gpu/drm/via/via_dma.c 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
ring              221 drivers/gpu/drm/via/via_dma.c 	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
ring              256 drivers/gpu/drm/via/via_dma.c 		retcode = (dev_priv->ring.virtual_start != NULL) ?
ring              275 drivers/gpu/drm/via/via_dma.c 	if (dev_priv->ring.virtual_start == NULL) {
ring              692 drivers/gpu/drm/via/via_dma.c 	if (dev_priv->ring.virtual_start == NULL) {
ring               84 drivers/gpu/drm/via/via_drv.h 	drm_via_ring_buffer_t ring;
ring               94 drivers/gpu/drm/xen/xen_drm_front.c 	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
ring               95 drivers/gpu/drm/xen/xen_drm_front.c 			       evtchnl->u.req.ring.req_prod_pvt);
ring               37 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	rp = evtchnl->u.req.ring.sring->rsp_prod;
ring               41 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
ring               42 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
ring               64 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	evtchnl->u.req.ring.rsp_cons = i;
ring               66 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	if (i != evtchnl->u.req.ring.req_prod_pvt) {
ring               69 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
ring               74 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		evtchnl->u.req.ring.sring->rsp_event = i + 1;
ring              129 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		page = (unsigned long)evtchnl->u.req.ring.sring;
ring              186 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 		FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
ring              190 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 			evtchnl->u.req.ring.sring = NULL;
ring              350 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	evtchnl->u.req.ring.req_prod_pvt++;
ring              351 drivers/gpu/drm/xen/xen_drm_front_evtchnl.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
ring               52 drivers/gpu/drm/xen/xen_drm_front_evtchnl.h 			struct xen_displif_front_ring ring;
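The xen_drm_front lines above are the standard Xen split-ring front end: requests go in at the private producer index (req_prod_pvt), are published with RING_PUSH_REQUESTS_AND_CHECK_NOTIFY, and responses are consumed from rsp_cons up to the backend's rsp_prod. A simplified single-process model of those indices (plain fields stand in for the shared sring and the real ring.h macros):

    #include <stdio.h>

    #define RING_SIZE 8u   /* shared rings are sized to a power of two */

    struct front_ring {
        unsigned req_prod_pvt;   /* next request slot, private to the front end */
        unsigned rsp_cons;       /* next response to consume */
        unsigned sring_rsp_prod; /* stand-in for sring->rsp_prod (backend-owned) */
        int req[RING_SIZE];
        int rsp[RING_SIZE];
    };

    int main(void)
    {
        struct front_ring r = {0};

        /* queue a request (RING_GET_REQUEST + req_prod_pvt++) */
        r.req[r.req_prod_pvt & (RING_SIZE - 1)] = 42;
        r.req_prod_pvt++;
        /* RING_PUSH_REQUESTS_AND_CHECK_NOTIFY would publish the index here */

        /* pretend the backend produced a response */
        r.rsp[r.sring_rsp_prod & (RING_SIZE - 1)] = 43;
        r.sring_rsp_prod++;

        /* consume responses, as the event-channel handler's loop does */
        for (unsigned i = r.rsp_cons; i != r.sring_rsp_prod; i++)
            printf("resp %d\n", r.rsp[i & (RING_SIZE - 1)]);
        r.rsp_cons = r.sring_rsp_prod;
        return 0;
    }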
ring             1434 drivers/hid/wacom_wac.c 	int ring = data[285] & 0x7F;
ring             1439 drivers/hid/wacom_wac.c 	ring = 71 - ring;
ring             1440 drivers/hid/wacom_wac.c 	ring += 3*72/16;
ring             1441 drivers/hid/wacom_wac.c 	if (ring > 71)
ring             1442 drivers/hid/wacom_wac.c 		ring -= 72;
ring             1447 drivers/hid/wacom_wac.c 	input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
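The wacom_wac.c lines above remap a raw 0..71 touch-ring position: the direction is reversed (71 - ring), the origin is rotated by 3*72/16 = 13 steps (216/16 truncates to 13 in integer math), and anything past 71 wraps back around. A worked check of that remapping:

    #include <stdio.h>

    int main(void)
    {
        for (int raw = 0; raw < 72; raw += 18) {
            int ring = 71 - raw;       /* reverse rotation direction */
            ring += 3 * 72 / 16;       /* rotate origin by 13 of 72 steps */
            if (ring > 71)
                ring -= 72;            /* wrap back into 0..71 */
            printf("raw=%2d -> ring=%2d\n", raw, ring);
        }
        return 0;
    }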
ring              824 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
ring              826 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	int ntu = ring->next_to_use;
ring              827 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	int ntc = ring->next_to_clean;
ring              828 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
ring              830 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	return ring->desc_num - used - 1;
ring              834 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				   struct hns_roce_v2_cmq_ring *ring)
ring              836 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
ring              838 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->desc = kzalloc(size, GFP_KERNEL);
ring              839 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	if (!ring->desc)
ring              842 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
ring              844 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
ring              845 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		ring->desc_dma_addr = 0;
ring              846 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		kfree(ring->desc);
ring              847 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		ring->desc = NULL;
ring              855 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				   struct hns_roce_v2_cmq_ring *ring)
ring              857 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
ring              858 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
ring              861 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->desc_dma_addr = 0;
ring              862 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	kfree(ring->desc);
ring              868 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
ring              871 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->flag = ring_type;
ring              872 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->next_to_clean = 0;
ring              873 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	ring->next_to_use = 0;
ring              875 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	return hns_roce_alloc_cmq_desc(hr_dev, ring);
ring              881 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
ring              883 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	dma_addr_t dma = ring->desc_dma_addr;
ring              890 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
ring              898 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
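hns_roce_cmq_space() above is the textbook occupancy formula for a ring that keeps one slot empty: with producer next_to_use (ntu) and consumer next_to_clean (ntc), used = (ntu - ntc + N) % N and free = N - used - 1, so full (free == 0) and empty (ntu == ntc) stay distinguishable. A standalone version:

    #include <stdio.h>

    /* Free slots in an n-entry ring; one slot is sacrificed so that
     * producer == consumer always means "empty", never "full". */
    static int ring_space(int ntu, int ntc, int n)
    {
        int used = (ntu - ntc + n) % n;

        return n - used - 1;
    }

    int main(void)
    {
        printf("%d\n", ring_space(0, 0, 8)); /* empty: 7 free */
        printf("%d\n", ring_space(7, 0, 8)); /* full:  0 free */
        printf("%d\n", ring_space(2, 6, 8)); /* wrapped producer: 3 free */
        return 0;
    }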
ring             1322 drivers/infiniband/hw/mlx4/mad.c 	sg_list.addr = tun_qp->ring[index].map;
ring             1331 drivers/infiniband/hw/mlx4/mad.c 	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
ring             1486 drivers/infiniband/hw/mlx4/mad.c 	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
ring             1512 drivers/infiniband/hw/mlx4/mad.c 	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
ring             1612 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
ring             1615 drivers/infiniband/hw/mlx4/mad.c 	if (!tun_qp->ring)
ring             1622 drivers/infiniband/hw/mlx4/mad.c 		kfree(tun_qp->ring);
ring             1623 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->ring = NULL;
ring             1636 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
ring             1637 drivers/infiniband/hw/mlx4/mad.c 		if (!tun_qp->ring[i].addr)
ring             1639 drivers/infiniband/hw/mlx4/mad.c 		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
ring             1640 drivers/infiniband/hw/mlx4/mad.c 							tun_qp->ring[i].addr,
ring             1643 drivers/infiniband/hw/mlx4/mad.c 		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
ring             1644 drivers/infiniband/hw/mlx4/mad.c 			kfree(tun_qp->ring[i].addr);
ring             1684 drivers/infiniband/hw/mlx4/mad.c 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
ring             1686 drivers/infiniband/hw/mlx4/mad.c 		kfree(tun_qp->ring[i].addr);
ring             1690 drivers/infiniband/hw/mlx4/mad.c 	kfree(tun_qp->ring);
ring             1691 drivers/infiniband/hw/mlx4/mad.c 	tun_qp->ring = NULL;
ring             1716 drivers/infiniband/hw/mlx4/mad.c 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
ring             1718 drivers/infiniband/hw/mlx4/mad.c 		kfree(tun_qp->ring[i].addr);
ring             1729 drivers/infiniband/hw/mlx4/mad.c 	kfree(tun_qp->ring);
ring             1922 drivers/infiniband/hw/mlx4/mad.c 						(sqp->ring[wc.wr_id &
ring             1925 drivers/infiniband/hw/mlx4/mad.c 						(sqp->ring[wc.wr_id &
ring              438 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct mlx4_ib_buf *ring;
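The mlx4 mad.c lines above follow the usual ring-of-buffers setup: allocate the ring array, then for each entry allocate a buffer and DMA-map it, and on any failure free exactly the entries completed so far before dropping the array. A minimal userspace sketch of that unwind shape (fake_map is a stand-in for ib_dma_map_single; all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define NBUF 4

    struct buf {
        void *addr;
        long  map;
    };

    static long fake_map(void *p)      /* stand-in for a DMA mapping call */
    {
        return (long)p;
    }

    static struct buf *alloc_ring(size_t bufsz)
    {
        struct buf *ring = calloc(NBUF, sizeof(*ring));
        int i;

        if (!ring)
            return NULL;
        for (i = 0; i < NBUF; i++) {
            ring[i].addr = malloc(bufsz);
            if (!ring[i].addr)
                goto err;
            ring[i].map = fake_map(ring[i].addr);
        }
        return ring;

    err:
        while (--i >= 0)               /* unwind only the completed entries */
            free(ring[i].addr);
        free(ring);
        return NULL;
    }

    int main(void)
    {
        struct buf *r = alloc_ring(256);

        printf("ring %s\n", r ? "allocated" : "failed");
        return 0;
    }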
ring              152 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct pvrdma_ring *ring;
ring              173 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct pvrdma_ring_state *ring;
ring              425 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
ring              439 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
ring              483 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
ring              501 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
ring              509 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
ring              526 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
ring              117 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	if (qp->rq.ring) {
ring              118 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->rq.ring->cons_head, 0);
ring              119 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->rq.ring->prod_tail, 0);
ring              121 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	if (qp->sq.ring) {
ring              122 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->sq.ring->cons_head, 0);
ring              123 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->sq.ring->prod_tail, 0);
ring              335 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 			qp->sq.ring = qp->pdir.pages[0];
ring              336 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
ring              643 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
ring              803 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
ring              869 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
ring              894 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
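The pvrdma queue-pair lines above keep each ring's state as a {cons_head, prod_tail} pair in the shared page: reset zeroes both, posting a work request advances prod_tail with a wrapping increment, and "has data" is simply the two indices differing. A simplified model (the real driver uses atomics, and pvrdma_idx_ring_inc wraps at twice the ring size to disambiguate full from empty):

    #include <stdio.h>

    struct ring_state {
        unsigned cons_head;
        unsigned prod_tail;
    };

    /* simple wrapping increment over an n-slot ring */
    static void idx_ring_inc(unsigned *idx, unsigned n)
    {
        *idx = (*idx + 1) % n;
    }

    static int ring_has_data(const struct ring_state *r)
    {
        return r->cons_head != r->prod_tail;
    }

    int main(void)
    {
        struct ring_state rq = {0, 0};          /* reset state */

        idx_ring_inc(&rq.prod_tail, 64);        /* post one receive WQE */
        printf("has data: %d\n", ring_has_data(&rq));
        return 0;
    }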
ring              720 drivers/infiniband/ulp/srpt/ib_srpt.c 	struct srpt_ioctx **ring;
ring              726 drivers/infiniband/ulp/srpt/ib_srpt.c 	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
ring              727 drivers/infiniband/ulp/srpt/ib_srpt.c 	if (!ring)
ring              730 drivers/infiniband/ulp/srpt/ib_srpt.c 		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
ring              731 drivers/infiniband/ulp/srpt/ib_srpt.c 		if (!ring[i])
ring              733 drivers/infiniband/ulp/srpt/ib_srpt.c 		ring[i]->index = i;
ring              734 drivers/infiniband/ulp/srpt/ib_srpt.c 		ring[i]->offset = alignment_offset;
ring              740 drivers/infiniband/ulp/srpt/ib_srpt.c 		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
ring              741 drivers/infiniband/ulp/srpt/ib_srpt.c 	kvfree(ring);
ring              742 drivers/infiniband/ulp/srpt/ib_srpt.c 	ring = NULL;
ring              744 drivers/infiniband/ulp/srpt/ib_srpt.c 	return ring;
ring              934 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring;
ring              941 drivers/mailbox/bcm-flexrm-mailbox.c 		ring = &mbox->rings[i];
ring              942 drivers/mailbox/bcm-flexrm-mailbox.c 		if (readl(ring->regs + RING_CONTROL) &
ring              949 drivers/mailbox/bcm-flexrm-mailbox.c 			   ring->num, state,
ring              950 drivers/mailbox/bcm-flexrm-mailbox.c 			   (unsigned long long)ring->bd_dma_base,
ring              952 drivers/mailbox/bcm-flexrm-mailbox.c 			   (unsigned long long)ring->cmpl_dma_base,
ring              962 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring;
ring              969 drivers/mailbox/bcm-flexrm-mailbox.c 		ring = &mbox->rings[i];
ring              970 drivers/mailbox/bcm-flexrm-mailbox.c 		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
ring              971 drivers/mailbox/bcm-flexrm-mailbox.c 		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
ring              974 drivers/mailbox/bcm-flexrm-mailbox.c 					ring->bd_dma_base);
ring              976 drivers/mailbox/bcm-flexrm-mailbox.c 			   ring->num,
ring              978 drivers/mailbox/bcm-flexrm-mailbox.c 			   (u32)ring->bd_write_offset,
ring              979 drivers/mailbox/bcm-flexrm-mailbox.c 			   (u32)ring->cmpl_read_offset,
ring              980 drivers/mailbox/bcm-flexrm-mailbox.c 			   (u32)atomic_read(&ring->msg_send_count),
ring              981 drivers/mailbox/bcm-flexrm-mailbox.c 			   (u32)atomic_read(&ring->msg_cmpl_count));
ring              985 drivers/mailbox/bcm-flexrm-mailbox.c static int flexrm_new_request(struct flexrm_ring *ring,
ring             1002 drivers/mailbox/bcm-flexrm-mailbox.c 	spin_lock_irqsave(&ring->lock, flags);
ring             1003 drivers/mailbox/bcm-flexrm-mailbox.c 	reqid = bitmap_find_free_region(ring->requests_bmap,
ring             1005 drivers/mailbox/bcm-flexrm-mailbox.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring             1008 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->requests[reqid] = msg;
ring             1011 drivers/mailbox/bcm-flexrm-mailbox.c 	ret = flexrm_dma_map(ring->mbox->dev, msg);
ring             1013 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->requests[reqid] = NULL;
ring             1014 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_lock_irqsave(&ring->lock, flags);
ring             1015 drivers/mailbox/bcm-flexrm-mailbox.c 		bitmap_release_region(ring->requests_bmap, reqid, 0);
ring             1016 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring             1021 drivers/mailbox/bcm-flexrm-mailbox.c 	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
ring             1022 drivers/mailbox/bcm-flexrm-mailbox.c 	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
ring             1024 drivers/mailbox/bcm-flexrm-mailbox.c 	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
ring             1035 drivers/mailbox/bcm-flexrm-mailbox.c 	write_offset = ring->bd_write_offset;
ring             1037 drivers/mailbox/bcm-flexrm-mailbox.c 		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
ring             1053 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->bd_base + ring->bd_write_offset,
ring             1054 drivers/mailbox/bcm-flexrm-mailbox.c 			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
ring             1055 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->bd_base, ring->bd_base + RING_BD_SIZE);
ring             1063 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
ring             1066 drivers/mailbox/bcm-flexrm-mailbox.c 	atomic_inc_return(&ring->msg_send_count);
ring             1074 drivers/mailbox/bcm-flexrm-mailbox.c 		flexrm_dma_unmap(ring->mbox->dev, msg);
ring             1075 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->requests[reqid] = NULL;
ring             1076 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_lock_irqsave(&ring->lock, flags);
ring             1077 drivers/mailbox/bcm-flexrm-mailbox.c 		bitmap_release_region(ring->requests_bmap, reqid, 0);
ring             1078 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring             1084 drivers/mailbox/bcm-flexrm-mailbox.c static int flexrm_process_completions(struct flexrm_ring *ring)
ring             1091 drivers/mailbox/bcm-flexrm-mailbox.c 	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
ring             1093 drivers/mailbox/bcm-flexrm-mailbox.c 	spin_lock_irqsave(&ring->lock, flags);
ring             1103 drivers/mailbox/bcm-flexrm-mailbox.c 	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
ring             1105 drivers/mailbox/bcm-flexrm-mailbox.c 	cmpl_read_offset = ring->cmpl_read_offset;
ring             1106 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->cmpl_read_offset = cmpl_write_offset;
ring             1108 drivers/mailbox/bcm-flexrm-mailbox.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring             1114 drivers/mailbox/bcm-flexrm-mailbox.c 		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
ring             1124 drivers/mailbox/bcm-flexrm-mailbox.c 			dev_warn(ring->mbox->dev,
ring             1126 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->num, (unsigned long)desc, err);
ring             1133 drivers/mailbox/bcm-flexrm-mailbox.c 		msg = ring->requests[reqid];
ring             1135 drivers/mailbox/bcm-flexrm-mailbox.c 			dev_warn(ring->mbox->dev,
ring             1137 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->num, (unsigned long)desc);
ring             1142 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->requests[reqid] = NULL;
ring             1143 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_lock_irqsave(&ring->lock, flags);
ring             1144 drivers/mailbox/bcm-flexrm-mailbox.c 		bitmap_release_region(ring->requests_bmap, reqid, 0);
ring             1145 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring             1148 drivers/mailbox/bcm-flexrm-mailbox.c 		flexrm_dma_unmap(ring->mbox->dev, msg);
ring             1155 drivers/mailbox/bcm-flexrm-mailbox.c 		atomic_inc_return(&ring->msg_cmpl_count);
ring             1206 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring = chan->con_priv;
ring             1212 drivers/mailbox/bcm-flexrm-mailbox.c 			rc = flexrm_new_request(ring, msg,
ring             1223 drivers/mailbox/bcm-flexrm-mailbox.c 	return flexrm_new_request(ring, NULL, data);
ring             1239 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring = chan->con_priv;
ring             1242 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
ring             1243 drivers/mailbox/bcm-flexrm-mailbox.c 				       GFP_KERNEL, &ring->bd_dma_base);
ring             1244 drivers/mailbox/bcm-flexrm-mailbox.c 	if (!ring->bd_base) {
ring             1245 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1247 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->num);
ring             1257 drivers/mailbox/bcm-flexrm-mailbox.c 		next_addr += ring->bd_dma_base;
ring             1263 drivers/mailbox/bcm-flexrm-mailbox.c 		flexrm_write_desc(ring->bd_base + off, d);
ring             1267 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
ring             1268 drivers/mailbox/bcm-flexrm-mailbox.c 					 GFP_KERNEL, &ring->cmpl_dma_base);
ring             1269 drivers/mailbox/bcm-flexrm-mailbox.c 	if (!ring->cmpl_base) {
ring             1270 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1272 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->num);
ring             1278 drivers/mailbox/bcm-flexrm-mailbox.c 	if (ring->irq == UINT_MAX) {
ring             1279 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1280 drivers/mailbox/bcm-flexrm-mailbox.c 			"ring%d IRQ not available\n", ring->num);
ring             1284 drivers/mailbox/bcm-flexrm-mailbox.c 	ret = request_threaded_irq(ring->irq,
ring             1287 drivers/mailbox/bcm-flexrm-mailbox.c 				   0, dev_name(ring->mbox->dev), ring);
ring             1289 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1290 drivers/mailbox/bcm-flexrm-mailbox.c 			"failed to request ring%d IRQ\n", ring->num);
ring             1293 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->irq_requested = true;
ring             1296 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->irq_aff_hint = CPU_MASK_NONE;
ring             1297 drivers/mailbox/bcm-flexrm-mailbox.c 	val = ring->mbox->num_rings;
ring             1299 drivers/mailbox/bcm-flexrm-mailbox.c 	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
ring             1300 drivers/mailbox/bcm-flexrm-mailbox.c 			&ring->irq_aff_hint);
ring             1301 drivers/mailbox/bcm-flexrm-mailbox.c 	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
ring             1303 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1305 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->num);
ring             1310 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(0x0, ring->regs + RING_CONTROL);
ring             1313 drivers/mailbox/bcm-flexrm-mailbox.c 	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
ring             1314 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
ring             1317 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->bd_write_offset =
ring             1318 drivers/mailbox/bcm-flexrm-mailbox.c 			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
ring             1319 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->bd_write_offset *= RING_DESC_SIZE;
ring             1322 drivers/mailbox/bcm-flexrm-mailbox.c 	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
ring             1323 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
ring             1326 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->cmpl_read_offset =
ring             1327 drivers/mailbox/bcm-flexrm-mailbox.c 			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
ring             1328 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->cmpl_read_offset *= RING_DESC_SIZE;
ring             1331 drivers/mailbox/bcm-flexrm-mailbox.c 	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
ring             1332 drivers/mailbox/bcm-flexrm-mailbox.c 	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
ring             1333 drivers/mailbox/bcm-flexrm-mailbox.c 	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
ring             1334 drivers/mailbox/bcm-flexrm-mailbox.c 	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
ring             1335 drivers/mailbox/bcm-flexrm-mailbox.c 	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
ring             1339 drivers/mailbox/bcm-flexrm-mailbox.c 	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
ring             1341 drivers/mailbox/bcm-flexrm-mailbox.c 	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
ring             1342 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
ring             1346 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(val, ring->regs + RING_CONTROL);
ring             1349 drivers/mailbox/bcm-flexrm-mailbox.c 	atomic_set(&ring->msg_send_count, 0);
ring             1350 drivers/mailbox/bcm-flexrm-mailbox.c 	atomic_set(&ring->msg_cmpl_count, 0);
ring             1355 drivers/mailbox/bcm-flexrm-mailbox.c 	free_irq(ring->irq, ring);
ring             1356 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->irq_requested = false;
ring             1358 drivers/mailbox/bcm-flexrm-mailbox.c 	dma_pool_free(ring->mbox->cmpl_pool,
ring             1359 drivers/mailbox/bcm-flexrm-mailbox.c 		      ring->cmpl_base, ring->cmpl_dma_base);
ring             1360 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->cmpl_base = NULL;
ring             1362 drivers/mailbox/bcm-flexrm-mailbox.c 	dma_pool_free(ring->mbox->bd_pool,
ring             1363 drivers/mailbox/bcm-flexrm-mailbox.c 		      ring->bd_base, ring->bd_dma_base);
ring             1364 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->bd_base = NULL;
ring             1374 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring = chan->con_priv;
ring             1377 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(0x0, ring->regs + RING_CONTROL);
ring             1382 drivers/mailbox/bcm-flexrm-mailbox.c 			ring->regs + RING_CONTROL);
ring             1384 drivers/mailbox/bcm-flexrm-mailbox.c 		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
ring             1390 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1391 drivers/mailbox/bcm-flexrm-mailbox.c 			"setting ring%d flush state timedout\n", ring->num);
ring             1395 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(0x0, ring->regs + RING_CONTROL);
ring             1397 drivers/mailbox/bcm-flexrm-mailbox.c 		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
ring             1403 drivers/mailbox/bcm-flexrm-mailbox.c 		dev_err(ring->mbox->dev,
ring             1404 drivers/mailbox/bcm-flexrm-mailbox.c 			"clearing ring%d flush state timedout\n", ring->num);
ring             1408 drivers/mailbox/bcm-flexrm-mailbox.c 		msg = ring->requests[reqid];
ring             1413 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->requests[reqid] = NULL;
ring             1416 drivers/mailbox/bcm-flexrm-mailbox.c 		flexrm_dma_unmap(ring->mbox->dev, msg);
ring             1424 drivers/mailbox/bcm-flexrm-mailbox.c 	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
ring             1427 drivers/mailbox/bcm-flexrm-mailbox.c 	if (ring->irq_requested) {
ring             1428 drivers/mailbox/bcm-flexrm-mailbox.c 		irq_set_affinity_hint(ring->irq, NULL);
ring             1429 drivers/mailbox/bcm-flexrm-mailbox.c 		free_irq(ring->irq, ring);
ring             1430 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->irq_requested = false;
ring             1434 drivers/mailbox/bcm-flexrm-mailbox.c 	if (ring->cmpl_base) {
ring             1435 drivers/mailbox/bcm-flexrm-mailbox.c 		dma_pool_free(ring->mbox->cmpl_pool,
ring             1436 drivers/mailbox/bcm-flexrm-mailbox.c 			      ring->cmpl_base, ring->cmpl_dma_base);
ring             1437 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->cmpl_base = NULL;
ring             1441 drivers/mailbox/bcm-flexrm-mailbox.c 	if (ring->bd_base) {
ring             1442 drivers/mailbox/bcm-flexrm-mailbox.c 		dma_pool_free(ring->mbox->bd_pool,
ring             1443 drivers/mailbox/bcm-flexrm-mailbox.c 			      ring->bd_base, ring->bd_dma_base);
ring             1444 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->bd_base = NULL;
ring             1459 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring;
ring             1474 drivers/mailbox/bcm-flexrm-mailbox.c 	ring = chan->con_priv;
ring             1475 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->msi_count_threshold = pa->args[1];
ring             1476 drivers/mailbox/bcm-flexrm-mailbox.c 	ring->msi_timer_val = pa->args[2];
ring             1487 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
ring             1490 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
ring             1491 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
ring             1492 drivers/mailbox/bcm-flexrm-mailbox.c 	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
ring             1502 drivers/mailbox/bcm-flexrm-mailbox.c 	struct flexrm_ring *ring;
ring             1543 drivers/mailbox/bcm-flexrm-mailbox.c 	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
ring             1544 drivers/mailbox/bcm-flexrm-mailbox.c 	if (!ring) {
ring             1548 drivers/mailbox/bcm-flexrm-mailbox.c 	mbox->rings = ring;
ring             1553 drivers/mailbox/bcm-flexrm-mailbox.c 		ring = &mbox->rings[index];
ring             1554 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->num = index;
ring             1555 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->mbox = mbox;
ring             1563 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->regs = regs;
ring             1565 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->irq = UINT_MAX;
ring             1566 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->irq_requested = false;
ring             1567 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
ring             1568 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->msi_count_threshold = 0x1;
ring             1569 drivers/mailbox/bcm-flexrm-mailbox.c 		memset(ring->requests, 0, sizeof(ring->requests));
ring             1570 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->bd_base = NULL;
ring             1571 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->bd_dma_base = 0;
ring             1572 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->cmpl_base = NULL;
ring             1573 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->cmpl_dma_base = 0;
ring             1574 drivers/mailbox/bcm-flexrm-mailbox.c 		atomic_set(&ring->msg_send_count, 0);
ring             1575 drivers/mailbox/bcm-flexrm-mailbox.c 		atomic_set(&ring->msg_cmpl_count, 0);
ring             1576 drivers/mailbox/bcm-flexrm-mailbox.c 		spin_lock_init(&ring->lock);
ring             1577 drivers/mailbox/bcm-flexrm-mailbox.c 		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
ring             1578 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->cmpl_read_offset = 0;
ring             1612 drivers/mailbox/bcm-flexrm-mailbox.c 		ring = &mbox->rings[desc->platform.msi_index];
ring             1613 drivers/mailbox/bcm-flexrm-mailbox.c 		ring->irq = desc->irq;
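flexrm_new_request() and flexrm_process_completions() above track in-flight messages with a per-ring request-id bitmap: an id is allocated under the ring lock, the message is parked in requests[id], and the id is released when the completion descriptor carrying it comes back (or on any send error). A single-threaded toy of that id lifecycle (the spinlock is elided and the kernel bitmap helpers are open-coded):

    #include <stdio.h>

    #define MAX_REQ 32

    static unsigned long reqid_bmap;          /* one bit per request id */
    static const char *requests[MAX_REQ];

    static int reqid_alloc(const char *msg)   /* cf. bitmap_find_free_region() */
    {
        for (int i = 0; i < MAX_REQ; i++) {
            if (!(reqid_bmap & (1UL << i))) {
                reqid_bmap |= 1UL << i;
                requests[i] = msg;
                return i;
            }
        }
        return -1;                            /* all ids in flight */
    }

    static const char *reqid_complete(int id) /* cf. bitmap_release_region() */
    {
        const char *msg = requests[id];

        requests[id] = NULL;
        reqid_bmap &= ~(1UL << id);
        return msg;
    }

    int main(void)
    {
        int id = reqid_alloc("msg0");

        printf("completed: %s\n", reqid_complete(id));
        return 0;
    }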
ring              848 drivers/misc/genwqe/card_utils.c 	int entries = 0, ring, traps, traces, trace_entries;
ring              880 drivers/misc/genwqe/card_utils.c 	for (ring = 0; ring < 8; ring++) {
ring              881 drivers/misc/genwqe/card_utils.c 		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
ring              902 drivers/misc/genwqe/card_utils.c 	int i, traps, traces, trace, trace_entries, trace_entry, ring;
ring              944 drivers/misc/genwqe/card_utils.c 	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
ring              946 drivers/misc/genwqe/card_utils.c 		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
ring              961 drivers/misc/genwqe/card_utils.c 				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);
ring              148 drivers/misc/mic/vop/vop_debugfs.c 					   j, avail->ring[j]);
ring              159 drivers/misc/mic/vop/vop_debugfs.c 							      used->ring[j].id),
ring              161 drivers/misc/mic/vop/vop_debugfs.c 							   used->ring[j].len));
ring              462 drivers/net/ethernet/3com/typhoon.c 	struct basic_ring *ring = &tp->cmdRing;
ring              470 drivers/net/ethernet/3com/typhoon.c 		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
ring              471 drivers/net/ethernet/3com/typhoon.c 		typhoon_inc_cmd_index(&ring->lastWrite, 1);
ring              475 drivers/net/ethernet/3com/typhoon.c 		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
ring              569 drivers/net/ethernet/3com/typhoon.c typhoon_num_free_tx(struct transmit_ring *ring)
ring              572 drivers/net/ethernet/3com/typhoon.c 	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
ring              580 drivers/net/ethernet/3com/typhoon.c 	struct basic_ring *ring = &tp->cmdRing;
ring              612 drivers/net/ethernet/3com/typhoon.c 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
ring              613 drivers/net/ethernet/3com/typhoon.c 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
ring              614 drivers/net/ethernet/3com/typhoon.c 		len = COMMAND_RING_SIZE - ring->lastWrite;
ring              617 drivers/net/ethernet/3com/typhoon.c 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
ring              621 drivers/net/ethernet/3com/typhoon.c 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
ring              624 drivers/net/ethernet/3com/typhoon.c 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
ring              629 drivers/net/ethernet/3com/typhoon.c 	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
ring             1564 drivers/net/ethernet/3com/typhoon.c 	struct basic_ring *ring = &tp->rxBuffRing;
ring             1567 drivers/net/ethernet/3com/typhoon.c 	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
ring             1576 drivers/net/ethernet/3com/typhoon.c 	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
ring             1577 drivers/net/ethernet/3com/typhoon.c 	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
ring             1583 drivers/net/ethernet/3com/typhoon.c 	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
ring             1591 drivers/net/ethernet/3com/typhoon.c 	struct basic_ring *ring = &tp->rxBuffRing;
ring             1598 drivers/net/ethernet/3com/typhoon.c 	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
ring             1619 drivers/net/ethernet/3com/typhoon.c 	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
ring             1620 drivers/net/ethernet/3com/typhoon.c 	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
ring             1628 drivers/net/ethernet/3com/typhoon.c 	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
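The typhoon command-ring copy above handles entries that straddle the end of the ring by splitting the memcpy: copy what fits up to the ring end, then wrap the remainder to the ring base. A standalone version of that split (sizes shrunk for illustration):

    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 16u

    static char ring[RING_SIZE];
    static unsigned last_write;

    static void ring_copy(const char *cmd, unsigned len)
    {
        unsigned wrap_len = 0;

        if (last_write + len > RING_SIZE) {        /* straddles the end */
            wrap_len = last_write + len - RING_SIZE;
            len = RING_SIZE - last_write;
        }
        memcpy(ring + last_write, cmd, len);
        if (wrap_len)
            memcpy(ring, cmd + len, wrap_len);     /* remainder at the base */
        last_write = (last_write + len + wrap_len) % RING_SIZE;
    }

    int main(void)
    {
        ring_copy("0123456789ABCD", 14);
        ring_copy("wxyz", 4);                      /* wraps: 2 + 2 bytes */
        printf("%.16s\n", ring);                   /* yz23456789ABCDwx */
        return 0;
    }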
ring              131 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	struct ena_ring *ring;
ring              138 drivers/net/ethernet/amazon/ena/ena_ethtool.c 		ring = &adapter->tx_ring[i];
ring              143 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ptr = (u64 *)((uintptr_t)&ring->tx_stats +
ring              146 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
ring              150 drivers/net/ethernet/amazon/ena/ena_ethtool.c 		ring = &adapter->rx_ring[i];
ring              155 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ptr = (u64 *)((uintptr_t)&ring->rx_stats +
ring              158 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
ring              413 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			      struct ethtool_ringparam *ring)
ring              417 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	ring->tx_max_pending = adapter->max_tx_ring_size;
ring              418 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	ring->rx_max_pending = adapter->max_rx_ring_size;
ring              419 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	ring->tx_pending = adapter->tx_ring[0].ring_size;
ring              420 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	ring->rx_pending = adapter->rx_ring[0].ring_size;
ring              424 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			     struct ethtool_ringparam *ring)
ring              429 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
ring              430 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ENA_MIN_RING_SIZE : ring->tx_pending;
ring              433 drivers/net/ethernet/amazon/ena/ena_ethtool.c 	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
ring              434 drivers/net/ethernet/amazon/ena/ena_ethtool.c 			ENA_MIN_RING_SIZE : ring->rx_pending;
ring              151 drivers/net/ethernet/amazon/ena/ena_netdev.c 				     struct ena_ring *ring, u16 qid)
ring              153 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->qid = qid;
ring              154 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->pdev = adapter->pdev;
ring              155 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->dev = &adapter->pdev->dev;
ring              156 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->netdev = adapter->netdev;
ring              157 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->napi = &adapter->ena_napi[qid].napi;
ring              158 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->adapter = adapter;
ring              159 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->ena_dev = adapter->ena_dev;
ring              160 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->per_napi_packets = 0;
ring              161 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->cpu = 0;
ring              162 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->first_interrupt = false;
ring              163 drivers/net/ethernet/amazon/ena/ena_netdev.c 	ring->no_interrupt_event_cnt = 0;
ring              164 drivers/net/ethernet/amazon/ena/ena_netdev.c 	u64_stats_init(&ring->syncp);
ring              123 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			   struct xgbe_ring *ring)
ring              128 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring)
ring              131 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (ring->rdata) {
ring              132 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		for (i = 0; i < ring->rdesc_count; i++) {
ring              133 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			rdata = XGBE_GET_DESC_DATA(ring, i);
ring              137 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		kfree(ring->rdata);
ring              138 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rdata = NULL;
ring              141 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (ring->rx_hdr_pa.pages) {
ring              142 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
ring              143 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
ring              144 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		put_page(ring->rx_hdr_pa.pages);
ring              146 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_hdr_pa.pages = NULL;
ring              147 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_hdr_pa.pages_len = 0;
ring              148 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_hdr_pa.pages_offset = 0;
ring              149 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_hdr_pa.pages_dma = 0;
ring              152 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (ring->rx_buf_pa.pages) {
ring              153 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
ring              154 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
ring              155 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		put_page(ring->rx_buf_pa.pages);
ring              157 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_buf_pa.pages = NULL;
ring              158 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_buf_pa.pages_len = 0;
ring              159 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_buf_pa.pages_offset = 0;
ring              160 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rx_buf_pa.pages_dma = 0;
ring              163 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (ring->rdesc) {
ring              166 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 				   ring->rdesc_count),
ring              167 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 				  ring->rdesc, ring->rdesc_dma);
ring              168 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->rdesc = NULL;
ring              216 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			  struct xgbe_ring *ring, unsigned int rdesc_count)
ring              220 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring)
ring              226 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	ring->rdesc_count = rdesc_count;
ring              227 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
ring              228 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 					  ring->node);
ring              229 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring->rdesc)
ring              235 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	ring->rdata = xgbe_alloc_node(size, ring->node);
ring              236 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring->rdata)
ring              241 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
ring              357 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			      struct xgbe_ring *ring,
ring              362 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring->rx_hdr_pa.pages) {
ring              363 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
ring              368 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if (!ring->rx_buf_pa.pages) {
ring              369 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
ring              370 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
ring              376 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
ring              380 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
ring              390 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	struct xgbe_ring *ring;
ring              400 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring = channel->tx_ring;
ring              401 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		if (!ring)
ring              404 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdesc = ring->rdesc;
ring              405 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdesc_dma = ring->rdesc_dma;
ring              407 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		for (j = 0; j < ring->rdesc_count; j++) {
ring              408 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			rdata = XGBE_GET_DESC_DATA(ring, j);
ring              417 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->cur = 0;
ring              418 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->dirty = 0;
ring              419 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		memset(&ring->tx, 0, sizeof(ring->tx));
ring              431 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	struct xgbe_ring *ring;
ring              441 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring = channel->rx_ring;
ring              442 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		if (!ring)
ring              445 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdesc = ring->rdesc;
ring              446 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdesc_dma = ring->rdesc_dma;
ring              448 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		for (j = 0; j < ring->rdesc_count; j++) {
ring              449 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			rdata = XGBE_GET_DESC_DATA(ring, j);
ring              454 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			if (xgbe_map_rx_buffer(pdata, ring, rdata))
ring              461 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->cur = 0;
ring              462 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		ring->dirty = 0;
ring              526 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	struct xgbe_ring *ring = channel->tx_ring;
ring              535 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
ring              538 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	start_index = ring->cur;
ring              539 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	cur_index = ring->cur;
ring              541 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	packet = &ring->packet_data;
ring              551 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
ring              552 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
ring              554 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring              575 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring              600 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring              634 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring              642 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
ring              654 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
ring             1410 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->tx_ring;
ring             1413 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	int start_index = ring->cur;
ring             1418 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	for (i = 0; i < ring->rdesc_count; i++) {
ring             1419 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		rdata = XGBE_GET_DESC_DATA(ring, i);
ring             1426 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
ring             1429 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
ring             1489 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->rx_ring;
ring             1491 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	unsigned int start_index = ring->cur;
ring             1497 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	for (i = 0; i < ring->rdesc_count; i++) {
ring             1498 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		rdata = XGBE_GET_DESC_DATA(ring, i);
ring             1505 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
ring             1508 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
ring             1515 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
ring             1642 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			       struct xgbe_ring *ring)
ring             1652 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
ring             1663 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	ring->tx.xmit_more = 0;
ring             1669 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->tx_ring;
ring             1672 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_packet_data *packet = &ring->packet_data;
ring             1677 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	int start_index = ring->cur;
ring             1678 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	int cur_index = ring->cur;
ring             1695 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	if (tso && (packet->mss != ring->tx.cur_mss))
ring             1700 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
ring             1715 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	ring->coalesce_count += tx_packets;
ring             1720 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
ring             1725 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring             1747 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			ring->tx.cur_mss = packet->mss;
ring             1767 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
ring             1771 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring             1834 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
ring             1878 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, start_index);
ring             1883 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		xgbe_dump_tx_desc(pdata, ring, start_index,
ring             1889 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	ring->cur = cur_index + 1;
ring             1893 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		xgbe_tx_start_xmit(channel, ring);
ring             1895 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		ring->tx.xmit_more = 1;
ring             1898 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	      channel->name, start_index & (ring->rdesc_count - 1),
ring             1899 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	      (ring->cur - 1) & (ring->rdesc_count - 1));
ring             1907 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_ring *ring = channel->rx_ring;
ring             1910 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	struct xgbe_packet_data *packet = &ring->packet_data;
ring             1914 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
ring             1916 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
ring             1927 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 		xgbe_dump_rx_desc(pdata, ring, ring->cur);
ring             2059 drivers/net/ethernet/amd/xgbe/xgbe-dev.c 	      ring->cur & (ring->rdesc_count - 1), ring->cur);
ring              193 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring;
ring              223 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			ring = xgbe_alloc_node(sizeof(*ring), node);
ring              224 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			if (!ring)
ring              227 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			spin_lock_init(&ring->lock);
ring              228 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			ring->node = node;
ring              230 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			channel->tx_ring = ring;
ring              234 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			ring = xgbe_alloc_node(sizeof(*ring), node);
ring              235 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			if (!ring)
ring              238 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			spin_lock_init(&ring->lock);
ring              239 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			ring->node = node;
ring              241 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			channel->rx_ring = ring;
ring              263 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
ring              265 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	return (ring->rdesc_count - (ring->cur - ring->dirty));
ring              268 drivers/net/ethernet/amd/xgbe/xgbe-drv.c static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
ring              270 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	return (ring->cur - ring->dirty);
ring              274 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 				    struct xgbe_ring *ring, unsigned int count)
ring              278 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (count > xgbe_tx_avail_desc(ring)) {
ring              282 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring->tx.queue_stopped = 1;
ring              287 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (ring->tx.xmit_more)
ring              288 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			pdata->hw_if.tx_start_xmit(channel, ring);
ring             1186 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring;
ring             1193 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring = pdata->channel[i]->tx_ring;
ring             1194 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!ring)
ring             1197 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		for (j = 0; j < ring->rdesc_count; j++) {
ring             1198 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			rdata = XGBE_GET_DESC_DATA(ring, j);
ring             1209 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring;
ring             1216 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring = pdata->channel[i]->rx_ring;
ring             1217 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (!ring)
ring             1220 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		for (j = 0; j < ring->rdesc_count; j++) {
ring             1221 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			rdata = XGBE_GET_DESC_DATA(ring, j);
ring             1833 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			     struct xgbe_ring *ring, struct sk_buff *skb,
ring             1851 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
ring             1873 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
ring             2017 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring;
ring             2026 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	ring = channel->tx_ring;
ring             2027 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet = &ring->packet_data;
ring             2040 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	xgbe_packet_info(pdata, ring, skb, packet);
ring             2043 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
ring             2073 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
ring             2521 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring = channel->rx_ring;
ring             2524 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	while (ring->dirty != ring->cur) {
ring             2525 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
ring             2530 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (desc_if->map_rx_buffer(pdata, ring, rdata))
ring             2533 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
ring             2535 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring->dirty++;
ring             2543 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
ring             2617 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring = channel->tx_ring;
ring             2629 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!ring)
ring             2632 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	cur = ring->cur;
ring             2640 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	       (ring->dirty != cur)) {
ring             2641 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
ring             2652 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
ring             2664 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring->dirty++;
ring             2672 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if ((ring->tx.queue_stopped == 1) &&
ring             2673 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
ring             2674 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring->tx.queue_stopped = 0;
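
The completion path above stops the queue when a packet no longer fits (xgbe_maybe_stop_tx_queue) and restarts it only once more than XGBE_TX_DESC_MIN_FREE descriptors are free, so the queue does not thrash at the full boundary. A sketch of that hysteresis, with an assumed threshold value:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical hysteresis model: stop when a packet no longer fits,
     * wake only after a larger low-water mark of descriptors is free. */
    #define DESC_COUNT  512u
    #define TX_MIN_FREE  64u   /* assumed analogue of XGBE_TX_DESC_MIN_FREE */

    struct txq {
            uint32_t cur, dirty;
            bool stopped;
    };

    static uint32_t avail(const struct txq *q)
    {
            return DESC_COUNT - (q->cur - q->dirty);
    }

    static void tx_submit(struct txq *q, uint32_t need)
    {
            if (need > avail(q)) {      /* mirrors xgbe_maybe_stop_tx_queue() */
                    q->stopped = true;
                    return;
            }
            q->cur += need;
    }

    static void tx_complete(struct txq *q, uint32_t done)
    {
            q->dirty += done;
            if (q->stopped && avail(q) > TX_MIN_FREE)   /* restart with slack */
                    q->stopped = false;
    }

    int main(void)
    {
            struct txq q = { 0 };

            tx_submit(&q, DESC_COUNT);      /* fill the ring completely */
            tx_submit(&q, 1);               /* no room: queue stops */
            tx_complete(&q, TX_MIN_FREE);   /* frees 64, still not past mark */
            printf("stopped=%d avail=%u\n", (int)q.stopped, (unsigned)avail(&q));
            tx_complete(&q, 1);             /* crosses the wake mark */
            printf("stopped=%d avail=%u\n", (int)q.stopped, (unsigned)avail(&q));
            return 0;
    }
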
ring             2687 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	struct xgbe_ring *ring = channel->rx_ring;
ring             2702 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	if (!ring)
ring             2710 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
ring             2711 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	packet = &ring->packet_data;
ring             2713 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		DBGPR("  cur = %d\n", ring->cur);
ring             2728 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
ring             2730 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
ring             2737 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		ring->cur++;
ring             2861 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
ring             2942 drivers/net/ethernet/amd/xgbe/xgbe-drv.c void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
ring             2949 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 		rdata = XGBE_GET_DESC_DATA(ring, idx);
ring             2962 drivers/net/ethernet/amd/xgbe/xgbe-drv.c void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
ring             2968 drivers/net/ethernet/amd/xgbe/xgbe-drv.c 	rdata = XGBE_GET_DESC_DATA(ring, idx);
ring               67 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *ring = pdata->rx_ring;
ring               72 drivers/net/ethernet/apm/xgene-v2/main.c 	u8 tail = ring->tail;
ring               79 drivers/net/ethernet/apm/xgene-v2/main.c 		raw_desc = &ring->raw_desc[tail];
ring               93 drivers/net/ethernet/apm/xgene-v2/main.c 		ring->pkt_info[tail].skb = skb;
ring               94 drivers/net/ethernet/apm/xgene-v2/main.c 		ring->pkt_info[tail].dma_addr = dma_addr;
ring              109 drivers/net/ethernet/apm/xgene-v2/main.c 	ring->tail = tail;
ring              352 drivers/net/ethernet/apm/xgene-v2/main.c 				 struct xge_desc_ring *ring)
ring              358 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring)
ring              362 drivers/net/ethernet/apm/xgene-v2/main.c 	if (ring->desc_addr)
ring              363 drivers/net/ethernet/apm/xgene-v2/main.c 		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
ring              365 drivers/net/ethernet/apm/xgene-v2/main.c 	kfree(ring->pkt_info);
ring              366 drivers/net/ethernet/apm/xgene-v2/main.c 	kfree(ring);
ring              372 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *ring = pdata->rx_ring;
ring              379 drivers/net/ethernet/apm/xgene-v2/main.c 		skb = ring->pkt_info[i].skb;
ring              380 drivers/net/ethernet/apm/xgene-v2/main.c 		dma_addr = ring->pkt_info[i].dma_addr;
ring              407 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *ring;
ring              410 drivers/net/ethernet/apm/xgene-v2/main.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring              411 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring)
ring              414 drivers/net/ethernet/apm/xgene-v2/main.c 	ring->ndev = ndev;
ring              417 drivers/net/ethernet/apm/xgene-v2/main.c 	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
ring              419 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring->desc_addr)
ring              422 drivers/net/ethernet/apm/xgene-v2/main.c 	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
ring              424 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring->pkt_info)
ring              427 drivers/net/ethernet/apm/xgene-v2/main.c 	xge_setup_desc(ring);
ring              429 drivers/net/ethernet/apm/xgene-v2/main.c 	return ring;
ring              432 drivers/net/ethernet/apm/xgene-v2/main.c 	xge_delete_desc_ring(ndev, ring);
ring              440 drivers/net/ethernet/apm/xgene-v2/main.c 	struct xge_desc_ring *ring;
ring              444 drivers/net/ethernet/apm/xgene-v2/main.c 	ring = xge_create_desc_ring(ndev);
ring              445 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring)
ring              448 drivers/net/ethernet/apm/xgene-v2/main.c 	pdata->tx_ring = ring;
ring              452 drivers/net/ethernet/apm/xgene-v2/main.c 	ring = xge_create_desc_ring(ndev);
ring              453 drivers/net/ethernet/apm/xgene-v2/main.c 	if (!ring)
ring              456 drivers/net/ethernet/apm/xgene-v2/main.c 	pdata->rx_ring = ring;
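
xge_create_desc_ring() above acquires three resources in sequence (the ring struct, the coherent descriptor area, the pkt_info array) and unwinds through xge_delete_desc_ring() when a later step fails. A userspace model of that unwinding, with plain calloc() standing in for dma_alloc_coherent()/kcalloc():

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative model of the error unwinding: on any failure,
     * release everything acquired so far and return NULL. */
    struct desc_ring {
            void *desc_addr;   /* stands in for the coherent DMA area */
            void *pkt_info;    /* per-slot bookkeeping array */
    };

    static struct desc_ring *create_ring(size_t ndesc, size_t desc_sz)
    {
            struct desc_ring *ring = calloc(1, sizeof(*ring));

            if (!ring)
                    return NULL;
            ring->desc_addr = calloc(ndesc, desc_sz);
            if (!ring->desc_addr)
                    goto err;
            ring->pkt_info = calloc(ndesc, sizeof(void *));
            if (!ring->pkt_info)
                    goto err;
            return ring;
    err:
            free(ring->desc_addr);   /* free(NULL) is a harmless no-op */
            free(ring);
            return NULL;
    }

    int main(void)
    {
            struct desc_ring *ring = create_ring(256, 32);

            printf("ring %screated\n", ring ? "" : "not ");
            if (ring) {
                    free(ring->pkt_info);
                    free(ring->desc_addr);
                    free(ring);
            }
            return 0;
    }
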
ring               13 drivers/net/ethernet/apm/xgene-v2/ring.c void xge_setup_desc(struct xge_desc_ring *ring)
ring               21 drivers/net/ethernet/apm/xgene-v2/ring.c 		raw_desc = &ring->raw_desc[i];
ring               24 drivers/net/ethernet/apm/xgene-v2/ring.c 		next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);
ring               36 drivers/net/ethernet/apm/xgene-v2/ring.c 	struct xge_desc_ring *ring = pdata->tx_ring;
ring               37 drivers/net/ethernet/apm/xgene-v2/ring.c 	dma_addr_t dma_addr = ring->dma_addr;
ring               42 drivers/net/ethernet/apm/xgene-v2/ring.c 	ring->head = 0;
ring               43 drivers/net/ethernet/apm/xgene-v2/ring.c 	ring->tail = 0;
ring               48 drivers/net/ethernet/apm/xgene-v2/ring.c 	struct xge_desc_ring *ring = pdata->rx_ring;
ring               49 drivers/net/ethernet/apm/xgene-v2/ring.c 	dma_addr_t dma_addr = ring->dma_addr;
ring               54 drivers/net/ethernet/apm/xgene-v2/ring.c 	ring->head = 0;
ring               55 drivers/net/ethernet/apm/xgene-v2/ring.c 	ring->tail = 0;
ring              101 drivers/net/ethernet/apm/xgene-v2/ring.h void xge_setup_desc(struct xge_desc_ring *ring);
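
xge_setup_desc() links the descriptors into a hardware-walkable circle: each slot stores the bus address of the next one, computed from the ring's base DMA address, and the final slot points back at slot 0. A small model of that linking (sizes illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_DESC  8u
    #define DESC_SIZE 32u

    /* Illustrative model of the circular next_dma chain: hardware can
     * walk the ring from any slot without software-maintained pointers. */
    struct raw_desc { uint64_t next_dma; };

    static void setup_desc(struct raw_desc *desc, uint64_t ring_dma)
    {
            for (uint32_t i = 0; i < NUM_DESC; i++) {
                    uint32_t next = (i + 1) % NUM_DESC;   /* wrap to slot 0 */

                    desc[i].next_dma = ring_dma + next * DESC_SIZE;
            }
    }

    int main(void)
    {
            struct raw_desc desc[NUM_DESC];

            setup_desc(desc, 0x1000);
            printf("slot 7 -> %#llx (back to slot 0)\n",
                   (unsigned long long)desc[NUM_DESC - 1].next_dma);
            return 0;
    }
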
ring               13 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
ring               15 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u32 *ring_cfg = ring->state;
ring               16 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u64 addr = ring->dma;
ring               17 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;
ring               33 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
ring               35 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u32 *ring_cfg = ring->state;
ring               39 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring               50 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
ring               52 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u32 *ring_cfg = ring->state;
ring               60 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
ring               63 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring               68 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
ring               71 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring               76 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
ring               78 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring               81 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
ring               83 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring               84 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 				     ring->state[i]);
ring               88 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
ring               90 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	memset(ring->state, 0, sizeof(ring->state));
ring               91 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_write_ring_state(ring);
ring               94 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
ring               96 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_set_type(ring);
ring               98 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
ring               99 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
ring              100 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		xgene_enet_ring_set_recombbuf(ring);
ring              102 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_init(ring);
ring              103 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_write_ring_state(ring);
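
These helpers accumulate the ring configuration in the in-memory ring->state[] shadow array and flush every word at once through xgene_enet_write_ring_state(), so the hardware never observes a half-built configuration. A sketch of the shadow-then-flush pattern; the register offsets and word count below are placeholders, not the real CSR map:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RING_CONFIG 5u   /* assumed number of 32-bit state words */

    /* Placeholder offsets standing in for the real CSR addresses. */
    enum { CSR_RING_CONFIG = 0x00, CSR_RING_WR_BASE = 0x10 };

    struct ring_shadow {
            uint32_t num;
            uint32_t state[NUM_RING_CONFIG];
    };

    static void ring_wr32(uint32_t offset, uint32_t data)
    {
            /* stands in for the MMIO write in xgene_enet_ring_wr32() */
            printf("csr[%#04x] <= %#010x\n", (unsigned)offset, (unsigned)data);
    }

    static void write_ring_state(const struct ring_shadow *ring)
    {
            ring_wr32(CSR_RING_CONFIG, ring->num);   /* select target ring */
            for (uint32_t i = 0; i < NUM_RING_CONFIG; i++)
                    ring_wr32(CSR_RING_WR_BASE + i * 4, ring->state[i]);
    }

    int main(void)
    {
            struct ring_shadow ring = { .num = 3, .state = { 0 } };

            ring.state[0] |= 1u << 19;   /* example field set before flush */
            write_ring_state(&ring);
            return 0;
    }
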
ring              106 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
ring              111 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring              113 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	ring_id_val = ring->id & GENMASK(9, 0);
ring              116 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
ring              121 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
ring              122 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
ring              125 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
ring              129 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	ring_id = ring->id | OVERWRITE;
ring              130 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
ring              131 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
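
xgene_enet_set_ring_id() packs the ring id into the low 10 bits of one register and the ring number into bits 18:9 of its companion, exactly the GENMASK() arithmetic shown above. A standalone reproduction of that packing (GENMASK32 is a local stand-in for the kernel macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's GENMASK(): bits h..l set. */
    #define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            uint32_t id = 0x215, num = 42;
            uint32_t ring_id_val = id & GENMASK32(9, 0);          /* bits 9:0  */
            uint32_t ring_id_buf = (num << 9) & GENMASK32(18, 9); /* bits 18:9 */

            printf("id_val=%#x id_buf=%#x\n",
                   (unsigned)ring_id_val, (unsigned)ring_id_buf);
            return 0;
    }
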
ring              135 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 				    struct xgene_enet_desc_ring *ring)
ring              137 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u32 size = ring->size;
ring              141 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_clr_ring_state(ring);
ring              142 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_set_ring_state(ring);
ring              143 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_set_ring_id(ring);
ring              145 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	ring->slots = xgene_enet_get_numslots(ring->id, size);
ring              147 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring              148 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
ring              149 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		return ring;
ring              151 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	for (i = 0; i < ring->slots; i++)
ring              152 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
ring              154 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
ring              155 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
ring              156 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
ring              158 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	return ring;
ring              161 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
ring              166 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring              167 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
ring              170 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
ring              171 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
ring              172 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
ring              175 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_clr_desc_ring_id(ring);
ring              176 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	xgene_enet_clr_ring_state(ring);
ring              179 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
ring              181 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	iowrite32(count, ring->cmd);
ring              184 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
ring              186 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	u32 __iomem *cmd_base = ring->cmd_base;
ring              195 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
ring              200 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_crc_errors++;
ring              204 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_errors++;
ring              207 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_frame_errors++;
ring              210 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_length_errors++;
ring              213 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_frame_errors++;
ring              216 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		ring->rx_fifo_errors++;
ring              733 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 			     struct xgene_enet_desc_ring *ring)
ring              737 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 	if (xgene_enet_is_bufpool(ring->id)) {
ring              739 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		data = BIT(xgene_enet_get_fpsel(ring->id));
ring              742 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c 		data = BIT(xgene_enet_ring_bufnum(ring->id));
ring              422 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
ring              390 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
ring              394 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
ring              396 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
ring              401 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
ring              403 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
ring              781 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
ring              784 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct net_device *ndev = ring->ndev;
ring              787 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	u16 head = ring->head;
ring              788 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	u16 slots = ring->slots - 1;
ring              793 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		raw_desc = &ring->raw_desc[head];
ring              804 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			exp_desc = &ring->raw_desc[head];
ring              815 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
ring              817 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			ret = xgene_enet_tx_completion(ring, raw_desc);
ring              829 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			pdata->txc_level[ring->index] += desc_count;
ring              836 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		pdata->ring_ops->wr_cmd(ring, -count);
ring              837 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring->head = head;
ring              839 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (__netif_subqueue_stopped(ndev, ring->index))
ring              840 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			netif_start_subqueue(ndev, ring->index);
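
xgene_enet_process_ring() caches ring->slots - 1 as a wrap mask, which suggests slot counts are powers of two and the head index advances with a single AND rather than a modulo. A sketch of that walk (slot count illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SLOTS 8u   /* must be a power of two for the mask trick */

    /* Illustrative head walk: "slots - 1" acts as a wrap mask, so the
     * index advances with one AND, no divide and no branch. */
    int main(void)
    {
            uint16_t head = 5;
            uint16_t mask = RING_SLOTS - 1;

            for (int i = 0; i < 6; i++) {
                    printf("processing slot %u\n", (unsigned)head);
                    head = (head + 1) & mask;   /* wraps 7 -> 0 */
            }
            return 0;
    }
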
ring              848 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring              851 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
ring              852 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	processed = xgene_enet_process_ring(ring, budget);
ring              856 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		enable_irq(ring->irq);
ring              880 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring              884 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring              886 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
ring              889 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
ring              895 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
ring              896 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
ring              905 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring              910 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring              911 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ring              912 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
ring              913 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				       0, ring->irq_name, ring);
ring              916 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				   ring->irq_name);
ring              921 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
ring              922 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ring              923 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
ring              924 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				       0, ring->irq_name, ring);
ring              927 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				   ring->irq_name);
ring              937 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring              945 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring              946 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ring              947 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		devm_free_irq(dev, ring->irq, ring);
ring              951 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i]->cp_ring;
ring              952 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ring              953 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		devm_free_irq(dev, ring->irq, ring);
ring             1044 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
ring             1049 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata = netdev_priv(ring->ndev);
ring             1050 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	dev = ndev_to_dev(ring->ndev);
ring             1052 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata->ring_ops->clear(ring);
ring             1053 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
ring             1059 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring             1063 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
ring             1064 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1065 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			xgene_enet_delete_ring(ring);
ring             1066 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			pdata->port_ops->clear(pdata, ring);
ring             1068 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				xgene_enet_delete_ring(ring->cp_ring);
ring             1075 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring             1076 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1077 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			page_pool = ring->page_pool;
ring             1084 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			buf_pool = ring->buf_pool;
ring             1089 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			xgene_enet_delete_ring(ring);
ring             1125 drivers/net/ethernet/apm/xgene/xgene_enet_main.c static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
ring             1130 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (!ring)
ring             1133 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	dev = ndev_to_dev(ring->ndev);
ring             1134 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	pdata = netdev_priv(ring->ndev);
ring             1136 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (ring->desc_addr) {
ring             1137 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		pdata->ring_ops->clear(ring);
ring             1138 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
ring             1140 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	devm_kfree(dev, ring);
ring             1147 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring             1152 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
ring             1153 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1154 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			if (ring->cp_ring && ring->cp_ring->cp_skb)
ring             1155 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				devm_kfree(dev, ring->cp_ring->cp_skb);
ring             1157 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			if (ring->cp_ring && pdata->cq_cnt)
ring             1158 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				xgene_enet_free_desc_ring(ring->cp_ring);
ring             1160 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			xgene_enet_free_desc_ring(ring);
ring             1166 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring             1167 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1168 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			if (ring->buf_pool) {
ring             1169 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				if (ring->buf_pool->rx_skb)
ring             1170 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					devm_kfree(dev, ring->buf_pool->rx_skb);
ring             1172 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				xgene_enet_free_desc_ring(ring->buf_pool);
ring             1175 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			page_pool = ring->page_pool;
ring             1186 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			xgene_enet_free_desc_ring(ring);
ring             1192 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				 struct xgene_enet_desc_ring *ring)
ring             1195 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
ring             1203 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					      struct xgene_enet_desc_ring *ring)
ring             1207 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
ring             1216 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring             1224 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
ring             1226 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (!ring)
ring             1229 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->ndev = ndev;
ring             1230 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->num = ring_num;
ring             1231 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->cfgsize = cfgsize;
ring             1232 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->id = ring_id;
ring             1234 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
ring             1236 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (!ring->desc_addr) {
ring             1237 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		devm_kfree(dev, ring);
ring             1240 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->size = size;
ring             1242 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	if (is_irq_mbox_required(pdata, ring)) {
ring             1244 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 						    &ring->irq_mbox_dma,
ring             1247 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			dmam_free_coherent(dev, size, ring->desc_addr,
ring             1248 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 					   ring->dma);
ring             1249 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			devm_kfree(dev, ring);
ring             1252 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring->irq_mbox_addr = irq_mbox_addr;
ring             1255 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring             1256 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
ring             1257 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	ring = pdata->ring_ops->setup(ring);
ring             1259 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		   ring->num, ring->size, ring->id, ring->slots);
ring             1261 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	return ring;
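
xgene_enet_ring_cmd_base() above locates each ring's command registers at a fixed stride from the global command area: base + (ring->num << num_ring_id_shift). A toy version of that address computation (the shift value is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed window size: 1 << 6 = 64 bytes of command registers per ring. */
    #define NUM_RING_ID_SHIFT 6u

    static uintptr_t ring_cmd_base(uintptr_t cmd_addr, uint32_t ring_num)
    {
            return cmd_addr + ((uintptr_t)ring_num << NUM_RING_ID_SHIFT);
    }

    int main(void)
    {
            uintptr_t base = 0x10000;

            for (uint32_t num = 0; num < 3; num++)
                    printf("ring %u cmd window at %#llx\n", (unsigned)num,
                           (unsigned long long)ring_cmd_base(base, num));
            return 0;
    }
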
ring             1473 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 	struct xgene_enet_desc_ring *ring;
ring             1477 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->tx_ring[i];
ring             1478 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1479 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->tx_packets += ring->tx_packets;
ring             1480 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->tx_bytes += ring->tx_bytes;
ring             1481 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->tx_dropped += ring->tx_dropped;
ring             1482 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->tx_errors += ring->tx_errors;
ring             1487 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		ring = pdata->rx_ring[i];
ring             1488 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 		if (ring) {
ring             1489 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_packets += ring->rx_packets;
ring             1490 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_bytes += ring->rx_bytes;
ring             1491 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_dropped += ring->rx_dropped;
ring             1492 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_errors += ring->rx_errors +
ring             1493 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				ring->rx_length_errors +
ring             1494 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				ring->rx_crc_errors +
ring             1495 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				ring->rx_frame_errors +
ring             1496 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 				ring->rx_fifo_errors;
ring             1497 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_length_errors += ring->rx_length_errors;
ring             1498 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_crc_errors += ring->rx_crc_errors;
ring             1499 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_frame_errors += ring->rx_frame_errors;
ring             1500 drivers/net/ethernet/apm/xgene/xgene_enet_main.c 			stats->rx_fifo_errors += ring->rx_fifo_errors;
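
The get-stats path above simply sums per-ring counters into the netdev totals; the usual rationale is that each ring updates its own counters from its own NAPI context, so aggregation just reads them and accepts slightly stale values. A compact model:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_RINGS 4u

    /* Illustrative per-ring counters folded into device-wide totals. */
    struct ring_stats { uint64_t packets, bytes, errors; };

    int main(void)
    {
            struct ring_stats rings[NUM_RINGS] = {
                    { 100, 64000, 1 }, { 200, 128000, 0 },
                    {  50, 32000, 2 }, {  25,  16000, 0 },
            };
            struct ring_stats total = { 0 };

            for (uint32_t i = 0; i < NUM_RINGS; i++) {
                    total.packets += rings[i].packets;
                    total.bytes   += rings[i].bytes;
                    total.errors  += rings[i].errors;
            }
            printf("packets=%llu bytes=%llu errors=%llu\n",
                   (unsigned long long)total.packets,
                   (unsigned long long)total.bytes,
                   (unsigned long long)total.errors);
            return 0;
    }
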
ring              163 drivers/net/ethernet/apm/xgene/xgene_enet_main.h 		      struct xgene_enet_desc_ring *ring);
ring              255 drivers/net/ethernet/apm/xgene/xgene_enet_main.h static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
ring              257 drivers/net/ethernet/apm/xgene/xgene_enet_main.h 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring              259 drivers/net/ethernet/apm/xgene/xgene_enet_main.h 	return ((u16)pdata->rm << 10) | ring->num;
ring               12 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
ring               14 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	u32 *ring_cfg = ring->state;
ring               15 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	u64 addr = ring->dma;
ring               17 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
ring               18 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
ring               27 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
ring               34 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
ring               36 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	u32 *ring_cfg = ring->state;
ring               40 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring               47 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
ring               49 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	u32 *ring_cfg = ring->state;
ring               55 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
ring               58 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring               63 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
ring               65 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
ring               68 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
ring               70 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring               71 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 				     ring->state[i]);
ring               75 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
ring               77 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	memset(ring->state, 0, sizeof(ring->state));
ring               78 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_write_ring_state(ring);
ring               81 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
ring               85 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_set_type(ring);
ring               87 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	owner = xgene_enet_ring_owner(ring->id);
ring               89 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		xgene_enet_ring_set_recombbuf(ring);
ring               91 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_init(ring);
ring               92 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_write_ring_state(ring);
ring               95 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
ring              100 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
ring              103 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring              105 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	ring_id_val = ring->id & GENMASK(9, 0);
ring              108 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
ring              114 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
ring              115 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
ring              118 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
ring              122 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	ring_id = ring->id | OVERWRITE;
ring              123 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
ring              124 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
ring              128 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 				    struct xgene_enet_desc_ring *ring)
ring              133 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_clr_ring_state(ring);
ring              134 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_set_ring_state(ring);
ring              135 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_set_ring_id(ring);
ring              137 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
ring              139 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	is_bufpool = xgene_enet_is_bufpool(ring->id);
ring              140 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
ring              141 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		return ring;
ring              143 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
ring              144 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
ring              146 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	for (i = 0; i < ring->slots; i++)
ring              147 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
ring              149 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	return ring;
ring              152 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
ring              154 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_clr_desc_ring_id(ring);
ring              155 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_clr_ring_state(ring);
ring              158 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
ring              162 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
ring              163 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 		data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
ring              168 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	iowrite32(data, ring->cmd);
ring              171 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
ring              173 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	u32 __iomem *cmd_base = ring->cmd_base;
ring              182 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
ring              186 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
ring              187 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
ring              188 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
ring              189 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
ring              190 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
ring              191 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
ring              192 drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c 	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
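
xgene_enet_setup_coalescing() programs tick and threshold registers so the ring raises an interrupt only after enough descriptors accumulate or a timer expires, trading a little latency for far fewer interrupts. A sketch of that policy as plain logic (threshold and tick limits are illustrative, loosely echoing the 0x08 threshold written above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative coalescing policy: fire on enough pending work or on
     * timeout, whichever comes first.  Values are placeholders. */
    #define COAL_DESC_THRESHOLD 8u
    #define COAL_TICK_LIMIT     64u

    static bool should_interrupt(uint32_t pending, uint32_t ticks)
    {
            return pending >= COAL_DESC_THRESHOLD || ticks >= COAL_TICK_LIMIT;
    }

    int main(void)
    {
            printf("%d\n", should_interrupt(3, 10));   /* 0: keep batching */
            printf("%d\n", should_interrupt(8, 10));   /* 1: enough work   */
            printf("%d\n", should_interrupt(3, 64));   /* 1: timer expired */
            return 0;
    }
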
ring              509 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 			     struct xgene_enet_desc_ring *ring)
ring              513 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 	if (xgene_enet_is_bufpool(ring->id)) {
ring              515 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data = BIT(xgene_enet_get_fpsel(ring->id));
ring              518 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c 		data = BIT(xgene_enet_ring_bufnum(ring->id));
ring              445 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c 			     struct xgene_enet_desc_ring *ring)
ring              449 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c 	if (xgene_enet_is_bufpool(ring->id)) {
ring              451 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c 		data = BIT(xgene_enet_get_fpsel(ring->id));
ring              454 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c 		data = BIT(xgene_enet_ring_bufnum(ring->id));
ring              523 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 			     struct ethtool_ringparam *ring)
ring              528 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	ring->rx_pending = aq_nic_cfg->rxds;
ring              529 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	ring->tx_pending = aq_nic_cfg->txds;
ring              531 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	ring->rx_max_pending = aq_nic_cfg->aq_hw_caps->rxds_max;
ring              532 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	ring->tx_max_pending = aq_nic_cfg->aq_hw_caps->txds_max;
ring              536 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 			    struct ethtool_ringparam *ring)
ring              544 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
ring              556 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	aq_nic_cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
ring              560 drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c 	aq_nic_cfg->txds = max(ring->tx_pending, hw_caps->txds_min);
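
The aquantia set_ringparam path above rejects the mini/jumbo parameters it does not support and raises the requested descriptor counts to the hardware minimum (rxds_min/txds_min); ethtool itself checks requests against the *_max_pending values that get_ringparam reports. A small clamp helper in the same spirit (bounds illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative clamp: lift a request to the hardware minimum and
     * cap it at the maximum advertised via get_ringparam. */
    static uint32_t clamp_ring_size(uint32_t requested, uint32_t lo, uint32_t hi)
    {
            if (requested < lo)
                    return lo;
            if (requested > hi)
                    return hi;
            return requested;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)clamp_ring_size(16, 32, 8192));    /* 32 */
            printf("%u\n", (unsigned)clamp_ring_size(1024, 32, 8192));  /* 1024 */
            printf("%u\n", (unsigned)clamp_ring_size(65536, 32, 8192)); /* 8192 */
            return 0;
    }
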
ring              302 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 			struct aq_ring_s *ring)
ring              304 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	self->aq_ring_tx[idx] = ring;
ring              425 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 				   struct aq_ring_s *ring)
ring              430 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	unsigned int dx = ring->sw_tail;
ring              432 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
ring              458 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx = aq_ring_next_dx(ring, dx);
ring              459 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff = &ring->buff_ring[dx];
ring              527 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 			dx = aq_ring_next_dx(ring, dx);
ring              528 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 			dx_buff = &ring->buff_ring[dx];
ring              549 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	for (dx = ring->sw_tail;
ring              551 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	     --ret, dx = aq_ring_next_dx(ring, dx)) {
ring              552 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 		dx_buff = &ring->buff_ring[dx];
ring              575 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	struct aq_ring_s *ring = NULL;
ring              583 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
ring              590 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	aq_ring_update_queue_state(ring);
ring              593 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
ring              598 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 	frags = aq_nic_map_skb(self, skb, ring);
ring              602 drivers/net/ethernet/aquantia/atlantic/aq_nic.c 						       ring, frags);
ring              122 drivers/net/ethernet/aquantia/atlantic/aq_nic.h 			struct aq_ring_s *ring);
ring              194 drivers/net/ethernet/aquantia/atlantic/aq_ring.c void aq_ring_update_queue_state(struct aq_ring_s *ring)
ring              196 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
ring              197 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		aq_ring_queue_stop(ring);
ring              198 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
ring              199 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		aq_ring_queue_wake(ring);
ring              202 drivers/net/ethernet/aquantia/atlantic/aq_ring.c void aq_ring_queue_wake(struct aq_ring_s *ring)
ring              204 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
ring              206 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (__netif_subqueue_stopped(ndev, ring->idx)) {
ring              207 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		netif_wake_subqueue(ndev, ring->idx);
ring              208 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		ring->stats.tx.queue_restarts++;
ring              212 drivers/net/ethernet/aquantia/atlantic/aq_ring.c void aq_ring_queue_stop(struct aq_ring_s *ring)
ring              214 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
ring              216 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	if (!__netif_subqueue_stopped(ndev, ring->idx))
ring              217 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		netif_stop_subqueue(ndev, ring->idx);
ring              167 drivers/net/ethernet/aquantia/atlantic/aq_ring.h void aq_ring_update_queue_state(struct aq_ring_s *ring);
ring              168 drivers/net/ethernet/aquantia/atlantic/aq_ring.h void aq_ring_queue_wake(struct aq_ring_s *ring);
ring              169 drivers/net/ethernet/aquantia/atlantic/aq_ring.h void aq_ring_queue_stop(struct aq_ring_s *ring);
ring               26 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
ring               36 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring               45 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		for (i = 0U, ring = self->ring[0];
ring               46 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 			self->tx_rings > i; ++i, ring = self->ring[i]) {
ring               50 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 							&ring[AQ_VEC_TX_ID]);
ring               55 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 			if (ring[AQ_VEC_TX_ID].sw_head !=
ring               56 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 			    ring[AQ_VEC_TX_ID].hw_head) {
ring               57 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
ring               58 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
ring               62 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 					    &ring[AQ_VEC_RX_ID]);
ring               66 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 			if (ring[AQ_VEC_RX_ID].sw_head !=
ring               67 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				ring[AQ_VEC_RX_ID].hw_head) {
ring               68 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
ring               75 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
ring               77 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
ring               83 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 					&ring[AQ_VEC_RX_ID], sw_tail_old);
ring              107 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              136 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
ring              138 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		if (!ring) {
ring              145 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
ring              147 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
ring              149 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		if (!ring) {
ring              168 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              175 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (i = 0U, ring = self->ring[0];
ring              176 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > i; ++i, ring = self->ring[i]) {
ring              177 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
ring              182 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 						       &ring[AQ_VEC_TX_ID],
ring              187 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
ring              192 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 						       &ring[AQ_VEC_RX_ID],
ring              197 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
ring              202 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 						       &ring[AQ_VEC_RX_ID], 0U);
ring              213 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              217 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (i = 0U, ring = self->ring[0];
ring              218 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > i; ++i, ring = self->ring[i]) {
ring              220 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 							&ring[AQ_VEC_TX_ID]);
ring              225 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 							&ring[AQ_VEC_RX_ID]);
ring              238 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              241 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (i = 0U, ring = self->ring[0];
ring              242 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > i; ++i, ring = self->ring[i]) {
ring              244 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 						 &ring[AQ_VEC_TX_ID]);
ring              247 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 						 &ring[AQ_VEC_RX_ID]);
ring              255 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              261 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (i = 0U, ring = self->ring[0];
ring              262 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > i; ++i, ring = self->ring[i]) {
ring              263 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
ring              264 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
ring              271 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              277 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (i = 0U, ring = self->ring[0];
ring              278 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > i; ++i, ring = self->ring[i]) {
ring              279 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		aq_ring_free(&ring[AQ_VEC_TX_ID]);
ring              280 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		aq_ring_free(&ring[AQ_VEC_RX_ID]);
ring              338 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	struct aq_ring_s *ring = NULL;
ring              341 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 	for (r = 0U, ring = self->ring[0];
ring              342 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		self->tx_rings > r; ++r, ring = self->ring[r]) {
ring              343 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
ring              344 drivers/net/ethernet/aquantia/atlantic/aq_vec.c 		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
ring              404 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				      struct aq_ring_s *ring)
ring              406 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
ring              411 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				      struct aq_ring_s *ring)
ring              413 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
ring              425 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 					    struct aq_ring_s *ring)
ring              427 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
ring              432 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				     struct aq_ring_s *ring,
ring              442 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	buff = &ring->buff_ring[ring->sw_tail];
ring              446 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
ring              452 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		buff = &ring->buff_ring[ring->sw_tail];
ring              499 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
ring              502 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_a0_hw_tx_ring_tail_update(self, ring);
ring              579 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				     struct aq_ring_s *ring,
ring              582 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	for (; sw_tail_old != ring->sw_tail;
ring              583 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
ring              585 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
ring              588 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
ring              594 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
ring              600 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 					    struct aq_ring_s *ring)
ring              603 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
ring              609 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	ring->hw_head = hw_head;
ring              617 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 					struct aq_ring_s *ring)
ring              619 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	for (; ring->hw_head != ring->sw_tail;
ring              620 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
ring              623 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 			&ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
ring              631 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 			hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
ring              632 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
ring              633 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
ring              634 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
ring              635 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
ring              638 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 			if (ring->hw_head ||
ring              640 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 							     ring->idx) < 2U)) {
ring              645 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 					(&ring->dx_ring[(1U) *
ring              657 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 		buff = &ring->buff_ring[ring->hw_head];
ring              709 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				buff->next = aq_ring_next_dx(ring,
ring              710 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 							     ring->hw_head);
ring              711 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				++ring->stats.rx.jumbo_packets;
ring              866 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				     struct aq_ring_s *ring)
ring              868 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
ring              873 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 				     struct aq_ring_s *ring)
ring              875 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c 	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
ring              459 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				      struct aq_ring_s *ring)
ring              461 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
ring              466 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				      struct aq_ring_s *ring)
ring              468 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
ring              480 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 					    struct aq_ring_s *ring)
ring              482 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
ring              487 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				     struct aq_ring_s *ring,
ring              498 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	buff = &ring->buff_ring[ring->sw_tail];
ring              502 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
ring              508 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		buff = &ring->buff_ring[ring->sw_tail];
ring              566 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
ring              569 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_b0_hw_tx_ring_tail_update(self, ring);
ring              647 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				     struct aq_ring_s *ring,
ring              650 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	for (; sw_tail_old != ring->sw_tail;
ring              651 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
ring              653 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 			(struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
ring              656 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
ring              662 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
ring              668 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 					    struct aq_ring_s *ring)
ring              671 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	unsigned int hw_head_ = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);
ring              677 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	ring->hw_head = hw_head_;
ring              685 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 					struct aq_ring_s *ring)
ring              687 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	for (; ring->hw_head != ring->sw_tail;
ring              688 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
ring              691 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 			&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
ring              701 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 		buff = &ring->buff_ring[ring->hw_head];
ring              770 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				++ring->stats.rx.lro_packets;
ring              774 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 					aq_ring_next_dx(ring,
ring              775 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 							ring->hw_head);
ring              776 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				++ring->stats.rx.jumbo_packets;
ring              995 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				     struct aq_ring_s *ring)
ring              997 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
ring             1002 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 				     struct aq_ring_s *ring)
ring             1004 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c 	hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
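
Both the hw_atl_a0 and hw_atl_b0 paths above keep two cursors per ring: sw_tail, the next descriptor software fills, and hw_head, the next descriptor hardware has completed. Each loop advances its cursor with aq_ring_next_dx(). A minimal sketch of that bookkeeping, assuming aq_ring_next_dx() simply increments modulo the ring size (the struct below is a stand-in, not the real aq_ring_s):

	#include <stdio.h>

	struct ring {
		unsigned int size;     /* number of descriptors */
		unsigned int sw_tail;  /* next slot software fills */
		unsigned int hw_head;  /* next slot hardware completes */
	};

	/* Advance an index by one slot, wrapping at ring->size. */
	static unsigned int ring_next_dx(const struct ring *r, unsigned int dx)
	{
		return (dx + 1 < r->size) ? dx + 1 : 0;
	}

	int main(void)
	{
		struct ring r = { .size = 4 };
		int i;

		/* Post three buffers the way the rx_fill loops walk sw_tail... */
		for (i = 0; i < 3; i++)
			r.sw_tail = ring_next_dx(&r, r.sw_tail);

		/* ...then drain them the way the rx_receive loops walk
		 * hw_head toward sw_tail. */
		while (r.hw_head != r.sw_tail) {
			printf("complete slot %u\n", r.hw_head);
			r.hw_head = ring_next_dx(&r, r.hw_head);
		}
		return 0;
	}
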
ring              337 drivers/net/ethernet/atheros/ag71xx.c static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
ring              339 drivers/net/ethernet/atheros/ag71xx.c 	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
ring              631 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
ring              637 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
ring              638 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
ring              642 drivers/net/ethernet/atheros/ag71xx.c 	while (ring->dirty + n != ring->curr) {
ring              647 drivers/net/ethernet/atheros/ag71xx.c 		i = (ring->dirty + n) & ring_mask;
ring              648 drivers/net/ethernet/atheros/ag71xx.c 		desc = ag71xx_ring_desc(ring, i);
ring              649 drivers/net/ethernet/atheros/ag71xx.c 		skb = ring->buf[i].tx.skb;
ring              669 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
ring              671 drivers/net/ethernet/atheros/ag71xx.c 		bytes_compl += ring->buf[i].tx.len;
ring              674 drivers/net/ethernet/atheros/ag71xx.c 		ring->dirty += n;
ring              691 drivers/net/ethernet/atheros/ag71xx.c 	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
ring              957 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
ring              958 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
ring              962 drivers/net/ethernet/atheros/ag71xx.c 	while (ring->curr != ring->dirty) {
ring              964 drivers/net/ethernet/atheros/ag71xx.c 		u32 i = ring->dirty & ring_mask;
ring              966 drivers/net/ethernet/atheros/ag71xx.c 		desc = ag71xx_ring_desc(ring, i);
ring              972 drivers/net/ethernet/atheros/ag71xx.c 		if (ring->buf[i].tx.skb) {
ring              973 drivers/net/ethernet/atheros/ag71xx.c 			bytes_compl += ring->buf[i].tx.len;
ring              975 drivers/net/ethernet/atheros/ag71xx.c 			dev_kfree_skb_any(ring->buf[i].tx.skb);
ring              977 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
ring              978 drivers/net/ethernet/atheros/ag71xx.c 		ring->dirty++;
ring              989 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->tx_ring;
ring              990 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
ring              995 drivers/net/ethernet/atheros/ag71xx.c 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
ring              997 drivers/net/ethernet/atheros/ag71xx.c 		desc->next = (u32)(ring->descs_dma +
ring             1001 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].tx.skb = NULL;
ring             1007 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr = 0;
ring             1008 drivers/net/ethernet/atheros/ag71xx.c 	ring->dirty = 0;
ring             1014 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->rx_ring;
ring             1015 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
ring             1018 drivers/net/ethernet/atheros/ag71xx.c 	if (!ring->buf)
ring             1022 drivers/net/ethernet/atheros/ag71xx.c 		if (ring->buf[i].rx.rx_buf) {
ring             1024 drivers/net/ethernet/atheros/ag71xx.c 					 ring->buf[i].rx.dma_addr,
ring             1026 drivers/net/ethernet/atheros/ag71xx.c 			skb_free_frag(ring->buf[i].rx.rx_buf);
ring             1040 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->rx_ring;
ring             1044 drivers/net/ethernet/atheros/ag71xx.c 	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
ring             1059 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->rx_ring;
ring             1061 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
ring             1062 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
ring             1068 drivers/net/ethernet/atheros/ag71xx.c 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
ring             1070 drivers/net/ethernet/atheros/ag71xx.c 		desc->next = (u32)(ring->descs_dma +
ring             1078 drivers/net/ethernet/atheros/ag71xx.c 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
ring             1080 drivers/net/ethernet/atheros/ag71xx.c 		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
ring             1092 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr = 0;
ring             1093 drivers/net/ethernet/atheros/ag71xx.c 	ring->dirty = 0;
ring             1100 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring = &ag->rx_ring;
ring             1101 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
ring             1106 drivers/net/ethernet/atheros/ag71xx.c 	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
ring             1110 drivers/net/ethernet/atheros/ag71xx.c 		i = ring->dirty & ring_mask;
ring             1111 drivers/net/ethernet/atheros/ag71xx.c 		desc = ag71xx_ring_desc(ring, i);
ring             1113 drivers/net/ethernet/atheros/ag71xx.c 		if (!ring->buf[i].rx.rx_buf &&
ring             1114 drivers/net/ethernet/atheros/ag71xx.c 		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
ring             1278 drivers/net/ethernet/atheros/ag71xx.c static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
ring             1283 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
ring             1285 drivers/net/ethernet/atheros/ag71xx.c 	split = ring->desc_split;
ring             1293 drivers/net/ethernet/atheros/ag71xx.c 		i = (ring->curr + ndesc) & ring_mask;
ring             1294 drivers/net/ethernet/atheros/ag71xx.c 		desc = ag71xx_ring_desc(ring, i);
ring             1332 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring;
ring             1336 drivers/net/ethernet/atheros/ag71xx.c 	ring = &ag->tx_ring;
ring             1337 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
ring             1338 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
ring             1348 drivers/net/ethernet/atheros/ag71xx.c 	i = ring->curr & ring_mask;
ring             1349 drivers/net/ethernet/atheros/ag71xx.c 	desc = ag71xx_ring_desc(ring, i);
ring             1352 drivers/net/ethernet/atheros/ag71xx.c 	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
ring             1357 drivers/net/ethernet/atheros/ag71xx.c 	i = (ring->curr + n - 1) & ring_mask;
ring             1358 drivers/net/ethernet/atheros/ag71xx.c 	ring->buf[i].tx.len = skb->len;
ring             1359 drivers/net/ethernet/atheros/ag71xx.c 	ring->buf[i].tx.skb = skb;
ring             1366 drivers/net/ethernet/atheros/ag71xx.c 	ring->curr += n;
ring             1372 drivers/net/ethernet/atheros/ag71xx.c 	if (ring->desc_split)
ring             1375 drivers/net/ethernet/atheros/ag71xx.c 	if (ring->curr - ring->dirty >= ring_size - ring_min) {
ring             1441 drivers/net/ethernet/atheros/ag71xx.c 	struct ag71xx_ring *ring;
ring             1444 drivers/net/ethernet/atheros/ag71xx.c 	ring = &ag->rx_ring;
ring             1447 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
ring             1448 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
ring             1451 drivers/net/ethernet/atheros/ag71xx.c 		  limit, ring->curr, ring->dirty);
ring             1456 drivers/net/ethernet/atheros/ag71xx.c 		unsigned int i = ring->curr & ring_mask;
ring             1457 drivers/net/ethernet/atheros/ag71xx.c 		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
ring             1464 drivers/net/ethernet/atheros/ag71xx.c 		if ((ring->dirty + ring_size) == ring->curr) {
ring             1474 drivers/net/ethernet/atheros/ag71xx.c 		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
ring             1480 drivers/net/ethernet/atheros/ag71xx.c 		skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
ring             1482 drivers/net/ethernet/atheros/ag71xx.c 			skb_free_frag(ring->buf[i].rx.rx_buf);
ring             1499 drivers/net/ethernet/atheros/ag71xx.c 		ring->buf[i].rx.rx_buf = NULL;
ring             1502 drivers/net/ethernet/atheros/ag71xx.c 		ring->curr++;
ring             1512 drivers/net/ethernet/atheros/ag71xx.c 		  ring->curr, ring->dirty, done);
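
The ag71xx code never wraps curr and dirty itself: the ring size is a power of two (BIT(ring->order)), so both counters run free and every slot access masks with ring_mask = BIT(ring->order) - 1. Occupancy is then simply curr - dirty, which stays correct across unsigned overflow. A compilable sketch of that convention (field names mirror the driver, the struct itself is illustrative):

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	struct tx_ring {
		unsigned int order;  /* ring holds BIT(order) descriptors */
		unsigned int curr;   /* producer index, free-running */
		unsigned int dirty;  /* consumer index, free-running */
	};

	int main(void)
	{
		struct tx_ring ring = { .order = 2 };  /* 4 slots */
		unsigned int ring_mask = BIT(ring.order) - 1;
		unsigned int ring_size = BIT(ring.order);
		int pkt;

		for (pkt = 0; pkt < 6; pkt++) {
			if (ring.curr - ring.dirty >= ring_size) {
				/* full: reclaim one slot, as ag71xx_tx_packets() would */
				printf("reclaim slot %u\n", ring.dirty & ring_mask);
				ring.dirty++;
			}
			printf("queue pkt %d in slot %u\n", pkt, ring.curr & ring_mask);
			ring.curr++;
		}
		return 0;
	}
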
ring             3445 drivers/net/ethernet/atheros/atlx/atl1.c 	struct ethtool_ringparam *ring)
ring             3451 drivers/net/ethernet/atheros/atlx/atl1.c 	ring->rx_max_pending = ATL1_MAX_RFD;
ring             3452 drivers/net/ethernet/atheros/atlx/atl1.c 	ring->tx_max_pending = ATL1_MAX_TPD;
ring             3453 drivers/net/ethernet/atheros/atlx/atl1.c 	ring->rx_pending = rxdr->count;
ring             3454 drivers/net/ethernet/atheros/atlx/atl1.c 	ring->tx_pending = txdr->count;
ring             3458 drivers/net/ethernet/atheros/atlx/atl1.c 	struct ethtool_ringparam *ring)
ring             3481 drivers/net/ethernet/atheros/atlx/atl1.c 	rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
ring             3487 drivers/net/ethernet/atheros/atlx/atl1.c 	tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
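
The atl1 ethtool pair follows the usual contract: get_ringparam reports the hardware maxima and current descriptor counts, and set_ringparam clamps the request from below with max() (the upper bound is, as far as I know, enforced by the ethtool core against the advertised *_max_pending before the driver callback runs). A tiny sketch of the lower clamp, with a placeholder minimum instead of the real ATL1_MIN_RFD/ATL1_MIN_TPD:

	#include <stdio.h>

	#define MIN_DESC 64u  /* placeholder minimum, not the real ATL1_* value */

	static unsigned short ring_count(unsigned int requested)
	{
		/* mirrors: (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD) */
		return (unsigned short)(requested > MIN_DESC ? requested : MIN_DESC);
	}

	int main(void)
	{
		printf("%u %u\n", ring_count(16), ring_count(512));  /* 64 512 */
		return 0;
	}
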
ring              431 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring;
ring              437 drivers/net/ethernet/broadcom/bcmsysport.c 		ring = &priv->tx_rings[q];
ring              440 drivers/net/ethernet/broadcom/bcmsysport.c 			bytes = ring->bytes;
ring              441 drivers/net/ethernet/broadcom/bcmsysport.c 			packets = ring->packets;
ring              455 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring;
ring              503 drivers/net/ethernet/broadcom/bcmsysport.c 		ring = &priv->tx_rings[i];
ring              504 drivers/net/ethernet/broadcom/bcmsysport.c 		data[j] = ring->packets;
ring              506 drivers/net/ethernet/broadcom/bcmsysport.c 		data[j] = ring->bytes;
ring              573 drivers/net/ethernet/broadcom/bcmsysport.c static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
ring              576 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_priv *priv = ring->priv;
ring              579 drivers/net/ethernet/broadcom/bcmsysport.c 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
ring              585 drivers/net/ethernet/broadcom/bcmsysport.c 	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
ring              850 drivers/net/ethernet/broadcom/bcmsysport.c static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
ring              855 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_priv *priv = ring->priv;
ring              876 drivers/net/ethernet/broadcom/bcmsysport.c 					     struct bcm_sysport_tx_ring *ring)
ring              887 drivers/net/ethernet/broadcom/bcmsysport.c 	if (!ring->priv->is_lite)
ring              888 drivers/net/ethernet/broadcom/bcmsysport.c 		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
ring              890 drivers/net/ethernet/broadcom/bcmsysport.c 		intrl2_0_writel(ring->priv, BIT(ring->index +
ring              894 drivers/net/ethernet/broadcom/bcmsysport.c 	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
ring              896 drivers/net/ethernet/broadcom/bcmsysport.c 	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
ring              900 drivers/net/ethernet/broadcom/bcmsysport.c 		  ring->index, ring->c_index, c_index, txbds_ready);
ring              903 drivers/net/ethernet/broadcom/bcmsysport.c 		cb = &ring->cbs[ring->clean_index];
ring              904 drivers/net/ethernet/broadcom/bcmsysport.c 		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring              906 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->desc_count++;
ring              909 drivers/net/ethernet/broadcom/bcmsysport.c 		if (likely(ring->clean_index < ring->size - 1))
ring              910 drivers/net/ethernet/broadcom/bcmsysport.c 			ring->clean_index++;
ring              912 drivers/net/ethernet/broadcom/bcmsysport.c 			ring->clean_index = 0;
ring              916 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->packets += pkts_compl;
ring              917 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->bytes += bytes_compl;
ring              920 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->c_index = c_index;
ring              924 drivers/net/ethernet/broadcom/bcmsysport.c 		  ring->index, ring->c_index, pkts_compl, bytes_compl);
ring              931 drivers/net/ethernet/broadcom/bcmsysport.c 					   struct bcm_sysport_tx_ring *ring)
ring              937 drivers/net/ethernet/broadcom/bcmsysport.c 	txq = netdev_get_tx_queue(priv->netdev, ring->index);
ring              939 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_lock_irqsave(&ring->lock, flags);
ring              940 drivers/net/ethernet/broadcom/bcmsysport.c 	released = __bcm_sysport_tx_reclaim(priv, ring);
ring              944 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              951 drivers/net/ethernet/broadcom/bcmsysport.c 				 struct bcm_sysport_tx_ring *ring)
ring              955 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_lock_irqsave(&ring->lock, flags);
ring              956 drivers/net/ethernet/broadcom/bcmsysport.c 	__bcm_sysport_tx_reclaim(priv, ring);
ring              957 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              962 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring =
ring              966 drivers/net/ethernet/broadcom/bcmsysport.c 	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
ring              971 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!ring->priv->is_lite)
ring              972 drivers/net/ethernet/broadcom/bcmsysport.c 			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
ring              974 drivers/net/ethernet/broadcom/bcmsysport.c 			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
ring             1108 drivers/net/ethernet/broadcom/bcmsysport.c 	unsigned int ring, ring_bit;
ring             1137 drivers/net/ethernet/broadcom/bcmsysport.c 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
ring             1138 drivers/net/ethernet/broadcom/bcmsysport.c 		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
ring             1142 drivers/net/ethernet/broadcom/bcmsysport.c 		txr = &priv->tx_rings[ring];
ring             1159 drivers/net/ethernet/broadcom/bcmsysport.c 	unsigned int ring;
ring             1170 drivers/net/ethernet/broadcom/bcmsysport.c 	for (ring = 0; ring < dev->num_tx_queues; ring++) {
ring             1171 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!(priv->irq1_stat & BIT(ring)))
ring             1174 drivers/net/ethernet/broadcom/bcmsysport.c 		txr = &priv->tx_rings[ring];
ring             1177 drivers/net/ethernet/broadcom/bcmsysport.c 			intrl2_1_mask_set(priv, BIT(ring));
ring             1279 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring;
ring             1291 drivers/net/ethernet/broadcom/bcmsysport.c 	ring = &priv->tx_rings[queue];
ring             1294 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_lock_irqsave(&ring->lock, flags);
ring             1295 drivers/net/ethernet/broadcom/bcmsysport.c 	if (unlikely(ring->desc_count == 0)) {
ring             1323 drivers/net/ethernet/broadcom/bcmsysport.c 	cb = &ring->cbs[ring->curr_desc];
ring             1336 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->curr_desc++;
ring             1337 drivers/net/ethernet/broadcom/bcmsysport.c 	if (ring->curr_desc == ring->size)
ring             1338 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->curr_desc = 0;
ring             1339 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->desc_count--;
ring             1342 drivers/net/ethernet/broadcom/bcmsysport.c 	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
ring             1343 drivers/net/ethernet/broadcom/bcmsysport.c 	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
ring             1346 drivers/net/ethernet/broadcom/bcmsysport.c 	if (ring->desc_count == 0)
ring             1350 drivers/net/ethernet/broadcom/bcmsysport.c 		  ring->index, ring->desc_count, ring->curr_desc);
ring             1354 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring             1468 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
ring             1475 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
ring             1476 drivers/net/ethernet/broadcom/bcmsysport.c 	if (!ring->cbs) {
ring             1482 drivers/net/ethernet/broadcom/bcmsysport.c 	spin_lock_init(&ring->lock);
ring             1483 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->priv = priv;
ring             1484 drivers/net/ethernet/broadcom/bcmsysport.c 	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring             1485 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->index = index;
ring             1486 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->size = size;
ring             1487 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->clean_index = 0;
ring             1488 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->alloc_size = ring->size;
ring             1489 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->desc_count = ring->size;
ring             1490 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->curr_desc = 0;
ring             1501 drivers/net/ethernet/broadcom/bcmsysport.c 	if (ring->inspect) {
ring             1502 drivers/net/ethernet/broadcom/bcmsysport.c 		reg |= ring->switch_queue & RING_QID_MASK;
ring             1503 drivers/net/ethernet/broadcom/bcmsysport.c 		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
ring             1531 drivers/net/ethernet/broadcom/bcmsysport.c 	tdma_writel(priv, ring->size |
ring             1540 drivers/net/ethernet/broadcom/bcmsysport.c 	napi_enable(&ring->napi);
ring             1544 drivers/net/ethernet/broadcom/bcmsysport.c 		  ring->size, ring->switch_queue,
ring             1545 drivers/net/ethernet/broadcom/bcmsysport.c 		  ring->switch_port);
ring             1553 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
ring             1565 drivers/net/ethernet/broadcom/bcmsysport.c 	if (!ring->cbs)
ring             1568 drivers/net/ethernet/broadcom/bcmsysport.c 	napi_disable(&ring->napi);
ring             1569 drivers/net/ethernet/broadcom/bcmsysport.c 	netif_napi_del(&ring->napi);
ring             1571 drivers/net/ethernet/broadcom/bcmsysport.c 	bcm_sysport_tx_clean(priv, ring);
ring             1573 drivers/net/ethernet/broadcom/bcmsysport.c 	kfree(ring->cbs);
ring             1574 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->cbs = NULL;
ring             1575 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->size = 0;
ring             1576 drivers/net/ethernet/broadcom/bcmsysport.c 	ring->alloc_size = 0;
ring             2271 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring;
ring             2316 drivers/net/ethernet/broadcom/bcmsysport.c 		ring = &priv->tx_rings[q];
ring             2318 drivers/net/ethernet/broadcom/bcmsysport.c 		if (ring->inspect)
ring             2324 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->switch_queue = qp;
ring             2325 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->switch_port = port;
ring             2326 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->inspect = true;
ring             2327 drivers/net/ethernet/broadcom/bcmsysport.c 		priv->ring_map[qp + port * num_tx_queues] = ring;
ring             2337 drivers/net/ethernet/broadcom/bcmsysport.c 	struct bcm_sysport_tx_ring *ring;
ring             2359 drivers/net/ethernet/broadcom/bcmsysport.c 		ring = &priv->tx_rings[q];
ring             2361 drivers/net/ethernet/broadcom/bcmsysport.c 		if (ring->switch_port != port)
ring             2364 drivers/net/ethernet/broadcom/bcmsysport.c 		if (!ring->inspect)
ring             2367 drivers/net/ethernet/broadcom/bcmsysport.c 		ring->inspect = false;
ring             2368 drivers/net/ethernet/broadcom/bcmsysport.c 		qp = ring->switch_queue;
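
__bcm_sysport_tx_reclaim() reads a running consumer index from TDMA_DESC_RING_PROD_CONS_INDEX and takes the masked difference against the cached ring->c_index to learn how many descriptors completed since the last pass; the mask makes the subtraction immune to index wraparound. A self-contained illustration (the mask width here is illustrative, not the real RING_CONS_INDEX_MASK):

	#include <stdio.h>

	#define CONS_INDEX_MASK 0xffffu

	int main(void)
	{
		unsigned int last_c_index = 0xfffe; /* what the driver cached */
		unsigned int hw_c_index  = 0x0003;  /* what hardware reports now */
		unsigned int ready = (hw_c_index - last_c_index) & CONS_INDEX_MASK;

		/* prints 5: five descriptors completed even though the raw
		 * index wrapped past the mask boundary */
		printf("txbds_ready = %u\n", ready);
		return 0;
	}
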
ring               41 drivers/net/ethernet/broadcom/bgmac.c static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
ring               46 drivers/net/ethernet/broadcom/bgmac.c 	if (!ring->mmio_base)
ring               53 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
ring               56 drivers/net/ethernet/broadcom/bgmac.c 		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
ring               68 drivers/net/ethernet/broadcom/bgmac.c 			ring->mmio_base, val);
ring               71 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
ring               73 drivers/net/ethernet/broadcom/bgmac.c 			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
ring               77 drivers/net/ethernet/broadcom/bgmac.c 			 ring->mmio_base);
ring               79 drivers/net/ethernet/broadcom/bgmac.c 		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
ring               82 drivers/net/ethernet/broadcom/bgmac.c 				ring->mmio_base);
ring               87 drivers/net/ethernet/broadcom/bgmac.c 				struct bgmac_dma_ring *ring)
ring               91 drivers/net/ethernet/broadcom/bgmac.c 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
ring              107 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
ring              111 drivers/net/ethernet/broadcom/bgmac.c bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
ring              123 drivers/net/ethernet/broadcom/bgmac.c 	slot = &ring->slots[i];
ring              124 drivers/net/ethernet/broadcom/bgmac.c 	dma_desc = &ring->cpu_base[i];
ring              132 drivers/net/ethernet/broadcom/bgmac.c 				    struct bgmac_dma_ring *ring,
ring              137 drivers/net/ethernet/broadcom/bgmac.c 	int index = ring->end % BGMAC_TX_RING_SLOTS;
ring              138 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_slot_info *slot = &ring->slots[index];
ring              156 drivers/net/ethernet/broadcom/bgmac.c 	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
ring              171 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
ring              179 drivers/net/ethernet/broadcom/bgmac.c 		slot = &ring->slots[index];
ring              188 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
ring              192 drivers/net/ethernet/broadcom/bgmac.c 	ring->end += nr_frags + 1;
ring              200 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
ring              201 drivers/net/ethernet/broadcom/bgmac.c 		    ring->index_base +
ring              202 drivers/net/ethernet/broadcom/bgmac.c 		    (ring->end % BGMAC_TX_RING_SLOTS) *
ring              205 drivers/net/ethernet/broadcom/bgmac.c 	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
ring              215 drivers/net/ethernet/broadcom/bgmac.c 		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
ring              216 drivers/net/ethernet/broadcom/bgmac.c 		struct bgmac_slot_info *slot = &ring->slots[index];
ring              217 drivers/net/ethernet/broadcom/bgmac.c 		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
ring              225 drivers/net/ethernet/broadcom/bgmac.c 		   ring->mmio_base);
ring              235 drivers/net/ethernet/broadcom/bgmac.c static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
ring              242 drivers/net/ethernet/broadcom/bgmac.c 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
ring              244 drivers/net/ethernet/broadcom/bgmac.c 	empty_slot -= ring->index_base;
ring              248 drivers/net/ethernet/broadcom/bgmac.c 	while (ring->start != ring->end) {
ring              249 drivers/net/ethernet/broadcom/bgmac.c 		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
ring              250 drivers/net/ethernet/broadcom/bgmac.c 		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
ring              257 drivers/net/ethernet/broadcom/bgmac.c 		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ring              258 drivers/net/ethernet/broadcom/bgmac.c 		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
ring              280 drivers/net/ethernet/broadcom/bgmac.c 		ring->start++;
ring              292 drivers/net/ethernet/broadcom/bgmac.c static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
ring              294 drivers/net/ethernet/broadcom/bgmac.c 	if (!ring->mmio_base)
ring              297 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
ring              299 drivers/net/ethernet/broadcom/bgmac.c 			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
ring              303 drivers/net/ethernet/broadcom/bgmac.c 			ring->mmio_base);
ring              307 drivers/net/ethernet/broadcom/bgmac.c 				struct bgmac_dma_ring *ring)
ring              311 drivers/net/ethernet/broadcom/bgmac.c 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
ring              330 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
ring              368 drivers/net/ethernet/broadcom/bgmac.c 				      struct bgmac_dma_ring *ring)
ring              372 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
ring              373 drivers/net/ethernet/broadcom/bgmac.c 		    ring->index_base +
ring              374 drivers/net/ethernet/broadcom/bgmac.c 		    ring->end * sizeof(struct bgmac_dma_desc));
ring              378 drivers/net/ethernet/broadcom/bgmac.c 				    struct bgmac_dma_ring *ring, int desc_idx)
ring              380 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
ring              391 drivers/net/ethernet/broadcom/bgmac.c 	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
ring              392 drivers/net/ethernet/broadcom/bgmac.c 	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
ring              396 drivers/net/ethernet/broadcom/bgmac.c 	ring->end = desc_idx;
ring              412 drivers/net/ethernet/broadcom/bgmac.c static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
ring              418 drivers/net/ethernet/broadcom/bgmac.c 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
ring              420 drivers/net/ethernet/broadcom/bgmac.c 	end_slot -= ring->index_base;
ring              424 drivers/net/ethernet/broadcom/bgmac.c 	while (ring->start != end_slot) {
ring              426 drivers/net/ethernet/broadcom/bgmac.c 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
ring              451 drivers/net/ethernet/broadcom/bgmac.c 					   ring->start);
ring              459 drivers/net/ethernet/broadcom/bgmac.c 					   ring->start);
ring              489 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
ring              491 drivers/net/ethernet/broadcom/bgmac.c 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
ring              492 drivers/net/ethernet/broadcom/bgmac.c 			ring->start = 0;
ring              498 drivers/net/ethernet/broadcom/bgmac.c 	bgmac_dma_rx_update_index(bgmac, ring);
ring              505 drivers/net/ethernet/broadcom/bgmac.c 				struct bgmac_dma_ring *ring,
ring              510 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
ring              512 drivers/net/ethernet/broadcom/bgmac.c 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
ring              516 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
ring              518 drivers/net/ethernet/broadcom/bgmac.c 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
ring              526 drivers/net/ethernet/broadcom/bgmac.c 				   struct bgmac_dma_ring *ring)
ring              529 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
ring              537 drivers/net/ethernet/broadcom/bgmac.c 		slot = &ring->slots[i];
ring              553 drivers/net/ethernet/broadcom/bgmac.c 				   struct bgmac_dma_ring *ring)
ring              560 drivers/net/ethernet/broadcom/bgmac.c 		slot = &ring->slots[i];
ring              573 drivers/net/ethernet/broadcom/bgmac.c 				     struct bgmac_dma_ring *ring,
ring              579 drivers/net/ethernet/broadcom/bgmac.c 	if (!ring->cpu_base)
ring              584 drivers/net/ethernet/broadcom/bgmac.c 	dma_free_coherent(dma_dev, size, ring->cpu_base,
ring              585 drivers/net/ethernet/broadcom/bgmac.c 			  ring->dma_base);
ring              615 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_dma_ring *ring;
ring              632 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->tx_ring[i];
ring              633 drivers/net/ethernet/broadcom/bgmac.c 		ring->mmio_base = ring_base[i];
ring              637 drivers/net/ethernet/broadcom/bgmac.c 		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
ring              638 drivers/net/ethernet/broadcom/bgmac.c 						    &ring->dma_base,
ring              640 drivers/net/ethernet/broadcom/bgmac.c 		if (!ring->cpu_base) {
ring              642 drivers/net/ethernet/broadcom/bgmac.c 				ring->mmio_base);
ring              646 drivers/net/ethernet/broadcom/bgmac.c 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
ring              648 drivers/net/ethernet/broadcom/bgmac.c 		if (ring->unaligned)
ring              649 drivers/net/ethernet/broadcom/bgmac.c 			ring->index_base = lower_32_bits(ring->dma_base);
ring              651 drivers/net/ethernet/broadcom/bgmac.c 			ring->index_base = 0;
ring              657 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->rx_ring[i];
ring              658 drivers/net/ethernet/broadcom/bgmac.c 		ring->mmio_base = ring_base[i];
ring              662 drivers/net/ethernet/broadcom/bgmac.c 		ring->cpu_base = dma_alloc_coherent(dma_dev, size,
ring              663 drivers/net/ethernet/broadcom/bgmac.c 						    &ring->dma_base,
ring              665 drivers/net/ethernet/broadcom/bgmac.c 		if (!ring->cpu_base) {
ring              667 drivers/net/ethernet/broadcom/bgmac.c 				ring->mmio_base);
ring              671 drivers/net/ethernet/broadcom/bgmac.c 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
ring              673 drivers/net/ethernet/broadcom/bgmac.c 		if (ring->unaligned)
ring              674 drivers/net/ethernet/broadcom/bgmac.c 			ring->index_base = lower_32_bits(ring->dma_base);
ring              676 drivers/net/ethernet/broadcom/bgmac.c 			ring->index_base = 0;
ring              688 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_dma_ring *ring;
ring              692 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->tx_ring[i];
ring              694 drivers/net/ethernet/broadcom/bgmac.c 		if (!ring->unaligned)
ring              695 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_tx_enable(bgmac, ring);
ring              696 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
ring              697 drivers/net/ethernet/broadcom/bgmac.c 			    lower_32_bits(ring->dma_base));
ring              698 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
ring              699 drivers/net/ethernet/broadcom/bgmac.c 			    upper_32_bits(ring->dma_base));
ring              700 drivers/net/ethernet/broadcom/bgmac.c 		if (ring->unaligned)
ring              701 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_tx_enable(bgmac, ring);
ring              703 drivers/net/ethernet/broadcom/bgmac.c 		ring->start = 0;
ring              704 drivers/net/ethernet/broadcom/bgmac.c 		ring->end = 0;	/* Points the slot that should *not* be read */
ring              710 drivers/net/ethernet/broadcom/bgmac.c 		ring = &bgmac->rx_ring[i];
ring              712 drivers/net/ethernet/broadcom/bgmac.c 		if (!ring->unaligned)
ring              713 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_rx_enable(bgmac, ring);
ring              714 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
ring              715 drivers/net/ethernet/broadcom/bgmac.c 			    lower_32_bits(ring->dma_base));
ring              716 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
ring              717 drivers/net/ethernet/broadcom/bgmac.c 			    upper_32_bits(ring->dma_base));
ring              718 drivers/net/ethernet/broadcom/bgmac.c 		if (ring->unaligned)
ring              719 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_rx_enable(bgmac, ring);
ring              721 drivers/net/ethernet/broadcom/bgmac.c 		ring->start = 0;
ring              722 drivers/net/ethernet/broadcom/bgmac.c 		ring->end = 0;
ring              724 drivers/net/ethernet/broadcom/bgmac.c 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
ring              728 drivers/net/ethernet/broadcom/bgmac.c 			bgmac_dma_rx_setup_desc(bgmac, ring, j);
ring              731 drivers/net/ethernet/broadcom/bgmac.c 		bgmac_dma_rx_update_index(bgmac, ring);
ring             1227 drivers/net/ethernet/broadcom/bgmac.c 	struct bgmac_dma_ring *ring;
ring             1230 drivers/net/ethernet/broadcom/bgmac.c 	ring = &bgmac->tx_ring[0];
ring             1231 drivers/net/ethernet/broadcom/bgmac.c 	return bgmac_dma_tx_add(bgmac, ring, skb);
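
bgmac_dma_init() programs each ring's 64-bit descriptor base by splitting it across the RINGLO/RINGHI register pair with lower_32_bits()/upper_32_bits(); for "unaligned" rings it additionally biases ring indexes by index_base = lower_32_bits(ring->dma_base). A sketch of the split, with the MMIO write stubbed out for a userspace build:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
	static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

	/* stand-in for bgmac_write(); just logs the register programming */
	static void reg_write(const char *reg, uint32_t val)
	{
		printf("%s <- 0x%08x\n", reg, val);
	}

	int main(void)
	{
		uint64_t dma_base = 0x00000001f0004000ull;

		reg_write("DMA_TX_RINGLO", lower_32_bits(dma_base));
		reg_write("DMA_TX_RINGHI", upper_32_bits(dma_base));

		/* unaligned case: slot indexes written to the INDEX register
		 * are offset by the low base bits */
		printf("index_base = 0x%08x\n", lower_32_bits(dma_base));
		return 0;
	}
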
ring             2780 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             2794 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &rxr->rx_ring_struct;
ring             2795 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_free_ring(bp, &ring->ring_mem);
ring             2797 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &rxr->rx_agg_ring_struct;
ring             2798 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_free_ring(bp, &ring->ring_mem);
ring             2834 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             2836 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &rxr->rx_ring_struct;
ring             2854 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
ring             2858 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring->grp_idx = i;
ring             2862 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring = &rxr->rx_agg_ring_struct;
ring             2863 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
ring             2867 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->grp_idx = i;
ring             2890 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             2898 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &txr->tx_ring_struct;
ring             2900 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_free_ring(bp, &ring->ring_mem);
ring             2926 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             2929 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &txr->tx_ring_struct;
ring             2931 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
ring             2935 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring->grp_idx = txr->bnapi->index;
ring             2955 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring->queue_id = bp->q_info[qidx].queue_id;
ring             2974 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             2981 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &cpr->cp_ring_struct;
ring             2983 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_free_ring(bp, &ring->ring_mem);
ring             2989 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				ring = &cpr2->cp_ring_struct;
ring             2990 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				bnxt_free_ring(bp, &ring->ring_mem);
ring             3001 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt_ring_struct *ring;
ring             3009 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring = &cpr->cp_ring_struct;
ring             3010 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rmem = &ring->ring_mem;
ring             3035 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             3042 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &cpr->cp_ring_struct;
ring             3044 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
ring             3049 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->map_idx = i + ulp_msix;
ring             3051 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->map_idx = i;
ring             3089 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             3095 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &cpr->cp_ring_struct;
ring             3096 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rmem = &ring->ring_mem;
ring             3107 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &rxr->rx_ring_struct;
ring             3108 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rmem = &ring->ring_mem;
ring             3116 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &rxr->rx_agg_ring_struct;
ring             3117 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rmem = &ring->ring_mem;
ring             3130 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &txr->tx_ring_struct;
ring             3131 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rmem = &ring->ring_mem;
ring             3141 drivers/net/ethernet/broadcom/bnxt/bnxt.c static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
ring             3147 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
ring             3148 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
ring             3167 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt_ring_struct *ring;
ring             3178 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring = &rxr->rx_ring_struct;
ring             3179 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_init_rxbd_pages(ring, type);
ring             3200 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring->fw_ring_id = INVALID_HW_RING_ID;
ring             3202 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring = &rxr->rx_agg_ring_struct;
ring             3203 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring->fw_ring_id = INVALID_HW_RING_ID;
ring             3211 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	bnxt_init_rxbd_pages(ring, type);
ring             3254 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring             3256 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring->fw_ring_id = INVALID_HW_RING_ID;
ring             3265 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring = &cpr2->cp_ring_struct;
ring             3266 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->fw_ring_id = INVALID_HW_RING_ID;
ring             3303 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring             3305 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring->fw_ring_id = INVALID_HW_RING_ID;
ring             4094 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring             4096 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
ring             4797 drivers/net/ethernet/broadcom/bnxt/bnxt.c static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
ring             4801 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	grp_info = &bp->grp_info[ring->grp_idx];
ring             4990 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	unsigned int ring = 0, grp_idx;
ring             5034 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = 0;
ring             5036 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = vnic_id - 1;
ring             5038 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = bp->rx_nr_rings - 1;
ring             5040 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	grp_idx = bp->rx_ring[ring].bnapi->index;
ring             5218 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				    struct bnxt_ring_struct *ring,
ring             5224 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
ring             5247 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		txr = container_of(ring, struct bnxt_tx_ring_info,
ring             5251 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		grp_info = &bp->grp_info[ring->grp_idx];
ring             5255 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		req.queue_id = cpu_to_le16(ring->queue_id);
ring             5265 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			grp_info = &bp->grp_info[ring->grp_idx];
ring             5279 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			grp_info = &bp->grp_info[ring->grp_idx];
ring             5298 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			req.cq_handle = cpu_to_le64(ring->handle);
ring             5328 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	ring->fw_ring_id = ring_id;
ring             5410 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
ring             5411 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		u32 map_idx = ring->map_idx;
ring             5416 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
ring             5421 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
ring             5424 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
ring             5427 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
ring             5436 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             5446 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring = &cpr2->cp_ring_struct;
ring             5447 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->handle = BNXT_TX_HDL;
ring             5449 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
ring             5453 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				    ring->fw_ring_id);
ring             5456 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &txr->tx_ring_struct;
ring             5458 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
ring             5461 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
ring             5467 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
ring             5471 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
ring             5474 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
ring             5478 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
ring             5485 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring = &cpr2->cp_ring_struct;
ring             5486 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->handle = BNXT_RX_HDL;
ring             5487 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
ring             5491 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				    ring->fw_ring_id);
ring             5500 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			struct bnxt_ring_struct *ring =
ring             5502 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			u32 grp_idx = ring->grp_idx;
ring             5505 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
ring             5510 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				    ring->fw_ring_id);
ring             5513 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
ring             5521 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				   struct bnxt_ring_struct *ring,
ring             5534 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	req.ring_id = cpu_to_le16(ring->fw_ring_id);
ring             5559 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring             5561 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
ring             5564 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hwrm_ring_free_send_msg(bp, ring,
ring             5568 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->fw_ring_id = INVALID_HW_RING_ID;
ring             5574 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
ring             5577 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
ring             5580 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hwrm_ring_free_send_msg(bp, ring,
ring             5584 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->fw_ring_id = INVALID_HW_RING_ID;
ring             5596 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
ring             5599 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
ring             5602 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hwrm_ring_free_send_msg(bp, ring, type,
ring             5605 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->fw_ring_id = INVALID_HW_RING_ID;
ring             5624 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		struct bnxt_ring_struct *ring;
ring             5631 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				ring = &cpr2->cp_ring_struct;
ring             5632 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				if (ring->fw_ring_id == INVALID_HW_RING_ID)
ring             5634 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				hwrm_ring_free_send_msg(bp, ring,
ring             5637 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				ring->fw_ring_id = INVALID_HW_RING_ID;
ring             5640 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		ring = &cpr->cp_ring_struct;
ring             5641 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
ring             5642 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			hwrm_ring_free_send_msg(bp, ring, type,
ring             5644 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			ring->fw_ring_id = INVALID_HW_RING_ID;
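
Throughout bnxt, ring->fw_ring_id acts as a lifecycle sentinel: rings are initialized to INVALID_HW_RING_ID, the id firmware returns from the HWRM alloc message replaces it, and the free path writes the sentinel back so a second free becomes a no-op. A sketch of that pattern with the HWRM messages stubbed out:

	#include <stdio.h>
	#include <stdint.h>

	#define INVALID_HW_RING_ID ((uint16_t)-1)

	struct ring_struct {
		uint16_t fw_ring_id;
	};

	/* stand-in for hwrm_ring_alloc_send_msg() */
	static void ring_alloc(struct ring_struct *ring, uint16_t fw_id)
	{
		ring->fw_ring_id = fw_id;  /* firmware-assigned id */
	}

	/* stand-in for hwrm_ring_free_send_msg() plus the sentinel reset */
	static void ring_free(struct ring_struct *ring)
	{
		if (ring->fw_ring_id == INVALID_HW_RING_ID)
			return;  /* never allocated, or already freed */
		printf("freeing fw ring %u\n", ring->fw_ring_id);
		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	int main(void)
	{
		struct ring_struct ring = { .fw_ring_id = INVALID_HW_RING_ID };

		ring_alloc(&ring, 7);
		ring_free(&ring);
		ring_free(&ring);  /* safely ignored */
		return 0;
	}
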
ring              223 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	int ring;
ring              231 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
ring              232 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 	txr = &bp->tx_ring[ring];
ring              238 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
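
The bnxt_xdp fragment picks a transmit ring with smp_processor_id() % bp->tx_nr_rings_xdp, so redirected frames from a given CPU consistently land on the same XDP ring. The mapping itself is plain modulo arithmetic, sketched here with the CPU id as a loop variable:

	#include <stdio.h>

	int main(void)
	{
		int tx_nr_rings_xdp = 4;
		int cpu;

		for (cpu = 0; cpu < 8; cpu++)
			printf("cpu %d -> xdp tx ring %d\n",
			       cpu, cpu % tx_nr_rings_xdp);
		return 0;
	}
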
ring              222 drivers/net/ethernet/broadcom/cnic.c 	struct drv_ctl_l2_ring *ring = &info.data.ring;
ring              230 drivers/net/ethernet/broadcom/cnic.c 	ring->cid = cid;
ring              231 drivers/net/ethernet/broadcom/cnic.c 	ring->client_id = cl_id;
ring             1781 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
ring             1783 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
ring             1785 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
ring             1786 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
ring             1787 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
ring             1789 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
ring             1791 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
ring             1793 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
ring             1795 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
ring             1797 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
ring             1799 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
ring             1800 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
ring             1801 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
ring             1814 drivers/net/ethernet/broadcom/cnic.c 		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
ring             1815 drivers/net/ethernet/broadcom/cnic.c 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
ring             1817 drivers/net/ethernet/broadcom/cnic.c 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
ring             3963 drivers/net/ethernet/broadcom/cnic_defs.h 	struct rings_db ring;
ring              162 drivers/net/ethernet/broadcom/cnic_if.h 		struct drv_ctl_l2_ring ring;
ring              434 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					   unsigned int ring,
ring              438 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			      (DMA_RING_SIZE * ring) +
ring              443 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					     unsigned int ring, u32 val,
ring              447 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			(DMA_RING_SIZE * ring) +
ring              452 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					   unsigned int ring,
ring              456 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			      (DMA_RING_SIZE * ring) +
ring              461 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					     unsigned int ring, u32 val,
ring              465 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			(DMA_RING_SIZE * ring) +
ring              603 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring;
ring              616 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->rx_rings[i];
ring              617 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
ring              619 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->rx_rings[DESC_INDEX];
ring              620 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
ring              625 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
ring              628 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_priv *priv = ring->priv;
ring              629 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	unsigned int i = ring->index;
ring              640 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
ring              646 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
ring              647 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
ring              648 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	usecs = ring->rx_coalesce_usecs;
ring              649 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	pkts = ring->rx_max_coalesced_frames;
ring              651 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
ring              652 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
ring              657 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
ring              658 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
ring             1236 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					 struct bcmgenet_tx_ring *ring)
ring             1240 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_cb_ptr = ring->cbs;
ring             1241 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
ring             1244 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->write_ptr == ring->end_ptr)
ring             1245 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->write_ptr = ring->cb_ptr;
ring             1247 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->write_ptr++;
ring             1253 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					 struct bcmgenet_tx_ring *ring)
ring             1257 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_cb_ptr = ring->cbs;
ring             1258 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
ring             1261 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->write_ptr == ring->cb_ptr)
ring             1262 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->write_ptr = ring->end_ptr;
ring             1264 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->write_ptr--;
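
bcmgenet_get_txcb() and bcmgenet_put_txcb() (above) advance and rewind
write_ptr with wraparound confined to the ring's own descriptor window.
A reduced model of just that wrap logic, with a hypothetical struct in
place of struct bcmgenet_tx_ring:

    /* The window is [cb_ptr, end_ptr], both bounds inclusive. */
    struct txwin {
            unsigned int write_ptr;
            unsigned int cb_ptr;
            unsigned int end_ptr;
    };

    static void txwin_advance(struct txwin *w)
    {
            if (w->write_ptr == w->end_ptr)
                    w->write_ptr = w->cb_ptr;   /* wrap to window start */
            else
                    w->write_ptr++;
    }

    static void txwin_rewind(struct txwin *w)
    {
            if (w->write_ptr == w->cb_ptr)
                    w->write_ptr = w->end_ptr;  /* wrap to window end */
            else
                    w->write_ptr--;
    }
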
ring             1269 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
ring             1271 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
ring             1275 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
ring             1277 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
ring             1281 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
ring             1283 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_1_writel(ring->priv,
ring             1284 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
ring             1288 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
ring             1290 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_1_writel(ring->priv,
ring             1291 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
ring             1295 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
ring             1297 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
ring             1301 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
ring             1303 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
ring             1307 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
ring             1309 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
ring             1313 drivers/net/ethernet/broadcom/genet/bcmgenet.c static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
ring             1315 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
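
The interrupt helpers above follow one rule: the ring16 variants serve the
default queue through the first interrupt controller (INTRL2_0) with a
dedicated mask bit, while priority rings each get one bit in INTRL2_1,
indexed by ring number. The RX bit computation, with an illustrative shift
standing in for UMAC_IRQ1_RX_INTR_SHIFT:

    #include <stdint.h>

    #define IRQ1_RX_INTR_SHIFT 16u  /* assumed value, for illustration */

    static inline uint32_t rx_ring_irq_bit(unsigned int index)
    {
            return UINT32_C(1) << (IRQ1_RX_INTR_SHIFT + index);
    }
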
ring             1376 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					  struct bcmgenet_tx_ring *ring)
ring             1387 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->index == DESC_INDEX)
ring             1391 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
ring             1395 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
ring             1397 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
ring             1401 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
ring             1406 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					  &priv->tx_cbs[ring->clean_ptr]);
ring             1414 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (likely(ring->clean_ptr < ring->end_ptr))
ring             1415 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->clean_ptr++;
ring             1417 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->clean_ptr = ring->cb_ptr;
ring             1420 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->free_bds += txbds_processed;
ring             1421 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->c_index = c_index;
ring             1423 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->packets += pkts_compl;
ring             1424 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->bytes += bytes_compl;
ring             1426 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
ring             1433 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				struct bcmgenet_tx_ring *ring)
ring             1437 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_lock_bh(&ring->lock);
ring             1438 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	released = __bcmgenet_tx_reclaim(dev, ring);
ring             1439 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_unlock_bh(&ring->lock);
ring             1446 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring =
ring             1451 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_lock(&ring->lock);
ring             1452 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
ring             1453 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
ring             1454 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
ring             1457 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_unlock(&ring->lock);
ring             1461 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable(ring);
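
__bcmgenet_tx_reclaim() (above) finds how many descriptors completed since
the last pass by subtracting consumer indices modulo the hardware index
width; unsigned wraparound makes counter rollover harmless. Standalone:

    #include <stdint.h>

    #define DMA_C_INDEX_MASK 0xffffu  /* 16-bit hardware index, assumed */

    /* Completed descriptors between the driver's shadow consumer index
     * and the value just read back from TDMA_CONS_INDEX. */
    static unsigned int txbds_ready(uint32_t hw_c, uint32_t sw_c)
    {
            return (hw_c - sw_c) & DMA_C_INDEX_MASK;
    }
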
ring             1550 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring = NULL;
ring             1574 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->tx_rings[index];
ring             1575 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txq = netdev_get_tx_queue(dev, ring->queue);
ring             1579 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_lock(&ring->lock);
ring             1580 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->free_bds <= (nr_frags + 1)) {
ring             1585 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				   __func__, index, ring->queue);
ring             1611 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
ring             1659 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->free_bds -= nr_frags + 1;
ring             1660 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->prod_index += nr_frags + 1;
ring             1661 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->prod_index &= DMA_P_INDEX_MASK;
ring             1665 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
ring             1670 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		bcmgenet_tdma_ring_writel(priv, ring->index,
ring             1671 drivers/net/ethernet/broadcom/genet/bcmgenet.c 					  ring->prod_index, TDMA_PROD_INDEX);
ring             1673 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_unlock(&ring->lock);
ring             1679 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_put_txcb(priv, ring);
ring             1683 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
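
The transmit path above consumes nr_frags + 1 descriptors per packet (one
header BD plus one per fragment) and stops the queue once a worst-case
packet might no longer fit. A reduced sketch of that bookkeeping; the
fragment constant is illustrative, not the kernel's MAX_SKB_FRAGS:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_FRAGS        17u      /* illustrative worst-case fragments */
    #define DMA_P_INDEX_MASK 0xffffu  /* assumed 16-bit producer index */

    struct txstate {
            unsigned int free_bds;
            uint32_t prod_index;
    };

    /* Returns true when the caller should stop the netdev queue. */
    static bool tx_consume(struct txstate *t, unsigned int nr_frags)
    {
            t->free_bds -= nr_frags + 1;
            t->prod_index = (t->prod_index + nr_frags + 1) & DMA_P_INDEX_MASK;
            return t->free_bds <= MAX_FRAGS + 1;
    }
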
ring             1736 drivers/net/ethernet/broadcom/genet/bcmgenet.c static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
ring             1739 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_priv *priv = ring->priv;
ring             1753 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->index == DESC_INDEX) {
ring             1757 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
ring             1763 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
ring             1767 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (discards > ring->old_discards) {
ring             1768 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		discards = discards - ring->old_discards;
ring             1769 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->errors += discards;
ring             1770 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->old_discards += discards;
ring             1773 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (ring->old_discards >= 0xC000) {
ring             1774 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->old_discards = 0;
ring             1775 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
ring             1781 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
ring             1788 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		cb = &priv->rx_cbs[ring->read_ptr];
ring             1792 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->dropped++;
ring             1814 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			  __func__, p_index, ring->c_index,
ring             1815 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			  ring->read_ptr, dma_length_status);
ring             1820 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->errors++;
ring             1871 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->packets++;
ring             1872 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->bytes += len;
ring             1877 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_gro_receive(&ring->napi, skb);
ring             1882 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		if (likely(ring->read_ptr < ring->end_ptr))
ring             1883 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->read_ptr++;
ring             1885 drivers/net/ethernet/broadcom/genet/bcmgenet.c 			ring->read_ptr = ring->cb_ptr;
ring             1887 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
ring             1888 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
ring             1891 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->dim.bytes = bytes_processed;
ring             1892 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->dim.packets = rxpktprocessed;
ring             1900 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring = container_of(napi,
ring             1905 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	work_done = bcmgenet_desc_rx(ring, budget);
ring             1909 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable(ring);
ring             1912 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->dim.use_dim) {
ring             1913 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring             1914 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				  ring->dim.bytes, &dim_sample);
ring             1915 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		net_dim(&ring->dim.dim, dim_sample);
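
bcmgenet_rx_poll() (above) re-arms the ring interrupt only after NAPI
completes under budget, then feeds the dynamic interrupt moderation (DIM)
engine. The shape of that tail, mirrored from the excerpt (kernel context;
assumes <linux/dim.h> and the driver's own ring type):

    if (work_done < budget) {
            napi_complete_done(napi, work_done);
            ring->int_enable(ring);            /* re-arm this ring's IRQ */
    }

    if (ring->dim.use_dim) {
            struct dim_sample s;

            /* Hand this period's packet/byte counts to DIM; it schedules
             * ring->dim.dim.work when the coalescing profile should move. */
            dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
                              ring->dim.bytes, &s);
            net_dim(&ring->dim.dim, s);
    }
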
ring             1926 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring =
ring             1931 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
ring             1937 drivers/net/ethernet/broadcom/genet/bcmgenet.c 				     struct bcmgenet_rx_ring *ring)
ring             1946 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	for (i = 0; i < ring->size; i++) {
ring             1947 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		cb = ring->cbs + i;
ring             2083 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
ring             2086 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_net_dim *dim = &ring->dim;
ring             2095 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
ring             2097 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_net_dim *dim = &ring->dim;
ring             2101 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	usecs = ring->rx_coalesce_usecs;
ring             2102 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	pkts = ring->rx_max_coalesced_frames;
ring             2111 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
ring             2119 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
ring             2123 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_lock_init(&ring->lock);
ring             2124 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->priv = priv;
ring             2125 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->index = index;
ring             2127 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->queue = 0;
ring             2128 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable = bcmgenet_tx_ring16_int_enable;
ring             2129 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_disable = bcmgenet_tx_ring16_int_disable;
ring             2131 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->queue = index + 1;
ring             2132 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable = bcmgenet_tx_ring_int_enable;
ring             2133 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_disable = bcmgenet_tx_ring_int_disable;
ring             2135 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->cbs = priv->tx_cbs + start_ptr;
ring             2136 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->size = size;
ring             2137 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->clean_ptr = start_ptr;
ring             2138 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->c_index = 0;
ring             2139 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->free_bds = size;
ring             2140 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->write_ptr = start_ptr;
ring             2141 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->cb_ptr = start_ptr;
ring             2142 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->end_ptr = end_ptr - 1;
ring             2143 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->prod_index = 0;
ring             2170 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
ring             2179 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
ring             2183 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->priv = priv;
ring             2184 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->index = index;
ring             2186 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable = bcmgenet_rx_ring16_int_enable;
ring             2187 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_disable = bcmgenet_rx_ring16_int_disable;
ring             2189 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable = bcmgenet_rx_ring_int_enable;
ring             2190 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_disable = bcmgenet_rx_ring_int_disable;
ring             2192 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->cbs = priv->rx_cbs + start_ptr;
ring             2193 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->size = size;
ring             2194 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->c_index = 0;
ring             2195 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->read_ptr = start_ptr;
ring             2196 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->cb_ptr = start_ptr;
ring             2197 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->end_ptr = end_ptr - 1;
ring             2199 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ret = bcmgenet_alloc_rx_buffers(priv, ring);
ring             2203 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_init_dim(ring, bcmgenet_dim_work);
ring             2204 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	bcmgenet_init_rx_coalesce(ring);
ring             2207 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
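
bcmgenet_init_tx_ring() and bcmgenet_init_rx_ring() (above) carve one
shared descriptor array into per-ring windows: cbs points at the ring's
slice, every cursor starts at start_ptr, and end_ptr is stored inclusive.
A reduced model of that setup:

    struct ringwin {
            unsigned int size;     /* descriptors owned by this ring */
            unsigned int cb_ptr;   /* first slot of the window */
            unsigned int end_ptr;  /* last slot, inclusive */
            unsigned int cursor;   /* read/clean/write pointer */
    };

    static void ringwin_init(struct ringwin *w, unsigned int start,
                             unsigned int end, unsigned int size)
    {
            w->size = size;
            w->cb_ptr = start;
            w->end_ptr = end - 1;  /* the driver keeps the inclusive bound */
            w->cursor = start;
    }
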
ring             2236 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring;
ring             2239 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->tx_rings[i];
ring             2240 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_enable(&ring->napi);
ring             2241 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable(ring);
ring             2244 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->tx_rings[DESC_INDEX];
ring             2245 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	napi_enable(&ring->napi);
ring             2246 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->int_enable(ring);
ring             2252 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring;
ring             2255 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->tx_rings[i];
ring             2256 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_disable(&ring->napi);
ring             2259 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->tx_rings[DESC_INDEX];
ring             2260 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	napi_disable(&ring->napi);
ring             2266 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_tx_ring *ring;
ring             2269 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->tx_rings[i];
ring             2270 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		netif_napi_del(&ring->napi);
ring             2273 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->tx_rings[DESC_INDEX];
ring             2274 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netif_napi_del(&ring->napi);
ring             2349 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring;
ring             2352 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->rx_rings[i];
ring             2353 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_enable(&ring->napi);
ring             2354 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring->int_enable(ring);
ring             2357 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->rx_rings[DESC_INDEX];
ring             2358 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	napi_enable(&ring->napi);
ring             2359 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring->int_enable(ring);
ring             2365 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring;
ring             2368 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->rx_rings[i];
ring             2369 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		napi_disable(&ring->napi);
ring             2370 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		cancel_work_sync(&ring->dim.dim.work);
ring             2373 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->rx_rings[DESC_INDEX];
ring             2374 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	napi_disable(&ring->napi);
ring             2375 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	cancel_work_sync(&ring->dim.dim.work);
ring             2381 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_rx_ring *ring;
ring             2384 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		ring = &priv->rx_rings[i];
ring             2385 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		netif_napi_del(&ring->napi);
ring             2388 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	ring = &priv->rx_rings[DESC_INDEX];
ring             2389 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	netif_napi_del(&ring->napi);
ring             3015 drivers/net/ethernet/broadcom/genet/bcmgenet.c static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
ring             3017 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	struct bcmgenet_priv *priv = ring->priv;
ring             3026 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	txq = netdev_get_tx_queue(priv->dev, ring->queue);
ring             3028 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_lock(&ring->lock);
ring             3029 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	if (ring->index == DESC_INDEX) {
ring             3034 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		intmsk = 1 << ring->index;
ring             3036 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
ring             3037 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
ring             3039 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	free_bds = ring->free_bds;
ring             3040 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	spin_unlock(&ring->lock);
ring             3049 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->index, ring->queue,
ring             3052 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  free_bds, ring->size,
ring             3053 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
ring             3054 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->c_index, c_index & DMA_C_INDEX_MASK,
ring             3055 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->clean_ptr, ring->write_ptr,
ring             3056 drivers/net/ethernet/broadcom/genet/bcmgenet.c 		  ring->cb_ptr, ring->end_ptr);
ring             2738 drivers/net/ethernet/cadence/macb_main.c 			       struct ethtool_ringparam *ring)
ring             2742 drivers/net/ethernet/cadence/macb_main.c 	ring->rx_max_pending = MAX_RX_RING_SIZE;
ring             2743 drivers/net/ethernet/cadence/macb_main.c 	ring->tx_max_pending = MAX_TX_RING_SIZE;
ring             2745 drivers/net/ethernet/cadence/macb_main.c 	ring->rx_pending = bp->rx_ring_size;
ring             2746 drivers/net/ethernet/cadence/macb_main.c 	ring->tx_pending = bp->tx_ring_size;
ring             2750 drivers/net/ethernet/cadence/macb_main.c 			      struct ethtool_ringparam *ring)
ring             2756 drivers/net/ethernet/cadence/macb_main.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring             2759 drivers/net/ethernet/cadence/macb_main.c 	new_rx_size = clamp_t(u32, ring->rx_pending,
ring             2763 drivers/net/ethernet/cadence/macb_main.c 	new_tx_size = clamp_t(u32, ring->tx_pending,
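
macb_set_ringparam() (above) rejects mini/jumbo parameters and clamps the
requested sizes into the hardware's range with clamp_t(). A userspace
stand-in for that clamp, with illustrative bounds rather than macb's real
limits:

    #include <stdint.h>

    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* e.g. new_rx_size = clamp_u32(ring->rx_pending, 64, 4096); */
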
ring              469 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 				struct ethtool_ringparam *ring)
ring              474 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
ring              475 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->rx_pending = qs->cq_len;
ring              476 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
ring              477 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	ring->tx_pending = qs->sq_len;
ring              481 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 			       struct ethtool_ringparam *ring)
ring              491 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              494 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	tx_count = clamp_t(u32, ring->tx_pending,
ring              496 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 	rx_count = clamp_t(u32, ring->rx_pending,
ring              181 drivers/net/ethernet/cisco/enic/enic_ethtool.c 			       struct ethtool_ringparam *ring)
ring              186 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
ring              187 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	ring->rx_pending = c->rq_desc_count;
ring              188 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
ring              189 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	ring->tx_pending = c->wq_desc_count;
ring              193 drivers/net/ethernet/cisco/enic/enic_ethtool.c 			      struct ethtool_ringparam *ring)
ring              202 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
ring              207 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
ring              214 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
ring              215 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
ring              217 drivers/net/ethernet/cisco/enic/enic_ethtool.c 			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
ring              221 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
ring              222 drivers/net/ethernet/cisco/enic/enic_ethtool.c 	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
ring              224 drivers/net/ethernet/cisco/enic/enic_ethtool.c 			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
ring              231 drivers/net/ethernet/cisco/enic/enic_ethtool.c 		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
ring              233 drivers/net/ethernet/cisco/enic/enic_ethtool.c 		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
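
enic_set_ringparam() (above) additionally rounds the accepted counts down
to a multiple of 32 with ring->rx_pending & 0xffffffe0. The general
power-of-two align-down that mask implements:

    #include <stdint.h>

    /* 'a' must be a power of two; align_down(1000, 32) == 992. */
    static uint32_t align_down(uint32_t v, uint32_t a)
    {
            return v & ~(a - 1);
    }
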
ring              846 drivers/net/ethernet/cisco/enic/enic_main.c 			wq->ring.desc_avail++;
ring               31 drivers/net/ethernet/cisco/enic/vnic_cq.c 	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
ring               50 drivers/net/ethernet/cisco/enic/vnic_cq.c 	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
ring               65 drivers/net/ethernet/cisco/enic/vnic_cq.c 	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
ring               67 drivers/net/ethernet/cisco/enic/vnic_cq.c 	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
ring               91 drivers/net/ethernet/cisco/enic/vnic_cq.c 	vnic_dev_clear_desc_ring(&cq->ring);
ring               62 drivers/net/ethernet/cisco/enic/vnic_cq.h 	struct vnic_dev_ring ring;
ring               83 drivers/net/ethernet/cisco/enic/vnic_cq.h 	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring               84 drivers/net/ethernet/cisco/enic/vnic_cq.h 		cq->ring.desc_size * cq->to_clean);
ring               95 drivers/net/ethernet/cisco/enic/vnic_cq.h 		if (cq->to_clean == cq->ring.desc_count) {
ring              100 drivers/net/ethernet/cisco/enic/vnic_cq.h 		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring              101 drivers/net/ethernet/cisco/enic/vnic_cq.h 			cq->ring.desc_size * cq->to_clean);
ring              159 drivers/net/ethernet/cisco/enic/vnic_dev.c static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring              171 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->base_align = 512;
ring              176 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->desc_count = ALIGN(desc_count, count_align);
ring              178 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->desc_size = ALIGN(desc_size, desc_align);
ring              180 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->size = ring->desc_count * ring->desc_size;
ring              181 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->size_unaligned = ring->size + ring->base_align;
ring              183 drivers/net/ethernet/cisco/enic/vnic_dev.c 	return ring->size_unaligned;
ring              186 drivers/net/ethernet/cisco/enic/vnic_dev.c void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
ring              188 drivers/net/ethernet/cisco/enic/vnic_dev.c 	memset(ring->descs, 0, ring->size);
ring              191 drivers/net/ethernet/cisco/enic/vnic_dev.c int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring              194 drivers/net/ethernet/cisco/enic/vnic_dev.c 	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring              196 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
ring              197 drivers/net/ethernet/cisco/enic/vnic_dev.c 		ring->size_unaligned,
ring              198 drivers/net/ethernet/cisco/enic/vnic_dev.c 		&ring->base_addr_unaligned);
ring              200 drivers/net/ethernet/cisco/enic/vnic_dev.c 	if (!ring->descs_unaligned) {
ring              202 drivers/net/ethernet/cisco/enic/vnic_dev.c 			 (int)ring->size);
ring              206 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring              207 drivers/net/ethernet/cisco/enic/vnic_dev.c 		ring->base_align);
ring              208 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->descs = (u8 *)ring->descs_unaligned +
ring              209 drivers/net/ethernet/cisco/enic/vnic_dev.c 		(ring->base_addr - ring->base_addr_unaligned);
ring              211 drivers/net/ethernet/cisco/enic/vnic_dev.c 	vnic_dev_clear_desc_ring(ring);
ring              213 drivers/net/ethernet/cisco/enic/vnic_dev.c 	ring->desc_avail = ring->desc_count - 1;
ring              218 drivers/net/ethernet/cisco/enic/vnic_dev.c void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
ring              220 drivers/net/ethernet/cisco/enic/vnic_dev.c 	if (ring->descs) {
ring              222 drivers/net/ethernet/cisco/enic/vnic_dev.c 			ring->size_unaligned,
ring              223 drivers/net/ethernet/cisco/enic/vnic_dev.c 			ring->descs_unaligned,
ring              224 drivers/net/ethernet/cisco/enic/vnic_dev.c 			ring->base_addr_unaligned);
ring              225 drivers/net/ethernet/cisco/enic/vnic_dev.c 		ring->descs = NULL;
ring              420 drivers/net/ethernet/cisco/enic/vnic_dev.c 	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
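
vnic_dev_alloc_desc_ring() (above) cannot ask the DMA allocator for a
512-byte-aligned base directly, so it over-allocates by base_align and
rounds the returned address up, keeping both pointers so the raw block can
be freed later. A sketch of the trick with malloc() standing in for
pci_alloc_consistent():

    #include <stdint.h>
    #include <stdlib.h>

    #define RING_BASE_ALIGN 512u   /* vNIC descriptor base alignment */

    struct ring_mem {
            void *raw;    /* allocator's pointer, needed for free() */
            void *descs;  /* aligned start of the descriptor array */
    };

    static int ring_mem_alloc(struct ring_mem *m, size_t size)
    {
            uintptr_t base;

            m->raw = malloc(size + RING_BASE_ALIGN);
            if (!m->raw)
                    return -1;
            base = ((uintptr_t)m->raw + RING_BASE_ALIGN - 1) &
                   ~(uintptr_t)(RING_BASE_ALIGN - 1);
            m->descs = (void *)base;
            return 0;
    }
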
ring              122 drivers/net/ethernet/cisco/enic/vnic_dev.h void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
ring              123 drivers/net/ethernet/cisco/enic/vnic_dev.h int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring              126 drivers/net/ethernet/cisco/enic/vnic_dev.h 	struct vnic_dev_ring *ring);
ring               34 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int i, j, count = rq->ring.desc_count;
ring               47 drivers/net/ethernet/cisco/enic/vnic_rq.c 			buf->desc = (u8 *)rq->ring.descs +
ring               48 drivers/net/ethernet/cisco/enic/vnic_rq.c 				rq->ring.desc_size * buf->index;
ring               73 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_dev_free_desc_ring(vdev, &rq->ring);
ring              101 drivers/net/ethernet/cisco/enic/vnic_rq.c 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
ring              120 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int count = rq->ring.desc_count;
ring              122 drivers/net/ethernet/cisco/enic/vnic_rq.c 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
ring              189 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int count = rq->ring.desc_count;
ring              194 drivers/net/ethernet/cisco/enic/vnic_rq.c 	for (i = 0; i < rq->ring.desc_count; i++) {
ring              198 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->ring.desc_avail = rq->ring.desc_count - 1;
ring              217 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_dev_clear_desc_ring(&rq->ring);
ring               89 drivers/net/ethernet/cisco/enic/vnic_rq.h 	struct vnic_dev_ring ring;
ring              100 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->ring.desc_avail;
ring              106 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
ring              134 drivers/net/ethernet/cisco/enic/vnic_rq.h 	rq->ring.desc_avail--;
ring              156 drivers/net/ethernet/cisco/enic/vnic_rq.h 	rq->ring.desc_avail += count;
ring              181 drivers/net/ethernet/cisco/enic/vnic_rq.h 			rq->ring.desc_avail++;
ring               34 drivers/net/ethernet/cisco/enic/vnic_wq.c 	unsigned int i, j, count = wq->ring.desc_count;
ring               47 drivers/net/ethernet/cisco/enic/vnic_wq.c 			buf->desc = (u8 *)wq->ring.descs +
ring               48 drivers/net/ethernet/cisco/enic/vnic_wq.c 				wq->ring.desc_size * buf->index;
ring               76 drivers/net/ethernet/cisco/enic/vnic_wq.c 	vnic_dev_free_desc_ring(vdev, &wq->ring);
ring              104 drivers/net/ethernet/cisco/enic/vnic_wq.c 	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
ring              129 drivers/net/ethernet/cisco/enic/vnic_wq.c 	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
ring              140 drivers/net/ethernet/cisco/enic/vnic_wq.c 	unsigned int count = wq->ring.desc_count;
ring              142 drivers/net/ethernet/cisco/enic/vnic_wq.c 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
ring              207 drivers/net/ethernet/cisco/enic/vnic_wq.c 		wq->ring.desc_avail++;
ring              216 drivers/net/ethernet/cisco/enic/vnic_wq.c 	vnic_dev_clear_desc_ring(&wq->ring);
ring               84 drivers/net/ethernet/cisco/enic/vnic_wq.h 	struct vnic_dev_ring ring;
ring              106 drivers/net/ethernet/cisco/enic/vnic_wq.h 	return wq->ring.desc_avail;
ring              112 drivers/net/ethernet/cisco/enic/vnic_wq.h 	return wq->ring.desc_count - wq->ring.desc_avail - 1;
ring              151 drivers/net/ethernet/cisco/enic/vnic_wq.h 	wq->ring.desc_avail -= desc_skip_cnt;
ring              167 drivers/net/ethernet/cisco/enic/vnic_wq.h 		wq->ring.desc_avail++;
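
The vnic_rq/vnic_wq accessors above always leave one descriptor unused so
a full ring is distinguishable from an empty one: desc_avail starts at
desc_count - 1, and the used count is count - avail - 1. Restated:

    /* Mirrors vnic_rq_desc_used()/vnic_wq_desc_used(). */
    static unsigned int desc_used(unsigned int count, unsigned int avail)
    {
            return count - avail - 1;
    }
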
ring               96 drivers/net/ethernet/cortina/gemini.c 	struct gmac_txdesc *ring;
ring              589 drivers/net/ethernet/cortina/gemini.c 		txq->ring = desc_ring;
ring              629 drivers/net/ethernet/cortina/gemini.c 		txd = txq->ring + c;
ring              701 drivers/net/ethernet/cortina/gemini.c 			  n_txq * sizeof(*port->txq->ring) << port->txq_order,
ring              702 drivers/net/ethernet/cortina/gemini.c 			  port->txq->ring, port->txq_dma_base);
ring             1201 drivers/net/ethernet/cortina/gemini.c 		txd = txq->ring + w;
ring             1221 drivers/net/ethernet/cortina/gemini.c 		dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr,
ring             1222 drivers/net/ethernet/cortina/gemini.c 			       txq->ring[w].word0.bits.buffer_size,
ring             1265 drivers/net/ethernet/cortina/gemini.c 			txq->ring[d].word3.bits.eofie = 1;
ring              683 drivers/net/ethernet/emulex/benet/be_ethtool.c 			     struct ethtool_ringparam *ring)
ring              687 drivers/net/ethernet/emulex/benet/be_ethtool.c 	ring->rx_max_pending = adapter->rx_obj[0].q.len;
ring              688 drivers/net/ethernet/emulex/benet/be_ethtool.c 	ring->rx_pending = adapter->rx_obj[0].q.len;
ring              689 drivers/net/ethernet/emulex/benet/be_ethtool.c 	ring->tx_max_pending = adapter->tx_obj[0].q.len;
ring              690 drivers/net/ethernet/emulex/benet/be_ethtool.c 	ring->tx_pending = adapter->tx_obj[0].q.len;
ring              948 drivers/net/ethernet/ethoc.c 				struct ethtool_ringparam *ring)
ring              952 drivers/net/ethernet/ethoc.c 	ring->rx_max_pending = priv->num_bd - 1;
ring              953 drivers/net/ethernet/ethoc.c 	ring->rx_mini_max_pending = 0;
ring              954 drivers/net/ethernet/ethoc.c 	ring->rx_jumbo_max_pending = 0;
ring              955 drivers/net/ethernet/ethoc.c 	ring->tx_max_pending = priv->num_bd - 1;
ring              957 drivers/net/ethernet/ethoc.c 	ring->rx_pending = priv->num_rx;
ring              958 drivers/net/ethernet/ethoc.c 	ring->rx_mini_pending = 0;
ring              959 drivers/net/ethernet/ethoc.c 	ring->rx_jumbo_pending = 0;
ring              960 drivers/net/ethernet/ethoc.c 	ring->tx_pending = priv->num_tx;
ring              964 drivers/net/ethernet/ethoc.c 			       struct ethtool_ringparam *ring)
ring              968 drivers/net/ethernet/ethoc.c 	if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
ring              969 drivers/net/ethernet/ethoc.c 	    ring->tx_pending + ring->rx_pending > priv->num_bd)
ring              971 drivers/net/ethernet/ethoc.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring              981 drivers/net/ethernet/ethoc.c 	priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
ring              982 drivers/net/ethernet/ethoc.c 	priv->num_rx = ring->rx_pending;
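
ethoc_set_ringparam() (above) shares one buffer-descriptor pool between
directions, bounds tx_pending + rx_pending by num_bd, and forces the TX
count to a power of two via rounddown_pow_of_two(). A portable equivalent
of that rounding:

    #include <stdint.h>

    /* rounddown_pow2(100) == 64; caller guarantees v >= 1. */
    static uint32_t rounddown_pow2(uint32_t v)
    {
            uint32_t p = 1;

            while (p <= v / 2)
                    p *= 2;
            return p;
    }
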
ring                8 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	struct enetc_cbdr *ring = &si->cbd_ring;
ring               12 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	i = ring->next_to_clean;
ring               14 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	while (enetc_rd_reg(ring->cir) != i) {
ring               15 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 		dest_cbd = ENETC_CBD(*ring, i);
ring               23 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 		i = (i + 1) % ring->bd_count;
ring               26 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	ring->next_to_clean = i;
ring               37 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	struct enetc_cbdr *ring = &si->cbd_ring;
ring               42 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	if (unlikely(!ring->bd_base))
ring               45 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	if (unlikely(!enetc_cbd_unused(ring)))
ring               48 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	i = ring->next_to_use;
ring               49 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	dest_cbd = ENETC_CBD(*ring, i);
ring               53 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	i = (i + 1) % ring->bd_count;
ring               55 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	ring->next_to_use = i;
ring               57 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 	enetc_wr_reg(ring->pir, i);
ring               60 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c 		if (enetc_rd_reg(ring->cir) == i)
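
The ENETC control BD ring above is a classic producer/consumer pair:
software advances next_to_use modulo bd_count and writes it to the
producer register (pir), then polls the consumer register (cir) until
hardware catches up; enetc_clean_cbdr() walks next_to_clean the same way.
A reduced model of the index handling:

    struct cbdr {
            unsigned int bd_count;
            unsigned int next_to_use;    /* software producer */
            unsigned int next_to_clean;  /* shadow of hardware consumer */
    };

    static unsigned int cbdr_next(const struct cbdr *r, unsigned int i)
    {
            return (i + 1) % r->bd_count;
    }
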
ring              536 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 				struct ethtool_ringparam *ring)
ring              540 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 	ring->rx_pending = priv->rx_bd_count;
ring              541 drivers/net/ethernet/freescale/enetc/enetc_ethtool.c 	ring->tx_pending = priv->tx_bd_count;
ring              210 drivers/net/ethernet/freescale/ucc_geth_ethtool.c                     struct ethtool_ringparam *ring)
ring              216 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring              217 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring              218 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring              219 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
ring              221 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_pending = ug_info->bdRingLenRx[queue];
ring              222 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
ring              223 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
ring              224 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ring->tx_pending = ug_info->bdRingLenTx[queue];
ring              229 drivers/net/ethernet/freescale/ucc_geth_ethtool.c                     struct ethtool_ringparam *ring)
ring              235 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
ring              240 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
ring              245 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
ring              254 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ug_info->bdRingLenRx[queue] = ring->rx_pending;
ring              255 drivers/net/ethernet/freescale/ucc_geth_ethtool.c 	ug_info->bdRingLenTx[queue] = ring->tx_pending;
ring               96 drivers/net/ethernet/google/gve/gve_ethtool.c 	int ring;
ring              101 drivers/net/ethernet/google/gve/gve_ethtool.c 	for (rx_pkts = 0, rx_bytes = 0, ring = 0;
ring              102 drivers/net/ethernet/google/gve/gve_ethtool.c 	     ring < priv->rx_cfg.num_queues; ring++) {
ring              106 drivers/net/ethernet/google/gve/gve_ethtool.c 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
ring              107 drivers/net/ethernet/google/gve/gve_ethtool.c 				rx_pkts += priv->rx[ring].rpackets;
ring              108 drivers/net/ethernet/google/gve/gve_ethtool.c 				rx_bytes += priv->rx[ring].rbytes;
ring              109 drivers/net/ethernet/google/gve/gve_ethtool.c 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
ring              113 drivers/net/ethernet/google/gve/gve_ethtool.c 	for (tx_pkts = 0, tx_bytes = 0, ring = 0;
ring              114 drivers/net/ethernet/google/gve/gve_ethtool.c 	     ring < priv->tx_cfg.num_queues; ring++) {
ring              118 drivers/net/ethernet/google/gve/gve_ethtool.c 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
ring              119 drivers/net/ethernet/google/gve/gve_ethtool.c 				tx_pkts += priv->tx[ring].pkt_done;
ring              120 drivers/net/ethernet/google/gve/gve_ethtool.c 				tx_bytes += priv->tx[ring].bytes_done;
ring              121 drivers/net/ethernet/google/gve/gve_ethtool.c 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
ring              138 drivers/net/ethernet/google/gve/gve_ethtool.c 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
ring              139 drivers/net/ethernet/google/gve/gve_ethtool.c 			struct gve_rx_ring *rx = &priv->rx[ring];
ring              149 drivers/net/ethernet/google/gve/gve_ethtool.c 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
ring              150 drivers/net/ethernet/google/gve/gve_ethtool.c 			struct gve_tx_ring *tx = &priv->tx[ring];
ring               33 drivers/net/ethernet/google/gve/gve_main.c 	int ring;
ring               36 drivers/net/ethernet/google/gve/gve_main.c 		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
ring               39 drivers/net/ethernet/google/gve/gve_main.c 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
ring               40 drivers/net/ethernet/google/gve/gve_main.c 				s->rx_packets += priv->rx[ring].rpackets;
ring               41 drivers/net/ethernet/google/gve/gve_main.c 				s->rx_bytes += priv->rx[ring].rbytes;
ring               42 drivers/net/ethernet/google/gve/gve_main.c 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
ring               47 drivers/net/ethernet/google/gve/gve_main.c 		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
ring               50 drivers/net/ethernet/google/gve/gve_main.c 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
ring               51 drivers/net/ethernet/google/gve/gve_main.c 				s->tx_packets += priv->tx[ring].pkt_done;
ring               52 drivers/net/ethernet/google/gve/gve_main.c 				s->tx_bytes += priv->tx[ring].bytes_done;
ring               53 drivers/net/ethernet/google/gve/gve_main.c 			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
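
Both gve loops above read per-ring 64-bit counters under the u64_stats
seqcount: the read is retried if a writer updated the ring meanwhile, so
32-bit machines never observe torn values. The loop shape, mirrored from
the excerpt (kernel context, <linux/u64_stats_sync.h>; 'rx' stands for one
gve_rx_ring):

    unsigned int start;
    u64 packets, bytes;

    do {
            start = u64_stats_fetch_begin(&rx->statss);
            packets = rx->rpackets;
            bytes = rx->rbytes;
    } while (u64_stats_fetch_retry(&rx->statss, start));
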
ring               36 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
ring               38 drivers/net/ethernet/hisilicon/hns/hnae.c 	unsigned int order = hnae_page_order(ring);
ring               48 drivers/net/ethernet/hisilicon/hns/hnae.c 	cb->length = hnae_page_size(ring);
ring               54 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
ring               61 drivers/net/ethernet/hisilicon/hns/hnae.c 	else if (unlikely(is_rx_ring(ring)))
ring               67 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
ring               69 drivers/net/ethernet/hisilicon/hns/hnae.c 	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
ring               70 drivers/net/ethernet/hisilicon/hns/hnae.c 			       cb->length, ring_to_dma_dir(ring));
ring               72 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
ring               78 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
ring               81 drivers/net/ethernet/hisilicon/hns/hnae.c 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring               82 drivers/net/ethernet/hisilicon/hns/hnae.c 				 ring_to_dma_dir(ring));
ring               84 drivers/net/ethernet/hisilicon/hns/hnae.c 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring               85 drivers/net/ethernet/hisilicon/hns/hnae.c 			       ring_to_dma_dir(ring));
ring              119 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_free_buffers(struct hnae_ring *ring)
ring              123 drivers/net/ethernet/hisilicon/hns/hnae.c 	for (i = 0; i < ring->desc_num; i++)
ring              124 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_free_buffer_detach(ring, i);
ring              128 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_alloc_buffers(struct hnae_ring *ring)
ring              132 drivers/net/ethernet/hisilicon/hns/hnae.c 	for (i = 0; i < ring->desc_num; i++) {
ring              133 drivers/net/ethernet/hisilicon/hns/hnae.c 		ret = hnae_alloc_buffer_attach(ring, i);
ring              142 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_free_buffer_detach(ring, j);
ring              147 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_free_desc(struct hnae_ring *ring)
ring              149 drivers/net/ethernet/hisilicon/hns/hnae.c 	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring              150 drivers/net/ethernet/hisilicon/hns/hnae.c 			 ring->desc_num * sizeof(ring->desc[0]),
ring              151 drivers/net/ethernet/hisilicon/hns/hnae.c 			 ring_to_dma_dir(ring));
ring              152 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc_dma_addr = 0;
ring              153 drivers/net/ethernet/hisilicon/hns/hnae.c 	kfree(ring->desc);
ring              154 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc = NULL;
ring              158 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_alloc_desc(struct hnae_ring *ring)
ring              160 drivers/net/ethernet/hisilicon/hns/hnae.c 	int size = ring->desc_num * sizeof(ring->desc[0]);
ring              162 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc = kzalloc(size, GFP_KERNEL);
ring              163 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (!ring->desc)
ring              166 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
ring              167 drivers/net/ethernet/hisilicon/hns/hnae.c 		ring->desc, size, ring_to_dma_dir(ring));
ring              168 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
ring              169 drivers/net/ethernet/hisilicon/hns/hnae.c 		ring->desc_dma_addr = 0;
ring              170 drivers/net/ethernet/hisilicon/hns/hnae.c 		kfree(ring->desc);
ring              171 drivers/net/ethernet/hisilicon/hns/hnae.c 		ring->desc = NULL;
ring              179 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_fini_ring(struct hnae_ring *ring)
ring              181 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (is_rx_ring(ring))
ring              182 drivers/net/ethernet/hisilicon/hns/hnae.c 		hnae_free_buffers(ring);
ring              184 drivers/net/ethernet/hisilicon/hns/hnae.c 	hnae_free_desc(ring);
ring              185 drivers/net/ethernet/hisilicon/hns/hnae.c 	kfree(ring->desc_cb);
ring              186 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc_cb = NULL;
ring              187 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->next_to_clean = 0;
ring              188 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->next_to_use = 0;
ring              193 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
ring              197 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
ring              200 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->q = q;
ring              201 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->flags = flags;
ring              202 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->coal_param = q->handle->coal_param;
ring              203 drivers/net/ethernet/hisilicon/hns/hnae.c 	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
ring              206 drivers/net/ethernet/hisilicon/hns/hnae.c 	assert(ring->next_to_use == 0);
ring              207 drivers/net/ethernet/hisilicon/hns/hnae.c 	assert(ring->next_to_clean == 0);
ring              209 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
ring              211 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (!ring->desc_cb) {
ring              216 drivers/net/ethernet/hisilicon/hns/hnae.c 	ret = hnae_alloc_desc(ring);
ring              220 drivers/net/ethernet/hisilicon/hns/hnae.c 	if (is_rx_ring(ring)) {
ring              221 drivers/net/ethernet/hisilicon/hns/hnae.c 		ret = hnae_alloc_buffers(ring);
ring              229 drivers/net/ethernet/hisilicon/hns/hnae.c 	hnae_free_desc(ring);
ring              231 drivers/net/ethernet/hisilicon/hns/hnae.c 	kfree(ring->desc_cb);
ring              232 drivers/net/ethernet/hisilicon/hns/hnae.c 	ring->desc_cb = NULL;
ring              233 drivers/net/ethernet/hisilicon/hns/hnae.h #define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
ring              234 drivers/net/ethernet/hisilicon/hns/hnae.h #define is_rx_ring(ring) (!is_tx_ring(ring))
ring              235 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
ring              300 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_ptr_move_fw(ring, p) \
ring              301 drivers/net/ethernet/hisilicon/hns/hnae.h 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
ring              302 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_ptr_move_bw(ring, p) \
ring              303 drivers/net/ethernet/hisilicon/hns/hnae.h 	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
ring              310 drivers/net/ethernet/hisilicon/hns/hnae.h #define assert_is_ring_idx(ring, idx) \
ring              311 drivers/net/ethernet/hisilicon/hns/hnae.h 	assert((idx) >= 0 && (idx) < (ring)->desc_num)
ring              316 drivers/net/ethernet/hisilicon/hns/hnae.h static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
ring              318 drivers/net/ethernet/hisilicon/hns/hnae.h 	assert_is_ring_idx(ring, begin);
ring              319 drivers/net/ethernet/hisilicon/hns/hnae.h 	assert_is_ring_idx(ring, end);
ring              321 drivers/net/ethernet/hisilicon/hns/hnae.h 	return (end - begin + ring->desc_num) % ring->desc_num;
ring              324 drivers/net/ethernet/hisilicon/hns/hnae.h static inline int ring_space(struct hnae_ring *ring)
ring              326 drivers/net/ethernet/hisilicon/hns/hnae.h 	return ring->desc_num -
ring              327 drivers/net/ethernet/hisilicon/hns/hnae.h 		ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
ring              330 drivers/net/ethernet/hisilicon/hns/hnae.h static inline int is_ring_empty(struct hnae_ring *ring)
ring              332 drivers/net/ethernet/hisilicon/hns/hnae.h 	assert_is_ring_idx(ring, ring->next_to_use);
ring              333 drivers/net/ethernet/hisilicon/hns/hnae.h 	assert_is_ring_idx(ring, ring->next_to_clean);
ring              335 drivers/net/ethernet/hisilicon/hns/hnae.h 	return ring->next_to_use == ring->next_to_clean;
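
The hnae helpers above keep ring occupancy with modular arithmetic over
desc_num, again sacrificing one slot so that full and empty states differ.
A standalone restatement of ring_dist() and ring_space():

    static int hnae_dist(int num, int begin, int end)
    {
            return (end - begin + num) % num;
    }

    static int hnae_space(int num, int next_to_clean, int next_to_use)
    {
            /* one descriptor is always kept unused */
            return num - hnae_dist(num, next_to_clean, next_to_use) - 1;
    }
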
ring              346 drivers/net/ethernet/hisilicon/hns/hnae.h 	int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
ring              347 drivers/net/ethernet/hisilicon/hns/hnae.h 	void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
ring              348 drivers/net/ethernet/hisilicon/hns/hnae.h 	int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
ring              349 drivers/net/ethernet/hisilicon/hns/hnae.h 	void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
ring              480 drivers/net/ethernet/hisilicon/hns/hnae.h 	void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
ring              570 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_to_dev(ring) ((ring)->q->dev->dev)
ring              592 drivers/net/ethernet/hisilicon/hns/hnae.h static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
ring              595 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_buf_ops *bops = ring->q->handle->bops;
ring              598 drivers/net/ethernet/hisilicon/hns/hnae.h 	ret = bops->alloc_buffer(ring, cb);
ring              602 drivers/net/ethernet/hisilicon/hns/hnae.h 	ret = bops->map_buffer(ring, cb);
ring              609 drivers/net/ethernet/hisilicon/hns/hnae.h 	bops->free_buffer(ring, cb);
ring              614 drivers/net/ethernet/hisilicon/hns/hnae.h static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
ring              616 drivers/net/ethernet/hisilicon/hns/hnae.h 	int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
ring              621 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring              626 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
ring              628 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
ring              629 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].addr = 0;
ring              632 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
ring              634 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_buf_ops *bops = ring->q->handle->bops;
ring              635 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_desc_cb *cb = &ring->desc_cb[i];
ring              637 drivers/net/ethernet/hisilicon/hns/hnae.h 	if (!ring->desc_cb[i].dma)
ring              640 drivers/net/ethernet/hisilicon/hns/hnae.h 	hnae_buffer_detach(ring, i);
ring              641 drivers/net/ethernet/hisilicon/hns/hnae.h 	bops->free_buffer(ring, cb);
ring              645 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
ring              648 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_buf_ops *bops = ring->q->handle->bops;
ring              650 drivers/net/ethernet/hisilicon/hns/hnae.h 	bops->unmap_buffer(ring, &ring->desc_cb[i]);
ring              651 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc_cb[i] = *res_cb;
ring              652 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring              653 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
ring              656 drivers/net/ethernet/hisilicon/hns/hnae.h static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
ring              658 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc_cb[i].reuse_flag = 0;
ring              659 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
ring              660 drivers/net/ethernet/hisilicon/hns/hnae.h 		+ ring->desc_cb[i].page_offset);
ring              661 drivers/net/ethernet/hisilicon/hns/hnae.h 	ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
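
The hnae_reuse_buffer() lines above capture the RX page-reuse path: instead of allocating a fresh buffer, the descriptor is re-pointed at dma + page_offset within the page it already owns, and the per-descriptor receive status is cleared for hardware to rewrite. A minimal standalone sketch of that operation follows; all demo_* names are illustrative and the cpu_to_le64() conversion is omitted.

#include <stdint.h>
#include <stdio.h>

struct demo_desc_cb {
	uint64_t dma;		/* DMA address of the backing page */
	int page_offset;	/* offset of the buffer within that page */
	int reuse_flag;
};

struct demo_desc {
	uint64_t addr;		/* address the hardware will DMA into */
	uint32_t rx_status;	/* stand-in for rx.ipoff_bnum_pid_flag */
};

static void demo_reuse_buffer(struct demo_desc *desc, struct demo_desc_cb *cb)
{
	cb->reuse_flag = 0;
	desc->addr = cb->dma + cb->page_offset;	/* cpu_to_le64() omitted */
	desc->rx_status = 0;			/* hardware fills this on receive */
}

int main(void)
{
	struct demo_desc_cb cb = { .dma = 0x1000, .page_offset = 2048 };
	struct demo_desc desc = { 0 };

	demo_reuse_buffer(&desc, &cb);
	printf("desc.addr = 0x%llx\n", (unsigned long long)desc.addr);
	return 0;
}
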
ring              668 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_ring *ring;
ring              671 drivers/net/ethernet/hisilicon/hns/hnae.h 		ring = &h->qs[i]->rx_ring;
ring              672 drivers/net/ethernet/hisilicon/hns/hnae.h 		for (j = 0; j < ring->desc_num; j++)
ring              673 drivers/net/ethernet/hisilicon/hns/hnae.h 			ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
ring              683 drivers/net/ethernet/hisilicon/hns/hnae.h 	struct hnae_ring *ring;
ring              686 drivers/net/ethernet/hisilicon/hns/hnae.h 		ring = &h->qs[i]->rx_ring;
ring              687 drivers/net/ethernet/hisilicon/hns/hnae.h 		for (j = 0; j < ring->desc_num; j++) {
ring              688 drivers/net/ethernet/hisilicon/hns/hnae.h 			ring->desc_cb[j].page_offset = 0;
ring              689 drivers/net/ethernet/hisilicon/hns/hnae.h 			if (ring->desc[j].addr !=
ring              690 drivers/net/ethernet/hisilicon/hns/hnae.h 			    cpu_to_le64(ring->desc_cb[j].dma))
ring              691 drivers/net/ethernet/hisilicon/hns/hnae.h 				ring->desc[j].addr =
ring              692 drivers/net/ethernet/hisilicon/hns/hnae.h 					cpu_to_le64(ring->desc_cb[j].dma);
ring              198 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	struct ring_pair_cb *ring =
ring              201 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcb_init_hw(ring);
ring              392 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
ring              396 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	if (is_tx_ring(ring))
ring              401 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcb_int_ctrl_hw(ring->q, flag, mask);
ring              404 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
ring              408 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	if (is_tx_ring(ring))
ring              413 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 	hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
ring              249 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct hnae_ring *ring =
ring              251 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	dma_addr_t dma = ring->desc_dma_addr;
ring              259 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		hns_rcb_set_rx_ring_bs(q, ring->buf_size);
ring              271 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		hns_rcb_set_tx_ring_bs(q, ring->buf_size);
ring              284 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_init_hw(struct ring_pair_cb *ring)
ring              286 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	hns_rcb_ring_init(ring, RX_RING);
ring              287 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	hns_rcb_ring_init(ring, TX_RING);
ring              442 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct hnae_ring *ring;
ring              451 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		ring = &q->rx_ring;
ring              452 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		ring->io_base = ring_pair_cb->q.io_base;
ring              456 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		ring = &q->tx_ring;
ring              457 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		ring->io_base = ring_pair_cb->q.io_base +
ring              467 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->desc = NULL;
ring              468 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->desc_cb = NULL;
ring              470 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->irq = ring_pair_cb->virq[irq_idx];
ring              471 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->desc_dma_addr = 0;
ring              473 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
ring              474 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->desc_num = desc_num;
ring              475 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->max_desc_num_per_pkt = mdnum_ppkt;
ring              476 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
ring              477 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
ring              478 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->next_to_use = 0;
ring              479 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	ring->next_to_clean = 0;
ring              819 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct ring_pair_cb *ring =
ring              821 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
ring              823 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
ring              824 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
ring              831 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			 PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
ring              833 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			 PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
ring              840 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			 PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
ring              842 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 			 PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
ring              853 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct ring_pair_cb *ring =
ring              855 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c 	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
ring              132 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_init_hw(struct ring_pair_cb *ring);
ring               36 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
ring               40 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
ring               41 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
ring               68 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
ring              126 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_ptr_move_fw(ring, next_to_use);
ring              129 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring              133 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
ring              144 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void fill_desc(struct hnae_ring *ring, void *priv,
ring              148 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
ring              149 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
ring              202 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_ptr_move_fw(ring, next_to_use);
ring              205 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void unfill_desc(struct hnae_ring *ring)
ring              207 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_ptr_move_bw(ring, next_to_use);
ring              211 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
ring              220 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
ring              221 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (ring_space(ring) < 1)
ring              231 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	} else if (buf_num > ring_space(ring)) {
ring              240 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
ring              260 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
ring              262 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (ring_space(ring) < buf_num)
ring              271 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	} else if (ring_space(ring) < buf_num) {
ring              279 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void fill_tso_desc(struct hnae_ring *ring, void *priv,
ring              293 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
ring              309 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              310 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct device *dev = ring_to_dev(ring);
ring              319 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
ring              321 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.tx_busy++;
ring              324 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.sw_err_cnt++;
ring              333 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	next_to_use = ring->next_to_use;
ring              340 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.sw_err_cnt++;
ring              343 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
ring              353 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring->stats.sw_err_cnt++;
ring              356 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
ring              377 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	while (ring->next_to_use != next_to_use) {
ring              378 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		unfill_desc(ring);
ring              379 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (ring->next_to_use != next_to_use)
ring              381 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				       ring->desc_cb[ring->next_to_use].dma,
ring              382 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				       ring->desc_cb[ring->next_to_use].length,
ring              386 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					 ring->desc_cb[next_to_use].dma,
ring              387 drivers/net/ethernet/hisilicon/hns/hns_enet.c 					 ring->desc_cb[next_to_use].length,
ring              409 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			       struct hnae_ring *ring, int pull_len,
ring              419 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
ring              421 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	desc = &ring->desc[ring->next_to_clean];
ring              425 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		truesize = hnae_buf_size(ring);
ring              428 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
ring              541 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              552 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	desc = &ring->desc[ring->next_to_clean];
ring              553 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	desc_cb = &ring->desc_cb[ring->next_to_clean];
ring              568 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.sw_err_cnt++;
ring              587 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_ptr_move_fw(ring, next_to_clean);
ring              594 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.seg_pkt_cnt++;
ring              600 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring              601 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_ptr_move_fw(ring, next_to_clean);
ring              608 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			desc = &ring->desc[ring->next_to_clean];
ring              609 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			desc_cb = &ring->desc_cb[ring->next_to_clean];
ring              611 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
ring              612 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring_ptr_move_fw(ring, next_to_clean);
ring              617 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
ring              621 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			   bnum, ring->max_desc_num_per_pkt,
ring              624 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.err_bd_num++;
ring              634 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.non_vld_descs++;
ring              641 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.err_pkt_len++;
ring              647 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.l2_err++;
ring              652 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->stats.rx_pkts++;
ring              653 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->stats.rx_bytes += skb->len;
ring              669 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              673 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		desc_cb = &ring->desc_cb[ring->next_to_use];
ring              675 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring->stats.reuse_pg_cnt++;
ring              676 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			hnae_reuse_buffer(ring, ring->next_to_use);
ring              678 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ret = hnae_reserve_buffer_map(ring, &res_cbs);
ring              680 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				ring->stats.sw_err_cnt++;
ring              684 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
ring              687 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring              691 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
ring              705 drivers/net/ethernet/hisilicon/hns/hns_enet.c static int hns_desc_unused(struct hnae_ring *ring)
ring              707 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int ntc = ring->next_to_clean;
ring              708 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int ntu = ring->next_to_use;
ring              710 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
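
hns_desc_unused() is quoted in full above: it counts descriptors that have been cleaned but not yet refilled, i.e. the slots in [next_to_use, next_to_clean) taken modulo desc_num. A standalone sketch of the same arithmetic, with illustrative demo_* names:

#include <assert.h>
#include <stdio.h>

struct demo_ring {
	int desc_num;		/* total descriptors in the ring */
	int next_to_use;	/* refill index (ntu) */
	int next_to_clean;	/* consume index (ntc) */
};

static int demo_desc_unused(const struct demo_ring *r)
{
	int ntc = r->next_to_clean;
	int ntu = r->next_to_use;

	/* Slots in [ntu, ntc) modulo desc_num: cleaned but not refilled.
	 * When that interval wraps past the end of the ring (ntc < ntu),
	 * desc_num is added so the subtraction stays non-negative.
	 */
	return ((ntc >= ntu) ? 0 : r->desc_num) + ntc - ntu;
}

int main(void)
{
	struct demo_ring r = { .desc_num = 8, .next_to_use = 5, .next_to_clean = 2 };

	assert(demo_desc_unused(&r) == 5);	/* slots 5, 6, 7, 0, 1 */
	printf("unused = %d\n", demo_desc_unused(&r));
	return 0;
}
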
ring              718 drivers/net/ethernet/hisilicon/hns/hns_enet.c static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
ring              720 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	bool coal_enable = ring->q->handle->coal_adapt_en;
ring              723 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
ring              729 drivers/net/ethernet/hisilicon/hns/hns_enet.c static void hns_update_rx_rate(struct hnae_ring *ring)
ring              731 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	bool coal_enable = ring->q->handle->coal_adapt_en;
ring              736 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
ring              740 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
ring              741 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
ring              742 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->coal_last_jiffies = jiffies;
ring              746 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
ring              747 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
ring              749 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->coal_rx_rate = total_bytes >> 10;
ring              751 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
ring              752 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->coal_last_jiffies = jiffies;
ring              778 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              779 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_handle *handle = ring->q->handle;
ring              780 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	u32 new_coal_param, old_coal_param = ring->coal_param;
ring              782 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
ring              784 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
ring              794 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->coal_param = new_coal_param;
ring              823 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              828 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int unused_count = hns_desc_unused(ring);
ring              830 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
ring              842 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			unused_count = hns_desc_unused(ring);
ring              874 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              878 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hns_update_rx_rate(ring);
ring              881 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
ring              882 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
ring              884 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (num <= hns_coal_rx_bdnum(ring)) {
ring              885 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (ring->q->handle->coal_adapt_en)
ring              890 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring              891 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring_data->ring, 1);
ring              901 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              904 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hns_update_rx_rate(ring);
ring              905 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
ring              907 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (num <= hns_coal_rx_bdnum(ring)) {
ring              908 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (ring->q->handle->coal_adapt_en)
ring              917 drivers/net/ethernet/hisilicon/hns/hns_enet.c static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
ring              920 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
ring              925 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	hnae_free_buffer_detach(ring, ring->next_to_clean);
ring              927 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_ptr_move_fw(ring, next_to_clean);
ring              930 drivers/net/ethernet/hisilicon/hns/hns_enet.c static int is_valid_clean_head(struct hnae_ring *ring, int h)
ring              932 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int u = ring->next_to_use;
ring              933 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int c = ring->next_to_clean;
ring              935 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (unlikely(h > ring->desc_num))
ring              938 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	assert(u > 0 && u < ring->desc_num);
ring              939 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	assert(c > 0 && c < ring->desc_num);
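
is_valid_clean_head() (only partially quoted above) sanity-checks the head index reported by TX hardware before descriptors are reclaimed up to it: the value must be in range and must lie between next_to_clean (exclusive) and next_to_use (inclusive), allowing for wrap-around. The sketch below shows one way to express that interval test, with illustrative names; the function's actual return expression is not among the quoted lines, so treat it as an assumption.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_ring {
	int desc_num;
	int next_to_use;	/* u: producer index */
	int next_to_clean;	/* c: consumer index */
};

static bool demo_valid_clean_head(const struct demo_ring *r, int h)
{
	int u = r->next_to_use;
	int c = r->next_to_clean;

	if (h > r->desc_num)	/* out of range: hardware reported garbage */
		return false;

	/* Unwrapped: hardware owns (c, u]. Wrapped: (c, desc_num] and [0, u]. */
	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

int main(void)
{
	struct demo_ring r = { .desc_num = 8, .next_to_use = 6, .next_to_clean = 2 };

	assert(demo_valid_clean_head(&r, 4));	/* inside (2, 6] */
	assert(!demo_valid_clean_head(&r, 7));	/* beyond next_to_use */
	printf("head validation ok\n");
	return 0;
}
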
ring              951 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring              958 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
ring              961 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (is_ring_empty(ring) || head == ring->next_to_clean)
ring              964 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (!is_valid_clean_head(ring, head)) {
ring              966 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			   ring->next_to_use, ring->next_to_clean);
ring              967 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring->stats.io_err_cnt++;
ring              973 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	while (head != ring->next_to_clean) {
ring              974 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
ring              976 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		prefetch(&ring->desc_cb[ring->next_to_clean]);
ring              979 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->stats.tx_pkts += pkts;
ring              980 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring->stats.tx_bytes += bytes;
ring              989 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
ring              997 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring->stats.restart_queue++;
ring             1005 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring             1008 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
ring             1010 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
ring             1012 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (head != ring->next_to_clean) {
ring             1013 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring             1014 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring_data->ring, 1);
ring             1024 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring             1025 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
ring             1027 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	if (head == ring->next_to_clean)
ring             1035 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring             1041 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	head = ring->next_to_use; /* ntu: software-set ring position */
ring             1044 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	while (head != ring->next_to_clean)
ring             1045 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
ring             1056 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring = ring_data->ring;
ring             1065 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
ring             1078 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring             1079 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring_data->ring, 1);
ring             1175 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	enable_irq(priv->ring_data[idx].ring->irq);
ring             1176 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
ring             1227 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
ring             1228 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	disable_irq(priv->ring_data[idx].ring->irq);
ring             1234 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				      struct hnae_ring *ring, cpumask_t *mask)
ring             1243 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (is_tx_ring(ring))
ring             1248 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (is_tx_ring(ring))
ring             1265 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
ring             1266 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
ring             1268 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			free_irq(priv->ring_data[i].ring->irq,
ring             1270 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			priv->ring_data[i].ring->irq_init_flag =
ring             1287 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
ring             1290 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
ring             1292 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
ring             1294 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
ring             1296 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ret = request_irq(rd->ring->irq,
ring             1297 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				  hns_irq_handle, 0, rd->ring->ring_name, rd);
ring             1300 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				   rd->ring->irq);
ring             1303 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		disable_irq(rd->ring->irq);
ring             1306 drivers/net/ethernet/hisilicon/hns/hns_enet.c 						 rd->ring, &rd->mask);
ring             1309 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			irq_set_affinity_hint(rd->ring->irq,
ring             1312 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring->irq_init_flag = RCB_IRQ_INITED;
ring             1620 drivers/net/ethernet/hisilicon/hns/hns_enet.c 	struct hnae_ring *ring;
ring             1655 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		ring = &h->qs[i]->rx_ring;
ring             1656 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
ring             1657 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
ring             1659 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		fetch_num = ring_dist(ring, head, tail);
ring             1662 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			if (ring->desc_cb[head].page_offset != 0) {
ring             1668 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			if (head == ring->desc_num)
ring             2122 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring = &h->qs[i]->tx_ring;
ring             2129 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
ring             2134 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring = &h->qs[i - h->q_num]->rx_ring;
ring             2142 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
ring             2155 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
ring             2157 drivers/net/ethernet/hisilicon/hns/hns_enet.c 				priv->ring_data[i].ring->irq,
ring             2159 drivers/net/ethernet/hisilicon/hns/hns_enet.c 			free_irq(priv->ring_data[i].ring->irq,
ring             2163 drivers/net/ethernet/hisilicon/hns/hns_enet.c 		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
ring               34 drivers/net/ethernet/hisilicon/hns/hns_enet.h 	struct hnae_ring *ring;
ring               45 drivers/net/ethernet/hisilicon/hns/hns_enet.h 	void (*fill_desc)(struct hnae_ring *ring, void *priv,
ring               49 drivers/net/ethernet/hisilicon/hns/hns_enet.h 			     int *bnum, struct hnae_ring *ring);
ring              376 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	struct hnae_ring *ring;
ring              405 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	ring = ring_data->ring;
ring              407 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c 	if (is_tx_ring(ring)) { /* for tx queue reset */
ring               74 drivers/net/ethernet/hisilicon/hns3/hnae3.h #define ring_ptr_move_fw(ring, p) \
ring               75 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
ring               76 drivers/net/ethernet/hisilicon/hns3/hnae3.h #define ring_ptr_move_bw(ring, p) \
ring               77 drivers/net/ethernet/hisilicon/hns3/hnae3.h 	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
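
The two hnae3.h macros quoted above do all ring-pointer movement in these drivers; note how the backward move adds desc_num before the modulo so the C remainder operator never sees a negative operand. A tiny standalone demo with illustrative demo_* names:

#include <assert.h>
#include <stdio.h>

struct demo_ring {
	int desc_num;
	int next_to_use;
};

#define demo_move_fw(ring, p) \
	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define demo_move_bw(ring, p) \
	((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)

int main(void)
{
	struct demo_ring r = { .desc_num = 4, .next_to_use = 3 };

	demo_move_fw(&r, next_to_use);	/* 3 -> 0: wraps at desc_num */
	assert(r.next_to_use == 0);
	demo_move_bw(&r, next_to_use);	/* 0 -> 3: wraps backward */
	assert(r.next_to_use == 3);
	printf("next_to_use = %d\n", r.next_to_use);
	return 0;
}
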
ring               20 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	struct hns3_enet_ring *ring;
ring               57 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
ring               58 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		base_add_h = readl_relaxed(ring->tqp->io_base +
ring               60 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		base_add_l = readl_relaxed(ring->tqp->io_base +
ring               65 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               69 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               73 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               77 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               81 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               85 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring               89 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		ring = ring_data[i].ring;
ring               90 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		base_add_h = readl_relaxed(ring->tqp->io_base +
ring               92 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		base_add_l = readl_relaxed(ring->tqp->io_base +
ring               97 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              101 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              105 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              109 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              113 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              117 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              121 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		value = readl_relaxed(ring->tqp->io_base +
ring              147 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		if (!ring_data || !ring_data->ring ||
ring              148 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 		    !ring_data->ring->tqp_vector)
ring              154 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 			 ring_data->ring->tqp_vector->vector_irq);
ring              166 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	struct hns3_enet_ring *ring;
ring              187 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	ring  = ring_data[q_num].ring;
ring              188 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring              191 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	if (tx_index >= ring->desc_num) {
ring              193 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 			ring->desc_num - 1);
ring              197 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	tx_desc = &ring->desc[tx_index];
ring              217 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	ring  = ring_data[q_num + h->kinfo.num_tqps].ring;
ring              218 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
ring              220 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 	rx_desc	 = &ring->desc[rx_index];
ring              131 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
ring              136 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		} else if (tqp_vectors->rx_group.ring) {
ring              140 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		} else if (tqp_vectors->tx_group.ring) {
ring              426 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
ring              428 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		while (ring) {
ring              433 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 						  ring->tqp->tqp_index);
ring              438 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring = ring->next;
ring             1031 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
ring             1042 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_handle_vtags(ring, skb);
ring             1044 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             1045 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.tx_vlan_err++;
ring             1046 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             1068 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             1069 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.tx_l4_proto_err++;
ring             1070 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             1078 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             1079 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.tx_l2l3l4_err++;
ring             1080 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             1087 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             1088 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.tx_tso_err++;
ring             1089 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             1106 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
ring             1110 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
ring             1111 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
ring             1112 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct device *dev = ring_to_dev(ring);
ring             1122 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_fill_skb_desc(ring, skb, desc);
ring             1133 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             1134 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.sw_err_cnt++;
ring             1135 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             1153 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring             1182 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring             1184 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc_cb = &ring->desc_cb[ring->next_to_use];
ring             1185 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc = &ring->desc[ring->next_to_use];
ring             1251 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
ring             1277 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             1278 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.tx_copy++;
ring             1279 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             1283 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(ring_space(ring) < bd_num))
ring             1289 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring             1291 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct device *dev = ring_to_dev(ring);
ring             1294 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	for (i = 0; i < ring->desc_num; i++) {
ring             1296 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (ring->next_to_use == next_to_use_orig)
ring             1300 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_bw(ring, next_to_use);
ring             1303 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
ring             1305 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					 ring->desc_cb[ring->next_to_use].dma,
ring             1306 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					ring->desc_cb[ring->next_to_use].length,
ring             1308 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		else if (ring->desc_cb[ring->next_to_use].length)
ring             1310 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				       ring->desc_cb[ring->next_to_use].dma,
ring             1311 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				       ring->desc_cb[ring->next_to_use].length,
ring             1314 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->desc_cb[ring->next_to_use].length = 0;
ring             1315 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->desc_cb[ring->next_to_use].dma = 0;
ring             1324 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *ring = ring_data->ring;
ring             1341 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
ring             1344 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             1345 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.tx_busy++;
ring             1346 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             1349 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             1350 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.sw_err_cnt++;
ring             1351 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             1363 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	next_to_use_head = ring->next_to_use;
ring             1365 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
ring             1375 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_fill_desc(ring, frag, size,
ring             1389 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hnae3_queue_xmit(ring->tqp, buf_num);
ring             1394 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_clear_desc(ring, next_to_use_head);
ring             1492 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *ring;
ring             1514 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring = priv->ring_data[idx].ring;
ring             1516 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1517 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_bytes += ring->stats.tx_bytes;
ring             1518 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_pkts += ring->stats.tx_pkts;
ring             1519 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_drop += ring->stats.sw_err_cnt;
ring             1520 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_drop += ring->stats.tx_vlan_err;
ring             1521 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_drop += ring->stats.tx_l4_proto_err;
ring             1522 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_drop += ring->stats.tx_l2l3l4_err;
ring             1523 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_drop += ring->stats.tx_tso_err;
ring             1524 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_errors += ring->stats.sw_err_cnt;
ring             1525 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_errors += ring->stats.tx_vlan_err;
ring             1526 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_errors += ring->stats.tx_l4_proto_err;
ring             1527 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_errors += ring->stats.tx_l2l3l4_err;
ring             1528 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			tx_errors += ring->stats.tx_tso_err;
ring             1529 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             1532 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring = priv->ring_data[idx + queue_num].ring;
ring             1534 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1535 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_bytes += ring->stats.rx_bytes;
ring             1536 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_pkts += ring->stats.rx_pkts;
ring             1537 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_drop += ring->stats.l2_err;
ring             1538 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_errors += ring->stats.l2_err;
ring             1539 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_errors += ring->stats.l3l4_csum_err;
ring             1540 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_crc_errors += ring->stats.l2_err;
ring             1541 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_multicast += ring->stats.rx_multicast;
ring             1542 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			rx_length_errors += ring->stats.err_pkt_len;
ring             1543 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             1717 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tx_ring = priv->ring_data[timeout_queue].ring;
ring             2120 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
ring             2123 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	unsigned int order = hns3_page_order(ring);
ring             2134 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	cb->length = hns3_page_size(ring);
ring             2140 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_free_buffer(struct hns3_enet_ring *ring,
ring             2145 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	else if (!HNAE3_IS_TX_RING(ring))
ring             2150 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
ring             2152 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
ring             2153 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       cb->length, ring_to_dma_dir(ring));
ring             2155 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
ring             2161 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
ring             2165 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring             2166 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				 ring_to_dma_dir(ring));
ring             2168 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring             2169 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       ring_to_dma_dir(ring));
ring             2172 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
ring             2174 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring             2175 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].addr = 0;
ring             2178 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
ring             2180 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_desc_cb *cb = &ring->desc_cb[i];
ring             2182 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!ring->desc_cb[i].dma)
ring             2185 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_buffer_detach(ring, i);
ring             2186 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_free_buffer(ring, cb);
ring             2189 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_free_buffers(struct hns3_enet_ring *ring)
ring             2193 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	for (i = 0; i < ring->desc_num; i++)
ring             2194 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_free_buffer_detach(ring, i);
ring             2198 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_free_desc(struct hns3_enet_ring *ring)
ring             2200 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int size = ring->desc_num * sizeof(ring->desc[0]);
ring             2202 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_free_buffers(ring);
ring             2204 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->desc) {
ring             2205 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dma_free_coherent(ring_to_dev(ring), size,
ring             2206 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				  ring->desc, ring->desc_dma_addr);
ring             2207 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->desc = NULL;
ring             2211 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_desc(struct hns3_enet_ring *ring)
ring             2213 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int size = ring->desc_num * sizeof(ring->desc[0]);
ring             2215 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
ring             2216 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					&ring->desc_dma_addr, GFP_KERNEL);
ring             2217 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!ring->desc)
ring             2223 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
ring             2228 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_alloc_buffer(ring, cb);
ring             2232 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_map_buffer(ring, cb);
ring             2239 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_free_buffer(ring, cb);
ring             2244 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
ring             2246 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
ring             2251 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring             2257 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ring             2261 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	for (i = 0; i < ring->desc_num; i++) {
ring             2262 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_alloc_buffer_attach(ring, i);
ring             2271 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_free_buffer_detach(ring, j);
ring             2276 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
ring             2279 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring             2280 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb[i] = *res_cb;
ring             2281 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring             2282 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].rx.bd_base_info = 0;
ring             2285 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring             2287 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb[i].reuse_flag = 0;
ring             2288 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring             2289 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					 ring->desc_cb[i].page_offset);
ring             2290 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc[i].rx.bd_base_info = 0;
ring             2293 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
ring             2296 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int ntc = ring->next_to_clean;
ring             2300 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc_cb = &ring->desc_cb[ntc];
ring             2304 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_free_buffer_detach(ring, ntc);
ring             2306 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (++ntc == ring->desc_num)
ring             2310 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		prefetch(&ring->desc_cb[ntc]);
ring             2316 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	smp_store_release(&ring->next_to_clean, ntc);
ring             2319 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
ring             2321 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int u = ring->next_to_use;
ring             2322 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int c = ring->next_to_clean;
ring             2324 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(h > ring->desc_num))
ring             2330 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
ring             2332 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
ring             2338 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
ring             2341 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (is_ring_empty(ring) || head == ring->next_to_clean)
ring             2344 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (unlikely(!is_valid_clean_head(ring, head))) {
ring             2346 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			   ring->next_to_use, ring->next_to_clean);
ring             2348 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             2349 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.io_err_cnt++;
ring             2350 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             2356 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
ring             2358 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->tqp_vector->tx_group.total_bytes += bytes;
ring             2359 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->tqp_vector->tx_group.total_packets += pkts;
ring             2361 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_begin(&ring->syncp);
ring             2362 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->stats.tx_bytes += bytes;
ring             2363 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->stats.tx_pkts += pkts;
ring             2364 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_end(&ring->syncp);
ring             2366 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
ring             2370 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
ring             2378 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.restart_queue++;
ring             2383 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_desc_unused(struct hns3_enet_ring *ring)
ring             2385 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int ntc = ring->next_to_clean;
ring             2386 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int ntu = ring->next_to_use;
ring             2388 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
ring             2391 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring             2399 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc_cb = &ring->desc_cb[ring->next_to_use];
ring             2401 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             2402 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.reuse_pg_cnt++;
ring             2403 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             2405 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_reuse_buffer(ring, ring->next_to_use);
ring             2407 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
ring             2409 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				u64_stats_update_begin(&ring->syncp);
ring             2410 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				ring->stats.sw_err_cnt++;
ring             2411 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				u64_stats_update_end(&ring->syncp);
ring             2413 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				hns3_rl_err(ring->tqp_vector->napi.dev,
ring             2418 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
ring             2420 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_begin(&ring->syncp);
ring             2421 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.non_reuse_pg++;
ring             2422 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			u64_stats_update_end(&ring->syncp);
ring             2425 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring             2429 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
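
hns3_nic_alloc_rx_buffers() above shows the refill pattern both drivers share: for each slot to restock, either reuse the still-mapped page at next_to_use or map a replacement, advance the pointer, and finally write the count to the ring's head register as a doorbell. A standalone sketch of the loop's shape, with illustrative names and the DMA work stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define DESC_NUM 8

struct demo_ring {
	int next_to_use;
	bool reusable[DESC_NUM];	/* stand-in for desc_cb[i].reuse_flag */
};

/* Stand-ins for the reuse/replace helpers; the real code remaps DMA here. */
static void demo_reuse(int i)   { printf("reuse   slot %d\n", i); }
static void demo_replace(int i) { printf("replace slot %d\n", i); }

static void demo_refill(struct demo_ring *r, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (r->reusable[r->next_to_use])
			demo_reuse(r->next_to_use);
		else
			demo_replace(r->next_to_use);
		r->next_to_use = (r->next_to_use + 1) % DESC_NUM;
	}

	/* Doorbell, as in: writel_relaxed(i, ring->tqp->io_base +
	 * HNS3_RING_RX_RING_HEAD_REG);
	 */
	printf("returned %d buffers to hardware\n", i);
}

int main(void)
{
	struct demo_ring r = {
		.next_to_use = 6,
		.reusable = { false, true, false, true, false, false, true, false },
	};

	demo_refill(&r, 3);	/* restocks slots 6, 7, 0 */
	return 0;
}
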
ring             2433 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				struct hns3_enet_ring *ring, int pull_len,
ring             2436 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
ring             2438 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u32 truesize = hns3_buf_size(ring);
ring             2453 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
ring             2519 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
ring             2522 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
ring             2540 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             2541 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.l3l4_csum_err++;
ring             2542 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             2573 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
ring             2576 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		napi_gro_flush(&ring->tqp_vector->napi, false);
ring             2578 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	napi_gro_receive(&ring->tqp_vector->napi, skb);
ring             2581 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
ring             2585 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hnae3_handle *handle = ring->tqp->handle;
ring             2586 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct pci_dev *pdev = ring->tqp->handle->pdev;
ring             2633 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
ring             2637 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
ring             2638 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
ring             2641 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
ring             2642 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb = ring->skb;
ring             2646 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             2647 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.sw_err_cnt++;
ring             2648 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             2655 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->pending_buf = 1;
ring             2656 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->frag_num = 0;
ring             2657 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->tail_skb = NULL;
ring             2667 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_clean);
ring             2670 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_begin(&ring->syncp);
ring             2671 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->stats.seg_pkt_cnt++;
ring             2672 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_end(&ring->syncp);
ring             2674 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
ring             2675 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	__skb_put(skb, ring->pull_len);
ring             2676 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
ring             2678 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring_ptr_move_fw(ring, next_to_clean);
ring             2683 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
ring             2698 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
ring             2699 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			 ring->desc_num;
ring             2700 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		pre_desc = &ring->desc[pre_bd];
ring             2707 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc = &ring->desc[ring->next_to_clean];
ring             2708 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		desc_cb = &ring->desc_cb[ring->next_to_clean];
ring             2715 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
ring             2716 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
ring             2719 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				hns3_rl_err(ring->tqp_vector->napi.dev,
ring             2723 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->frag_num = 0;
ring             2725 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			if (ring->tail_skb) {
ring             2726 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				ring->tail_skb->next = new_skb;
ring             2727 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				ring->tail_skb = new_skb;
ring             2730 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				ring->tail_skb = new_skb;
ring             2734 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (ring->tail_skb) {
ring             2735 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			head_skb->truesize += hns3_buf_size(ring);
ring             2738 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			skb = ring->tail_skb;
ring             2741 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
ring             2742 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_clean);
ring             2743 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->pending_buf++;
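
The pre_bd computation in hns3_add_frag() above steps one descriptor backwards with wraparound. Adding desc_num before taking the modulo keeps the intermediate value non-negative, which matters because C's % operator can yield a negative result for a negative left operand. A small sketch with a hypothetical name:

    #include <assert.h>

    /* Index of the descriptor preceding idx in a ring of n slots. */
    static int prev_bd(int idx, int n)
    {
        return (idx - 1 + n) % n;
    }

    int main(void)
    {
        assert(prev_bd(5, 512) == 4);
        assert(prev_bd(0, 512) == 511);  /* wraps to the last slot */
        return 0;
    }
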
ring             2749 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
ring             2760 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
ring             2779 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
ring             2782 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hnae3_handle *handle = ring->tqp->handle;
ring             2793 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ring             2795 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
ring             2806 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
ring             2807 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					(ring->desc_num - 1);
ring             2808 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	desc = &ring->desc[pre_ntc];
ring             2820 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
ring             2827 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             2829 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.l2_err++;
ring             2831 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->stats.err_pkt_len++;
ring             2832 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             2843 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_set_gro_and_checksum(ring, skb, l234info,
ring             2846 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_begin(&ring->syncp);
ring             2847 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.rx_err_cnt++;
ring             2848 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_update_end(&ring->syncp);
ring             2855 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_begin(&ring->syncp);
ring             2856 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->stats.rx_pkts++;
ring             2857 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->stats.rx_bytes += len;
ring             2860 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->stats.rx_multicast++;
ring             2862 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	u64_stats_update_end(&ring->syncp);
ring             2864 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->tqp_vector->rx_group.total_bytes += len;
ring             2866 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
ring             2870 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring             2873 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = ring->skb;
ring             2880 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	desc = &ring->desc[ring->next_to_clean];
ring             2881 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	desc_cb = &ring->desc_cb[ring->next_to_clean];
ring             2893 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
ring             2902 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	prefetch(ring->va);
ring             2904 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	prefetch(ring->va + L1_CACHE_BYTES);
ring             2908 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_alloc_skb(ring, length, ring->va);
ring             2909 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		*out_skb = skb = ring->skb;
ring             2914 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ret = hns3_add_frag(ring, desc, &skb, false);
ring             2921 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			memcpy(skb->data, ring->va,
ring             2922 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       ALIGN(ring->pull_len, sizeof(long)));
ring             2925 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_add_frag(ring, desc, &skb, true);
ring             2932 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		memcpy(skb->data, ring->va,
ring             2933 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		       ALIGN(ring->pull_len, sizeof(long)));
ring             2936 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_handle_bdinfo(ring, skb);
ring             2942 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	skb_record_rx_queue(skb, ring->tqp->tqp_index);
ring             2948 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
ring             2952 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	int unused_count = hns3_desc_unused(ring);
ring             2953 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct sk_buff *skb = ring->skb;
ring             2958 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
ring             2962 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	unused_count -= ring->pending_buf;
ring             2967 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_nic_alloc_rx_buffers(ring, unused_count);
ring             2968 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			unused_count = hns3_desc_unused(ring) -
ring             2969 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					ring->pending_buf;
ring             2973 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		err = hns3_handle_rx_bd(ring, &skb);
ring             2980 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			recv_bds += ring->pending_buf;
ring             2981 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			unused_count += ring->pending_buf;
ring             2982 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->skb = NULL;
ring             2983 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->pending_buf = 0;
ring             2987 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		rx_fn(ring, skb);
ring             2988 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		recv_bds += ring->pending_buf;
ring             2989 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		unused_count += ring->pending_buf;
ring             2990 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
ring             2991 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->pending_buf = 0;
ring             2999 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_nic_alloc_rx_buffers(ring, unused_count);
ring             3015 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tqp_vector = ring_group->ring->tqp_vector;
ring             3070 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!ring_group->ring)
ring             3073 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tqp_vector = ring_group->ring->tqp_vector;
ring             3142 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *ring;
ring             3158 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_for_each_ring(ring, tqp_vector->tx_group)
ring             3159 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_clean_tx_ring(ring);
ring             3165 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_for_each_ring(ring, tqp_vector->rx_group) {
ring             3166 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
ring             3198 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	tx_ring = tqp_vector->tx_group.ring;
ring             3229 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	rx_ring = tqp_vector->rx_group.ring;
ring             3288 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				   struct hns3_enet_ring *ring)
ring             3290 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->next = group->ring;
ring             3291 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	group->ring = ring;
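
hns3_add_ring_to_group() above chains a ring onto its group by head insertion into a singly linked list; the hns3_for_each_ring() macro shown later in hns3_enet.h walks that list via the next pointers. A minimal sketch with hypothetical my_* types:

    #include <assert.h>
    #include <stddef.h>

    struct my_ring { struct my_ring *next; };
    struct my_group { struct my_ring *ring; };

    /* Head insertion: the newest ring becomes the list head. */
    static void add_ring(struct my_group *g, struct my_ring *r)
    {
        r->next = g->ring;
        g->ring = r;
    }

    int main(void)
    {
        struct my_ring a = { NULL }, b = { NULL };
        struct my_group g = { NULL };

        add_ring(&g, &a);
        add_ring(&g, &b);
        assert(g.ring == &b && g.ring->next == &a);
        return 0;
    }
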
ring             3336 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				       priv->ring_data[i].ring);
ring             3339 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				       priv->ring_data[i + tqp_num].ring);
ring             3341 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		priv->ring_data[i].ring->tqp_vector = tqp_vector;
ring             3342 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
ring             3431 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	group->ring = NULL;
ring             3445 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
ring             3492 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hns3_enet_ring *ring;
ring             3495 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
ring             3496 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!ring)
ring             3501 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_data[q->tqp_index].ring = ring;
ring             3503 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
ring             3506 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_data[q->tqp_index + queue_num].ring = ring;
ring             3508 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->io_base = q->io_base;
ring             3511 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
ring             3513 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->tqp = q;
ring             3514 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc = NULL;
ring             3515 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb = NULL;
ring             3516 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->dev = priv->dev;
ring             3517 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_dma_addr = 0;
ring             3518 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->buf_size = q->buf_size;
ring             3519 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_num = desc_num;
ring             3520 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->next_to_use = 0;
ring             3521 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->next_to_clean = 0;
ring             3537 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
ring             3567 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		devm_kfree(priv->dev, priv->ring_data[i].ring);
ring             3569 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
ring             3586 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		devm_kfree(priv->dev, priv->ring_data[i].ring);
ring             3588 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			   priv->ring_data[i + h->kinfo.num_tqps].ring);
ring             3594 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
ring             3598 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->desc_num <= 0 || ring->buf_size <= 0)
ring             3601 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
ring             3602 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				     sizeof(ring->desc_cb[0]), GFP_KERNEL);
ring             3603 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!ring->desc_cb) {
ring             3608 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ret = hns3_alloc_desc(ring);
ring             3612 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!HNAE3_IS_TX_RING(ring)) {
ring             3613 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_alloc_ring_buffers(ring);
ring             3621 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_free_desc(ring);
ring             3623 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring             3624 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb = NULL;
ring             3629 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c void hns3_fini_ring(struct hns3_enet_ring *ring)
ring             3631 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	hns3_free_desc(ring);
ring             3632 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring             3633 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->desc_cb = NULL;
ring             3634 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->next_to_clean = 0;
ring             3635 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->next_to_use = 0;
ring             3636 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	ring->pending_buf = 0;
ring             3637 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->skb) {
ring             3638 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(ring->skb);
ring             3639 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
ring             3667 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
ring             3669 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	dma_addr_t dma = ring->desc_dma_addr;
ring             3670 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	struct hnae3_queue *q = ring->tqp;
ring             3672 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (!HNAE3_IS_TX_RING(ring)) {
ring             3678 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       hns3_buf_size2type(ring->buf_size));
ring             3680 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       ring->desc_num / 8 - 1);
ring             3689 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			       ring->desc_num / 8 - 1);
ring             3708 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
ring             3723 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
ring             3730 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		u64_stats_init(&priv->ring_data[i].ring->syncp);
ring             3737 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_fini_ring(priv->ring_data[j].ring);
ring             3748 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_fini_ring(priv->ring_data[i].ring);
ring             3749 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
ring             4086 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
ring             4088 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	while (ring->next_to_clean != ring->next_to_use) {
ring             4089 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
ring             4090 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_free_buffer_detach(ring, ring->next_to_clean);
ring             4091 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_clean);
ring             4095 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ring             4100 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	while (ring->next_to_use != ring->next_to_clean) {
ring             4105 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ring             4106 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ret = hns3_reserve_buffer_map(ring, &res_cbs);
ring             4108 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				u64_stats_update_begin(&ring->syncp);
ring             4109 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				ring->stats.sw_err_cnt++;
ring             4110 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				u64_stats_update_end(&ring->syncp);
ring             4114 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 				netdev_warn(ring->tqp->handle->kinfo.netdev,
ring             4119 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
ring             4121 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring             4125 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	if (ring->skb) {
ring             4126 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		dev_kfree_skb_any(ring->skb);
ring             4127 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->skb = NULL;
ring             4128 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring->pending_buf = 0;
ring             4134 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
ring             4136 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	while (ring->next_to_use != ring->next_to_clean) {
ring             4141 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ring             4142 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_unmap_buffer(ring,
ring             4143 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 					  &ring->desc_cb[ring->next_to_use]);
ring             4144 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			ring->desc_cb[ring->next_to_use].dma = 0;
ring             4147 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring_ptr_move_fw(ring, next_to_use);
ring             4158 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		struct hns3_enet_ring *ring;
ring             4160 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring = priv->ring_data[i].ring;
ring             4161 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_clear_tx_ring(ring);
ring             4163 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
ring             4168 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_force_clear_rx_ring(ring);
ring             4170 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 			hns3_clear_rx_ring(ring);
ring             4187 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_init_ring_hw(priv->ring_data[i].ring);
ring             4192 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		hns3_clear_tx_ring(priv->ring_data[i].ring);
ring             4193 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		priv->ring_data[i].ring->next_to_clean = 0;
ring             4194 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		priv->ring_data[i].ring->next_to_use = 0;
ring             4196 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
ring              438 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	struct hns3_enet_ring *ring;
ring              470 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	struct hns3_enet_ring *ring;
ring              560 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h static inline int ring_space(struct hns3_enet_ring *ring)
ring              565 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	int begin = smp_load_acquire(&ring->next_to_clean);
ring              566 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	int end = READ_ONCE(ring->next_to_use);
ring              568 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	return ((end >= begin) ? (ring->desc_num - end + begin) :
ring              572 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h static inline int is_ring_empty(struct hns3_enet_ring *ring)
ring              574 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	return ring->next_to_use == ring->next_to_clean;
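
ring_space() above pairs an acquire load of next_to_clean with a plain READ_ONCE of next_to_use, so the TX producer observes the consumer's completed cleaning before reusing those slots. The else branch and the trailing "- 1" (one slot reserved so full and empty stay distinguishable) are elided from the excerpt; the sketch below assumes that usual form. This is a userspace analogue using C11 atomics in place of smp_load_acquire()/smp_store_release(), not the kernel primitives:

    #include <stdatomic.h>

    struct ring_idx {
        _Atomic int next_to_clean;  /* advanced by the cleaning side */
        int next_to_use;            /* owned by the producing side */
        int desc_num;
    };

    /* Consumer: publish cleaning progress with release semantics. */
    static void publish_clean(struct ring_idx *r, int ntc)
    {
        atomic_store_explicit(&r->next_to_clean, ntc, memory_order_release);
    }

    /* Producer: acquire-load the consumer index, then compute space. */
    static int ring_space(struct ring_idx *r)
    {
        int begin = atomic_load_explicit(&r->next_to_clean,
                                         memory_order_acquire);
        int end = r->next_to_use;

        return ((end >= begin) ? (r->desc_num - end + begin)
                               : (begin - end)) - 1;
    }

    int main(void)
    {
        struct ring_idx r = { 0, 0, 8 };

        publish_clean(&r, 2);
        r.next_to_use = 6;
        return ring_space(&r) == 3 ? 0 : 1;  /* (8 - 6 + 2) - 1 */
    }
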
ring              614 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h #define ring_to_dev(ring) ((ring)->dev)
ring              616 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
ring              623 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
ring              626 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	if (ring->buf_size > (PAGE_SIZE / 2))
ring              636 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 	for (pos = (head).ring; pos; pos = pos->next)
ring              651 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
ring              655 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h void hns3_fini_ring(struct hns3_enet_ring *ring);
ring              659 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h 		struct hns3_enet_ring *ring, int budget,
ring              172 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
ring              175 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
ring              201 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
ring              205 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		rx_group = &ring->tqp_vector->rx_group;
ring              209 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
ring              224 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
ring              226 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		hns3_clean_tx_ring(ring);
ring              483 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	struct hns3_enet_ring *ring;
ring              489 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		ring = nic_priv->ring_data[i].ring;
ring              491 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
ring              498 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
ring              500 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
ring              601 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	param->tx_pending = priv->ring_data[0].ring->desc_num;
ring              602 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
ring              904 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		priv->ring_data[i].ring->desc_num = tx_desc_num;
ring              905 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
ring              922 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 		memcpy(&tmp_rings[i], priv->ring_data[i].ring,
ring              970 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	old_tx_desc_num = priv->ring_data[0].ring->desc_num;
ring              971 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
ring             1000 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 			memcpy(priv->ring_data[i].ring, &tmp_rings[i],
ring             1101 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
ring             1102 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
ring             1227 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	tx_vector = priv->ring_data[queue].ring->tqp_vector;
ring             1228 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
ring               14 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
ring               16 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c #define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
ring               18 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static int hclge_ring_space(struct hclge_cmq_ring *ring)
ring               20 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int ntu = ring->next_to_use;
ring               21 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int ntc = ring->next_to_clean;
ring               22 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
ring               24 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	return ring->desc_num - used - 1;
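
hclge_ring_space() above is the modulo form of the same occupancy computation: fold the producer/consumer difference into [0, desc_num), then subtract one reserved slot so a completely full ring never looks empty. A runnable sketch:

    #include <assert.h>

    static int ring_space(int ntu, int ntc, int desc_num)
    {
        int used = (ntu - ntc + desc_num) % desc_num;

        return desc_num - used - 1;
    }

    int main(void)
    {
        assert(ring_space(0, 0, 1024) == 1023);  /* empty, one reserved */
        assert(ring_space(1023, 0, 1024) == 0);  /* full: no free slots */
        return 0;
    }
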
ring               27 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
ring               29 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int ntu = ring->next_to_use;
ring               30 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int ntc = ring->next_to_clean;
ring               38 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
ring               40 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int size  = ring->desc_num * sizeof(struct hclge_desc);
ring               42 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
ring               43 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 					&ring->desc_dma_addr, GFP_KERNEL);
ring               44 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	if (!ring->desc)
ring               50 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
ring               52 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	int size  = ring->desc_num * sizeof(struct hclge_desc);
ring               54 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	if (ring->desc) {
ring               55 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 		dma_free_coherent(cmq_ring_to_dev(ring), size,
ring               56 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 				  ring->desc, ring->desc_dma_addr);
ring               57 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 		ring->desc = NULL;
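
hclge_alloc_cmd_desc()/hclge_free_cmd_desc() above show the usual lifetime pairing for a command queue: one dma_alloc_coherent() block sized by descriptor count, and a guarded free that NULLs the pointer so a second tear-down is harmless. A hedged kernel-style sketch (builds only in-tree) with hypothetical my_* names, not the driver's structures:

    #include <linux/dma-mapping.h>

    struct my_desc { __le32 raw[6]; };

    struct my_cmq_ring {
        struct device *dev;
        struct my_desc *desc;
        dma_addr_t desc_dma_addr;
        int desc_num;
    };

    static int my_alloc_cmd_desc(struct my_cmq_ring *ring)
    {
        int size = ring->desc_num * sizeof(struct my_desc);

        ring->desc = dma_alloc_coherent(ring->dev, size,
                                        &ring->desc_dma_addr, GFP_KERNEL);
        return ring->desc ? 0 : -ENOMEM;
    }

    static void my_free_cmd_desc(struct my_cmq_ring *ring)
    {
        int size = ring->desc_num * sizeof(struct my_desc);

        if (ring->desc) {
            dma_free_coherent(ring->dev, size,
                              ring->desc, ring->desc_dma_addr);
            ring->desc = NULL;  /* make repeated tear-down safe */
        }
    }
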
ring               64 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	struct hclge_cmq_ring *ring =
ring               68 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	ring->ring_type = ring_type;
ring               69 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	ring->dev = hdev;
ring               71 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	ret = hclge_alloc_cmd_desc(ring);
ring              101 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
ring              103 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	dma_addr_t dma = ring->desc_dma_addr;
ring              104 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	struct hclge_dev *hdev = ring->dev;
ring              108 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	if (ring->ring_type == HCLGE_TYPE_CSQ) {
ring              115 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 		reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
ring              125 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 				ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
ring              483 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
ring              485 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	spin_lock(&ring->lock);
ring              486 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	hclge_free_cmd_desc(ring);
ring              487 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c 	spin_unlock(&ring->lock);
ring             5583 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
ring             5597 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		if (ring >= tqps) {
ring             5600 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 				ring, tqps - 1);
ring             5605 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 		q_index = ring;
ring               14 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c #define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
ring               15 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c #define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
ring               17 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c #define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
ring               19 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
ring               21 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int ntc = ring->next_to_clean;
ring               22 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int ntu = ring->next_to_use;
ring               25 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	used = (ntu - ntc + ring->desc_num) % ring->desc_num;
ring               27 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	return ring->desc_num - used - 1;
ring               30 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
ring               33 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int ntu = ring->next_to_use;
ring               34 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int ntc = ring->next_to_clean;
ring               88 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
ring               90 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	struct hclgevf_dev *hdev = ring->dev;
ring               94 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	if (ring->flag == HCLGEVF_TYPE_CSQ) {
ring               95 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val = (u32)ring->desc_dma_addr;
ring               97 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
ring              102 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
ring              108 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val = (u32)ring->desc_dma_addr;
ring              110 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
ring              113 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
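
The register setup above writes the descriptor base address as two 32-bit halves, and the high half is formed as (addr >> 31) >> 1 rather than addr >> 32. When dma_addr_t is a 32-bit type, shifting by the full type width is undefined behaviour in C; the two-step shift is always defined and produces the same result for 64-bit addresses. A runnable check with hypothetical helpers:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t addr_lo(uint64_t a) { return (uint32_t)a; }
    static uint32_t addr_hi(uint64_t a) { return (uint32_t)((a >> 31) >> 1); }

    int main(void)
    {
        uint64_t dma = 0x0000001234abcd00ULL;

        assert(addr_lo(dma) == 0x34abcd00u);
        assert(addr_hi(dma) == 0x12u);  /* same as dma >> 32, without UB */
        return 0;
    }
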
ring              127 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
ring              129 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
ring              131 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
ring              132 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 					&ring->desc_dma_addr, GFP_KERNEL);
ring              133 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	if (!ring->desc)
ring              139 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
ring              141 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	int size  = ring->desc_num * sizeof(struct hclgevf_desc);
ring              143 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	if (ring->desc) {
ring              144 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		dma_free_coherent(cmq_ring_to_dev(ring), size,
ring              145 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 				  ring->desc, ring->desc_dma_addr);
ring              146 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 		ring->desc = NULL;
ring              153 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	struct hclgevf_cmq_ring *ring =
ring              157 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	ring->dev = hdev;
ring              158 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	ring->flag = ring_type;
ring              161 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c 	ret = hclgevf_alloc_cmd_desc(ring);
ring              136 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 				struct ethtool_ringparam *ring)
ring              138 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	ring->rx_max_pending = HINIC_RQ_DEPTH;
ring              139 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	ring->tx_max_pending = HINIC_SQ_DEPTH;
ring              140 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	ring->rx_pending = HINIC_RQ_DEPTH;
ring              141 drivers/net/ethernet/huawei/hinic/hinic_ethtool.c 	ring->tx_pending = HINIC_SQ_DEPTH;
ring             2444 drivers/net/ethernet/ibm/ibmvnic.c 				  struct ethtool_ringparam *ring)
ring             2449 drivers/net/ethernet/ibm/ibmvnic.c 		ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
ring             2450 drivers/net/ethernet/ibm/ibmvnic.c 		ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring             2452 drivers/net/ethernet/ibm/ibmvnic.c 		ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
ring             2453 drivers/net/ethernet/ibm/ibmvnic.c 		ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
ring             2455 drivers/net/ethernet/ibm/ibmvnic.c 	ring->rx_mini_max_pending = 0;
ring             2456 drivers/net/ethernet/ibm/ibmvnic.c 	ring->rx_jumbo_max_pending = 0;
ring             2457 drivers/net/ethernet/ibm/ibmvnic.c 	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
ring             2458 drivers/net/ethernet/ibm/ibmvnic.c 	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
ring             2459 drivers/net/ethernet/ibm/ibmvnic.c 	ring->rx_mini_pending = 0;
ring             2460 drivers/net/ethernet/ibm/ibmvnic.c 	ring->rx_jumbo_pending = 0;
ring             2464 drivers/net/ethernet/ibm/ibmvnic.c 				 struct ethtool_ringparam *ring)
ring             2470 drivers/net/ethernet/ibm/ibmvnic.c 	adapter->desired.rx_entries = ring->rx_pending;
ring             2471 drivers/net/ethernet/ibm/ibmvnic.c 	adapter->desired.tx_entries = ring->tx_pending;
ring             2476 drivers/net/ethernet/ibm/ibmvnic.c 	    (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
ring             2477 drivers/net/ethernet/ibm/ibmvnic.c 	     adapter->req_tx_entries_per_subcrq != ring->tx_pending))
ring             2480 drivers/net/ethernet/ibm/ibmvnic.c 			    ring->rx_pending, ring->tx_pending,
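
The ibmvnic pair above follows the two-argument ethtool ringparam contract visible throughout this listing: get_ringparam() reports the hardware maxima and current depths, zeroing the mini/jumbo fields it does not support, while set_ringparam() validates the request before reconfiguring. A hedged sketch of the get side; my_priv, MY_MAX_DESC and the fields below are illustrative, not a real driver:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    #define MY_MAX_DESC 4096

    struct my_priv { u32 rx_count, tx_count; };

    static void my_get_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
    {
        struct my_priv *priv = netdev_priv(netdev);

        ring->rx_max_pending = MY_MAX_DESC;
        ring->tx_max_pending = MY_MAX_DESC;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = priv->rx_count;
        ring->tx_pending = priv->tx_count;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
    }
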
ring             2550 drivers/net/ethernet/intel/e100.c 	struct ethtool_ringparam *ring)
ring             2556 drivers/net/ethernet/intel/e100.c 	ring->rx_max_pending = rfds->max;
ring             2557 drivers/net/ethernet/intel/e100.c 	ring->tx_max_pending = cbs->max;
ring             2558 drivers/net/ethernet/intel/e100.c 	ring->rx_pending = rfds->count;
ring             2559 drivers/net/ethernet/intel/e100.c 	ring->tx_pending = cbs->count;
ring             2563 drivers/net/ethernet/intel/e100.c 	struct ethtool_ringparam *ring)
ring             2569 drivers/net/ethernet/intel/e100.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring             2574 drivers/net/ethernet/intel/e100.c 	rfds->count = max(ring->rx_pending, rfds->min);
ring             2576 drivers/net/ethernet/intel/e100.c 	cbs->count = max(ring->tx_pending, cbs->min);
ring              544 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 				struct ethtool_ringparam *ring)
ring              552 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
ring              554 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
ring              556 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	ring->rx_pending = rxdr->count;
ring              557 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	ring->tx_pending = txdr->count;
ring              561 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 			       struct ethtool_ringparam *ring)
ring              570 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              596 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
ring              600 drivers/net/ethernet/intel/e1000/e1000_ethtool.c 	txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
ring              382 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
ring              383 drivers/net/ethernet/intel/e1000/e1000_main.c 		adapter->alloc_rx_buf(adapter, ring,
ring              384 drivers/net/ethernet/intel/e1000/e1000_main.c 				      E1000_DESC_UNUSED(ring));
ring             2179 drivers/net/ethernet/intel/e1000/e1000_main.c 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
ring             2181 drivers/net/ethernet/intel/e1000/e1000_main.c 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
ring              251 drivers/net/ethernet/intel/e1000e/e1000.h 	bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
ring              253 drivers/net/ethernet/intel/e1000e/e1000.h 	void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
ring              475 drivers/net/ethernet/intel/e1000e/e1000.h int e1000e_setup_rx_resources(struct e1000_ring *ring);
ring              476 drivers/net/ethernet/intel/e1000e/e1000.h int e1000e_setup_tx_resources(struct e1000_ring *ring);
ring              477 drivers/net/ethernet/intel/e1000e/e1000.h void e1000e_free_rx_resources(struct e1000_ring *ring);
ring              478 drivers/net/ethernet/intel/e1000e/e1000.h void e1000e_free_tx_resources(struct e1000_ring *ring);
ring              653 drivers/net/ethernet/intel/e1000e/ethtool.c 				struct ethtool_ringparam *ring)
ring              657 drivers/net/ethernet/intel/e1000e/ethtool.c 	ring->rx_max_pending = E1000_MAX_RXD;
ring              658 drivers/net/ethernet/intel/e1000e/ethtool.c 	ring->tx_max_pending = E1000_MAX_TXD;
ring              659 drivers/net/ethernet/intel/e1000e/ethtool.c 	ring->rx_pending = adapter->rx_ring_count;
ring              660 drivers/net/ethernet/intel/e1000e/ethtool.c 	ring->tx_pending = adapter->tx_ring_count;
ring              664 drivers/net/ethernet/intel/e1000e/ethtool.c 			       struct ethtool_ringparam *ring)
ring              672 drivers/net/ethernet/intel/e1000e/ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              675 drivers/net/ethernet/intel/e1000e/ethtool.c 	new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD,
ring              679 drivers/net/ethernet/intel/e1000e/ethtool.c 	new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD,
ring              472 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_desc_unused(struct e1000_ring *ring)
ring              474 drivers/net/ethernet/intel/e1000e/netdev.c 	if (ring->next_to_clean > ring->next_to_use)
ring              475 drivers/net/ethernet/intel/e1000e/netdev.c 		return ring->next_to_clean - ring->next_to_use - 1;
ring              477 drivers/net/ethernet/intel/e1000e/netdev.c 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
ring             2304 drivers/net/ethernet/intel/e1000e/netdev.c 				struct e1000_ring *ring)
ring             2308 drivers/net/ethernet/intel/e1000e/netdev.c 	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
ring             2310 drivers/net/ethernet/intel/e1000e/netdev.c 	if (!ring->desc)
ring               55 drivers/net/ethernet/intel/fm10k/fm10k.h #define check_for_tx_hang(ring) \
ring               56 drivers/net/ethernet/intel/fm10k/fm10k.h 	test_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
ring               57 drivers/net/ethernet/intel/fm10k/fm10k.h #define set_check_for_tx_hang(ring) \
ring               58 drivers/net/ethernet/intel/fm10k/fm10k.h 	set_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
ring               59 drivers/net/ethernet/intel/fm10k/fm10k.h #define clear_check_for_tx_hang(ring) \
ring               60 drivers/net/ethernet/intel/fm10k/fm10k.h 	clear_bit(__FM10K_TX_DETECT_HANG, (ring)->state)
ring              146 drivers/net/ethernet/intel/fm10k/fm10k.h 	struct fm10k_ring *ring;	/* pointer to linked list of rings */
ring              167 drivers/net/ethernet/intel/fm10k/fm10k.h static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
ring              169 drivers/net/ethernet/intel/fm10k/fm10k.h 	return &ring->netdev->_tx[ring->queue_index];
ring              174 drivers/net/ethernet/intel/fm10k/fm10k.h 	for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)
ring              201 drivers/net/ethernet/intel/fm10k/fm10k.h 	struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
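
The ring[0] member above is a trailing flexible array: each fm10k q_vector and all of its rings live in one allocation (sized later in fm10k_main.c with struct_size()). A userspace sketch of the same layout, using the standard C ring[] spelling instead of the older [0] idiom:

    #include <stdlib.h>

    struct my_ring { int count; };

    struct my_q_vector {
        int ring_count;
        struct my_ring ring[];  /* rings stored inline after the header */
    };

    int main(void)
    {
        int n = 4;
        struct my_q_vector *q =
            calloc(1, sizeof(*q) + n * sizeof(struct my_ring));

        if (!q)
            return 1;
        q->ring_count = n;
        free(q);
        return 0;
    }
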
ring              422 drivers/net/ethernet/intel/fm10k/fm10k.h static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
ring              424 drivers/net/ethernet/intel/fm10k/fm10k.h 	s16 unused = ring->next_to_clean - ring->next_to_use - 1;
ring              426 drivers/net/ethernet/intel/fm10k/fm10k.h 	return likely(unused < 0) ? unused + ring->count : unused;
ring              486 drivers/net/ethernet/intel/fm10k/fm10k.h u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw);
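
fm10k_desc_unused() above leans on signed arithmetic instead of a comparison: the s16 difference goes negative exactly when the ring has wrapped, and adding the ring size folds it back into range; the "- 1" keeps one slot reserved. A runnable sketch:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t desc_unused(uint16_t ntc, uint16_t ntu, uint16_t count)
    {
        int16_t unused = ntc - ntu - 1;  /* negative iff wrapped */

        return unused < 0 ? unused + count : unused;
    }

    int main(void)
    {
        assert(desc_unused(0, 0, 256) == 255);    /* empty, one reserved */
        assert(desc_unused(10, 200, 256) == 65);  /* 10 - 200 - 1 + 256 */
        return 0;
    }
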
ring               15 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_ring *ring = s->private;
ring               17 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	return (*pos < ring->count) ? pos : NULL;
ring               24 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_ring *ring = s->private;
ring               26 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	return (++(*pos) < ring->count) ? pos : NULL;
ring               45 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_ring *ring = s->private;
ring               57 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	if (!ring->desc) {
ring               60 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i);
ring               72 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_ring *ring = s->private;
ring               84 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	if (!ring->desc) {
ring               87 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i);
ring              115 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_ring *ring = inode->i_private;
ring              116 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	struct fm10k_q_vector *q_vector = ring->q_vector;
ring              120 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	if (ring < q_vector->rx.ring)
ring              129 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 	((struct seq_file *)filep->private_data)->private = ring;
ring              166 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		struct fm10k_ring *ring = &q_vector->tx.ring[i];
ring              168 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);
ring              171 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 				    q_vector->dbg_q_vector, ring,
ring              177 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		struct fm10k_ring *ring = &q_vector->rx.ring[i];
ring              179 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 		snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);
ring              182 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c 				    q_vector->dbg_q_vector, ring,
ring              283 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		struct fm10k_ring *ring;
ring              285 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		ring = interface->tx_ring[i];
ring              286 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		fm10k_add_ethtool_stats(&data, ring,
ring              289 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		ring = interface->rx_ring[i];
ring              290 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 		fm10k_add_ethtool_stats(&data, ring,
ring              506 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 				struct ethtool_ringparam *ring)
ring              510 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_max_pending = FM10K_MAX_RXD;
ring              511 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->tx_max_pending = FM10K_MAX_TXD;
ring              512 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_mini_max_pending = 0;
ring              513 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_jumbo_max_pending = 0;
ring              514 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_pending = interface->rx_ring_count;
ring              515 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->tx_pending = interface->tx_ring_count;
ring              516 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_mini_pending = 0;
ring              517 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	ring->rx_jumbo_pending = 0;
ring              521 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 			       struct ethtool_ringparam *ring)
ring              528 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              531 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	new_tx_count = clamp_t(u32, ring->tx_pending,
ring              535 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	new_rx_count = clamp_t(u32, ring->rx_pending,
ring              359 drivers/net/ethernet/intel/fm10k/fm10k_main.c static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
ring              366 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
ring              375 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->rx_stats.csum_err++;
ring              387 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	ring->rx_stats.csum_good++;
ring              396 drivers/net/ethernet/intel/fm10k/fm10k_main.c static inline void fm10k_rx_hash(struct fm10k_ring *ring,
ring              402 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	if (!(ring->netdev->features & NETIF_F_RXHASH))
ring             1113 drivers/net/ethernet/intel/fm10k/fm10k_main.c static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
ring             1115 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return ring->stats.packets;
ring             1123 drivers/net/ethernet/intel/fm10k/fm10k_main.c u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
ring             1125 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct fm10k_intfc *interface = ring->q_vector->interface;
ring             1130 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		head = ring->next_to_clean;
ring             1131 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tail = ring->next_to_use;
ring             1133 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
ring             1134 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
ring             1137 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	return ((head <= tail) ? tail : tail + ring->count) - head;
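
fm10k_get_tx_pending() above measures in-flight descriptors either from the software indices or from the hardware head/tail registers, but the wraparound arithmetic is the same in both cases: if the tail has wrapped past the ring end, add the ring size before subtracting the head. A sketch with a hypothetical helper:

    #include <assert.h>

    static unsigned int tx_pending(unsigned int head, unsigned int tail,
                                   unsigned int count)
    {
        return ((head <= tail) ? tail : tail + count) - head;
    }

    int main(void)
    {
        assert(tx_pending(3, 7, 512) == 4);     /* no wrap */
        assert(tx_pending(500, 4, 512) == 16);  /* tail wrapped */
        return 0;
    }
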
ring             1435 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct fm10k_ring *ring;
ring             1439 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_for_each_ring(ring, q_vector->tx) {
ring             1440 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		if (!fm10k_clean_tx_irq(q_vector, ring, budget))
ring             1456 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_for_each_ring(ring, q_vector->rx) {
ring             1457 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
ring             1605 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct fm10k_ring *ring;
ring             1611 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL);
ring             1625 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	ring = q_vector->ring;
ring             1628 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	q_vector->tx.ring = ring;
ring             1636 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->dev = &interface->pdev->dev;
ring             1637 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->netdev = interface->netdev;
ring             1640 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->q_vector = q_vector;
ring             1643 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->count = interface->tx_ring_count;
ring             1644 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->queue_index = txr_idx;
ring             1647 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->tx_ring[txr_idx] = ring;
ring             1654 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring++;
ring             1658 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	q_vector->rx.ring = ring;
ring             1665 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->dev = &interface->pdev->dev;
ring             1666 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->netdev = interface->netdev;
ring             1667 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
ring             1670 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->q_vector = q_vector;
ring             1673 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->count = interface->rx_ring_count;
ring             1674 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring->queue_index = rxr_idx;
ring             1677 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->rx_ring[rxr_idx] = ring;
ring             1684 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		ring++;
ring             1704 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	struct fm10k_ring *ring;
ring             1708 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_for_each_ring(ring, q_vector->tx)
ring             1709 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->tx_ring[ring->queue_index] = NULL;
ring             1711 drivers/net/ethernet/intel/fm10k/fm10k_main.c 	fm10k_for_each_ring(ring, q_vector->rx)
ring             1712 drivers/net/ethernet/intel/fm10k/fm10k_main.c 		interface->rx_ring[ring->queue_index] = NULL;
ring              144 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
ring              150 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			dma_unmap_single(ring->dev,
ring              155 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		dma_unmap_page(ring->dev,
ring             1332 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 	struct fm10k_ring *ring;
ring             1339 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		ring = READ_ONCE(interface->rx_ring[i]);
ring             1341 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (!ring)
ring             1345 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1346 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			packets = ring->stats.packets;
ring             1347 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			bytes   = ring->stats.bytes;
ring             1348 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             1355 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		ring = READ_ONCE(interface->tx_ring[i]);
ring             1357 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		if (!ring)
ring             1361 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1362 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			packets = ring->stats.packets;
ring             1363 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 			bytes   = ring->stats.bytes;
ring             1364 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
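The fm10k_netdev.c stats loops above snapshot 64-bit packet/byte counters under u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq(), retrying until the reader sees a consistent pair. A simplified seqcount-style analogue (struct ring_stats, stats_update() and stats_read() are made-up names, and the sketch deliberately ignores the memory-ordering details the kernel primitives handle):

#include <stdatomic.h>
#include <stdint.h>

struct ring_stats {
	atomic_uint seq;		/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

/* Writer: bracket the update so seq is odd while values may be torn. */
static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);
	s->packets += pkts;
	s->bytes += len;
	atomic_fetch_add(&s->seq, 1);
}

/* Reader: retry until seq was even and unchanged across the copy. */
static void stats_read(struct ring_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		while ((start = atomic_load(&s->seq)) & 1)
			;			/* writer active, spin */
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (atomic_load(&s->seq) != start);
}

int main(void)
{
	struct ring_stats s = { 0 };
	uint64_t p, b;

	stats_update(&s, 1, 64);
	stats_read(&s, &p, &b);
	return !(p == 1 && b == 64);
}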
ring             1449 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		struct fm10k_ring *ring = interface->rx_ring[i];
ring             1451 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 		rcu_assign_pointer(ring->l2_accel, l2_accel);
ring              868 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 				    struct fm10k_ring *ring)
ring              871 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u64 tdba = ring->dma;
ring              872 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u32 size = ring->count * sizeof(struct fm10k_tx_desc);
ring              875 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u8 reg_idx = ring->reg_idx;
ring              893 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)];
ring              896 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->next_to_clean = 0;
ring              897 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->next_to_use = 0;
ring              900 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	if (ring->q_vector) {
ring              901 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		txint = ring->q_vector->v_idx + NON_Q_VECTORS;
ring              912 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, ring->state) &&
ring              913 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	    ring->q_vector)
ring              914 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		netif_set_xps_queue(ring->netdev,
ring              915 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 				    &ring->q_vector->affinity_mask,
ring              916 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 				    ring->queue_index);
ring              930 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 				 struct fm10k_ring *ring)
ring              935 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u8 reg_idx = ring->reg_idx;
ring              978 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 				    struct fm10k_ring *ring)
ring              980 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u64 rdba = ring->dma;
ring              982 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u32 size = ring->count * sizeof(union fm10k_rx_desc);
ring              987 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	u8 reg_idx = ring->reg_idx;
ring             1007 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)];
ring             1010 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->next_to_clean = 0;
ring             1011 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->next_to_use = 0;
ring             1012 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->next_to_alloc = 0;
ring             1026 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	if (!(rx_pause & BIT(ring->qos_pc)))
ring             1032 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	ring->vid = hw->mac.default_vid;
ring             1036 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		ring->vid |= FM10K_VLAN_CLEAR;
ring             1039 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	if (ring->q_vector) {
ring             1040 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
ring             1052 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 	fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring));
ring             1073 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		struct fm10k_ring *ring = interface->rx_ring[i];
ring             1075 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		u8 reg_idx = ring->reg_idx;
ring             1077 drivers/net/ethernet/intel/fm10k/fm10k_pci.c 		if (!(rx_pause & BIT(ring->qos_pc)))
ring              448 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	struct i40e_adminq_ring *ring;
ring              459 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	ring = &(hw->aq.asq);
ring              460 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	for (i = 0; i < ring->count; i++) {
ring              461 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
ring              472 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	ring = &(hw->aq.arq);
ring              473 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	for (i = 0; i < ring->count; i++) {
ring              474 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
ring              499 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	struct i40e_ring *ring;
ring              519 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	ring = kmemdup(is_rx_ring
ring              521 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		       sizeof(*ring), GFP_KERNEL);
ring              522 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	if (!ring)
ring              528 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		for (i = 0; i < ring->count; i++) {
ring              530 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 				txd = I40E_TX_DESC(ring, i);
ring              536 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 				rxd = I40E_RX_DESC(ring, i);
ring              545 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		if (desc_n >= ring->count || desc_n < 0) {
ring              551 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			txd = I40E_TX_DESC(ring, desc_n);
ring              557 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			rxd = I40E_RX_DESC(ring, desc_n);
ring              569 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 	kfree(ring);
ring              166 drivers/net/ethernet/intel/i40e/i40e_ethtool.c i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
ring              179 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
ring              181 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			i40e_add_one_ethtool_stat(&(*data)[i], ring,
ring              184 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             1908 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			       struct ethtool_ringparam *ring)
ring             1914 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring             1915 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring             1916 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_mini_max_pending = 0;
ring             1917 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_jumbo_max_pending = 0;
ring             1918 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_pending = vsi->rx_rings[0]->count;
ring             1919 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->tx_pending = vsi->tx_rings[0]->count;
ring             1920 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_mini_pending = 0;
ring             1921 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	ring->rx_jumbo_pending = 0;
ring             1936 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			      struct ethtool_ringparam *ring)
ring             1948 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring             1951 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
ring             1952 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
ring             1953 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
ring             1954 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
ring             1957 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			    ring->tx_pending, ring->rx_pending,
ring             1962 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
ring             1963 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
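i40e_set_ringparam above rejects out-of-range descriptor counts and then rounds the accepted values up to I40E_REQ_DESCRIPTOR_MULTIPLE with ALIGN(). A standalone sketch of that validate-then-align step; the MIN/MAX/MULTIPLE constants are stand-ins, not the driver's actual limits:

#include <stdio.h>

#define DESC_MIN	64
#define DESC_MAX	4096
#define DESC_MULTIPLE	32	/* must be a power of two */

/* Same arithmetic as the kernel's ALIGN(): round x up to a multiple of a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int validate_ring_size(unsigned int requested, unsigned int *out)
{
	if (requested < DESC_MIN || requested > DESC_MAX)
		return -1;		/* caller reports -EINVAL */
	*out = ALIGN_UP(requested, DESC_MULTIPLE);
	return 0;
}

int main(void)
{
	unsigned int n;

	if (!validate_ring_size(1000, &n))
		printf("1000 -> %u descriptors\n", n);	/* prints 1024 */
	return 0;
}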
ring             4492 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
ring             4496 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			if (ring >= vsi->num_queue_pairs)
ring             4505 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 			if (ring >= pf->vf[vf].num_queue_pairs)
ring             4510 drivers/net/ethernet/intel/i40e/i40e_ethtool.c 		q_index = ring;
ring              411 drivers/net/ethernet/intel/i40e/i40e_main.c static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
ring              418 drivers/net/ethernet/intel/i40e/i40e_main.c 		start = u64_stats_fetch_begin_irq(&ring->syncp);
ring              419 drivers/net/ethernet/intel/i40e/i40e_main.c 		packets = ring->stats.packets;
ring              420 drivers/net/ethernet/intel/i40e/i40e_main.c 		bytes   = ring->stats.bytes;
ring              421 drivers/net/ethernet/intel/i40e/i40e_main.c 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring              441 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *ring;
ring              455 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring = READ_ONCE(vsi->tx_rings[i]);
ring              456 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (!ring)
ring              458 drivers/net/ethernet/intel/i40e/i40e_main.c 		i40e_get_netdev_stats_struct_tx(ring, stats);
ring              461 drivers/net/ethernet/intel/i40e/i40e_main.c 			ring++;
ring              462 drivers/net/ethernet/intel/i40e/i40e_main.c 			i40e_get_netdev_stats_struct_tx(ring, stats);
ring              465 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring++;
ring              467 drivers/net/ethernet/intel/i40e/i40e_main.c 			start   = u64_stats_fetch_begin_irq(&ring->syncp);
ring              468 drivers/net/ethernet/intel/i40e/i40e_main.c 			packets = ring->stats.packets;
ring              469 drivers/net/ethernet/intel/i40e/i40e_main.c 			bytes   = ring->stats.bytes;
ring              470 drivers/net/ethernet/intel/i40e/i40e_main.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             3089 drivers/net/ethernet/intel/i40e/i40e_main.c static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
ring             3093 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!ring->q_vector || !ring->netdev || ring->ch)
ring             3097 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
ring             3100 drivers/net/ethernet/intel/i40e/i40e_main.c 	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
ring             3101 drivers/net/ethernet/intel/i40e/i40e_main.c 	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
ring             3102 drivers/net/ethernet/intel/i40e/i40e_main.c 			    ring->queue_index);
ring             3111 drivers/net/ethernet/intel/i40e/i40e_main.c static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
ring             3113 drivers/net/ethernet/intel/i40e/i40e_main.c 	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
ring             3114 drivers/net/ethernet/intel/i40e/i40e_main.c 	int qid = ring->queue_index;
ring             3116 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring_is_xdp(ring))
ring             3117 drivers/net/ethernet/intel/i40e/i40e_main.c 		qid -= ring->vsi->alloc_queue_pairs;
ring             3119 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
ring             3122 drivers/net/ethernet/intel/i40e/i40e_main.c 	return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
ring             3131 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_configure_tx_ring(struct i40e_ring *ring)
ring             3133 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_vsi *vsi = ring->vsi;
ring             3134 drivers/net/ethernet/intel/i40e/i40e_main.c 	u16 pf_q = vsi->base_queue + ring->queue_index;
ring             3140 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring_is_xdp(ring))
ring             3141 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->xsk_umem = i40e_xsk_umem(ring);
ring             3145 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring             3146 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->atr_count = 0;
ring             3148 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->atr_sample_rate = 0;
ring             3152 drivers/net/ethernet/intel/i40e/i40e_main.c 	i40e_config_xps_tx_ring(ring);
ring             3158 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ctx.base = (ring->dma / 128);
ring             3159 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ctx.qlen = ring->count;
ring             3166 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ctx.head_wb_addr = ring->dma +
ring             3167 drivers/net/ethernet/intel/i40e/i40e_main.c 			      (ring->count * sizeof(struct i40e_tx_desc));
ring             3180 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring->ch)
ring             3182 drivers/net/ethernet/intel/i40e/i40e_main.c 			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
ring             3185 drivers/net/ethernet/intel/i40e/i40e_main.c 		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
ring             3194 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index, pf_q, err);
ring             3203 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index, pf_q, err);
ring             3208 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring->ch) {
ring             3209 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (ring->ch->type == I40E_VSI_VMDQ2)
ring             3214 drivers/net/ethernet/intel/i40e/i40e_main.c 		qtx_ctl |= (ring->ch->vsi_number <<
ring             3233 drivers/net/ethernet/intel/i40e/i40e_main.c 	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
ring             3244 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring             3246 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_vsi *vsi = ring->vsi;
ring             3248 drivers/net/ethernet/intel/i40e/i40e_main.c 	u16 pf_q = vsi->base_queue + ring->queue_index;
ring             3255 drivers/net/ethernet/intel/i40e/i40e_main.c 	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
ring             3260 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring->vsi->type == I40E_VSI_MAIN)
ring             3261 drivers/net/ethernet/intel/i40e/i40e_main.c 		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring             3263 drivers/net/ethernet/intel/i40e/i40e_main.c 	ring->xsk_umem = i40e_xsk_umem(ring);
ring             3264 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (ring->xsk_umem) {
ring             3265 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
ring             3272 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->zca.free = i40e_zca_free;
ring             3273 drivers/net/ethernet/intel/i40e/i40e_main.c 		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
ring             3275 drivers/net/ethernet/intel/i40e/i40e_main.c 						 &ring->zca);
ring             3280 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index);
ring             3283 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->rx_buf_len = vsi->rx_buf_len;
ring             3284 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (ring->vsi->type == I40E_VSI_MAIN) {
ring             3285 drivers/net/ethernet/intel/i40e/i40e_main.c 			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
ring             3293 drivers/net/ethernet/intel/i40e/i40e_main.c 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
ring             3296 drivers/net/ethernet/intel/i40e/i40e_main.c 	rx_ctx.base = (ring->dma / 128);
ring             3297 drivers/net/ethernet/intel/i40e/i40e_main.c 	rx_ctx.qlen = ring->count;
ring             3307 drivers/net/ethernet/intel/i40e/i40e_main.c 	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
ring             3324 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index, pf_q, err);
ring             3333 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index, pf_q, err);
ring             3339 drivers/net/ethernet/intel/i40e/i40e_main.c 		clear_ring_build_skb_enabled(ring);
ring             3341 drivers/net/ethernet/intel/i40e/i40e_main.c 		set_ring_build_skb_enabled(ring);
ring             3344 drivers/net/ethernet/intel/i40e/i40e_main.c 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
ring             3345 drivers/net/ethernet/intel/i40e/i40e_main.c 	writel(0, ring->tail);
ring             3347 drivers/net/ethernet/intel/i40e/i40e_main.c 	ok = ring->xsk_umem ?
ring             3348 drivers/net/ethernet/intel/i40e/i40e_main.c 	     i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
ring             3349 drivers/net/ethernet/intel/i40e/i40e_main.c 	     !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
ring             3356 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->xsk_umem ? "UMEM enabled " : "",
ring             3357 drivers/net/ethernet/intel/i40e/i40e_main.c 			 ring->queue_index, pf_q);
ring             3725 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!q_vector->tx.ring && !q_vector->rx.ring)
ring             3783 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (q_vector->tx.ring && q_vector->rx.ring) {
ring             3787 drivers/net/ethernet/intel/i40e/i40e_main.c 		} else if (q_vector->rx.ring) {
ring             3790 drivers/net/ethernet/intel/i40e/i40e_main.c 		} else if (q_vector->tx.ring) {
ring             4146 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (!q_vector->tx.ring)
ring             4149 drivers/net/ethernet/intel/i40e/i40e_main.c 	vsi = q_vector->tx.ring->vsi;
ring             4150 drivers/net/ethernet/intel/i40e/i40e_main.c 	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
ring             4168 drivers/net/ethernet/intel/i40e/i40e_main.c 	tx_ring->next = q_vector->tx.ring;
ring             4169 drivers/net/ethernet/intel/i40e/i40e_main.c 	q_vector->tx.ring = tx_ring;
ring             4177 drivers/net/ethernet/intel/i40e/i40e_main.c 		xdp_ring->next = q_vector->tx.ring;
ring             4178 drivers/net/ethernet/intel/i40e/i40e_main.c 		q_vector->tx.ring = xdp_ring;
ring             4183 drivers/net/ethernet/intel/i40e/i40e_main.c 	rx_ring->next = q_vector->rx.ring;
ring             4184 drivers/net/ethernet/intel/i40e/i40e_main.c 	q_vector->rx.ring = rx_ring;
ring             4222 drivers/net/ethernet/intel/i40e/i40e_main.c 		q_vector->rx.ring = NULL;
ring             4223 drivers/net/ethernet/intel/i40e/i40e_main.c 		q_vector->tx.ring = NULL;
ring             4727 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *ring;
ring             4733 drivers/net/ethernet/intel/i40e/i40e_main.c 	i40e_for_each_ring(ring, q_vector->tx)
ring             4734 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->q_vector = NULL;
ring             4736 drivers/net/ethernet/intel/i40e/i40e_main.c 	i40e_for_each_ring(ring, q_vector->rx)
ring             4737 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->q_vector = NULL;
ring             4819 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (q_vector->rx.ring || q_vector->tx.ring)
ring             4838 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (q_vector->rx.ring || q_vector->tx.ring)
ring             10835 drivers/net/ethernet/intel/i40e/i40e_main.c 	struct i40e_ring *ring;
ring             10840 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
ring             10841 drivers/net/ethernet/intel/i40e/i40e_main.c 		if (!ring)
ring             10844 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->queue_index = i;
ring             10845 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->reg_idx = vsi->base_queue + i;
ring             10846 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->ring_active = false;
ring             10847 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->vsi = vsi;
ring             10848 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->netdev = vsi->netdev;
ring             10849 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dev = &pf->pdev->dev;
ring             10850 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->count = vsi->num_tx_desc;
ring             10851 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->size = 0;
ring             10852 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dcb_tc = 0;
ring             10854 drivers/net/ethernet/intel/i40e/i40e_main.c 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring             10855 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->itr_setting = pf->tx_itr_default;
ring             10856 drivers/net/ethernet/intel/i40e/i40e_main.c 		vsi->tx_rings[i] = ring++;
ring             10861 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->queue_index = vsi->alloc_queue_pairs + i;
ring             10862 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->reg_idx = vsi->base_queue + ring->queue_index;
ring             10863 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->ring_active = false;
ring             10864 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->vsi = vsi;
ring             10865 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->netdev = NULL;
ring             10866 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dev = &pf->pdev->dev;
ring             10867 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->count = vsi->num_tx_desc;
ring             10868 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->size = 0;
ring             10869 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dcb_tc = 0;
ring             10871 drivers/net/ethernet/intel/i40e/i40e_main.c 			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring             10872 drivers/net/ethernet/intel/i40e/i40e_main.c 		set_ring_xdp(ring);
ring             10873 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->itr_setting = pf->tx_itr_default;
ring             10874 drivers/net/ethernet/intel/i40e/i40e_main.c 		vsi->xdp_rings[i] = ring++;
ring             10877 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->queue_index = i;
ring             10878 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->reg_idx = vsi->base_queue + i;
ring             10879 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->ring_active = false;
ring             10880 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->vsi = vsi;
ring             10881 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->netdev = vsi->netdev;
ring             10882 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dev = &pf->pdev->dev;
ring             10883 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->count = vsi->num_rx_desc;
ring             10884 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->size = 0;
ring             10885 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->dcb_tc = 0;
ring             10886 drivers/net/ethernet/intel/i40e/i40e_main.c 		ring->itr_setting = pf->rx_itr_default;
ring             10887 drivers/net/ethernet/intel/i40e/i40e_main.c 		vsi->rx_rings[i] = ring;
ring             12638 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (q_vector->rx.ring || q_vector->tx.ring) {
ring               62 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring               66 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, buf),
ring               76 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__field(void*, ring)
ring               79 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__string(devname, ring->netdev->name)
ring               83 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->ring = ring;
ring               86 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__assign_str(devname, ring->netdev->name);
ring               91 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__get_str(devname), __entry->ring,
ring               97 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring              101 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, buf));
ring              105 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring              109 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, buf));
ring              114 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring              118 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb),
ring              121 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__field(void*, ring)
ring              124 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__string(devname, ring->netdev->name)
ring              128 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->ring = ring;
ring              131 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__assign_str(devname, ring->netdev->name);
ring              136 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__get_str(devname), __entry->ring,
ring              142 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring              146 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb));
ring              150 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_PROTO(struct i40e_ring *ring,
ring              154 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(ring, desc, skb));
ring              160 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct i40e_ring *ring),
ring              162 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring),
ring              166 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__field(void*, ring)
ring              167 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__string(devname, ring->netdev->name)
ring              172 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->ring = ring;
ring              173 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__assign_str(devname, ring->netdev->name);
ring              179 drivers/net/ethernet/intel/i40e/i40e_trace.h 		__entry->ring)
ring              185 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct i40e_ring *ring),
ring              187 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring));
ring              192 drivers/net/ethernet/intel/i40e/i40e_trace.h 		 struct i40e_ring *ring),
ring              194 drivers/net/ethernet/intel/i40e/i40e_trace.h 	TP_ARGS(skb, ring));
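The i40e_trace.h hits above follow the tracepoint pattern in which DECLARE_EVENT_CLASS() defines one shared template (fields, assignment, print format) and DEFINE_EVENT() stamps out named events that reuse it. A toy userspace analogue of that class/instance macro shape; it only mimics the structure, not the real TRACE_EVENT machinery:

#include <stdio.h>

/* One macro emits the shared body, another creates named instances. */
#define DECLARE_EVENT_CLASS(class, proto, body)			\
	static void class##_template proto body

#define DEFINE_EVENT(class, name, proto, args)			\
	static void trace_##name proto				\
	{ class##_template args; }

DECLARE_EVENT_CLASS(ring_event,
	(const char *dev, void *ring, int qid),
	{ printf("%s: ring %p queue %d\n", dev, ring, qid); })

DEFINE_EVENT(ring_event, rx_packet,
	(const char *dev, void *ring, int qid), (dev, ring, qid))
DEFINE_EVENT(ring_event, tx_packet,
	(const char *dev, void *ring, int qid), (dev, ring, qid))

int main(void)
{
	int dummy;

	trace_rx_packet("eth0", &dummy, 0);
	trace_tx_packet("eth0", &dummy, 1);
	return 0;
}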
ring              601 drivers/net/ethernet/intel/i40e/i40e_txrx.c static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
ring              607 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		else if (ring_is_xdp(ring))
ring              612 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			dma_unmap_single(ring->dev,
ring              617 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		dma_unmap_page(ring->dev,
ring              694 drivers/net/ethernet/intel/i40e/i40e_txrx.c u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
ring              699 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		head = i40e_get_head(ring);
ring              700 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tail = readl(ring->tail);
ring              702 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		head = ring->next_to_clean;
ring              703 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		tail = ring->next_to_use;
ring              708 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			tail - head : (tail + ring->count - head);
ring              908 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	u16 flags = q_vector->tx.ring[0].flags;
ring             1014 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
ring             1747 drivers/net/ethernet/intel/i40e/i40e_txrx.c static inline void i40e_rx_hash(struct i40e_ring *ring,
ring             1757 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	if (!(ring->netdev->features & NETIF_F_RXHASH))
ring             2573 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	struct i40e_ring *ring;
ring             2587 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_for_each_ring(ring, q_vector->tx) {
ring             2588 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		bool wd = ring->xsk_umem ?
ring             2589 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			  i40e_clean_xdp_tx_irq(vsi, ring, budget) :
ring             2590 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			  i40e_clean_tx_irq(vsi, ring, budget);
ring             2596 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		arm_wb |= ring->arm_wb;
ring             2597 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		ring->arm_wb = false;
ring             2609 drivers/net/ethernet/intel/i40e/i40e_txrx.c 	i40e_for_each_ring(ring, q_vector->rx) {
ring             2610 drivers/net/ethernet/intel/i40e/i40e_txrx.c 		int cleaned = ring->xsk_umem ?
ring             2611 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			      i40e_clean_rx_irq_zc(ring, budget_per_ring) :
ring             2612 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			      i40e_clean_rx_irq(ring, budget_per_ring);
ring             2643 drivers/net/ethernet/intel/i40e/i40e_txrx.c 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
ring              425 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline bool ring_uses_build_skb(struct i40e_ring *ring)
ring              427 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
ring              430 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
ring              432 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
ring              435 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
ring              437 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
ring              440 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline bool ring_is_xdp(struct i40e_ring *ring)
ring              442 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
ring              445 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline void set_ring_xdp(struct i40e_ring *ring)
ring              447 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	ring->flags |= I40E_TXR_FLAGS_XDP;
ring              458 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
ring              469 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	for (pos = (head).ring; pos != NULL; pos = pos->next)
ring              471 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
ring              474 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	if (ring->rx_buf_len > (PAGE_SIZE / 2))
ring              492 drivers/net/ethernet/intel/i40e/i40e_txrx.h u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
ring              580 drivers/net/ethernet/intel/i40e/i40e_txrx.h static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
ring              582 drivers/net/ethernet/intel/i40e/i40e_txrx.h 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
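The i40e_txrx.h container above heads a singly linked list of rings on each q_vector's rx/tx side, and i40e_for_each_ring() walks it through the intrusive next pointer. A compact sketch of the same intrusive-list iteration (all names illustrative):

#include <stdio.h>
#include <stddef.h>

struct ring {
	int queue_index;
	struct ring *next;	/* intrusive link, as in struct i40e_ring */
};

struct ring_container {
	struct ring *ring;	/* head of the linked list of ring(s) */
};

/* Same shape as i40e_for_each_ring(pos, head). */
#define for_each_ring(pos, head) \
	for ((pos) = (head).ring; (pos) != NULL; (pos) = (pos)->next)

int main(void)
{
	struct ring r1 = { .queue_index = 1, .next = NULL };
	struct ring r0 = { .queue_index = 0, .next = &r1 };
	struct ring_container tx = { .ring = &r0 };
	struct ring *pos;

	for_each_ring(pos, tx)
		printf("servicing tx queue %d\n", pos->queue_index);
	return 0;
}

New rings are pushed at the head (ring->next = head; head = ring), which matches the i40e_main.c and iavf_main.c map-vector hits earlier in this listing.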
ring              791 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	struct i40e_ring *ring;
ring              808 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	ring = vsi->xdp_rings[queue_id];
ring              816 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
ring              817 drivers/net/ethernet/intel/i40e/i40e_xsk.c 		i40e_force_wb(vsi, ring->q_vector);
ring              159 drivers/net/ethernet/intel/iavf/iavf_ethtool.c iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
ring              172 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
ring              174 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
ring              175 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
ring              350 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		struct iavf_ring *ring;
ring              353 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		ring = (i < adapter->num_active_queues ?
ring              355 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		iavf_add_queue_stats(&data, ring);
ring              358 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		ring = (i < adapter->num_active_queues ?
ring              360 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 		iavf_add_queue_stats(&data, ring);
ring              578 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 			       struct ethtool_ringparam *ring)
ring              582 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	ring->rx_max_pending = IAVF_MAX_RXD;
ring              583 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	ring->tx_max_pending = IAVF_MAX_TXD;
ring              584 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	ring->rx_pending = adapter->rx_desc_count;
ring              585 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	ring->tx_pending = adapter->tx_desc_count;
ring              597 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 			      struct ethtool_ringparam *ring)
ring              602 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              605 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	new_tx_count = clamp_t(u32, ring->tx_pending,
ring              610 drivers/net/ethernet/intel/iavf/iavf_ethtool.c 	new_rx_count = clamp_t(u32, ring->rx_pending,
ring              287 drivers/net/ethernet/intel/iavf/iavf_main.c 	if (!q_vector->tx.ring && !q_vector->rx.ring)
ring              309 drivers/net/ethernet/intel/iavf/iavf_main.c 	rx_ring->next = q_vector->rx.ring;
ring              311 drivers/net/ethernet/intel/iavf/iavf_main.c 	q_vector->rx.ring = rx_ring;
ring              335 drivers/net/ethernet/intel/iavf/iavf_main.c 	tx_ring->next = q_vector->tx.ring;
ring              337 drivers/net/ethernet/intel/iavf/iavf_main.c 	q_vector->tx.ring = tx_ring;
ring              431 drivers/net/ethernet/intel/iavf/iavf_main.c 		if (q_vector->tx.ring && q_vector->rx.ring) {
ring              435 drivers/net/ethernet/intel/iavf/iavf_main.c 		} else if (q_vector->rx.ring) {
ring              438 drivers/net/ethernet/intel/iavf/iavf_main.c 		} else if (q_vector->tx.ring) {
ring              937 drivers/net/ethernet/intel/iavf/iavf_main.c 		struct iavf_ring *ring = &adapter->rx_rings[i];
ring              939 drivers/net/ethernet/intel/iavf/iavf_main.c 		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
ring               62 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring               66 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, buf),
ring               76 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__field(void*, ring)
ring               79 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__string(devname, ring->netdev->name)
ring               83 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->ring = ring;
ring               86 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__assign_str(devname, ring->netdev->name);
ring               91 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__get_str(devname), __entry->ring,
ring               97 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring              101 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, buf));
ring              105 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring              109 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, buf));
ring              114 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring              118 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb),
ring              121 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__field(void*, ring)
ring              124 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__string(devname, ring->netdev->name)
ring              128 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->ring = ring;
ring              131 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__assign_str(devname, ring->netdev->name);
ring              136 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__get_str(devname), __entry->ring,
ring              142 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring              146 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb));
ring              150 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_PROTO(struct iavf_ring *ring,
ring              154 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(ring, desc, skb));
ring              160 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct iavf_ring *ring),
ring              162 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring),
ring              166 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__field(void*, ring)
ring              167 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__string(devname, ring->netdev->name)
ring              172 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->ring = ring;
ring              173 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__assign_str(devname, ring->netdev->name);
ring              179 drivers/net/ethernet/intel/iavf/iavf_trace.h 		__entry->ring)
ring              185 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct iavf_ring *ring),
ring              187 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring));
ring              192 drivers/net/ethernet/intel/iavf/iavf_trace.h 		 struct iavf_ring *ring),
ring              194 drivers/net/ethernet/intel/iavf/iavf_trace.h 	TP_ARGS(skb, ring));
ring               27 drivers/net/ethernet/intel/iavf/iavf_txrx.c static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
ring               36 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			dma_unmap_single(ring->dev,
ring               41 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		dma_unmap_page(ring->dev,
ring              113 drivers/net/ethernet/intel/iavf/iavf_txrx.c u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
ring              117 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	head = ring->next_to_clean;
ring              118 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	tail = readl(ring->tail);
ring              122 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			tail - head : (tail + ring->count - head);
ring              335 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	u16 flags = q_vector->tx.ring[0].flags;
ring              424 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
ring             1051 drivers/net/ethernet/intel/iavf/iavf_txrx.c static inline void iavf_rx_hash(struct iavf_ring *ring,
ring             1061 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	if (ring->netdev->features & NETIF_F_RXHASH)
ring             1708 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	struct iavf_ring *ring;
ring             1722 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_for_each_ring(ring, q_vector->tx) {
ring             1723 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		if (!iavf_clean_tx_irq(vsi, ring, budget)) {
ring             1727 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		arm_wb |= ring->arm_wb;
ring             1728 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		ring->arm_wb = false;
ring             1740 drivers/net/ethernet/intel/iavf/iavf_txrx.c 	iavf_for_each_ring(ring, q_vector->rx) {
ring             1741 drivers/net/ethernet/intel/iavf/iavf_txrx.c 		int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
ring             1772 drivers/net/ethernet/intel/iavf/iavf_txrx.c 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
ring              392 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline bool ring_uses_build_skb(struct iavf_ring *ring)
ring              394 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
ring              397 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
ring              399 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
ring              402 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
ring              404 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
ring              415 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	struct iavf_ring *ring;		/* pointer to linked list of ring(s) */
ring              426 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	for (pos = (head).ring; pos != NULL; pos = pos->next)
ring              428 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
ring              431 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	if (ring->rx_buf_len > (PAGE_SIZE / 2))
ring              449 drivers/net/ethernet/intel/iavf/iavf_txrx.h u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
ring              519 drivers/net/ethernet/intel/iavf/iavf_txrx.h static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
ring              521 drivers/net/ethernet/intel/iavf/iavf_txrx.h 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
ring              128 drivers/net/ethernet/intel/ice/ice_controlq.c static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
ring              130 drivers/net/ethernet/intel/ice/ice_controlq.c 	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
ring              131 drivers/net/ethernet/intel/ice/ice_controlq.c 			   ring->desc_buf.va, ring->desc_buf.pa);
ring              132 drivers/net/ethernet/intel/ice/ice_controlq.c 	ring->desc_buf.va = NULL;
ring              133 drivers/net/ethernet/intel/ice/ice_controlq.c 	ring->desc_buf.pa = 0;
ring              134 drivers/net/ethernet/intel/ice/ice_controlq.c 	ring->desc_buf.size = 0;
ring              254 drivers/net/ethernet/intel/ice/ice_controlq.c ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
ring              257 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, ring->head, 0);
ring              258 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, ring->tail, 0);
ring              261 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
ring              262 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
ring              263 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
ring              266 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
ring              425 drivers/net/ethernet/intel/ice/ice_controlq.c #define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
ring              429 drivers/net/ethernet/intel/ice/ice_controlq.c 	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
ring              430 drivers/net/ethernet/intel/ice/ice_controlq.c 		if ((qi)->ring.r.ring##_bi[i].pa) {			\
ring              432 drivers/net/ethernet/intel/ice/ice_controlq.c 					   (qi)->ring.r.ring##_bi[i].size,\
ring              433 drivers/net/ethernet/intel/ice/ice_controlq.c 					   (qi)->ring.r.ring##_bi[i].va,\
ring              434 drivers/net/ethernet/intel/ice/ice_controlq.c 					   (qi)->ring.r.ring##_bi[i].pa);\
ring              435 drivers/net/ethernet/intel/ice/ice_controlq.c 			(qi)->ring.r.ring##_bi[i].va = NULL;		\
ring              436 drivers/net/ethernet/intel/ice/ice_controlq.c 			(qi)->ring.r.ring##_bi[i].pa = 0;		\
ring              437 drivers/net/ethernet/intel/ice/ice_controlq.c 			(qi)->ring.r.ring##_bi[i].size = 0;		\
ring              440 drivers/net/ethernet/intel/ice/ice_controlq.c 	if ((qi)->ring.cmd_buf)						\
ring              441 drivers/net/ethernet/intel/ice/ice_controlq.c 		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
ring              443 drivers/net/ethernet/intel/ice/ice_controlq.c 	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
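ICE_FREE_CQ_BUFS() above builds struct member names from its ring argument with the preprocessor's ## paste operator, so one macro body can free either the send-queue or receive-queue buffers. A tiny demonstration of that token-pasting trick (struct queue_info and QUEUE_DEPTH() are invented for illustration):

#include <stdio.h>

struct queue_info {
	int num_sq_entries;
	int num_rq_entries;
};

/* num_##ring##_entries expands to num_sq_entries or num_rq_entries. */
#define QUEUE_DEPTH(qi, ring)	((qi)->num_##ring##_entries)

int main(void)
{
	struct queue_info qi = { .num_sq_entries = 64, .num_rq_entries = 128 };

	printf("sq=%d rq=%d\n", QUEUE_DEPTH(&qi, sq), QUEUE_DEPTH(&qi, rq));
	return 0;
}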
ring               27 drivers/net/ethernet/intel/ice/ice_dcb_lib.h ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
ring               29 drivers/net/ethernet/intel/ice/ice_dcb_lib.h 	tlan_ctx->cgd_num = ring->dcb_tc;
ring               61 drivers/net/ethernet/intel/ice/ice_dcb_lib.h #define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
ring             1302 drivers/net/ethernet/intel/ice/ice_ethtool.c 	struct ice_ring *ring;
ring             1320 drivers/net/ethernet/intel/ice/ice_ethtool.c 		ring = READ_ONCE(vsi->tx_rings[j]);
ring             1321 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (ring) {
ring             1322 drivers/net/ethernet/intel/ice/ice_ethtool.c 			data[i++] = ring->stats.pkts;
ring             1323 drivers/net/ethernet/intel/ice/ice_ethtool.c 			data[i++] = ring->stats.bytes;
ring             1331 drivers/net/ethernet/intel/ice/ice_ethtool.c 		ring = READ_ONCE(vsi->rx_rings[j]);
ring             1332 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (ring) {
ring             1333 drivers/net/ethernet/intel/ice/ice_ethtool.c 			data[i++] = ring->stats.pkts;
ring             1334 drivers/net/ethernet/intel/ice/ice_ethtool.c 			data[i++] = ring->stats.bytes;
ring             2558 drivers/net/ethernet/intel/ice/ice_ethtool.c ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring             2563 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_max_pending = ICE_MAX_NUM_DESC;
ring             2564 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->tx_max_pending = ICE_MAX_NUM_DESC;
ring             2565 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_pending = vsi->rx_rings[0]->count;
ring             2566 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->tx_pending = vsi->tx_rings[0]->count;
ring             2569 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_mini_max_pending = 0;
ring             2570 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_jumbo_max_pending = 0;
ring             2571 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_mini_pending = 0;
ring             2572 drivers/net/ethernet/intel/ice/ice_ethtool.c 	ring->rx_jumbo_pending = 0;
ring             2576 drivers/net/ethernet/intel/ice/ice_ethtool.c ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring             2585 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring             2586 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    ring->tx_pending < ICE_MIN_NUM_DESC ||
ring             2587 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    ring->rx_pending > ICE_MAX_NUM_DESC ||
ring             2588 drivers/net/ethernet/intel/ice/ice_ethtool.c 	    ring->rx_pending < ICE_MIN_NUM_DESC) {
ring             2590 drivers/net/ethernet/intel/ice/ice_ethtool.c 			   ring->tx_pending, ring->rx_pending,
ring             2596 drivers/net/ethernet/intel/ice/ice_ethtool.c 	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
ring             2597 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (new_tx_cnt != ring->tx_pending)
ring             2601 drivers/net/ethernet/intel/ice/ice_ethtool.c 	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
ring             2602 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (new_rx_cnt != ring->rx_pending)
ring             3111 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (!rc->ring)
ring             3114 drivers/net/ethernet/intel/ice/ice_ethtool.c 	pf = rc->ring->vsi->back;
ring             3120 drivers/net/ethernet/intel/ice/ice_ethtool.c 		ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
ring             3229 drivers/net/ethernet/intel/ice/ice_ethtool.c 	if (!rc->ring)
ring             3243 drivers/net/ethernet/intel/ice/ice_ethtool.c 		if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
ring             3244 drivers/net/ethernet/intel/ice/ice_ethtool.c 			rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
ring             3245 drivers/net/ethernet/intel/ice/ice_ethtool.c 			wr32(&pf->hw, GLINT_RATE(rc->ring->q_vector->reg_idx),
ring               14 drivers/net/ethernet/intel/ice/ice_lib.c static int ice_setup_rx_ctx(struct ice_ring *ring)
ring               16 drivers/net/ethernet/intel/ice/ice_lib.c 	struct ice_vsi *vsi = ring->vsi;
ring               25 drivers/net/ethernet/intel/ice/ice_lib.c 	pf_q = vsi->rxq_map[ring->q_index];
ring               30 drivers/net/ethernet/intel/ice/ice_lib.c 	rlan_ctx.base = ring->dma >> 7;
ring               32 drivers/net/ethernet/intel/ice/ice_lib.c 	rlan_ctx.qlen = ring->count;
ring              100 drivers/net/ethernet/intel/ice/ice_lib.c 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
ring              101 drivers/net/ethernet/intel/ice/ice_lib.c 	writel(0, ring->tail);
ring              102 drivers/net/ethernet/intel/ice/ice_lib.c 	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
ring              116 drivers/net/ethernet/intel/ice/ice_lib.c ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
ring              118 drivers/net/ethernet/intel/ice/ice_lib.c 	struct ice_vsi *vsi = ring->vsi;
ring              121 drivers/net/ethernet/intel/ice/ice_lib.c 	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
ring              126 drivers/net/ethernet/intel/ice/ice_lib.c 	tlan_ctx->qlen = ring->count;
ring              128 drivers/net/ethernet/intel/ice/ice_lib.c 	ice_set_cgd_num(tlan_ctx, ring);
ring              523 drivers/net/ethernet/intel/ice/ice_lib.c 	if (!q_vector->tx.ring && !q_vector->rx.ring)
ring             1109 drivers/net/ethernet/intel/ice/ice_lib.c 	struct ice_ring *ring;
ring             1118 drivers/net/ethernet/intel/ice/ice_lib.c 	ice_for_each_ring(ring, q_vector->tx)
ring             1119 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->q_vector = NULL;
ring             1120 drivers/net/ethernet/intel/ice/ice_lib.c 	ice_for_each_ring(ring, q_vector->rx)
ring             1121 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->q_vector = NULL;
ring             1300 drivers/net/ethernet/intel/ice/ice_lib.c 		struct ice_ring *ring;
ring             1303 drivers/net/ethernet/intel/ice/ice_lib.c 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring             1305 drivers/net/ethernet/intel/ice/ice_lib.c 		if (!ring)
ring             1308 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->q_index = i;
ring             1309 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->reg_idx = vsi->txq_map[i];
ring             1310 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->ring_active = false;
ring             1311 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->vsi = vsi;
ring             1312 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->dev = &pf->pdev->dev;
ring             1313 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->count = vsi->num_tx_desc;
ring             1314 drivers/net/ethernet/intel/ice/ice_lib.c 		vsi->tx_rings[i] = ring;
ring             1319 drivers/net/ethernet/intel/ice/ice_lib.c 		struct ice_ring *ring;
ring             1322 drivers/net/ethernet/intel/ice/ice_lib.c 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring             1323 drivers/net/ethernet/intel/ice/ice_lib.c 		if (!ring)
ring             1326 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->q_index = i;
ring             1327 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->reg_idx = vsi->rxq_map[i];
ring             1328 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->ring_active = false;
ring             1329 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->vsi = vsi;
ring             1330 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->netdev = vsi->netdev;
ring             1331 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->dev = &pf->pdev->dev;
ring             1332 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->count = vsi->num_rx_desc;
ring             1333 drivers/net/ethernet/intel/ice/ice_lib.c 		vsi->rx_rings[i] = ring;
ring             1372 drivers/net/ethernet/intel/ice/ice_lib.c 		q_vector->tx.ring = NULL;
ring             1380 drivers/net/ethernet/intel/ice/ice_lib.c 			tx_ring->next = q_vector->tx.ring;
ring             1381 drivers/net/ethernet/intel/ice/ice_lib.c 			q_vector->tx.ring = tx_ring;
ring             1388 drivers/net/ethernet/intel/ice/ice_lib.c 		q_vector->rx.ring = NULL;
ring             1396 drivers/net/ethernet/intel/ice/ice_lib.c 			rx_ring->next = q_vector->rx.ring;
ring             1397 drivers/net/ethernet/intel/ice/ice_lib.c 			q_vector->rx.ring = rx_ring;
ring             1723 drivers/net/ethernet/intel/ice/ice_lib.c ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
ring             1733 drivers/net/ethernet/intel/ice/ice_lib.c 	pf_q = ring->reg_idx;
ring             1734 drivers/net/ethernet/intel/ice/ice_lib.c 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
ring             1743 drivers/net/ethernet/intel/ice/ice_lib.c 	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
ring             1748 drivers/net/ethernet/intel/ice/ice_lib.c 	ring->q_handle = tc_q_idx;
ring             1750 drivers/net/ethernet/intel/ice/ice_lib.c 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
ring             1765 drivers/net/ethernet/intel/ice/ice_lib.c 		ring->txq_teid = le32_to_cpu(txq->q_teid);
ring             2162 drivers/net/ethernet/intel/ice/ice_lib.c 		     u16 rel_vmvf_num, struct ice_ring *ring,
ring             2172 drivers/net/ethernet/intel/ice/ice_lib.c 	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
ring             2174 drivers/net/ethernet/intel/ice/ice_lib.c 	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
ring             2182 drivers/net/ethernet/intel/ice/ice_lib.c 	q_vector = ring->q_vector;
ring             2224 drivers/net/ethernet/intel/ice/ice_lib.c ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
ring             2230 drivers/net/ethernet/intel/ice/ice_lib.c 	tc = ring->dcb_tc;
ring             2232 drivers/net/ethernet/intel/ice/ice_lib.c 	txq_meta->q_id = ring->reg_idx;
ring             2233 drivers/net/ethernet/intel/ice/ice_lib.c 	txq_meta->q_teid = ring->txq_teid;
ring             2234 drivers/net/ethernet/intel/ice/ice_lib.c 	txq_meta->q_handle = ring->q_handle;
ring               45 drivers/net/ethernet/intel/ice/ice_lib.h 		     u16 rel_vmvf_num, struct ice_ring *ring,
ring               48 drivers/net/ethernet/intel/ice/ice_lib.h void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
ring               54 drivers/net/ethernet/intel/ice/ice_main.c static u16 ice_get_tx_pending(struct ice_ring *ring)
ring               58 drivers/net/ethernet/intel/ice/ice_main.c 	head = ring->next_to_clean;
ring               59 drivers/net/ethernet/intel/ice/ice_main.c 	tail = ring->next_to_use;
ring               63 drivers/net/ethernet/intel/ice/ice_main.c 			tail - head : (tail + ring->count - head);
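
ice_get_tx_pending (ice_main.c above) measures outstanding descriptors by comparing next_to_clean (head) against next_to_use (tail) and adding ring->count when the tail has wrapped. A self-contained check of that arithmetic; the head != tail guard is an assumption about the part of the function the listing does not show:

#include <assert.h>

/* Outstanding descriptors between head (next_to_clean) and tail
 * (next_to_use) on a circular ring of 'count' slots. */
static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int count)
{
        if (head == tail)
                return 0;
        return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
        assert(tx_pending(2, 5, 8) == 3);       /* no wrap */
        assert(tx_pending(6, 1, 8) == 3);       /* tail wrapped past end */
        assert(tx_pending(4, 4, 8) == 0);       /* idle ring */
        return 0;
}
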
ring             1617 drivers/net/ethernet/intel/ice/ice_main.c 		if (q_vector->tx.ring && q_vector->rx.ring) {
ring             1621 drivers/net/ethernet/intel/ice/ice_main.c 		} else if (q_vector->rx.ring) {
ring             1624 drivers/net/ethernet/intel/ice/ice_main.c 		} else if (q_vector->tx.ring) {
ring             3528 drivers/net/ethernet/intel/ice/ice_main.c 		if (q_vector->rx.ring || q_vector->tx.ring)
ring             3596 drivers/net/ethernet/intel/ice/ice_main.c ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
ring             3602 drivers/net/ethernet/intel/ice/ice_main.c 	if (!ring)
ring             3605 drivers/net/ethernet/intel/ice/ice_main.c 		start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             3606 drivers/net/ethernet/intel/ice/ice_main.c 		*pkts = ring->stats.pkts;
ring             3607 drivers/net/ethernet/intel/ice/ice_main.c 		*bytes = ring->stats.bytes;
ring             3608 drivers/net/ethernet/intel/ice/ice_main.c 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
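
ice_fetch_u64_stats_per_ring reads the 64-bit packet and byte counters inside a u64_stats_fetch_begin_irq / u64_stats_fetch_retry_irq loop so a 32-bit reader never observes a torn value. A rough userspace approximation of that seqcount protocol, using C11 atomics in place of the kernel's u64_stats_sync (memory ordering is simplified here):

#include <stdatomic.h>
#include <stdint.h>

/* Even sequence = stable, odd = writer in progress. */
struct ring_stats {
        atomic_uint seq;
        uint64_t pkts;
        uint64_t bytes;
};

static void stats_update(struct ring_stats *s, uint64_t p, uint64_t b)
{
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
        s->pkts += p;
        s->bytes += b;
        atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel);
}

/* Reader retries while a writer was active or the counter moved,
 * mirroring the do { fetch_begin ... } while (fetch_retry) loop
 * in the excerpt above. */
static void stats_fetch(struct ring_stats *s, uint64_t *p, uint64_t *b)
{
        unsigned int start;

        do {
                start = atomic_load_explicit(&s->seq, memory_order_acquire);
                *p = s->pkts;
                *b = s->bytes;
        } while ((start & 1) ||
                 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
        static struct ring_stats s;     /* static: zero-initialized */
        uint64_t p, b;

        stats_update(&s, 1, 64);
        stats_fetch(&s, &p, &b);
        return !(p == 1 && b == 64);
}
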
ring             3618 drivers/net/ethernet/intel/ice/ice_main.c 	struct ice_ring *ring;
ring             3639 drivers/net/ethernet/intel/ice/ice_main.c 		ring = READ_ONCE(vsi->tx_rings[i]);
ring             3640 drivers/net/ethernet/intel/ice/ice_main.c 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
ring             3643 drivers/net/ethernet/intel/ice/ice_main.c 		vsi->tx_restart += ring->tx_stats.restart_q;
ring             3644 drivers/net/ethernet/intel/ice/ice_main.c 		vsi->tx_busy += ring->tx_stats.tx_busy;
ring             3645 drivers/net/ethernet/intel/ice/ice_main.c 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
ring             3650 drivers/net/ethernet/intel/ice/ice_main.c 		ring = READ_ONCE(vsi->rx_rings[i]);
ring             3651 drivers/net/ethernet/intel/ice/ice_main.c 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
ring             3654 drivers/net/ethernet/intel/ice/ice_main.c 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
ring             3655 drivers/net/ethernet/intel/ice/ice_main.c 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
ring             3895 drivers/net/ethernet/intel/ice/ice_main.c 		if (q_vector->rx.ring || q_vector->tx.ring)
ring             3973 drivers/net/ethernet/intel/ice/ice_main.c 		struct ice_ring *ring = vsi->tx_rings[i];
ring             3975 drivers/net/ethernet/intel/ice/ice_main.c 		if (!ring)
ring             3978 drivers/net/ethernet/intel/ice/ice_main.c 		ring->netdev = vsi->netdev;
ring             3979 drivers/net/ethernet/intel/ice/ice_main.c 		err = ice_setup_tx_ring(ring);
ring             4004 drivers/net/ethernet/intel/ice/ice_main.c 		struct ice_ring *ring = vsi->rx_rings[i];
ring             4006 drivers/net/ethernet/intel/ice/ice_main.c 		if (!ring)
ring             4009 drivers/net/ethernet/intel/ice/ice_main.c 		ring->netdev = vsi->netdev;
ring             4010 drivers/net/ethernet/intel/ice/ice_main.c 		err = ice_setup_rx_ring(ring);
ring               19 drivers/net/ethernet/intel/ice/ice_txrx.c ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
ring               24 drivers/net/ethernet/intel/ice/ice_txrx.c 			dma_unmap_single(ring->dev,
ring               29 drivers/net/ethernet/intel/ice/ice_txrx.c 		dma_unmap_page(ring->dev,
ring               41 drivers/net/ethernet/intel/ice/ice_txrx.c static struct netdev_queue *txring_txq(const struct ice_ring *ring)
ring               43 drivers/net/ethernet/intel/ice/ice_txrx.c 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
ring              891 drivers/net/ethernet/intel/ice/ice_txrx.c ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
ring              908 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
ring              948 drivers/net/ethernet/intel/ice/ice_txrx.c 	ring->vsi->back->hw_csum_rx_error++;
ring             1199 drivers/net/ethernet/intel/ice/ice_txrx.c 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
ring             1479 drivers/net/ethernet/intel/ice/ice_txrx.c 	struct ice_ring *ring;
ring             1486 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_for_each_ring(ring, q_vector->tx)
ring             1487 drivers/net/ethernet/intel/ice/ice_txrx.c 		if (!ice_clean_tx_irq(ring, budget))
ring             1505 drivers/net/ethernet/intel/ice/ice_txrx.c 	ice_for_each_ring(ring, q_vector->rx) {
ring             1508 drivers/net/ethernet/intel/ice/ice_txrx.c 		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
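
The ice_txrx.c poll excerpt cleans every Tx ring against the full budget, then gives each Rx ring a share via budget_per_ring. How the share is computed falls outside the excerpt, so the even split below is only an assumption; clean_rx is a hypothetical stand-in for ice_clean_rx_irq:

#include <stdio.h>

/* Hypothetical clean function: returns packets processed, never
 * more than the budget it was given. */
static int clean_rx(int ring_id, int budget)
{
        int pending = 2 * ring_id + 1;          /* fake pending work */

        return pending < budget ? pending : budget;
}

int main(void)
{
        int budget = 64, num_rings = 3;
        /* Assumed even split; each ring still gets at least 1. */
        int budget_per_ring = budget / num_rings;
        int work_done = 0;

        if (budget_per_ring < 1)
                budget_per_ring = 1;

        for (int i = 0; i < num_rings; i++)
                work_done += clean_rx(i, budget_per_ring);

        printf("work_done=%d of budget=%d\n", work_done, budget);
        return 0;
}
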
ring              215 drivers/net/ethernet/intel/ice/ice_txrx.h 	struct ice_ring *ring;
ring              231 drivers/net/ethernet/intel/ice/ice_txrx.h 	for (pos = (head).ring; pos; pos = pos->next)
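
ice_txrx.h defines ice_for_each_ring as a bare for loop over the container's list head. A compile-clean sketch of the same macro shape, with hypothetical types:

#include <stdio.h>

struct ring {
        int q_index;
        struct ring *next;
};

struct ring_container {
        struct ring *ring;      /* list head, as in the listing */
};

/* Same shape as the macro above: walk from the head until the
 * next pointer runs out. */
#define for_each_ring(pos, head) \
        for (pos = (head).ring; pos; pos = pos->next)

int main(void)
{
        struct ring r1 = { 1, NULL };
        struct ring r0 = { 0, &r1 };
        struct ring_container tx = { &r0 };
        struct ring *pos;

        for_each_ring(pos, tx)
                printf("ring %d\n", pos->q_index);
        return 0;
}
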
ring             2053 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
ring             2065 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 			ice_fill_txq_meta(vsi, ring, &txq_meta);
ring             2068 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c 						 ring, &txq_meta)) {
ring              240 drivers/net/ethernet/intel/igb/igb.h 	struct igb_ring *ring;		/* pointer to linked list of rings */
ring              309 drivers/net/ethernet/intel/igb/igb.h 	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
ring              321 drivers/net/ethernet/intel/igb/igb.h #define ring_uses_large_buffer(ring) \
ring              322 drivers/net/ethernet/intel/igb/igb.h 	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
ring              323 drivers/net/ethernet/intel/igb/igb.h #define set_ring_uses_large_buffer(ring) \
ring              324 drivers/net/ethernet/intel/igb/igb.h 	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
ring              325 drivers/net/ethernet/intel/igb/igb.h #define clear_ring_uses_large_buffer(ring) \
ring              326 drivers/net/ethernet/intel/igb/igb.h 	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
ring              328 drivers/net/ethernet/intel/igb/igb.h #define ring_uses_build_skb(ring) \
ring              329 drivers/net/ethernet/intel/igb/igb.h 	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
ring              330 drivers/net/ethernet/intel/igb/igb.h #define set_ring_build_skb_enabled(ring) \
ring              331 drivers/net/ethernet/intel/igb/igb.h 	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
ring              332 drivers/net/ethernet/intel/igb/igb.h #define clear_ring_build_skb_enabled(ring) \
ring              333 drivers/net/ethernet/intel/igb/igb.h 	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
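
The igb.h wrappers above reduce ring feature tests to test_bit/set_bit/clear_bit on ring->flags. A non-atomic userspace imitation of the pattern (the kernel bit helpers used in the listing are atomic, this sketch is not):

#include <assert.h>

#define RING_FLAG_RX_3K_BUFFER  0

/* Wrapped the way igb.h wraps the kernel bit helpers. */
#define ring_uses_large_buffer(flags) \
        (!!((flags) & (1UL << RING_FLAG_RX_3K_BUFFER)))
#define set_ring_uses_large_buffer(flags) \
        ((flags) |= 1UL << RING_FLAG_RX_3K_BUFFER)
#define clear_ring_uses_large_buffer(flags) \
        ((flags) &= ~(1UL << RING_FLAG_RX_3K_BUFFER))

int main(void)
{
        unsigned long flags = 0;

        set_ring_uses_large_buffer(flags);
        assert(ring_uses_large_buffer(flags));
        clear_ring_uses_large_buffer(flags);
        assert(!ring_uses_large_buffer(flags));
        return 0;
}
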
ring              335 drivers/net/ethernet/intel/igb/igb.h static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
ring              338 drivers/net/ethernet/intel/igb/igb.h 	if (ring_uses_large_buffer(ring))
ring              341 drivers/net/ethernet/intel/igb/igb.h 	if (ring_uses_build_skb(ring))
ring              347 drivers/net/ethernet/intel/igb/igb.h static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
ring              350 drivers/net/ethernet/intel/igb/igb.h 	if (ring_uses_large_buffer(ring))
ring              375 drivers/net/ethernet/intel/igb/igb.h static inline int igb_desc_unused(struct igb_ring *ring)
ring              377 drivers/net/ethernet/intel/igb/igb.h 	if (ring->next_to_clean > ring->next_to_use)
ring              378 drivers/net/ethernet/intel/igb/igb.h 		return ring->next_to_clean - ring->next_to_use - 1;
ring              380 drivers/net/ethernet/intel/igb/igb.h 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
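
igb_desc_unused subtracts one extra slot: keeping one descriptor permanently unused lets the driver distinguish a full ring (next_to_use one behind next_to_clean) from an empty one (indices equal). A quick check of that invariant:

#include <assert.h>

struct ring { unsigned int count, next_to_clean, next_to_use; };

/* Same computation as igb_desc_unused in the listing. */
static int desc_unused(const struct ring *r)
{
        if (r->next_to_clean > r->next_to_use)
                return (int)(r->next_to_clean - r->next_to_use - 1);
        return (int)(r->count + r->next_to_clean - r->next_to_use - 1);
}

int main(void)
{
        struct ring empty = { 8, 0, 0 };
        struct ring full  = { 8, 3, 2 };        /* use one behind clean */

        assert(desc_unused(&empty) == 7);       /* one slot always reserved */
        assert(desc_unused(&full) == 0);
        return 0;
}
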
ring              859 drivers/net/ethernet/intel/igb/igb_ethtool.c 			      struct ethtool_ringparam *ring)
ring              863 drivers/net/ethernet/intel/igb/igb_ethtool.c 	ring->rx_max_pending = IGB_MAX_RXD;
ring              864 drivers/net/ethernet/intel/igb/igb_ethtool.c 	ring->tx_max_pending = IGB_MAX_TXD;
ring              865 drivers/net/ethernet/intel/igb/igb_ethtool.c 	ring->rx_pending = adapter->rx_ring_count;
ring              866 drivers/net/ethernet/intel/igb/igb_ethtool.c 	ring->tx_pending = adapter->tx_ring_count;
ring              870 drivers/net/ethernet/intel/igb/igb_ethtool.c 			     struct ethtool_ringparam *ring)
ring              877 drivers/net/ethernet/intel/igb/igb_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              880 drivers/net/ethernet/intel/igb/igb_ethtool.c 	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
ring              884 drivers/net/ethernet/intel/igb/igb_ethtool.c 	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
ring             2237 drivers/net/ethernet/intel/igb/igb_ethtool.c 		if (q_vector->rx.ring)
ring             2297 drivers/net/ethernet/intel/igb/igb_ethtool.c 	struct igb_ring *ring;
ring             2317 drivers/net/ethernet/intel/igb/igb_ethtool.c 		ring = adapter->tx_ring[j];
ring             2319 drivers/net/ethernet/intel/igb/igb_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
ring             2320 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i]   = ring->tx_stats.packets;
ring             2321 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+1] = ring->tx_stats.bytes;
ring             2322 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+2] = ring->tx_stats.restart_queue;
ring             2323 drivers/net/ethernet/intel/igb/igb_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
ring             2325 drivers/net/ethernet/intel/igb/igb_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
ring             2326 drivers/net/ethernet/intel/igb/igb_ethtool.c 			restart2  = ring->tx_stats.restart_queue2;
ring             2327 drivers/net/ethernet/intel/igb/igb_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
ring             2333 drivers/net/ethernet/intel/igb/igb_ethtool.c 		ring = adapter->rx_ring[j];
ring             2335 drivers/net/ethernet/intel/igb/igb_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
ring             2336 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i]   = ring->rx_stats.packets;
ring             2337 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+1] = ring->rx_stats.bytes;
ring             2338 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+2] = ring->rx_stats.drops;
ring             2339 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+3] = ring->rx_stats.csum_err;
ring             2340 drivers/net/ethernet/intel/igb/igb_ethtool.c 			data[i+4] = ring->rx_stats.alloc_failed;
ring             2341 drivers/net/ethernet/intel/igb/igb_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
ring              798 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->rx.ring)
ring              799 drivers/net/ethernet/intel/igb/igb_main.c 		rx_queue = q_vector->rx.ring->reg_idx;
ring              800 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->tx.ring)
ring              801 drivers/net/ethernet/intel/igb/igb_main.c 		tx_queue = q_vector->tx.ring->reg_idx;
ring              958 drivers/net/ethernet/intel/igb/igb_main.c 		if (q_vector->rx.ring && q_vector->tx.ring)
ring              960 drivers/net/ethernet/intel/igb/igb_main.c 				q_vector->rx.ring->queue_index);
ring              961 drivers/net/ethernet/intel/igb/igb_main.c 		else if (q_vector->tx.ring)
ring              963 drivers/net/ethernet/intel/igb/igb_main.c 				q_vector->tx.ring->queue_index);
ring              964 drivers/net/ethernet/intel/igb/igb_main.c 		else if (q_vector->rx.ring)
ring              966 drivers/net/ethernet/intel/igb/igb_main.c 				q_vector->rx.ring->queue_index);
ring             1031 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->tx.ring)
ring             1032 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
ring             1034 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->rx.ring)
ring             1035 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
ring             1168 drivers/net/ethernet/intel/igb/igb_main.c static void igb_add_ring(struct igb_ring *ring,
ring             1171 drivers/net/ethernet/intel/igb/igb_main.c 	head->ring = ring;
ring             1193 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring;
ring             1202 drivers/net/ethernet/intel/igb/igb_main.c 	size = struct_size(q_vector, ring, ring_count);
ring             1233 drivers/net/ethernet/intel/igb/igb_main.c 	ring = q_vector->ring;
ring             1248 drivers/net/ethernet/intel/igb/igb_main.c 		ring->dev = &adapter->pdev->dev;
ring             1249 drivers/net/ethernet/intel/igb/igb_main.c 		ring->netdev = adapter->netdev;
ring             1252 drivers/net/ethernet/intel/igb/igb_main.c 		ring->q_vector = q_vector;
ring             1255 drivers/net/ethernet/intel/igb/igb_main.c 		igb_add_ring(ring, &q_vector->tx);
ring             1259 drivers/net/ethernet/intel/igb/igb_main.c 			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
ring             1262 drivers/net/ethernet/intel/igb/igb_main.c 		ring->count = adapter->tx_ring_count;
ring             1263 drivers/net/ethernet/intel/igb/igb_main.c 		ring->queue_index = txr_idx;
ring             1265 drivers/net/ethernet/intel/igb/igb_main.c 		ring->cbs_enable = false;
ring             1266 drivers/net/ethernet/intel/igb/igb_main.c 		ring->idleslope = 0;
ring             1267 drivers/net/ethernet/intel/igb/igb_main.c 		ring->sendslope = 0;
ring             1268 drivers/net/ethernet/intel/igb/igb_main.c 		ring->hicredit = 0;
ring             1269 drivers/net/ethernet/intel/igb/igb_main.c 		ring->locredit = 0;
ring             1271 drivers/net/ethernet/intel/igb/igb_main.c 		u64_stats_init(&ring->tx_syncp);
ring             1272 drivers/net/ethernet/intel/igb/igb_main.c 		u64_stats_init(&ring->tx_syncp2);
ring             1275 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->tx_ring[txr_idx] = ring;
ring             1278 drivers/net/ethernet/intel/igb/igb_main.c 		ring++;
ring             1283 drivers/net/ethernet/intel/igb/igb_main.c 		ring->dev = &adapter->pdev->dev;
ring             1284 drivers/net/ethernet/intel/igb/igb_main.c 		ring->netdev = adapter->netdev;
ring             1287 drivers/net/ethernet/intel/igb/igb_main.c 		ring->q_vector = q_vector;
ring             1290 drivers/net/ethernet/intel/igb/igb_main.c 		igb_add_ring(ring, &q_vector->rx);
ring             1294 drivers/net/ethernet/intel/igb/igb_main.c 			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
ring             1300 drivers/net/ethernet/intel/igb/igb_main.c 			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
ring             1303 drivers/net/ethernet/intel/igb/igb_main.c 		ring->count = adapter->rx_ring_count;
ring             1304 drivers/net/ethernet/intel/igb/igb_main.c 		ring->queue_index = rxr_idx;
ring             1306 drivers/net/ethernet/intel/igb/igb_main.c 		u64_stats_init(&ring->rx_syncp);
ring             1309 drivers/net/ethernet/intel/igb/igb_main.c 		adapter->rx_ring[rxr_idx] = ring;
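
igb.h declares the rings as a zero-length trailing array inside the q_vector, and igb_alloc_q_vector sizes one allocation for vector plus rings with struct_size(). A userspace sketch of the same single-block layout using a C99 flexible array member; struct_size_of below is a simplified stand-in that, unlike the kernel helper, does no overflow checking:

#include <stdio.h>
#include <stdlib.h>

struct ring { int q_index; };

struct q_vector {
        int ring_count;
        struct ring ring[];     /* C99 flexible array member */
};

/* Simplified stand-in for the kernel's struct_size(). */
#define struct_size_of(type, member, n) \
        (sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
        int n = 4;
        struct q_vector *qv =
                calloc(1, struct_size_of(struct q_vector, ring, n));

        if (!qv)
                return 1;
        qv->ring_count = n;
        for (int i = 0; i < n; i++)
                qv->ring[i].q_index = i;        /* rings share the block */
        printf("last ring index: %d\n", qv->ring[n - 1].q_index);
        free(qv);
        return 0;
}
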
ring             1690 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring = adapter->tx_ring[queue];
ring             1703 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring->cbs_enable || ring->launchtime_enable) {
ring             1712 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring->cbs_enable || queue == 0) {
ring             1722 drivers/net/ethernet/intel/igb/igb_main.c 		if (queue == 0 && !ring->cbs_enable) {
ring             1724 drivers/net/ethernet/intel/igb/igb_main.c 			ring->idleslope = 1000000;
ring             1725 drivers/net/ethernet/intel/igb/igb_main.c 			ring->hicredit = ETH_FRAME_LEN;
ring             1793 drivers/net/ethernet/intel/igb/igb_main.c 		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);
ring             1801 drivers/net/ethernet/intel/igb/igb_main.c 		     0x80000000 + ring->hicredit * 0x7735);
ring             1824 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring->launchtime_enable) {
ring             1856 drivers/net/ethernet/intel/igb/igb_main.c 		   ring->cbs_enable ? "enabled" : "disabled",
ring             1857 drivers/net/ethernet/intel/igb/igb_main.c 		   ring->launchtime_enable ? "enabled" : "disabled",
ring             1859 drivers/net/ethernet/intel/igb/igb_main.c 		   ring->idleslope, ring->sendslope,
ring             1860 drivers/net/ethernet/intel/igb/igb_main.c 		   ring->hicredit, ring->locredit);
ring             1866 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring;
ring             1871 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
ring             1872 drivers/net/ethernet/intel/igb/igb_main.c 	ring->launchtime_enable = enable;
ring             1881 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *ring;
ring             1886 drivers/net/ethernet/intel/igb/igb_main.c 	ring = adapter->tx_ring[queue];
ring             1888 drivers/net/ethernet/intel/igb/igb_main.c 	ring->cbs_enable = enable;
ring             1889 drivers/net/ethernet/intel/igb/igb_main.c 	ring->idleslope = idleslope;
ring             1890 drivers/net/ethernet/intel/igb/igb_main.c 	ring->sendslope = sendslope;
ring             1891 drivers/net/ethernet/intel/igb/igb_main.c 	ring->hicredit = hicredit;
ring             1892 drivers/net/ethernet/intel/igb/igb_main.c 	ring->locredit = locredit;
ring             2015 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_ring *ring = adapter->rx_ring[i];
ring             2016 drivers/net/ethernet/intel/igb/igb_main.c 		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
ring             4130 drivers/net/ethernet/intel/igb/igb_main.c 			   struct igb_ring *ring)
ring             4134 drivers/net/ethernet/intel/igb/igb_main.c 	u64 tdba = ring->dma;
ring             4135 drivers/net/ethernet/intel/igb/igb_main.c 	int reg_idx = ring->reg_idx;
ring             4138 drivers/net/ethernet/intel/igb/igb_main.c 	     ring->count * sizeof(union e1000_adv_tx_desc));
ring             4143 drivers/net/ethernet/intel/igb/igb_main.c 	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
ring             4145 drivers/net/ethernet/intel/igb/igb_main.c 	writel(0, ring->tail);
ring             4152 drivers/net/ethernet/intel/igb/igb_main.c 	memset(ring->tx_buffer_info, 0,
ring             4153 drivers/net/ethernet/intel/igb/igb_main.c 	       sizeof(struct igb_tx_buffer) * ring->count);
ring             4476 drivers/net/ethernet/intel/igb/igb_main.c 			   struct igb_ring *ring)
ring             4480 drivers/net/ethernet/intel/igb/igb_main.c 	u64 rdba = ring->dma;
ring             4481 drivers/net/ethernet/intel/igb/igb_main.c 	int reg_idx = ring->reg_idx;
ring             4492 drivers/net/ethernet/intel/igb/igb_main.c 	     ring->count * sizeof(union e1000_adv_rx_desc));
ring             4495 drivers/net/ethernet/intel/igb/igb_main.c 	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
ring             4497 drivers/net/ethernet/intel/igb/igb_main.c 	writel(0, ring->tail);
ring             4501 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring_uses_large_buffer(ring))
ring             4522 drivers/net/ethernet/intel/igb/igb_main.c 	memset(ring->rx_buffer_info, 0,
ring             4523 drivers/net/ethernet/intel/igb/igb_main.c 	       sizeof(struct igb_rx_buffer) * ring->count);
ring             4526 drivers/net/ethernet/intel/igb/igb_main.c 	rx_desc = IGB_RX_DESC(ring, 0);
ring             5505 drivers/net/ethernet/intel/igb/igb_main.c 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
ring             5506 drivers/net/ethernet/intel/igb/igb_main.c 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
ring             5609 drivers/net/ethernet/intel/igb/igb_main.c 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
ring             5610 drivers/net/ethernet/intel/igb/igb_main.c 	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
ring             6286 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_ring *ring = adapter->rx_ring[i];
ring             6292 drivers/net/ethernet/intel/igb/igb_main.c 			ring->rx_stats.drops += rqdpc;
ring             6297 drivers/net/ethernet/intel/igb/igb_main.c 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
ring             6298 drivers/net/ethernet/intel/igb/igb_main.c 			_bytes = ring->rx_stats.bytes;
ring             6299 drivers/net/ethernet/intel/igb/igb_main.c 			_packets = ring->rx_stats.packets;
ring             6300 drivers/net/ethernet/intel/igb/igb_main.c 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
ring             6311 drivers/net/ethernet/intel/igb/igb_main.c 		struct igb_ring *ring = adapter->tx_ring[i];
ring             6313 drivers/net/ethernet/intel/igb/igb_main.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
ring             6314 drivers/net/ethernet/intel/igb/igb_main.c 			_bytes = ring->tx_stats.bytes;
ring             6315 drivers/net/ethernet/intel/igb/igb_main.c 			_packets = ring->tx_stats.packets;
ring             6316 drivers/net/ethernet/intel/igb/igb_main.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
ring             6642 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->tx.ring)
ring             6643 drivers/net/ethernet/intel/igb/igb_main.c 		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
ring             6645 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->rx.ring)
ring             6646 drivers/net/ethernet/intel/igb/igb_main.c 		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
ring             7681 drivers/net/ethernet/intel/igb/igb_main.c 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
ring             7682 drivers/net/ethernet/intel/igb/igb_main.c 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
ring             7714 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->tx.ring)
ring             7717 drivers/net/ethernet/intel/igb/igb_main.c 	if (q_vector->rx.ring) {
ring             7748 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *tx_ring = q_vector->tx.ring;
ring             8108 drivers/net/ethernet/intel/igb/igb_main.c static inline void igb_rx_checksum(struct igb_ring *ring,
ring             8119 drivers/net/ethernet/intel/igb/igb_main.c 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
ring             8131 drivers/net/ethernet/intel/igb/igb_main.c 		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
ring             8132 drivers/net/ethernet/intel/igb/igb_main.c 			u64_stats_update_begin(&ring->rx_syncp);
ring             8133 drivers/net/ethernet/intel/igb/igb_main.c 			ring->rx_stats.csum_err++;
ring             8134 drivers/net/ethernet/intel/igb/igb_main.c 			u64_stats_update_end(&ring->rx_syncp);
ring             8144 drivers/net/ethernet/intel/igb/igb_main.c 	dev_dbg(ring->dev, "cksum success: bits %08X\n",
ring             8148 drivers/net/ethernet/intel/igb/igb_main.c static inline void igb_rx_hash(struct igb_ring *ring,
ring             8152 drivers/net/ethernet/intel/igb/igb_main.c 	if (ring->netdev->features & NETIF_F_RXHASH)
ring             8305 drivers/net/ethernet/intel/igb/igb_main.c 	struct igb_ring *rx_ring = q_vector->rx.ring;
ring              180 drivers/net/ethernet/intel/igbvf/ethtool.c 				struct ethtool_ringparam *ring)
ring              186 drivers/net/ethernet/intel/igbvf/ethtool.c 	ring->rx_max_pending = IGBVF_MAX_RXD;
ring              187 drivers/net/ethernet/intel/igbvf/ethtool.c 	ring->tx_max_pending = IGBVF_MAX_TXD;
ring              188 drivers/net/ethernet/intel/igbvf/ethtool.c 	ring->rx_pending = rx_ring->count;
ring              189 drivers/net/ethernet/intel/igbvf/ethtool.c 	ring->tx_pending = tx_ring->count;
ring              193 drivers/net/ethernet/intel/igbvf/ethtool.c 			       struct ethtool_ringparam *ring)
ring              200 drivers/net/ethernet/intel/igbvf/ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              203 drivers/net/ethernet/intel/igbvf/ethtool.c 	new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
ring              207 drivers/net/ethernet/intel/igbvf/ethtool.c 	new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
ring               68 drivers/net/ethernet/intel/igbvf/netdev.c static int igbvf_desc_unused(struct igbvf_ring *ring)
ring               70 drivers/net/ethernet/intel/igbvf/netdev.c 	if (ring->next_to_clean > ring->next_to_use)
ring               71 drivers/net/ethernet/intel/igbvf/netdev.c 		return ring->next_to_clean - ring->next_to_use - 1;
ring               73 drivers/net/ethernet/intel/igbvf/netdev.c 	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
ring               24 drivers/net/ethernet/intel/igc/igc.h int igc_setup_tx_resources(struct igc_ring *ring);
ring               25 drivers/net/ethernet/intel/igc/igc.h int igc_setup_rx_resources(struct igc_ring *ring);
ring               26 drivers/net/ethernet/intel/igc/igc.h void igc_free_tx_resources(struct igc_ring *ring);
ring               27 drivers/net/ethernet/intel/igc/igc.h void igc_free_rx_resources(struct igc_ring *ring);
ring              235 drivers/net/ethernet/intel/igc/igc.h 	struct igc_ring *ring;          /* pointer to linked list of rings */
ring              301 drivers/net/ethernet/intel/igc/igc.h 	struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
ring              437 drivers/net/ethernet/intel/igc/igc.h static inline u16 igc_desc_unused(const struct igc_ring *ring)
ring              439 drivers/net/ethernet/intel/igc/igc.h 	u16 ntc = ring->next_to_clean;
ring              440 drivers/net/ethernet/intel/igc/igc.h 	u16 ntu = ring->next_to_use;
ring              442 drivers/net/ethernet/intel/igc/igc.h 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
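
igc_desc_unused folds the wrap test into a single expression, adding ring->count only when next_to_clean has not passed next_to_use; it returns the same value as the branched igb/igbvf versions earlier in this listing. An exhaustive check of that equivalence over a small ring:

#include <assert.h>

/* igb/igbvf formulation from the listing. */
static int branched(unsigned int count, unsigned int ntc, unsigned int ntu)
{
        if (ntc > ntu)
                return (int)(ntc - ntu - 1);
        return (int)(count + ntc - ntu - 1);
}

/* igc/ixgbe formulation from the listing. */
static int branchless(unsigned int count, unsigned int ntc, unsigned int ntu)
{
        return (int)(((ntc > ntu) ? 0 : count) + ntc - ntu - 1);
}

int main(void)
{
        unsigned int count = 8;

        for (unsigned int ntc = 0; ntc < count; ntc++)
                for (unsigned int ntu = 0; ntu < count; ntu++)
                        assert(branched(count, ntc, ntu) ==
                               branchless(count, ntc, ntu));
        return 0;
}
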
ring              475 drivers/net/ethernet/intel/igc/igc.h #define ring_uses_large_buffer(ring) \
ring              476 drivers/net/ethernet/intel/igc/igc.h 	test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
ring              478 drivers/net/ethernet/intel/igc/igc.h #define ring_uses_build_skb(ring) \
ring              479 drivers/net/ethernet/intel/igc/igc.h 	test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
ring              481 drivers/net/ethernet/intel/igc/igc.h static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
ring              484 drivers/net/ethernet/intel/igc/igc.h 	if (ring_uses_large_buffer(ring))
ring              487 drivers/net/ethernet/intel/igc/igc.h 	if (ring_uses_build_skb(ring))
ring              493 drivers/net/ethernet/intel/igc/igc.h static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
ring              496 drivers/net/ethernet/intel/igc/igc.h 	if (ring_uses_large_buffer(ring))
ring              474 drivers/net/ethernet/intel/igc/igc_ethtool.c 			      struct ethtool_ringparam *ring)
ring              478 drivers/net/ethernet/intel/igc/igc_ethtool.c 	ring->rx_max_pending = IGC_MAX_RXD;
ring              479 drivers/net/ethernet/intel/igc/igc_ethtool.c 	ring->tx_max_pending = IGC_MAX_TXD;
ring              480 drivers/net/ethernet/intel/igc/igc_ethtool.c 	ring->rx_pending = adapter->rx_ring_count;
ring              481 drivers/net/ethernet/intel/igc/igc_ethtool.c 	ring->tx_pending = adapter->tx_ring_count;
ring              485 drivers/net/ethernet/intel/igc/igc_ethtool.c 			     struct ethtool_ringparam *ring)
ring              492 drivers/net/ethernet/intel/igc/igc_ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring              495 drivers/net/ethernet/intel/igc/igc_ethtool.c 	new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD);
ring              499 drivers/net/ethernet/intel/igc/igc_ethtool.c 	new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD);
ring              729 drivers/net/ethernet/intel/igc/igc_ethtool.c 	struct igc_ring *ring;
ring              749 drivers/net/ethernet/intel/igc/igc_ethtool.c 		ring = adapter->tx_ring[j];
ring              751 drivers/net/ethernet/intel/igc/igc_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
ring              752 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i]   = ring->tx_stats.packets;
ring              753 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 1] = ring->tx_stats.bytes;
ring              754 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 2] = ring->tx_stats.restart_queue;
ring              755 drivers/net/ethernet/intel/igc/igc_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
ring              757 drivers/net/ethernet/intel/igc/igc_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
ring              758 drivers/net/ethernet/intel/igc/igc_ethtool.c 			restart2  = ring->tx_stats.restart_queue2;
ring              759 drivers/net/ethernet/intel/igc/igc_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
ring              765 drivers/net/ethernet/intel/igc/igc_ethtool.c 		ring = adapter->rx_ring[j];
ring              767 drivers/net/ethernet/intel/igc/igc_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
ring              768 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i]   = ring->rx_stats.packets;
ring              769 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 1] = ring->rx_stats.bytes;
ring              770 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 2] = ring->rx_stats.drops;
ring              771 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 3] = ring->rx_stats.csum_err;
ring              772 drivers/net/ethernet/intel/igc/igc_ethtool.c 			data[i + 4] = ring->rx_stats.alloc_failed;
ring              773 drivers/net/ethernet/intel/igc/igc_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
ring              865 drivers/net/ethernet/intel/igc/igc_ethtool.c 		if (q_vector->rx.ring)
ring              521 drivers/net/ethernet/intel/igc/igc_main.c 				  struct igc_ring *ring)
ring              525 drivers/net/ethernet/intel/igc/igc_main.c 	int reg_idx = ring->reg_idx;
ring              527 drivers/net/ethernet/intel/igc/igc_main.c 	u64 rdba = ring->dma;
ring              537 drivers/net/ethernet/intel/igc/igc_main.c 	     ring->count * sizeof(union igc_adv_rx_desc));
ring              540 drivers/net/ethernet/intel/igc/igc_main.c 	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
ring              542 drivers/net/ethernet/intel/igc/igc_main.c 	writel(0, ring->tail);
ring              545 drivers/net/ethernet/intel/igc/igc_main.c 	ring->next_to_clean = 0;
ring              546 drivers/net/ethernet/intel/igc/igc_main.c 	ring->next_to_use = 0;
ring              550 drivers/net/ethernet/intel/igc/igc_main.c 	if (ring_uses_large_buffer(ring))
ring              563 drivers/net/ethernet/intel/igc/igc_main.c 	memset(ring->rx_buffer_info, 0,
ring              564 drivers/net/ethernet/intel/igc/igc_main.c 	       sizeof(struct igc_rx_buffer) * ring->count);
ring              567 drivers/net/ethernet/intel/igc/igc_main.c 	rx_desc = IGC_RX_DESC(ring, 0);
ring              601 drivers/net/ethernet/intel/igc/igc_main.c 				  struct igc_ring *ring)
ring              604 drivers/net/ethernet/intel/igc/igc_main.c 	int reg_idx = ring->reg_idx;
ring              605 drivers/net/ethernet/intel/igc/igc_main.c 	u64 tdba = ring->dma;
ring              614 drivers/net/ethernet/intel/igc/igc_main.c 	     ring->count * sizeof(union igc_adv_tx_desc));
ring              619 drivers/net/ethernet/intel/igc/igc_main.c 	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
ring              621 drivers/net/ethernet/intel/igc/igc_main.c 	writel(0, ring->tail);
ring             1166 drivers/net/ethernet/intel/igc/igc_main.c static inline void igc_rx_hash(struct igc_ring *ring,
ring             1170 drivers/net/ethernet/intel/igc/igc_main.c 	if (ring->netdev->features & NETIF_F_RXHASH)
ring             1560 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_ring *rx_ring = q_vector->rx.ring;
ring             1706 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_ring *tx_ring = q_vector->tx.ring;
ring             1926 drivers/net/ethernet/intel/igc/igc_main.c 		struct igc_ring *ring = adapter->rx_ring[i];
ring             1933 drivers/net/ethernet/intel/igc/igc_main.c 			ring->rx_stats.drops += rqdpc;
ring             1938 drivers/net/ethernet/intel/igc/igc_main.c 			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
ring             1939 drivers/net/ethernet/intel/igc/igc_main.c 			_bytes = ring->rx_stats.bytes;
ring             1940 drivers/net/ethernet/intel/igc/igc_main.c 			_packets = ring->rx_stats.packets;
ring             1941 drivers/net/ethernet/intel/igc/igc_main.c 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
ring             1952 drivers/net/ethernet/intel/igc/igc_main.c 		struct igc_ring *ring = adapter->tx_ring[i];
ring             1955 drivers/net/ethernet/intel/igc/igc_main.c 			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
ring             1956 drivers/net/ethernet/intel/igc/igc_main.c 			_bytes = ring->tx_stats.bytes;
ring             1957 drivers/net/ethernet/intel/igc/igc_main.c 			_packets = ring->tx_stats.packets;
ring             1958 drivers/net/ethernet/intel/igc/igc_main.c 		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
ring             2349 drivers/net/ethernet/intel/igc/igc_main.c 		struct igc_ring *ring = adapter->rx_ring[i];
ring             2351 drivers/net/ethernet/intel/igc/igc_main.c 		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
ring             2598 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->rx.ring)
ring             2599 drivers/net/ethernet/intel/igc/igc_main.c 		rx_queue = q_vector->rx.ring->reg_idx;
ring             2600 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->tx.ring)
ring             2601 drivers/net/ethernet/intel/igc/igc_main.c 		tx_queue = q_vector->tx.ring->reg_idx;
ring             2707 drivers/net/ethernet/intel/igc/igc_main.c 		if (q_vector->rx.ring && q_vector->tx.ring)
ring             2709 drivers/net/ethernet/intel/igc/igc_main.c 				q_vector->rx.ring->queue_index);
ring             2710 drivers/net/ethernet/intel/igc/igc_main.c 		else if (q_vector->tx.ring)
ring             2712 drivers/net/ethernet/intel/igc/igc_main.c 				q_vector->tx.ring->queue_index);
ring             2713 drivers/net/ethernet/intel/igc/igc_main.c 		else if (q_vector->rx.ring)
ring             2715 drivers/net/ethernet/intel/igc/igc_main.c 				q_vector->rx.ring->queue_index);
ring             2760 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->tx.ring)
ring             2761 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
ring             2763 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->rx.ring)
ring             2764 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
ring             3135 drivers/net/ethernet/intel/igc/igc_main.c 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
ring             3136 drivers/net/ethernet/intel/igc/igc_main.c 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
ring             3319 drivers/net/ethernet/intel/igc/igc_main.c 	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
ring             3320 drivers/net/ethernet/intel/igc/igc_main.c 	    (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
ring             3364 drivers/net/ethernet/intel/igc/igc_main.c 	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
ring             3365 drivers/net/ethernet/intel/igc/igc_main.c 	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
ring             3393 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->tx.ring)
ring             3396 drivers/net/ethernet/intel/igc/igc_main.c 	if (q_vector->rx.ring) {
ring             3486 drivers/net/ethernet/intel/igc/igc_main.c static void igc_add_ring(struct igc_ring *ring,
ring             3489 drivers/net/ethernet/intel/igc/igc_main.c 	head->ring = ring;
ring             3511 drivers/net/ethernet/intel/igc/igc_main.c 	struct igc_ring *ring;
ring             3523 drivers/net/ethernet/intel/igc/igc_main.c 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
ring             3526 drivers/net/ethernet/intel/igc/igc_main.c 		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
ring             3546 drivers/net/ethernet/intel/igc/igc_main.c 	ring = q_vector->ring;
ring             3561 drivers/net/ethernet/intel/igc/igc_main.c 		ring->dev = &adapter->pdev->dev;
ring             3562 drivers/net/ethernet/intel/igc/igc_main.c 		ring->netdev = adapter->netdev;
ring             3565 drivers/net/ethernet/intel/igc/igc_main.c 		ring->q_vector = q_vector;
ring             3568 drivers/net/ethernet/intel/igc/igc_main.c 		igc_add_ring(ring, &q_vector->tx);
ring             3571 drivers/net/ethernet/intel/igc/igc_main.c 		ring->count = adapter->tx_ring_count;
ring             3572 drivers/net/ethernet/intel/igc/igc_main.c 		ring->queue_index = txr_idx;
ring             3575 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->tx_ring[txr_idx] = ring;
ring             3578 drivers/net/ethernet/intel/igc/igc_main.c 		ring++;
ring             3583 drivers/net/ethernet/intel/igc/igc_main.c 		ring->dev = &adapter->pdev->dev;
ring             3584 drivers/net/ethernet/intel/igc/igc_main.c 		ring->netdev = adapter->netdev;
ring             3587 drivers/net/ethernet/intel/igc/igc_main.c 		ring->q_vector = q_vector;
ring             3590 drivers/net/ethernet/intel/igc/igc_main.c 		igc_add_ring(ring, &q_vector->rx);
ring             3593 drivers/net/ethernet/intel/igc/igc_main.c 		ring->count = adapter->rx_ring_count;
ring             3594 drivers/net/ethernet/intel/igc/igc_main.c 		ring->queue_index = rxr_idx;
ring             3597 drivers/net/ethernet/intel/igc/igc_main.c 		adapter->rx_ring[rxr_idx] = ring;
ring              469 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		struct ethtool_ringparam *ring)
ring              475 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	ring->rx_max_pending = MAX_RXD;
ring              476 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	ring->tx_max_pending = MAX_TXD;
ring              477 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	ring->rx_pending = rxdr->count;
ring              478 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	ring->tx_pending = txdr->count;
ring              483 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 		struct ethtool_ringparam *ring)
ring              494 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              500 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
ring              504 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c 	txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
ring              279 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define ring_uses_build_skb(ring) \
ring              280 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
ring              290 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define check_for_tx_hang(ring) \
ring              291 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
ring              292 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define set_check_for_tx_hang(ring) \
ring              293 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
ring              294 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define clear_check_for_tx_hang(ring) \
ring              295 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
ring              296 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define ring_is_rsc_enabled(ring) \
ring              297 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
ring              298 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define set_ring_rsc_enabled(ring) \
ring              299 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
ring              300 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define clear_ring_rsc_enabled(ring) \
ring              301 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
ring              302 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define ring_is_xdp(ring) \
ring              303 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
ring              304 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define set_ring_xdp(ring) \
ring              305 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
ring              306 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define clear_ring_xdp(ring) \
ring              307 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
ring              399 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
ring              401 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
ring              404 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	if (ring_uses_build_skb(ring))
ring              410 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
ring              413 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
ring              427 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
ring              438 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	for (pos = (head).ring; pos != NULL; pos = pos->next)
ring              465 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
ring              507 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
ring              509 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	u16 ntc = ring->next_to_clean;
ring              510 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	u16 ntu = ring->next_to_use;
ring              512 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
ring              957 drivers/net/ethernet/intel/ixgbe/ixgbe.h static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
ring              959 drivers/net/ethernet/intel/ixgbe/ixgbe.h 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
ring             1020 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 				struct ethtool_ringparam *ring)
ring             1026 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ring->rx_max_pending = IXGBE_MAX_RXD;
ring             1027 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ring->tx_max_pending = IXGBE_MAX_TXD;
ring             1028 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ring->rx_pending = rx_ring->count;
ring             1029 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	ring->tx_pending = tx_ring->count;
ring             1033 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			       struct ethtool_ringparam *ring)
ring             1040 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring             1043 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	new_tx_count = clamp_t(u32, ring->tx_pending,
ring             1047 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	new_rx_count = clamp_t(u32, ring->rx_pending,
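
The set_ringparam excerpts bound the user-requested descriptor counts three ways: igb with min_t against the maximum, igbvf with max_t against the minimum, and ixgbe with clamp_t doing both at once. A sketch of the clamp with illustrative bounds (the MIN_TXD/MAX_TXD values below are hypothetical, not the drivers' real limits):

#include <assert.h>

#define MIN_TXD 80U
#define MAX_TXD 4096U

/* clamp_t-style helper: pin v to [lo, hi]. */
static unsigned int clamp_u32(unsigned int v, unsigned int lo,
                              unsigned int hi)
{
        if (v < lo)
                return lo;
        if (v > hi)
                return hi;
        return v;
}

int main(void)
{
        /* A user-supplied ring->tx_pending is bounded before use. */
        assert(clamp_u32(16, MIN_TXD, MAX_TXD) == MIN_TXD);
        assert(clamp_u32(1024, MIN_TXD, MAX_TXD) == 1024);
        assert(clamp_u32(100000, MIN_TXD, MAX_TXD) == MAX_TXD);
        return 0;
}
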
ring             1199 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 	struct ixgbe_ring *ring;
ring             1224 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		ring = adapter->tx_ring[j];
ring             1225 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		if (!ring) {
ring             1233 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1234 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			data[i]   = ring->stats.packets;
ring             1235 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			data[i+1] = ring->stats.bytes;
ring             1236 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             1240 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		ring = adapter->rx_ring[j];
ring             1241 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		if (!ring) {
ring             1249 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             1250 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			data[i]   = ring->stats.packets;
ring             1251 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			data[i+1] = ring->stats.bytes;
ring             1252 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             2694 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
ring             2697 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 		if (!vf && (ring >= adapter->num_rx_queues))
ring             2701 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			   ring >= adapter->num_rx_queues_per_pool))
ring             2706 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 			queue = adapter->rx_ring[ring]->reg_idx;
ring             2709 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 				adapter->num_rx_queues_per_pool) + ring;
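
The ixgbe_ethtool.c excerpt maps an ntuple rule's ring_cookie to a hardware queue: the PF uses rx_ring[ring]->reg_idx directly, while a VF's queue comes from its pool offset. A hedged sketch of the VF index arithmetic, assuming the contiguous per-pool layout the excerpt implies:

#include <stdio.h>

/* Illustrative pool layout: each VF owns a contiguous block of
 * Rx queues, queues_per_pool wide, with VF numbering from 1. */
static int vf_ring_to_queue(unsigned int vf, unsigned int ring,
                            unsigned int queues_per_pool)
{
        if (ring >= queues_per_pool)
                return -1;              /* reject, as the driver does */
        return (int)((vf - 1) * queues_per_pool + ring);
}

int main(void)
{
        /* VF 3, ring 1, 4 queues per pool -> absolute queue 9. */
        printf("queue = %d\n", vf_ring_to_queue(3, 1, 4));
        return 0;
}
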
ring              806 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c static void ixgbe_add_ring(struct ixgbe_ring *ring,
ring              809 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	ring->next = head->ring;
ring              810 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	head->ring = ring;
ring              836 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	struct ixgbe_ring *ring;
ring              856 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
ring              859 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
ring              908 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	ring = q_vector->ring;
ring              912 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->dev = &adapter->pdev->dev;
ring              913 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->netdev = adapter->netdev;
ring              916 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->q_vector = q_vector;
ring              919 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ixgbe_add_ring(ring, &q_vector->tx);
ring              922 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->count = adapter->tx_ring_count;
ring              923 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->queue_index = txr_idx;
ring              926 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->tx_ring[txr_idx] = ring;
ring              933 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring++;
ring              938 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->dev = &adapter->pdev->dev;
ring              939 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->netdev = adapter->netdev;
ring              942 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->q_vector = q_vector;
ring              945 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ixgbe_add_ring(ring, &q_vector->tx);
ring              948 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->count = adapter->tx_ring_count;
ring              949 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->queue_index = xdp_idx;
ring              950 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		set_ring_xdp(ring);
ring              953 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->xdp_ring[xdp_idx] = ring;
ring              960 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring++;
ring              965 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->dev = &adapter->pdev->dev;
ring              966 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->netdev = adapter->netdev;
ring              969 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->q_vector = q_vector;
ring              972 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ixgbe_add_ring(ring, &q_vector->rx);
ring              979 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
ring              987 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 				set_bit(__IXGBE_RX_FCOE, &ring->state);
ring              992 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->count = adapter->rx_ring_count;
ring              993 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring->queue_index = rxr_idx;
ring              996 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->rx_ring[rxr_idx] = ring;
ring             1003 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		ring++;
ring             1021 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	struct ixgbe_ring *ring;
ring             1023 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	ixgbe_for_each_ring(ring, q_vector->tx) {
ring             1024 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		if (ring_is_xdp(ring))
ring             1025 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->xdp_ring[ring->queue_index] = NULL;
ring             1027 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			adapter->tx_ring[ring->queue_index] = NULL;
ring             1030 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	ixgbe_for_each_ring(ring, q_vector->rx)
ring             1031 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		adapter->rx_ring[ring->queue_index] = NULL;
ring              550 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_print_buffer(struct ixgbe_ring *ring, int n)
ring              554 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
ring              556 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		n, ring->next_to_use, ring->next_to_clean,
ring              572 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *ring;
ring              612 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring = adapter->tx_ring[n];
ring              613 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_print_buffer(ring, n);
ring              617 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring = adapter->xdp_ring[n];
ring              618 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_print_buffer(ring, n);
ring              663 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring = adapter->tx_ring[n];
ring              665 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		pr_info("TX QUEUE INDEX = %d\n", ring->queue_index);
ring              672 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		for (i = 0; ring->desc && (i < ring->count); i++) {
ring              673 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_desc = IXGBE_TX_DESC(ring, i);
ring              674 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			tx_buffer = &ring->tx_buffer_info[i];
ring              679 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				if (i == ring->next_to_use &&
ring              680 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				    i == ring->next_to_clean)
ring              682 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				else if (i == ring->next_to_use)
ring              684 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				else if (i == ring->next_to_clean)
ring             1011 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
ring             1013 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ring->stats.packets;
ring             1016 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
ring             1020 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	head = ring->next_to_clean;
ring             1021 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tail = ring->next_to_use;
ring             1023 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return ((head <= tail) ? tail : tail + ring->count) - head;
ring             1346 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *ring;
ring             1352 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_for_each_ring(ring, q_vector->tx)
ring             1353 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_update_tx_dca(adapter, ring, cpu);
ring             1355 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_for_each_ring(ring, q_vector->rx)
ring             1356 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_update_rx_dca(adapter, ring, cpu);
ring             1422 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
ring             1428 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!(ring->netdev->features & NETIF_F_RXHASH))
ring             1450 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
ring             1455 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
ring             1468 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
ring             1478 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
ring             1490 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->rx_stats.csum_err++;
ring             1503 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		    test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
ring             1506 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->rx_stats.csum_err++;
ring             1644 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
ring             2410 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
ring             2416 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		writel(ring->next_to_use, ring->tail);
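
ixgbe_main.c above kicks the hardware by writing ring->next_to_use to ring->tail, a pointer into the device's memory-mapped register space set up during ring configuration (see the adapter->io_addr + TDT/RDT lines elsewhere in this listing). A userspace approximation with a volatile pointer standing in for the MMIO doorbell; the kernel's writel() also provides the ordering and byte-swap guarantees a real driver needs, plain volatile does not:

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint32_t next_to_use;
        volatile uint32_t *tail;        /* stand-in for the MMIO register */
};

/* Publish new descriptors: all descriptor writes must reach memory
 * before the doorbell; writel() implies that ordering in the kernel. */
static void kick(struct ring *r)
{
        *r->tail = r->next_to_use;
}

int main(void)
{
        uint32_t fake_reg = 0;
        struct ring r = { 5, &fake_reg };

        kick(&r);
        printf("tail register now %u\n", fake_reg);
        return 0;
}
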
ring             2453 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring;
ring             2456 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_for_each_ring(ring, q_vector->rx)
ring             2457 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
ring             2459 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_for_each_ring(ring, q_vector->tx)
ring             2460 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
ring             2515 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!ring_container->ring)
ring             3106 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				struct ixgbe_ring *ring = adapter->tx_ring[i];
ring             3108 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						       &ring->state))
ring             3143 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (q_vector->rx.ring || q_vector->tx.ring)
ring             3161 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *ring;
ring             3170 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_for_each_ring(ring, q_vector->tx) {
ring             3171 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		bool wd = ring->xsk_umem ?
ring             3172 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ring             3173 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			  ixgbe_clean_tx_irq(q_vector, ring, budget);
ring             3190 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_for_each_ring(ring, q_vector->rx) {
ring             3191 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		int cleaned = ring->xsk_umem ?
ring             3192 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			      ixgbe_clean_rx_irq_zc(q_vector, ring,
ring             3194 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			      ixgbe_clean_rx_irq(q_vector, ring,
ring             3235 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (q_vector->tx.ring && q_vector->rx.ring) {
ring             3239 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		} else if (q_vector->rx.ring) {
ring             3242 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		} else if (q_vector->tx.ring) {
ring             3406 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (!q_vector->rx.ring && !q_vector->tx.ring)
ring             3478 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			     struct ixgbe_ring *ring)
ring             3481 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u64 tdba = ring->dma;
ring             3484 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             3486 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->xsk_umem = NULL;
ring             3487 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring_is_xdp(ring))
ring             3488 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
ring             3498 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ring->count * sizeof(union ixgbe_adv_tx_desc));
ring             3501 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
ring             3513 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
ring             3527 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->atr_sample_rate = adapter->atr_sample_rate;
ring             3528 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->atr_count = 0;
ring             3529 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
ring             3531 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->atr_sample_rate = 0;
ring             3535 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
ring             3536 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_q_vector *q_vector = ring->q_vector;
ring             3539 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			netif_set_xps_queue(ring->netdev,
ring             3541 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					    ring->queue_index);
ring             3544 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
ring             3547 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	memset(ring->tx_buffer_info, 0,
ring             3548 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	       sizeof(struct ixgbe_tx_buffer) * ring->count);
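
Taken together, the ixgbe_configure_tx_ring() excerpts above
(ixgbe_main.c:3478-3548) program a TX queue in a fixed order: descriptor
base address and length first, then the tail doorbell mapping, then a
reset of the software state. A minimal sketch of that order, assuming
the driver's register macros and with the queue disable/enable,
FDIR and XPS steps elided:

	static void tx_ring_setup_sketch(struct ixgbe_adapter *adapter,
					 struct ixgbe_ring *ring)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		u64 tdba = ring->dma;
		u8 reg_idx = ring->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
				tdba & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
				ring->count * sizeof(union ixgbe_adv_tx_desc));

		/* Transmit later rings the doorbell through this mapping. */
		ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);

		ring->next_to_clean = 0;
		ring->next_to_use = 0;
		memset(ring->tx_buffer_info, 0,
		       sizeof(struct ixgbe_tx_buffer) * ring->count);
	}
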
ring             3651 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				 struct ixgbe_ring *ring)
ring             3654 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             3663 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				  struct ixgbe_ring *ring)
ring             3666 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             4018 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				   struct ixgbe_ring *ring)
ring             4022 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             4024 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!ring_is_rsc_enabled(ring))
ring             4040 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				       struct ixgbe_ring *ring)
ring             4045 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             4066 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			     struct ixgbe_ring *ring)
ring             4070 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u64 rdba = ring->dma;
ring             4072 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u8 reg_idx = ring->reg_idx;
ring             4074 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring             4075 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
ring             4076 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring->xsk_umem) {
ring             4077 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->zca.free = ixgbe_zca_free;
ring             4078 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
ring             4080 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 						   &ring->zca));
ring             4083 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
ring             4098 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			ring->count * sizeof(union ixgbe_adv_rx_desc));
ring             4104 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ring             4106 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_configure_srrctl(adapter, ring);
ring             4107 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_configure_rscctl(adapter, ring);
ring             4129 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring_uses_build_skb(ring) &&
ring             4130 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		    !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
ring             4136 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
ring             4137 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr -
ring             4144 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->rx_buf_len = xsk_buf_len;
ring             4148 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	memset(ring->rx_buffer_info, 0,
ring             4149 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	       sizeof(struct ixgbe_rx_buffer) * ring->count);
ring             4152 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_desc = IXGBE_RX_DESC(ring, 0);
ring             4159 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ixgbe_rx_desc_queue_enable(adapter, ring);
ring             4160 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring->xsk_umem)
ring             4161 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
ring             4163 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
ring             4519 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             4521 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (!netif_is_ixgbe(ring->netdev))
ring             4524 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			j = ring->reg_idx;
ring             4557 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             4559 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (!netif_is_ixgbe(ring->netdev))
ring             4562 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			j = ring->reg_idx;
ring             5254 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			u32 ring = ethtool_get_flow_spec_ring(filter->action);
ring             5257 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (!vf && (ring >= adapter->num_rx_queues)) {
ring             5259 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				      ring);
ring             5263 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				     ring >= adapter->num_rx_queues_per_pool)) {
ring             5265 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				      vf, ring);
ring             5271 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				queue = adapter->rx_ring[ring]->reg_idx;
ring             5274 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					adapter->num_rx_queues_per_pool) + ring;
ring             5755 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             5756 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u8 reg_idx = ring->reg_idx;
ring             5798 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             5799 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			u8 reg_idx = ring->reg_idx;
ring             5824 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->tx_ring[i];
ring             5825 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u8 reg_idx = ring->reg_idx;
ring             5832 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->xdp_ring[i];
ring             5833 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		u8 reg_idx = ring->reg_idx;
ring             5873 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->tx_ring[i];
ring             5874 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			u8 reg_idx = ring->reg_idx;
ring             5879 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->xdp_ring[i];
ring             5880 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			u8 reg_idx = ring->reg_idx;
ring             6726 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             6728 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (new_frame_size > ixgbe_rx_bufsz(ring)) {
ring             7347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			if (qv->rx.ring || qv->tx.ring)
ring             7548 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->xdp_ring[i];
ring             7550 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring->next_to_use != ring->next_to_clean)
ring             8354 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static void ixgbe_atr(struct ixgbe_ring *ring,
ring             8357 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_q_vector *q_vector = ring->q_vector;
ring             8376 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!ring->atr_sample_rate)
ring             8379 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->atr_count++;
ring             8446 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
ring             8450 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->atr_count = 0;
ring             8498 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 					      input, common, ring->queue_index);
ring             8551 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
ring             8560 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(!ixgbe_desc_unused(ring)))
ring             8563 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
ring             8564 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (dma_mapping_error(ring->dev, dma))
ring             8568 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
ring             8573 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	i = ring->next_to_use;
ring             8574 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_desc = IXGBE_TX_DESC(ring, i);
ring             8596 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (i == ring->count)
ring             8600 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring->next_to_use = i;
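
The ixgbe_xmit_xdp_ring() excerpts above (ixgbe_main.c:8551-8600)
reserve a descriptor, DMA-map the frame, and advance next_to_use with a
wrap back to zero at ring->count. A condensed sketch, with the
cmd_type/olinfo descriptor encoding and tx_buffer bookkeeping elided:

	static int xdp_xmit_sketch(struct ixgbe_ring *ring,
				   struct xdp_frame *xdpf)
	{
		union ixgbe_adv_tx_desc *tx_desc;
		dma_addr_t dma;
		u16 i;

		if (unlikely(!ixgbe_desc_unused(ring)))
			return IXGBE_XDP_CONSUMED;

		dma = dma_map_single(ring->dev, xdpf->data, xdpf->len,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(ring->dev, dma))
			return IXGBE_XDP_CONSUMED;

		i = ring->next_to_use;
		tx_desc = IXGBE_TX_DESC(ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		/* ... fill cmd_type_len / olinfo_status here ... */

		if (++i == ring->count)	/* wrap the producer index */
			i = 0;
		ring->next_to_use = i;

		return IXGBE_XDP_TX;
	}
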
ring             8759 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				      struct ixgbe_ring *ring)
ring             8771 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
ring             8917 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				   struct ixgbe_ring *ring)
ring             8922 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (ring) {
ring             8924 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             8925 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			packets = ring->stats.packets;
ring             8926 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			bytes   = ring->stats.bytes;
ring             8927 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
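
The do/while above (ixgbe_main.c:8922-8927) is the standard lockless
reader for per-ring counters: writers bracket their updates with
u64_stats_update_begin()/u64_stats_update_end() on ring->syncp, and
readers retry the snapshot until the sequence count is stable across
the copy:

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

On 64-bit kernels the seqcount compiles away; on 32-bit it prevents a
reader from observing a torn 64-bit counter.
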
ring             8941 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
ring             8945 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring) {
ring             8947 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             8948 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				packets = ring->stats.packets;
ring             8949 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 				bytes   = ring->stats.bytes;
ring             8950 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             8957 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ring             8959 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_get_ring_stats64(stats, ring);
ring             8962 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ring             8964 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_get_ring_stats64(stats, ring);
ring             10177 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
ring             10178 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_q_vector *qv = ring->q_vector;
ring             10185 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ring->netdev = NULL;
ring             10251 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		struct ixgbe_ring *ring = adapter->rx_ring[i];
ring             10253 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (ring_is_rsc_enabled(ring))
ring             10256 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		if (frame_size > ixgbe_rx_bufsz(ring))
ring             10320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
ring             10326 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	writel(ring->next_to_use, ring->tail);
ring             10333 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring *ring;
ring             10346 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
ring             10347 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(!ring))
ring             10350 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
ring             10365 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		ixgbe_xdp_ring_update_tail(ring);
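
ixgbe_xdp_xmit() above (ixgbe_main.c:10333-10365) queues a batch of
frames and rings the doorbell once at the end via
ixgbe_xdp_ring_update_tail() (line 10320). The helper pairs the tail
write with a write barrier so descriptor stores reach memory before the
hardware fetches them; a sketch of that pairing:

	void xdp_ring_update_tail_sketch(struct ixgbe_ring *ring)
	{
		/* Order descriptor writes before the MMIO doorbell. */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}
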
ring             10512 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
ring             10516 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_ring = adapter->rx_ring[ring];
ring             10517 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = adapter->tx_ring[ring];
ring             10518 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	xdp_ring = adapter->xdp_ring[ring];
ring             10550 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
ring             10554 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	rx_ring = adapter->rx_ring[ring];
ring             10555 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	tx_ring = adapter->tx_ring[ring];
ring             10556 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	xdp_ring = adapter->xdp_ring[ring];
ring               25 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
ring               28 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
ring               29 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
ring               32 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h 				struct ixgbe_ring *ring);
ring               12 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 				struct ixgbe_ring *ring)
ring               15 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	int qid = ring->ring_idx;
ring              533 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
ring              539 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		writel(ring->next_to_use, ring->tail);
ring              701 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	struct ixgbe_ring *ring;
ring              712 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	ring = adapter->xdp_ring[qid];
ring              714 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
ring              717 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (!ring->xsk_umem)
ring              720 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
ring              721 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 		u64 eics = BIT_ULL(ring->q_vector->v_idx);
ring              230 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				  struct ethtool_ringparam *ring)
ring              234 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring              235 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	ring->tx_max_pending = IXGBEVF_MAX_TXD;
ring              236 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	ring->rx_pending = adapter->rx_ring_count;
ring              237 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	ring->tx_pending = adapter->tx_ring_count;
ring              241 drivers/net/ethernet/intel/ixgbevf/ethtool.c 				 struct ethtool_ringparam *ring)
ring              248 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              251 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
ring              255 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
ring              428 drivers/net/ethernet/intel/ixgbevf/ethtool.c 	struct ixgbevf_ring *ring;
ring              455 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		ring = adapter->tx_ring[j];
ring              456 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		if (!ring) {
ring              463 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring              464 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i]   = ring->stats.packets;
ring              465 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i + 1] = ring->stats.bytes;
ring              466 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring              472 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		ring = adapter->xdp_ring[j];
ring              473 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		if (!ring) {
ring              480 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring              481 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i] = ring->stats.packets;
ring              482 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i + 1] = ring->stats.bytes;
ring              483 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring              489 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		ring = adapter->rx_ring[j];
ring              490 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		if (!ring) {
ring              497 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring              498 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i]   = ring->stats.packets;
ring              499 drivers/net/ethernet/intel/ixgbevf/ethtool.c 			data[i + 1] = ring->stats.bytes;
ring              500 drivers/net/ethernet/intel/ixgbevf/ethtool.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring               83 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define ring_is_xdp(ring) \
ring               84 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 		test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
ring               85 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define set_ring_xdp(ring) \
ring               86 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 		set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
ring               87 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define clear_ring_xdp(ring) \
ring               88 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 		clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
ring              172 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define ring_uses_large_buffer(ring) \
ring              173 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
ring              174 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define set_ring_uses_large_buffer(ring) \
ring              175 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
ring              176 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define clear_ring_uses_large_buffer(ring) \
ring              177 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
ring              179 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define ring_uses_build_skb(ring) \
ring              180 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
ring              181 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define set_ring_build_skb_enabled(ring) \
ring              182 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
ring              183 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define clear_ring_build_skb_enabled(ring) \
ring              184 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
ring              186 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
ring              189 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	if (ring_uses_large_buffer(ring))
ring              192 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	if (ring_uses_build_skb(ring))
ring              198 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
ring              201 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	if (ring_uses_large_buffer(ring))
ring              209 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define check_for_tx_hang(ring) \
ring              210 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
ring              211 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define set_check_for_tx_hang(ring) \
ring              212 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
ring              213 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define clear_check_for_tx_hang(ring) \
ring              214 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
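
The ixgbevf.h macros above all follow one pattern: ring->state is an
unsigned long bitmap and each flag gets test/set/clear wrappers around
atomic bitops. Illustrative usage (the hang-check flow here is a
sketch, not the driver's exact logic):

	if (!ring_is_xdp(tx_ring))
		set_check_for_tx_hang(tx_ring);	/* arm the watchdog */

	if (check_for_tx_hang(tx_ring)) {
		clear_check_for_tx_hang(tx_ring);
		/* ... compare head/tail progress to detect a stall ... */
	}
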
ring              217 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
ring              226 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	for (pos = (head).ring; pos != NULL; pos = pos->next)
ring              244 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
ring              286 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
ring              288 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	u16 ntc = ring->next_to_clean;
ring              289 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	u16 ntu = ring->next_to_use;
ring              291 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
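
Worked example for ixgbevf_desc_unused() above: with ring->count = 512,
ntc = 10 and ntu = 500 give (512) + 10 - 500 - 1 = 21 free descriptors;
ntc = 300 and ntu = 40 give (0) + 300 - 40 - 1 = 259. The trailing -1
keeps one slot permanently unused so a completely full ring is never
confused with an empty one (both would otherwise have
next_to_use == next_to_clean).
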
ring              294 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
ring              296 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h 	writel(value, ring->tail);
ring              193 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
ring              195 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	return ring->stats.packets;
ring              198 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
ring              200 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
ring              203 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
ring              204 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
ring              208 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			tail - head : (tail + ring->count - head);
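
ixgbevf_get_tx_pending() above is the complementary calculation from
the hardware's view: with head and tail read from VFTDH/VFTDT on a
512-entry ring, head = 10 and tail = 500 give simply 500 - 10 = 490
descriptors still queued, while head = 500 and tail = 10 give
10 + 512 - 500 = 22.
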
ring              443 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
ring              449 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!(ring->netdev->features & NETIF_F_RXHASH))
ring              469 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
ring              476 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
ring              482 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->rx_stats.csum_err++;
ring              490 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->rx_stats.csum_err++;
ring              983 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
ring              994 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (unlikely(!ixgbevf_desc_unused(ring)))
ring              997 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
ring              998 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (dma_mapping_error(ring->dev, dma))
ring             1002 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	i = ring->next_to_use;
ring             1003 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_buffer = &ring->tx_buffer_info[i];
ring             1015 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) {
ring             1018 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
ring             1020 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		context_desc = IXGBEVF_TX_CTXTDESC(ring, 0);
ring             1038 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	tx_desc = IXGBEVF_TX_DESC(ring, i);
ring             1051 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (i == ring->count)
ring             1055 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_use = i;
ring             1267 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_ring *ring;
ring             1271 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_for_each_ring(ring, q_vector->tx) {
ring             1272 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
ring             1287 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_for_each_ring(ring, q_vector->rx) {
ring             1288 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
ring             1352 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		struct ixgbevf_ring *ring;
ring             1356 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_for_each_ring(ring, q_vector->rx)
ring             1357 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
ring             1359 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_for_each_ring(ring, q_vector->tx)
ring             1360 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
ring             1362 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (q_vector->tx.ring && !q_vector->rx.ring) {
ring             1518 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (q_vector->rx.ring || q_vector->tx.ring)
ring             1542 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (q_vector->tx.ring && q_vector->rx.ring) {
ring             1546 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		} else if (q_vector->rx.ring) {
ring             1549 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		} else if (q_vector->tx.ring) {
ring             1628 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (!adapter->q_vector[i]->rx.ring &&
ring             1629 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		    !adapter->q_vector[i]->tx.ring)
ring             1677 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				      struct ixgbevf_ring *ring)
ring             1680 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u64 tdba = ring->dma;
ring             1683 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u8 reg_idx = ring->reg_idx;
ring             1692 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ring->count * sizeof(union ixgbe_adv_tx_desc));
ring             1706 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
ring             1709 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_clean = 0;
ring             1710 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_use = 0;
ring             1723 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	memset(ring->tx_buffer_info, 0,
ring             1724 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
ring             1726 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
ring             1727 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state);
ring             1760 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				     struct ixgbevf_ring *ring, int index)
ring             1768 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (ring_uses_large_buffer(ring))
ring             1794 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				     struct ixgbevf_ring *ring)
ring             1799 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u8 reg_idx = ring->reg_idx;
ring             1821 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 					 struct ixgbevf_ring *ring)
ring             1826 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u8 reg_idx = ring->reg_idx;
ring             1898 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				      struct ixgbevf_ring *ring)
ring             1902 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u64 rdba = ring->dma;
ring             1904 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	u8 reg_idx = ring->reg_idx;
ring             1908 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_disable_rx_queue(adapter, ring);
ring             1913 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			ring->count * sizeof(union ixgbe_adv_rx_desc));
ring             1928 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
ring             1931 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	memset(ring->rx_buffer_info, 0,
ring             1932 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	       sizeof(struct ixgbevf_rx_buffer) * ring->count);
ring             1935 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	rx_desc = IXGBEVF_RX_DESC(ring, 0);
ring             1939 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_clean = 0;
ring             1940 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_use = 0;
ring             1941 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next_to_alloc = 0;
ring             1943 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_configure_srrctl(adapter, ring, reg_idx);
ring             1952 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (ring_uses_build_skb(ring) &&
ring             1953 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		    !ring_uses_large_buffer(ring))
ring             1962 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_rx_desc_queue_enable(adapter, ring);
ring             1963 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
ring             2673 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
ring             2676 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring->next = head->ring;
ring             2677 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	head->ring = ring;
ring             2701 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_ring *ring;
ring             2705 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
ring             2721 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ring = q_vector->ring;
ring             2725 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->dev = &adapter->pdev->dev;
ring             2726 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->netdev = adapter->netdev;
ring             2729 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->q_vector = q_vector;
ring             2732 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_add_ring(ring, &q_vector->tx);
ring             2735 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->count = adapter->tx_ring_count;
ring             2736 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->queue_index = txr_idx;
ring             2737 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->reg_idx = reg_idx;
ring             2740 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		adapter->tx_ring[txr_idx] = ring;
ring             2748 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring++;
ring             2753 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->dev = &adapter->pdev->dev;
ring             2754 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->netdev = adapter->netdev;
ring             2757 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->q_vector = q_vector;
ring             2760 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_add_ring(ring, &q_vector->tx);
ring             2763 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->count = adapter->tx_ring_count;
ring             2764 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->queue_index = xdp_idx;
ring             2765 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->reg_idx = reg_idx;
ring             2766 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		set_ring_xdp(ring);
ring             2769 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		adapter->xdp_ring[xdp_idx] = ring;
ring             2777 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring++;
ring             2782 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->dev = &adapter->pdev->dev;
ring             2783 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->netdev = adapter->netdev;
ring             2786 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->q_vector = q_vector;
ring             2789 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_add_ring(ring, &q_vector->rx);
ring             2792 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->count = adapter->rx_ring_count;
ring             2793 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->queue_index = rxr_idx;
ring             2794 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring->reg_idx = rxr_idx;
ring             2797 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		adapter->rx_ring[rxr_idx] = ring;
ring             2804 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring++;
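
The bare ring++ stepping above (ixgbevf_main.c:2748, 2777, 2804) works
because a q_vector's rings live in its trailing array (ixgbevf.h line
244) and are carved out of a single allocation:

	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);  /* line 2705 */
	q_vector = kzalloc(size, GFP_KERNEL);	/* sketch; flags may differ */
	ring = q_vector->ring;		/* first slot of the trailing array */
	/* ... fill TX ring(s), ring++, then XDP, ring++, then RX ... */
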
ring             2822 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	struct ixgbevf_ring *ring;
ring             2824 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_for_each_ring(ring, q_vector->tx) {
ring             2825 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (ring_is_xdp(ring))
ring             2826 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			adapter->xdp_ring[ring->queue_index] = NULL;
ring             2828 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			adapter->tx_ring[ring->queue_index] = NULL;
ring             2831 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	ixgbevf_for_each_ring(ring, q_vector->rx)
ring             2832 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		adapter->rx_ring[ring->queue_index] = NULL;
ring             3207 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (qv->rx.ring || qv->tx.ring)
ring             4358 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 				      const struct ixgbevf_ring *ring)
ring             4363 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	if (ring) {
ring             4365 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             4366 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			bytes = ring->stats.bytes;
ring             4367 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			packets = ring->stats.packets;
ring             4368 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             4380 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 	const struct ixgbevf_ring *ring;
ring             4389 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring = adapter->rx_ring[i];
ring             4391 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			start = u64_stats_fetch_begin_irq(&ring->syncp);
ring             4392 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			bytes = ring->stats.bytes;
ring             4393 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 			packets = ring->stats.packets;
ring             4394 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
ring             4400 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring = adapter->tx_ring[i];
ring             4401 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_get_tx_ring_stats(stats, ring);
ring             4405 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ring = adapter->xdp_ring[i];
ring             4406 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		ixgbevf_get_tx_ring_stats(stats, ring);
ring             4453 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		struct ixgbevf_ring *ring = adapter->rx_ring[i];
ring             4455 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 		if (frame_size > ixgbevf_rx_bufsz(ring))
ring             4029 drivers/net/ethernet/marvell/mvneta.c 					 struct ethtool_ringparam *ring)
ring             4033 drivers/net/ethernet/marvell/mvneta.c 	ring->rx_max_pending = MVNETA_MAX_RXD;
ring             4034 drivers/net/ethernet/marvell/mvneta.c 	ring->tx_max_pending = MVNETA_MAX_TXD;
ring             4035 drivers/net/ethernet/marvell/mvneta.c 	ring->rx_pending = pp->rx_ring_size;
ring             4036 drivers/net/ethernet/marvell/mvneta.c 	ring->tx_pending = pp->tx_ring_size;
ring             4040 drivers/net/ethernet/marvell/mvneta.c 					struct ethtool_ringparam *ring)
ring             4044 drivers/net/ethernet/marvell/mvneta.c 	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
ring             4046 drivers/net/ethernet/marvell/mvneta.c 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
ring             4047 drivers/net/ethernet/marvell/mvneta.c 		ring->rx_pending : MVNETA_MAX_RXD;
ring             4049 drivers/net/ethernet/marvell/mvneta.c 	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
ring             4051 drivers/net/ethernet/marvell/mvneta.c 	if (pp->tx_ring_size != ring->tx_pending)
ring             4053 drivers/net/ethernet/marvell/mvneta.c 			    pp->tx_ring_size, ring->tx_pending);
ring             3494 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				       struct ethtool_ringparam *ring)
ring             3496 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	u16 new_rx_pending = ring->rx_pending;
ring             3497 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	u16 new_tx_pending = ring->tx_pending;
ring             3499 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
ring             3502 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
ring             3504 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	else if (!IS_ALIGNED(ring->rx_pending, 16))
ring             3505 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		new_rx_pending = ALIGN(ring->rx_pending, 16);
ring             3507 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
ring             3509 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	else if (!IS_ALIGNED(ring->tx_pending, 32))
ring             3510 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		new_tx_pending = ALIGN(ring->tx_pending, 32);
ring             3518 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (ring->rx_pending != new_rx_pending) {
ring             3520 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			    ring->rx_pending, new_rx_pending);
ring             3521 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		ring->rx_pending = new_rx_pending;
ring             3524 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	if (ring->tx_pending != new_tx_pending) {
ring             3526 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			    ring->tx_pending, new_tx_pending);
ring             3527 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		ring->tx_pending = new_tx_pending;
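
Worked example for mvpp2_check_ringparam_valid() above: a requested
rx_pending of 100 is not 16-aligned, so it becomes ALIGN(100, 16) = 112;
a tx_pending of 100 becomes ALIGN(100, 32) = 128. Requests above
MVPP2_MAX_RXD_MAX / MVPP2_MAX_TXD_MAX are clamped to the maximum, and
the adjusted values are written back into the ethtool_ringparam so
userspace sees what was actually applied.
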
ring             4096 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 					struct ethtool_ringparam *ring)
ring             4100 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
ring             4101 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
ring             4102 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	ring->rx_pending = port->rx_ring_size;
ring             4103 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	ring->tx_pending = port->tx_ring_size;
ring             4107 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 				       struct ethtool_ringparam *ring)
ring             4114 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	err = mvpp2_check_ringparam_valid(dev, ring);
ring             4119 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		port->rx_ring_size = ring->rx_pending;
ring             4120 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		port->tx_ring_size = ring->tx_pending;
ring             4131 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	port->rx_ring_size = ring->rx_pending;
ring             4132 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 	port->tx_ring_size = ring->tx_pending;
ring             4138 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		ring->rx_pending = prev_rx_ring_size;
ring             4147 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 		ring->tx_pending = prev_tx_ring_size;
ring              909 drivers/net/ethernet/marvell/skge.c static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
ring              915 drivers/net/ethernet/marvell/skge.c 	ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
ring              916 drivers/net/ethernet/marvell/skge.c 	if (!ring->start)
ring              919 drivers/net/ethernet/marvell/skge.c 	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
ring              921 drivers/net/ethernet/marvell/skge.c 		if (i == ring->count - 1) {
ring              922 drivers/net/ethernet/marvell/skge.c 			e->next = ring->start;
ring              929 drivers/net/ethernet/marvell/skge.c 	ring->to_use = ring->to_clean = ring->start;
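
skge_ring_alloc() above builds the ring as a circular singly-linked
list: element i points at element i + 1 and the last element points
back at ring->start, so consumers advance with e = e->next and never
compute a modular index. The traversal shape used throughout the driver
(the per-descriptor work is elided):

	struct skge_element *e = ring->start;

	do {
		/* ... inspect or refill the descriptor behind 'e' ... */
	} while ((e = e->next) != ring->start);
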
ring              984 drivers/net/ethernet/marvell/skge.c 	struct skge_ring *ring = &skge->rx_ring;
ring              987 drivers/net/ethernet/marvell/skge.c 	e = ring->start;
ring              999 drivers/net/ethernet/marvell/skge.c 	} while ((e = e->next) != ring->start);
ring             1009 drivers/net/ethernet/marvell/skge.c 	struct skge_ring *ring = &skge->rx_ring;
ring             1012 drivers/net/ethernet/marvell/skge.c 	e = ring->start;
ring             1026 drivers/net/ethernet/marvell/skge.c 	} while ((e = e->next) != ring->start);
ring             1028 drivers/net/ethernet/marvell/skge.c 	ring->to_clean = ring->start;
ring             2722 drivers/net/ethernet/marvell/skge.c static inline int skge_avail(const struct skge_ring *ring)
ring             2725 drivers/net/ethernet/marvell/skge.c 	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
ring             2726 drivers/net/ethernet/marvell/skge.c 		+ (ring->to_clean - ring->to_use) - 1;
ring             3149 drivers/net/ethernet/marvell/skge.c 	struct skge_ring *ring = &skge->tx_ring;
ring             3155 drivers/net/ethernet/marvell/skge.c 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
ring             3197 drivers/net/ethernet/marvell/skge.c 	struct skge_ring *ring = &skge->rx_ring;
ring             3205 drivers/net/ethernet/marvell/skge.c 	for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
ring             3221 drivers/net/ethernet/marvell/skge.c 	ring->to_clean = e;
ring              821 drivers/net/ethernet/mediatek/mtk_eth_soc.c static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
ring              823 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	void *ret = ring->dma;
ring              825 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	return ret + (desc - ring->phys);
ring              828 drivers/net/ethernet/mediatek/mtk_eth_soc.c static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
ring              831 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	int idx = txd - ring->dma;
ring              833 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	return &ring->buf[idx];
ring              836 drivers/net/ethernet/mediatek/mtk_eth_soc.c static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
ring              839 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	return ring->dma_pdma - ring->dma + dma;
ring              842 drivers/net/ethernet/mediatek/mtk_eth_soc.c static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
ring              844 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
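
The three mtk_eth_soc.c helpers above convert between the hardware's
DMA addresses and the CPU's view of the same descriptor array. Worked
example, assuming the four-word (16-byte) QDMA descriptor
(txd1..txd4): with ring->phys = 0x40000000, a hardware pointer
desc = 0x40000030 is 0x30 bytes past the base, so
mtk_qdma_phys_to_virt() returns &ring->dma[3], txd_to_idx() returns
0x30 / 16 = 3, and qdma_to_pdma() returns the matching slot
&ring->dma_pdma[3] in the parallel PDMA array.
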
ring              908 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		      int tx_num, struct mtk_tx_ring *ring, bool gso)
ring              921 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	itxd = ring->next_free;
ring              922 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	itxd_pdma = qdma_to_pdma(ring, itxd);
ring              923 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (itxd == ring->last_free)
ring              930 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
ring              958 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	txd_pdma = qdma_to_pdma(ring, txd);
ring              973 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
ring              974 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				txd_pdma = qdma_to_pdma(ring, txd);
ring              975 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				if (txd == ring->last_free)
ring             1001 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
ring             1033 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
ring             1034 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	atomic_sub(n_desc, &ring->free_count);
ring             1046 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
ring             1047 drivers/net/ethernet/mediatek/mtk_eth_soc.c 					     ring->dma_size);
ring             1055 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
ring             1064 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
ring             1065 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		itxd_pdma = qdma_to_pdma(ring, itxd);
ring             1130 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1145 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
ring             1168 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
ring             1171 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
ring             1188 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_rx_ring *ring;
ring             1195 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring = &eth->rx_ring[i];
ring             1196 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
ring             1197 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
ring             1198 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->calc_idx_update = true;
ring             1199 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			return ring;
ring             1208 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_rx_ring *ring;
ring             1212 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring = &eth->rx_ring[0];
ring             1213 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
ring             1216 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring = &eth->rx_ring[i];
ring             1217 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			if (ring->calc_idx_update) {
ring             1218 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				ring->calc_idx_update = false;
ring             1219 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
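
mtk_update_rx_cpu_idx() above (mtk_eth_soc.c:1208-1219) shows the
deferred doorbell strategy on the RX side: per-frame processing only
sets ring->calc_idx_update (line 1198), and the CRX index register is
written once per pass, coalescing MMIO writes across all frames handled
in one NAPI poll.
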
ring             1228 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_rx_ring *ring;
ring             1241 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring = mtk_get_rx_ring(eth);
ring             1242 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (unlikely(!ring))
ring             1245 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
ring             1246 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		rxd = &ring->dma[idx];
ring             1247 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		data = ring->data[idx];
ring             1272 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		new_data = napi_alloc_frag(ring->frag_size);
ring             1280 drivers/net/ethernet/mediatek/mtk_eth_soc.c 					  ring->buf_size,
ring             1289 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		skb = build_skb(data, ring->frag_size);
ring             1298 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				 ring->buf_size, DMA_FROM_DEVICE);
ring             1315 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->data[idx] = new_data;
ring             1322 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring             1324 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->calc_idx = idx;
ring             1344 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1353 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	desc = mtk_qdma_phys_to_virt(ring, cpu);
ring             1359 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
ring             1363 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
ring             1378 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->last_free = desc;
ring             1379 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		atomic_inc(&ring->free_count);
ring             1392 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1398 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	cpu = ring->cpu_idx;
ring             1402 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		tx_buf = &ring->buf[cpu];
ring             1415 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		desc = &ring->dma[cpu];
ring             1416 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->last_free = desc;
ring             1417 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		atomic_inc(&ring->free_count);
ring             1419 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
ring             1422 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->cpu_idx = cpu;
ring             1429 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1450 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	    (atomic_read(&ring->free_count) > ring->thresh))
ring             1535 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1536 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	int i, sz = sizeof(*ring->dma);
ring             1538 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
ring             1540 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (!ring->buf)
ring             1543 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
ring             1544 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				       &ring->phys, GFP_ATOMIC);
ring             1545 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (!ring->dma)
ring             1550 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		u32 next_ptr = ring->phys + next * sz;
ring             1552 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma[i].txd2 = next_ptr;
ring             1553 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
ring             1561 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
ring             1562 drivers/net/ethernet/mediatek/mtk_eth_soc.c 						    &ring->phys_pdma,
ring             1564 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (!ring->dma_pdma)
ring             1568 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring             1569 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->dma_pdma[i].txd4 = 0;
ring             1573 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->dma_size = MTK_DMA_SIZE;
ring             1574 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring             1575 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->next_free = &ring->dma[0];
ring             1576 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
ring             1577 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->thresh = MAX_SKB_FRAGS;
ring             1585 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
ring             1586 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
ring             1588 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
ring             1591 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
ring             1596 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
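
mtk_tx_alloc() above chains the coherent descriptor array into a
hardware-walkable ring: each txd2 holds the DMA address of the next
descriptor, wrapping at MTK_DMA_SIZE. A sketch of the loop behind
lines 1550-1553, with the wrap made explicit:

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}
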
ring             1610 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_tx_ring *ring = &eth->tx_ring;
ring             1613 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (ring->buf) {
ring             1615 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			mtk_tx_unmap(eth, &ring->buf[i]);
ring             1616 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		kfree(ring->buf);
ring             1617 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->buf = NULL;
ring             1620 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (ring->dma) {
ring             1622 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  MTK_DMA_SIZE * sizeof(*ring->dma),
ring             1623 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->dma,
ring             1624 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->phys);
ring             1625 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma = NULL;
ring             1628 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (ring->dma_pdma) {
ring             1630 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
ring             1631 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->dma_pdma,
ring             1632 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->phys_pdma);
ring             1633 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma_pdma = NULL;
ring             1639 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	struct mtk_rx_ring *ring;
ring             1647 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring = &eth->rx_ring_qdma;
ring             1650 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring = &eth->rx_ring[ring_no];
ring             1661 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->frag_size = mtk_max_frag_size(rx_data_len);
ring             1662 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->buf_size = mtk_max_buf_size(ring->frag_size);
ring             1663 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
ring             1665 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (!ring->data)
ring             1669 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->data[i] = netdev_alloc_frag(ring->frag_size);
ring             1670 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		if (!ring->data[i])
ring             1674 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->dma = dma_alloc_coherent(eth->dev,
ring             1675 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				       rx_dma_size * sizeof(*ring->dma),
ring             1676 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				       &ring->phys, GFP_ATOMIC);
ring             1677 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (!ring->dma)
ring             1682 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring             1683 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				ring->buf_size,
ring             1687 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
ring             1690 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->dma[i].rxd2 = RX_DMA_LSO;
ring             1692 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring             1694 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->dma_size = rx_dma_size;
ring             1695 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->calc_idx_update = false;
ring             1696 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->calc_idx = rx_dma_size - 1;
ring             1697 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
ring             1703 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
ring             1705 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
ring             1711 drivers/net/ethernet/mediatek/mtk_eth_soc.c static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
ring             1715 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (ring->data && ring->dma) {
ring             1716 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		for (i = 0; i < ring->dma_size; i++) {
ring             1717 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			if (!ring->data[i])
ring             1719 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			if (!ring->dma[i].rxd1)
ring             1722 drivers/net/ethernet/mediatek/mtk_eth_soc.c 					 ring->dma[i].rxd1,
ring             1723 drivers/net/ethernet/mediatek/mtk_eth_soc.c 					 ring->buf_size,
ring             1725 drivers/net/ethernet/mediatek/mtk_eth_soc.c 			skb_free_frag(ring->data[i]);
ring             1727 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		kfree(ring->data);
ring             1728 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->data = NULL;
ring             1731 drivers/net/ethernet/mediatek/mtk_eth_soc.c 	if (ring->dma) {
ring             1733 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->dma_size * sizeof(*ring->dma),
ring             1734 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->dma,
ring             1735 drivers/net/ethernet/mediatek/mtk_eth_soc.c 				  ring->phys);
ring             1736 drivers/net/ethernet/mediatek/mtk_eth_soc.c 		ring->dma = NULL;
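mtk_tx_clean() and mtk_rx_clean() release resources in the reverse order of allocation: per-slot buffers first, then the pointer array, then the coherent descriptor block, with every pointer NULLed so a second clean is harmless. A userspace model of the same discipline, with malloc()/free() standing in for netdev_alloc_frag() and dma_alloc_coherent():

    #include <stdlib.h>

    #define RING_SIZE 8

    struct rx_ring {
        void **data;    /* per-slot receive buffers */
        void  *dma;     /* stand-in for the coherent descriptor array */
    };

    static int rx_alloc(struct rx_ring *r)
    {
        r->data = calloc(RING_SIZE, sizeof(*r->data));
        if (!r->data)
            return -1;
        for (int i = 0; i < RING_SIZE; i++) {
            r->data[i] = malloc(2048);  /* netdev_alloc_frag() stand-in */
            if (!r->data[i])
                goto err;
        }
        r->dma = malloc(RING_SIZE * 16);    /* dma_alloc_coherent() stand-in */
        if (!r->dma)
            goto err;
        return 0;
    err:
        for (int i = 0; i < RING_SIZE; i++)
            free(r->data[i]);       /* calloc zeroed the tail: free(NULL) ok */
        free(r->data);
        r->data = NULL;
        return -1;
    }

    static void rx_clean(struct rx_ring *r)
    {
        if (r->data) {
            for (int i = 0; i < RING_SIZE; i++)
                free(r->data[i]);   /* driver: dma_unmap + skb_free_frag */
            free(r->data);
            r->data = NULL;         /* NULLing makes a second clean a no-op */
        }
        free(r->dma);               /* driver: dma_free_coherent */
        r->dma = NULL;
    }

    int main(void)
    {
        struct rx_ring r = { 0 };

        if (rx_alloc(&r) == 0)
            rx_clean(&r);
        return 0;
    }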
ring               48 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		      int entries, int ring, enum cq_type mode,
ring               64 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	cq->ring = ring;
ring              107 drivers/net/ethernet/mellanox/mlx4/en_cq.c 			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
ring              134 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		cq->size = priv->rx_ring[cq->ring]->actual_size;
ring             1461 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int ring, err;
ring             1469 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
ring             1470 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
ring             1471 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
ring             1473 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
ring             1477 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				priv->last_moder_bytes[ring]) / packets : 0;
ring             1496 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		cq = priv->rx_cq[ring];
ring             1497 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (moder_time != priv->last_moder_time[ring] ||
ring             1499 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			priv->last_moder_time[ring] = moder_time;
ring             1505 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				       ring);
ring             1507 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->last_moder_packets[ring] = rx_packets;
ring             1508 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		priv->last_moder_bytes[ring] = rx_bytes;
ring             1588 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
ring             1591 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
ring             1595 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			ring->affinity_mask);
ring              163 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
ring              165 drivers/net/ethernet/mellanox/mlx4/en_port.c 		packets += READ_ONCE(ring->packets);
ring              166 drivers/net/ethernet/mellanox/mlx4/en_port.c 		bytes   += READ_ONCE(ring->bytes);
ring              174 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
ring              176 drivers/net/ethernet/mellanox/mlx4/en_port.c 		packets += READ_ONCE(ring->packets);
ring              177 drivers/net/ethernet/mellanox/mlx4/en_port.c 		bytes   += READ_ONCE(ring->bytes);
ring              250 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
ring              252 drivers/net/ethernet/mellanox/mlx4/en_port.c 		sw_rx_dropped			+= READ_ONCE(ring->dropped);
ring              253 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
ring              254 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
ring              255 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
ring              256 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
ring              257 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->xdp_stats.rx_xdp_drop	+= READ_ONCE(ring->xdp_drop);
ring              258 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->xdp_stats.rx_xdp_tx	+= READ_ONCE(ring->xdp_tx);
ring              259 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->xdp_stats.rx_xdp_tx_full	+= READ_ONCE(ring->xdp_tx_full);
ring              268 drivers/net/ethernet/mellanox/mlx4/en_port.c 		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
ring              270 drivers/net/ethernet/mellanox/mlx4/en_port.c 		sw_tx_dropped			   += READ_ONCE(ring->tx_dropped);
ring              271 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
ring              272 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.queue_stopped     += READ_ONCE(ring->queue_stopped);
ring              273 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.wake_queue        += READ_ONCE(ring->wake_queue);
ring              274 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.tso_packets       += READ_ONCE(ring->tso_packets);
ring              275 drivers/net/ethernet/mellanox/mlx4/en_port.c 		priv->port_stats.xmit_more         += READ_ONCE(ring->xmit_more);
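The en_port.c accumulation loops read the per-ring counters with READ_ONCE() because the rings update them concurrently and no lock is taken. A sketch of the same pattern, with a volatile-cast stand-in for the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Volatile-cast stand-in for the kernel's READ_ONCE(): forces one
     * real load per use so the compiler cannot cache or re-read the
     * counter while another CPU is updating it. (GCC typeof extension.) */
    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    struct ring_stats { uint64_t packets, bytes; };

    int main(void)
    {
        struct ring_stats rings[4] = {
            { 10, 1000 }, { 20, 2000 }, { 30, 3000 }, { 40, 4000 },
        };
        uint64_t packets = 0, bytes = 0;

        for (int i = 0; i < 4; i++) {
            packets += READ_ONCE(rings[i].packets);
            bytes   += READ_ONCE(rings[i].bytes);
        }
        printf("%llu packets, %llu bytes\n",
               (unsigned long long)packets, (unsigned long long)bytes);
        return 0;
    }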
ring               75 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			       struct mlx4_en_rx_ring *ring,
ring               86 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->rx_alloc_pages++;
ring              109 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				 struct mlx4_en_rx_ring *ring, int index)
ring              111 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
ring              125 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
ring              134 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				   struct mlx4_en_rx_ring *ring, int index,
ring              137 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_desc *rx_desc = ring->buf +
ring              138 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		(index << ring->log_stride);
ring              139 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_alloc *frags = ring->rx_info +
ring              141 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (likely(ring->page_cache.index > 0)) {
ring              144 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->page_cache.index--;
ring              145 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
ring              146 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
ring              154 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
ring              157 drivers/net/ethernet/mellanox/mlx4/en_rx.c static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
ring              159 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	return ring->prod == ring->cons;
ring              162 drivers/net/ethernet/mellanox/mlx4/en_rx.c static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
ring              164 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
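mlx4_en_update_rx_prod_db() publishes only the low 16 bits of the free-running producer counter, byte-swapped for the device; the masking is visible on the line above. A small model of that doorbell encoding, with htonl() as a cpu_to_be32() stand-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>      /* htonl() as a cpu_to_be32() stand-in */

    int main(void)
    {
        uint32_t prod = 0x12345;        /* free-running producer counter */
        volatile uint32_t doorbell;     /* stand-in for *ring->wqres.db.db */

        /* The device consumes only the low 16 bits, big-endian. */
        doorbell = htonl(prod & 0xffff);
        printf("doorbell = 0x%08x\n", (unsigned)doorbell);
        return 0;
    }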
ring              169 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				 struct mlx4_en_rx_ring *ring,
ring              175 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	frags = ring->rx_info + (index << priv->log_rx_info);
ring              185 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_ring *ring;
ring              192 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring = priv->rx_ring[ring_ind];
ring              194 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			if (mlx4_en_prepare_rx_desc(priv, ring,
ring              195 drivers/net/ethernet/mellanox/mlx4/en_rx.c 						    ring->actual_size,
ring              197 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
ring              201 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					new_size = rounddown_pow_of_two(ring->actual_size);
ring              203 drivers/net/ethernet/mellanox/mlx4/en_rx.c 						ring->actual_size, new_size);
ring              207 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->actual_size++;
ring              208 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->prod++;
ring              215 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring = priv->rx_ring[ring_ind];
ring              216 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		while (ring->actual_size > new_size) {
ring              217 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->actual_size--;
ring              218 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->prod--;
ring              219 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
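When the initial fill runs out of memory above MLX4_EN_MIN_RX_SIZE, every ring is shrunk to rounddown_pow_of_two(actual_size) so the size_mask indexing stays valid. A sketch of that rounding (rounddown_pow2() below is a stand-in for the kernel helper):

    #include <stdio.h>

    /* Largest power of two <= x, for x > 0: stand-in for the kernel's
     * rounddown_pow_of_two(). */
    static unsigned rounddown_pow2(unsigned x)
    {
        while (x & (x - 1))
            x &= x - 1;     /* clear lowest set bit until one remains */
        return x;
    }

    int main(void)
    {
        unsigned actual_size = 1000;    /* descriptors filled before -ENOMEM */
        unsigned new_size = rounddown_pow2(actual_size);

        printf("ring reduced from %u to %u entries (size_mask 0x%x)\n",
               actual_size, new_size, new_size - 1);
        return 0;
    }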
ring              227 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				struct mlx4_en_rx_ring *ring)
ring              232 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	       ring->cons, ring->prod);
ring              235 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	for (index = 0; index < ring->size; index++) {
ring              237 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_en_free_rx_desc(priv, ring, index);
ring              239 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->cons = 0;
ring              240 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->prod = 0;
ring              268 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_ring *ring;
ring              272 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
ring              273 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (!ring) {
ring              278 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->prod = 0;
ring              279 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->cons = 0;
ring              280 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->size = size;
ring              281 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->size_mask = size - 1;
ring              282 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->stride = stride;
ring              283 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->log_stride = ffs(ring->stride) - 1;
ring              284 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
ring              286 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
ring              291 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
ring              292 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (!ring->rx_info) {
ring              298 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		 ring->rx_info, tmp);
ring              302 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
ring              307 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->buf = ring->wqres.buf.direct.buf;
ring              309 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
ring              311 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	*pring = ring;
ring              315 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	kvfree(ring->rx_info);
ring              316 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->rx_info = NULL;
ring              318 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	xdp_rxq_info_unreg(&ring->xdp_rxq);
ring              320 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	kfree(ring);
ring              328 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_ring *ring;
ring              336 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring = priv->rx_ring[ring_ind];
ring              338 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->prod = 0;
ring              339 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->cons = 0;
ring              340 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->actual_size = 0;
ring              341 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring              343 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->stride = stride;
ring              344 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (ring->stride <= TXBB_SIZE) {
ring              346 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			__be32 *ptr = (__be32 *)ring->buf;
ring              350 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->buf += TXBB_SIZE;
ring              353 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->log_stride = ffs(ring->stride) - 1;
ring              354 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->buf_size = ring->size * ring->stride;
ring              356 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		memset(ring->buf, 0, ring->buf_size);
ring              357 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_en_update_rx_prod_db(ring);
ring              360 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		for (i = 0; i < ring->size; i++)
ring              361 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			mlx4_en_init_rx_desc(priv, ring, i);
ring              368 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring = priv->rx_ring[ring_ind];
ring              370 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->size_mask = ring->actual_size - 1;
ring              371 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_en_update_rx_prod_db(ring);
ring              395 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	int ring;
ring              400 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
ring              401 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
ring              403 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			napi_reschedule(&priv->rx_cq[ring]->napi);
ring              415 drivers/net/ethernet/mellanox/mlx4/en_rx.c bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
ring              418 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_page_cache *cache = &ring->page_cache;
ring              434 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_ring *ring = *pring;
ring              438 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					ring->xdp_prog,
ring              442 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	xdp_rxq_info_unreg(&ring->xdp_rxq);
ring              443 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
ring              444 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	kvfree(ring->rx_info);
ring              445 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->rx_info = NULL;
ring              446 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	kfree(ring);
ring              451 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				struct mlx4_en_rx_ring *ring)
ring              455 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	for (i = 0; i < ring->page_cache.index; i++) {
ring              456 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
ring              458 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		put_page(ring->page_cache.buf[i].page);
ring              460 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring->page_cache.index = 0;
ring              461 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_en_free_rx_buf(priv, ring);
ring              462 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (ring->stride <= TXBB_SIZE)
ring              463 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->buf -= TXBB_SIZE;
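mlx4_en_rx_recycle() and the cache drain in mlx4_en_deactivate_rx_ring() above work a small per-ring LIFO of pages: the refill path pops from page_cache before touching the allocator, recycle pushes until the cache is full, and deactivation releases whatever is left. A minimal model (the fixed depth here is a hypothetical stand-in):

    #include <stdio.h>

    #define CACHE_SIZE 8    /* hypothetical depth; the driver's is fixed too */

    struct page_cache {
        unsigned index;
        void *buf[CACHE_SIZE];
    };

    /* Recycle path: push a completed page, or report "cache full" so the
     * caller releases the page instead. */
    static int cache_put(struct page_cache *c, void *page)
    {
        if (c->index >= CACHE_SIZE)
            return 0;
        c->buf[c->index++] = page;
        return 1;
    }

    /* Refill path: pop the most recently pushed page, LIFO order. */
    static void *cache_get(struct page_cache *c)
    {
        return c->index ? c->buf[--c->index] : NULL;
    }

    int main(void)
    {
        struct page_cache c = { 0 };
        int a, b;

        cache_put(&c, &a);
        cache_put(&c, &b);
        printf("popped %s first\n",
               cache_get(&c) == &b ? "most recent page" : "oldest page");
        return 0;
    }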
ring              548 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				      struct mlx4_en_rx_ring *ring)
ring              550 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	u32 missing = ring->actual_size - (ring->prod - ring->cons);
ring              556 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (mlx4_en_prepare_rx_desc(priv, ring,
ring              557 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					    ring->prod & ring->size_mask,
ring              560 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->prod++;
ring              563 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_en_update_rx_prod_db(ring);
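In mlx4_en_refill_rx_buffers() above, ring->prod and ring->cons are free-running u32 counters, so actual_size - (prod - cons) yields the number of missing descriptors correctly even after the counters wrap. A demonstration with a producer that has already wrapped past UINT32_MAX:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t actual_size = 1024;
        /* Free-running counters: cons has not yet wrapped, prod has. */
        uint32_t prod = 0x00000005, cons = 0xfffffff0;

        /* Unsigned subtraction is wrap-safe: 0x05 - 0xfffffff0 == 0x15. */
        uint32_t filled  = prod - cons;
        uint32_t missing = actual_size - filled;

        printf("filled=%u missing=%u\n", (unsigned)filled, (unsigned)missing);
        return 0;
    }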
ring              668 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_rx_ring *ring;
ring              670 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	int cq_ring = cq->ring;
ring              680 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	ring = priv->rx_ring[cq_ring];
ring              684 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	xdp_prog = rcu_dereference(ring->xdp_prog);
ring              685 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	xdp.rxq = &ring->xdp_rxq;
ring              691 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	index = cq->mcq.cons_index & ring->size_mask;
ring              705 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		frags = ring->rx_info + (index << priv->log_rx_info);
ring              764 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		length -= ring->fcs_del;
ring              798 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
ring              813 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				ring->xdp_drop++;
ring              819 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->bytes += length;
ring              820 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->packets++;
ring              826 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
ring              852 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				ring->csum_ok++;
ring              861 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				ring->csum_complete++;
ring              867 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->csum_none++;
ring              898 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		index = (cq->mcq.cons_index) & ring->size_mask;
ring              914 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->cons = cq->mcq.cons_index;
ring              918 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_en_refill_rx_buffers(priv, ring);
ring              946 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
ring             1057 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				 struct mlx4_en_rx_ring *ring,
ring             1077 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
ring             1078 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				qpn, ring->cqn, -1, context);
ring             1079 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
ring             1085 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->fcs_del = 0;
ring             1087 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			ring->fcs_del = ETH_FCS_LEN;
ring             1089 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		ring->fcs_del = 0;
ring             1091 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
ring             1096 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_en_update_rx_prod_db(ring);
ring               55 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring;
ring               59 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
ring               60 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (!ring) {
ring               65 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->size = size;
ring               66 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->size_mask = size - 1;
ring               67 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->sp_stride = stride;
ring               68 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
ring               71 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
ring               72 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (!ring->tx_info) {
ring               78 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		 ring->tx_info, tmp);
ring               80 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
ring               81 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (!ring->bounce_buf) {
ring               82 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
ring               83 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (!ring->bounce_buf) {
ring               88 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);
ring               92 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
ring               99 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->buf = ring->sp_wqres.buf.direct.buf;
ring              102 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	       ring, ring->buf, ring->size, ring->buf_size,
ring              103 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	       (unsigned long long) ring->sp_wqres.buf.direct.map);
ring              105 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
ring              113 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
ring              115 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
ring              118 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->sp_qp.event = mlx4_en_sqp_event;
ring              120 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
ring              123 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf.uar = &mdev->priv_uar;
ring              124 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf.uar->map = mdev->uar_map;
ring              125 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf_enabled = false;
ring              126 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf_alloced = false;
ring              129 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf_alloced = true;
ring              130 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf_enabled = !!(priv->pflags &
ring              134 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring              135 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->queue_index = queue_index;
ring              140 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				&ring->sp_affinity_mask);
ring              142 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	*pring = ring;
ring              146 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
ring              148 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
ring              150 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kfree(ring->bounce_buf);
ring              151 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->bounce_buf = NULL;
ring              153 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kvfree(ring->tx_info);
ring              154 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->tx_info = NULL;
ring              156 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kfree(ring);
ring              165 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring = *pring;
ring              166 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
ring              168 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (ring->bf_alloced)
ring              169 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		mlx4_bf_free(mdev->dev, &ring->bf);
ring              170 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_remove(mdev->dev, &ring->sp_qp);
ring              171 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_free(mdev->dev, &ring->sp_qp);
ring              172 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
ring              173 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
ring              174 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kfree(ring->bounce_buf);
ring              175 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->bounce_buf = NULL;
ring              176 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kvfree(ring->tx_info);
ring              177 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->tx_info = NULL;
ring              178 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	kfree(ring);
ring              183 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			     struct mlx4_en_tx_ring *ring,
ring              189 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->sp_cqn = cq;
ring              190 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->prod = 0;
ring              191 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->cons = 0xffffffff;
ring              192 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->last_nr_txbb = 1;
ring              193 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
ring              194 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	memset(ring->buf, 0, ring->buf_size);
ring              195 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->free_tx_desc = mlx4_en_free_tx_desc;
ring              197 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->sp_qp_state = MLX4_QP_STATE_RST;
ring              198 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8);
ring              199 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->mr_key = cpu_to_be32(mdev->mr.key);
ring              201 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn,
ring              202 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				ring->sp_cqn, user_prio, &ring->sp_context);
ring              203 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (ring->bf_alloced)
ring              204 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->sp_context.usr_page =
ring              206 drivers/net/ethernet/mellanox/mlx4/en_tx.c 							 ring->bf.uar->index));
ring              208 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
ring              209 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			       &ring->sp_qp, &ring->sp_qp_state);
ring              210 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (!cpumask_empty(&ring->sp_affinity_mask))
ring              211 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask,
ring              212 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				    ring->queue_index);
ring              218 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				struct mlx4_en_tx_ring *ring)
ring              222 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
ring              223 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp);
ring              226 drivers/net/ethernet/mellanox/mlx4/en_tx.c static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
ring              228 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	return ring->prod - ring->cons > ring->full_size;
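mlx4_en_is_tx_ring_full() trips while slots are still free: full_size was set at creation to size minus HEADROOM and MAX_DESC_TXBBS, so one more worst-case multi-TXBB descriptor always fits after the queue is stopped. A sketch with hypothetical values for those constants:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE       256
    #define HEADROOM        4   /* hypothetical values standing in for the */
    #define MAX_DESC_TXBBS  24  /* driver's constants */

    int main(void)
    {
        uint32_t full_size = RING_SIZE - HEADROOM - MAX_DESC_TXBBS;
        uint32_t prod = 0, cons = 0;
        bool stopped = false;

        /* Post TXBBs until the "full" test trips; the slack left behind
         * guarantees one more worst-case descriptor still fits. */
        while (!stopped) {
            prod++;
            stopped = prod - cons > full_size;
        }
        printf("stopped at %u TXBBs in flight, %u slots spare\n",
               (unsigned)(prod - cons), (unsigned)(RING_SIZE - (prod - cons)));
        return 0;
    }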
ring              232 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			      struct mlx4_en_tx_ring *ring, int index,
ring              236 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
ring              237 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
ring              238 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	void *end = ring->buf + ring->buf_size;
ring              258 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				ptr = ring->buf;
ring              266 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						   struct mlx4_en_tx_ring *ring,
ring              271 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			 struct mlx4_en_tx_ring *ring,
ring              275 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
ring              276 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
ring              278 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	void *end = ring->buf + ring->buf_size;
ring              318 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				data = ring->buf + ((void *)data - end);
ring              324 drivers/net/ethernet/mellanox/mlx4/en_tx.c 					data = ring->buf;
ring              338 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						      struct mlx4_en_tx_ring *ring,
ring              343 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			    struct mlx4_en_tx_ring *ring,
ring              347 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
ring              353 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
ring              362 drivers/net/ethernet/mellanox/mlx4/en_tx.c int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
ring              368 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->cons += ring->last_nr_txbb;
ring              370 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		 ring->cons, ring->prod);
ring              372 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if ((u32) (ring->prod - ring->cons) > ring->size) {
ring              378 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	while (ring->cons != ring->prod) {
ring              379 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring              380 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						ring->cons & ring->size_mask,
ring              382 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->cons += ring->last_nr_txbb;
ring              386 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (ring->tx_queue)
ring              387 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		netdev_tx_reset_queue(ring->tx_queue);
ring              400 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
ring              407 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	u32 size_mask = ring->size_mask;
ring              420 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
ring              424 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
ring              425 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring_cons = READ_ONCE(ring->cons);
ring              458 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			if (unlikely(ring->tx_info[ring_index].ts_requested))
ring              462 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
ring              465 drivers/net/ethernet/mellanox/mlx4/en_tx.c 					priv, ring, ring_index,
ring              468 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			mlx4_en_stamp_wqe(priv, ring, stamp_index,
ring              470 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						ring->size));
ring              474 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			bytes += ring->tx_info[ring_index].nr_bytes;
ring              491 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
ring              492 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
ring              497 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
ring              501 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (netif_tx_queue_stopped(ring->tx_queue) &&
ring              502 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	    !mlx4_en_is_tx_ring_full(ring)) {
ring              503 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		netif_tx_wake_queue(ring->tx_queue);
ring              504 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->wake_queue++;
ring              540 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						      struct mlx4_en_tx_ring *ring,
ring              544 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
ring              551 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		*((u32 *) (ring->buf + i)) =
ring              552 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			*((u32 *) (ring->bounce_buf + copy + i));
ring              559 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
ring              560 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			*((u32 *) (ring->bounce_buf + i));
ring              564 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	return ring->buf + (index << LOG_TXBB_SIZE);
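mlx4_en_bounce_to_desc() copies a descriptor that straddles the ring end back in two pieces: the bytes past the wrap land at the start of the ring, the first copy bytes at the original index. The real driver moves 32-bit words; memcpy() below is a simplification of the same split:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_BYTES 64
    #define TXBB       16   /* one TXBB; the driver shifts by LOG_TXBB_SIZE */

    int main(void)
    {
        uint8_t ring[RING_BYTES], bounce[2 * TXBB];
        unsigned index_bytes = RING_BYTES - TXBB; /* desc starts 1 TXBB from end */
        unsigned desc_size   = 2 * TXBB;          /* so one TXBB wraps */
        unsigned copy = RING_BYTES - index_bytes; /* bytes that fit before end */

        memset(bounce, 0xab, desc_size);          /* descriptor built off-ring */

        /* Wrapped tail first, landing at the start of the ring... */
        memcpy(ring, bounce + copy, desc_size - copy);
        /* ...then the head, at the descriptor's original index. */
        memcpy(ring + index_bytes, bounce, copy);

        printf("ring[0]=0x%02x ring[%u]=0x%02x\n",
               ring[0], index_bytes, ring[index_bytes]);
        return 0;
    }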
ring              717 drivers/net/ethernet/mellanox/mlx4/en_tx.c void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
ring              730 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		  (__force u32)ring->doorbell_qpn,
ring              731 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		  ring->bf.uar->map + MLX4_SEND_DOORBELL);
ring              734 drivers/net/ethernet/mellanox/mlx4/en_tx.c static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
ring              753 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
ring              758 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf.offset ^= ring->bf.buf_size;
ring              766 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			mlx4_en_xmit_doorbell(ring);
ring              768 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			ring->xmit_more++;
ring              841 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring;
ring              862 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring = priv->tx_ring[TX][tx_ind];
ring              868 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring_cons = READ_ONCE(ring->cons);
ring              884 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	bf_ok = ring->bf_enabled;
ring              899 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
ring              903 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			 (u32)(ring->prod - ring_cons - 1));
ring              906 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	index = ring->prod & ring->size_mask;
ring              907 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	bf_index = ring->prod;
ring              911 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (likely(index + nr_txbb <= ring->size))
ring              912 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
ring              914 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
ring              920 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_info = &ring->tx_info[index];
ring              946 drivers/net/ethernet/mellanox/mlx4/en_tx.c 					   lso_header_size, ring->mr_key,
ring              955 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
ring              970 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->tx_csum++;
ring              990 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			((ring->prod & ring->size) ?
ring             1001 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->tso_packets++;
ring             1005 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->packets += i;
ring             1009 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			((ring->prod & ring->size) ?
ring             1012 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->packets++;
ring             1014 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->bytes += tx_info->nr_bytes;
ring             1038 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->prod += nr_txbb;
ring             1042 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
ring             1047 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	stop_queue = mlx4_en_is_tx_ring_full(ring);
ring             1049 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		netif_tx_stop_queue(ring->tx_queue);
ring             1050 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->queue_stopped++;
ring             1053 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
ring             1062 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
ring             1066 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index,
ring             1078 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring_cons = READ_ONCE(ring->cons);
ring             1079 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
ring             1080 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			netif_tx_wake_queue(ring->tx_queue);
ring             1081 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			ring->wake_queue++;
ring             1087 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->tx_dropped++;
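The xmit fragments above show the classic stop/wake race closure: after stopping a full queue, the driver re-reads ring->cons and wakes the queue itself if a completion freed space in the meantime, because that completion may have observed the queue still running and skipped the wake. A single-threaded sketch of the interleaving:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FULL_SIZE 4

    static atomic_uint prod, cons;
    static atomic_bool queue_stopped;

    static bool ring_full(void)
    {
        return atomic_load(&prod) - atomic_load(&cons) > FULL_SIZE;
    }

    int main(void)
    {
        atomic_store(&prod, FULL_SIZE + 1);     /* ring just became full */
        atomic_store(&cons, 0);

        if (ring_full()) {
            atomic_store(&queue_stopped, true); /* netif_tx_stop_queue() */
            /* A completion on another CPU frees slots right here... */
            atomic_fetch_add(&cons, 2);
            /* ...so re-reading cons and re-testing avoids a stalled queue:
             * that completion may have seen the queue still running. */
            if (!ring_full()) {
                atomic_store(&queue_stopped, false);  /* self-wake */
                puts("woke queue: completion raced with stop");
            }
        }
        return 0;
    }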
ring             1098 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				    struct mlx4_en_tx_ring *ring)
ring             1102 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	for (i = 0; i < ring->size; i++) {
ring             1103 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
ring             1104 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		struct mlx4_en_tx_desc *tx_desc = ring->buf +
ring             1115 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		tx_desc->data.lkey = ring->mr_key;
ring             1129 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_tx_ring *ring;
ring             1137 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring = priv->tx_ring[TX_XDP][tx_ind];
ring             1139 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	if (unlikely(mlx4_en_is_tx_ring_full(ring)))
ring             1142 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	index = ring->prod & ring->size_mask;
ring             1143 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_info = &ring->tx_info[index];
ring             1147 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			 (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
ring             1149 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
ring             1169 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		((ring->prod & ring->size) ?
ring             1175 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->prod += MLX4_EN_XDP_TX_NRTXBB;
ring             1182 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->xmit_more++;
ring              285 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 						struct mlx4_en_tx_ring *ring,
ring              368 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 	int                     ring;
ring              691 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		      int entries, int ring, enum cq_type mode, int node);
ring              707 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
ring              708 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
ring              718 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 				    struct mlx4_en_tx_ring *ring);
ring              720 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 			     struct mlx4_en_tx_ring *ring,
ring              723 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 				struct mlx4_en_tx_ring *ring);
ring              734 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 				struct mlx4_en_rx_ring *ring);
ring              743 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 			 struct mlx4_en_tx_ring *ring,
ring              747 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 			    struct mlx4_en_tx_ring *ring,
ring              761 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
ring             1000 drivers/net/ethernet/micrel/ksz884x.c 	struct ksz_desc *ring;
ring             1608 drivers/net/ethernet/micrel/ksz884x.c 	*desc = &info->ring[info->last];
ring             1628 drivers/net/ethernet/micrel/ksz884x.c 	*desc = &info->ring[info->next];
ring             3831 drivers/net/ethernet/micrel/ksz884x.c 	struct ksz_desc *cur = desc_info->ring;
ring             3847 drivers/net/ethernet/micrel/ksz884x.c 	desc_info->cur = desc_info->ring;
ring             3867 drivers/net/ethernet/micrel/ksz884x.c 	info->cur = info->ring;
ring             4364 drivers/net/ethernet/micrel/ksz884x.c 	desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
ring             4366 drivers/net/ethernet/micrel/ksz884x.c 	if (!desc_info->ring)
ring             4556 drivers/net/ethernet/micrel/ksz884x.c 	kfree(hw->rx_desc_info.ring);
ring             4557 drivers/net/ethernet/micrel/ksz884x.c 	hw->rx_desc_info.ring = NULL;
ring             4558 drivers/net/ethernet/micrel/ksz884x.c 	kfree(hw->tx_desc_info.ring);
ring             4559 drivers/net/ethernet/micrel/ksz884x.c 	hw->tx_desc_info.ring = NULL;
ring             4574 drivers/net/ethernet/micrel/ksz884x.c 	struct ksz_desc *desc = desc_info->ring;
ring             4749 drivers/net/ethernet/micrel/ksz884x.c 		desc = &info->ring[last];
ring             5044 drivers/net/ethernet/micrel/ksz884x.c 		desc = &info->ring[next];
ring             5080 drivers/net/ethernet/micrel/ksz884x.c 		desc = &info->ring[next];
ring             5125 drivers/net/ethernet/micrel/ksz884x.c 		desc = &info->ring[next];
ring             6404 drivers/net/ethernet/micrel/ksz884x.c 	struct ethtool_ringparam *ring)
ring             6410 drivers/net/ethernet/micrel/ksz884x.c 	ring->tx_max_pending = (1 << 9);
ring             6411 drivers/net/ethernet/micrel/ksz884x.c 	ring->tx_pending = hw->tx_desc_info.alloc;
ring             6412 drivers/net/ethernet/micrel/ksz884x.c 	ring->rx_max_pending = (1 << 9);
ring             6413 drivers/net/ethernet/micrel/ksz884x.c 	ring->rx_pending = hw->rx_desc_info.alloc;
ring             1713 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 		       struct ethtool_ringparam *ring)
ring             1717 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
ring             1718 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
ring             1719 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_jumbo_max_pending = 0;
ring             1720 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
ring             1721 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_mini_pending = ring->rx_mini_max_pending;
ring             1722 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_pending = ring->rx_max_pending;
ring             1723 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
ring             1724 drivers/net/ethernet/myricom/myri10ge/myri10ge.c 	ring->tx_pending = ring->tx_max_pending;
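Both get_ringparam callbacks above (ksz884x and myri10ge) just fill struct ethtool_ringparam with current and maximum ring depths; userspace reads the same data through the SIOCETHTOOL ioctl. A small query program against that UAPI:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(int argc, char **argv)
    {
        const char *ifname = argc > 1 ? argv[1] : "eth0";
        struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ring;

        /* The kernel routes this to the driver's get_ringparam callback. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("rx %u/%u tx %u/%u (pending/max)\n",
                   ring.rx_pending, ring.rx_max_pending,
                   ring.tx_pending, ring.tx_max_pending);
        close(fd);
        return 0;
    }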
ring              246 drivers/net/ethernet/neterion/s2io-regs.h #define RX_MAT_SET(ring, msi)			vBIT(msi, (8 * ring), 8)
ring              698 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring              707 drivers/net/ethernet/neterion/s2io.c 		ring->block_count = rx_cfg->num_rxd /
ring              709 drivers/net/ethernet/neterion/s2io.c 		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
ring              718 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring              720 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_get_info.block_index = 0;
ring              721 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_get_info.offset = 0;
ring              722 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
ring              723 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.block_index = 0;
ring              724 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.offset = 0;
ring              725 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
ring              726 drivers/net/ethernet/neterion/s2io.c 		ring->nic = nic;
ring              727 drivers/net/ethernet/neterion/s2io.c 		ring->ring_no = i;
ring              735 drivers/net/ethernet/neterion/s2io.c 			rx_blocks = &ring->rx_blocks[j];
ring              771 drivers/net/ethernet/neterion/s2io.c 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
ring              772 drivers/net/ethernet/neterion/s2io.c 			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
ring              773 drivers/net/ethernet/neterion/s2io.c 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
ring              774 drivers/net/ethernet/neterion/s2io.c 			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
ring              790 drivers/net/ethernet/neterion/s2io.c 			struct ring_info *ring = &mac_control->rings[i];
ring              795 drivers/net/ethernet/neterion/s2io.c 			ring->ba = kmalloc(size, GFP_KERNEL);
ring              796 drivers/net/ethernet/neterion/s2io.c 			if (!ring->ba)
ring              804 drivers/net/ethernet/neterion/s2io.c 				ring->ba[j] = kmalloc(size, GFP_KERNEL);
ring              805 drivers/net/ethernet/neterion/s2io.c 				if (!ring->ba[j])
ring              809 drivers/net/ethernet/neterion/s2io.c 					ba = &ring->ba[j][k];
ring              934 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring              936 drivers/net/ethernet/neterion/s2io.c 		blk_cnt = ring->block_count;
ring              938 drivers/net/ethernet/neterion/s2io.c 			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
ring              939 drivers/net/ethernet/neterion/s2io.c 			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
ring              945 drivers/net/ethernet/neterion/s2io.c 			kfree(ring->rx_blocks[j].rxds);
ring              955 drivers/net/ethernet/neterion/s2io.c 			struct ring_info *ring = &mac_control->rings[i];
ring              961 drivers/net/ethernet/neterion/s2io.c 				if (!ring->ba[j])
ring              964 drivers/net/ethernet/neterion/s2io.c 					struct buffAdd *ba = &ring->ba[j][k];
ring              973 drivers/net/ethernet/neterion/s2io.c 				kfree(ring->ba[j]);
ring              977 drivers/net/ethernet/neterion/s2io.c 			kfree(ring->ba);
ring             2213 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             2215 drivers/net/ethernet/neterion/s2io.c 		writeq((u64)ring->rx_blocks[0].block_dma_addr,
ring             2441 drivers/net/ethernet/neterion/s2io.c static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
ring             2455 drivers/net/ethernet/neterion/s2io.c 	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
ring             2457 drivers/net/ethernet/neterion/s2io.c 	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
ring             2459 drivers/net/ethernet/neterion/s2io.c 	block_no1 = ring->rx_curr_get_info.block_index;
ring             2461 drivers/net/ethernet/neterion/s2io.c 		block_no = ring->rx_curr_put_info.block_index;
ring             2463 drivers/net/ethernet/neterion/s2io.c 		off = ring->rx_curr_put_info.offset;
ring             2465 drivers/net/ethernet/neterion/s2io.c 		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
ring             2468 drivers/net/ethernet/neterion/s2io.c 		    (off == ring->rx_curr_get_info.offset) &&
ring             2471 drivers/net/ethernet/neterion/s2io.c 				  ring->dev->name);
ring             2474 drivers/net/ethernet/neterion/s2io.c 		if (off && (off == ring->rxd_count)) {
ring             2475 drivers/net/ethernet/neterion/s2io.c 			ring->rx_curr_put_info.block_index++;
ring             2476 drivers/net/ethernet/neterion/s2io.c 			if (ring->rx_curr_put_info.block_index ==
ring             2477 drivers/net/ethernet/neterion/s2io.c 			    ring->block_count)
ring             2478 drivers/net/ethernet/neterion/s2io.c 				ring->rx_curr_put_info.block_index = 0;
ring             2479 drivers/net/ethernet/neterion/s2io.c 			block_no = ring->rx_curr_put_info.block_index;
ring             2481 drivers/net/ethernet/neterion/s2io.c 			ring->rx_curr_put_info.offset = off;
ring             2482 drivers/net/ethernet/neterion/s2io.c 			rxdp = ring->rx_blocks[block_no].block_virt_addr;
ring             2484 drivers/net/ethernet/neterion/s2io.c 				  ring->dev->name, rxdp);
ring             2489 drivers/net/ethernet/neterion/s2io.c 		    ((ring->rxd_mode == RXD_MODE_3B) &&
ring             2491 drivers/net/ethernet/neterion/s2io.c 			ring->rx_curr_put_info.offset = off;
ring             2495 drivers/net/ethernet/neterion/s2io.c 		size = ring->mtu +
ring             2498 drivers/net/ethernet/neterion/s2io.c 		if (ring->rxd_mode == RXD_MODE_1)
ring             2501 drivers/net/ethernet/neterion/s2io.c 			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
ring             2507 drivers/net/ethernet/neterion/s2io.c 				  ring->dev->name);
ring             2518 drivers/net/ethernet/neterion/s2io.c 		if (ring->rxd_mode == RXD_MODE_1) {
ring             2524 drivers/net/ethernet/neterion/s2io.c 				pci_map_single(ring->pdev, skb->data,
ring             2534 drivers/net/ethernet/neterion/s2io.c 		} else if (ring->rxd_mode == RXD_MODE_3B) {
ring             2550 drivers/net/ethernet/neterion/s2io.c 			ba = &ring->ba[block_no][off];
ring             2560 drivers/net/ethernet/neterion/s2io.c 					pci_map_single(ring->pdev, ba->ba_0,
ring             2567 drivers/net/ethernet/neterion/s2io.c 				pci_dma_sync_single_for_device(ring->pdev,
ring             2573 drivers/net/ethernet/neterion/s2io.c 			if (ring->rxd_mode == RXD_MODE_3B) {
ring             2580 drivers/net/ethernet/neterion/s2io.c 				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
ring             2582 drivers/net/ethernet/neterion/s2io.c 								    ring->mtu + 4,
ring             2591 drivers/net/ethernet/neterion/s2io.c 						pci_map_single(ring->pdev,
ring             2598 drivers/net/ethernet/neterion/s2io.c 						pci_unmap_single(ring->pdev,
ring             2601 drivers/net/ethernet/neterion/s2io.c 								 ring->mtu + 4,
ring             2608 drivers/net/ethernet/neterion/s2io.c 					(ring->mtu + 4);
ring             2616 drivers/net/ethernet/neterion/s2io.c 		if (off == (ring->rxd_count + 1))
ring             2618 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.offset = off;
ring             2628 drivers/net/ethernet/neterion/s2io.c 		ring->rx_bufs_left += 1;
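s2io addresses an rx descriptor by a (block_index, offset) pair rather than one flat index: offset wraps at the per-block descriptor count, and block_index wraps at block_count. A simplified walk of that two-level index (the exact off == rxd_count versus rxd_count + 1 boundary handling in fill_rx_buffers is glossed over):

    #include <stdio.h>

    #define BLOCK_COUNT 3
    #define RXD_COUNT   4   /* descriptors per block (stand-in values) */

    int main(void)
    {
        unsigned block = 0, off = 0;

        /* Two full laps plus a little, to show both wrap points. */
        for (int n = 0; n < 2 * BLOCK_COUNT * RXD_COUNT + 2; n++) {
            printf("rxd at block %u, offset %u\n", block, off);
            if (++off == RXD_COUNT) {           /* crossed a block boundary */
                off = 0;
                if (++block == BLOCK_COUNT)     /* wrap the block index */
                    block = 0;
            }
        }
        return 0;
    }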
ring             2717 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             2722 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.block_index = 0;
ring             2723 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_get_info.block_index = 0;
ring             2724 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_put_info.offset = 0;
ring             2725 drivers/net/ethernet/neterion/s2io.c 		ring->rx_curr_get_info.offset = 0;
ring             2726 drivers/net/ethernet/neterion/s2io.c 		ring->rx_bufs_left = 0;
ring             2732 drivers/net/ethernet/neterion/s2io.c static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
ring             2734 drivers/net/ethernet/neterion/s2io.c 	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
ring             2736 drivers/net/ethernet/neterion/s2io.c 			  ring->dev->name);
ring             2756 drivers/net/ethernet/neterion/s2io.c 	struct ring_info *ring = container_of(napi, struct ring_info, napi);
ring             2757 drivers/net/ethernet/neterion/s2io.c 	struct net_device *dev = ring->dev;
ring             2768 drivers/net/ethernet/neterion/s2io.c 	pkts_processed = rx_intr_handler(ring, budget);
ring             2769 drivers/net/ethernet/neterion/s2io.c 	s2io_chk_rx_buffers(nic, ring);
ring             2775 drivers/net/ethernet/neterion/s2io.c 		addr += 7 - ring->ring_no;
ring             2776 drivers/net/ethernet/neterion/s2io.c 		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
ring             2797 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             2798 drivers/net/ethernet/neterion/s2io.c 		ring_pkts_processed = rx_intr_handler(ring, budget);
ring             2799 drivers/net/ethernet/neterion/s2io.c 		s2io_chk_rx_buffers(nic, ring);
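The s2io poll entries above (s2io.c:2756-2799) share one NAPI budget across rings: each ring's completions are subtracted from the remaining budget before the next ring runs. A hedged sketch of that accounting; process_ring() is a stub standing in for rx_intr_handler():

#include <stdio.h>

#define NUM_RINGS 4

/* Stub: pretend each ring has some packets pending; the real handler
 * walks completed descriptors up to 'budget'. */
static int pending[NUM_RINGS] = { 10, 3, 0, 7 };

static int process_ring(int ring, int budget)
{
    int done = pending[ring] < budget ? pending[ring] : budget;
    pending[ring] -= done;
    return done;
}

int main(void)
{
    int budget = 16, total = 0;

    /* Each ring consumes part of the remaining budget; stop when it
     * is exhausted, as a NAPI poll handler must. */
    for (int i = 0; i < NUM_RINGS && budget > 0; i++) {
        int done = process_ring(i, budget);
        total += done;
        budget -= done;
    }
    printf("processed %d, budget left %d\n", total, budget);
    return 0;
}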
ring             2851 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             2853 drivers/net/ethernet/neterion/s2io.c 		rx_intr_handler(ring, 0);
ring             2857 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             2859 drivers/net/ethernet/neterion/s2io.c 		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
ring             4197 drivers/net/ethernet/neterion/s2io.c 	struct ring_info *ring = (struct ring_info *)dev_id;
ring             4198 drivers/net/ethernet/neterion/s2io.c 	struct s2io_nic *sp = ring->nic;
ring             4209 drivers/net/ethernet/neterion/s2io.c 		addr += (7 - ring->ring_no);
ring             4210 drivers/net/ethernet/neterion/s2io.c 		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
ring             4213 drivers/net/ethernet/neterion/s2io.c 		napi_schedule(&ring->napi);
ring             4215 drivers/net/ethernet/neterion/s2io.c 		rx_intr_handler(ring, 0);
ring             4216 drivers/net/ethernet/neterion/s2io.c 		s2io_chk_rx_buffers(sp, ring);
ring             4706 drivers/net/ethernet/neterion/s2io.c 				struct ring_info *ring = &mac_control->rings[i];
ring             4708 drivers/net/ethernet/neterion/s2io.c 				rx_intr_handler(ring, 0);
ring             4731 drivers/net/ethernet/neterion/s2io.c 				struct ring_info *ring = &mac_control->rings[i];
ring             4733 drivers/net/ethernet/neterion/s2io.c 				s2io_chk_rx_buffers(sp, ring);
ring             6879 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             6885 drivers/net/ethernet/neterion/s2io.c 				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
ring             6887 drivers/net/ethernet/neterion/s2io.c 					ba = &ring->ba[j][k];
ring             7117 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             7119 drivers/net/ethernet/neterion/s2io.c 		ring->mtu = dev->mtu;
ring             7120 drivers/net/ethernet/neterion/s2io.c 		ring->lro = !!(dev->features & NETIF_F_LRO);
ring             7121 drivers/net/ethernet/neterion/s2io.c 		ret = fill_rx_buffers(sp, ring, 1);
ring             7130 drivers/net/ethernet/neterion/s2io.c 			  ring->rx_bufs_left);
ring             7597 drivers/net/ethernet/neterion/s2io.c static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
ring             7605 drivers/net/ethernet/neterion/s2io.c 	val64 = RTS_DS_MEM_DATA(ring);
ring             7808 drivers/net/ethernet/neterion/s2io.c 		struct ring_info *ring = &mac_control->rings[i];
ring             7812 drivers/net/ethernet/neterion/s2io.c 		ring->rx_bufs_left = 0;
ring             7813 drivers/net/ethernet/neterion/s2io.c 		ring->rxd_mode = sp->rxd_mode;
ring             7814 drivers/net/ethernet/neterion/s2io.c 		ring->rxd_count = rxd_count[sp->rxd_mode];
ring             7815 drivers/net/ethernet/neterion/s2io.c 		ring->pdev = sp->pdev;
ring             7816 drivers/net/ethernet/neterion/s2io.c 		ring->dev = sp->dev;
ring             7914 drivers/net/ethernet/neterion/s2io.c 			struct ring_info *ring = &mac_control->rings[i];
ring             7916 drivers/net/ethernet/neterion/s2io.c 			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
ring             1110 drivers/net/ethernet/neterion/s2io.h static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
ring             1359 drivers/net/ethernet/neterion/vxge/vxge-config.c 		if (device_config->vp_config[i].ring.enable ==
ring             1361 drivers/net/ethernet/neterion/vxge/vxge-config.c 			nblocks += device_config->vp_config[i].ring.ring_blocks;
ring             2018 drivers/net/ethernet/neterion/vxge/vxge-config.c static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
ring             2022 drivers/net/ethernet/neterion/vxge/vxge-config.c 	dma_object = ring->mempool->memblocks_dma_arr;
ring             2060 drivers/net/ethernet/neterion/vxge/vxge-config.c 					 struct __vxge_hw_ring *ring, u32 from,
ring             2096 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct __vxge_hw_ring *ring =
ring             2100 drivers/net/ethernet/neterion/vxge/vxge-config.c 	for (i = 0; i < ring->rxds_per_block; i++) {
ring             2105 drivers/net/ethernet/neterion/vxge/vxge-config.c 		u32 reserve_index = ring->channel.reserve_ptr -
ring             2106 drivers/net/ethernet/neterion/vxge/vxge-config.c 				(index * ring->rxds_per_block + i + 1);
ring             2109 drivers/net/ethernet/neterion/vxge/vxge-config.c 		ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
ring             2110 drivers/net/ethernet/neterion/vxge/vxge-config.c 						i * ring->rxd_size;
ring             2119 drivers/net/ethernet/neterion/vxge/vxge-config.c 		rxdp = ring->channel.reserve_arr[reserve_index];
ring             2121 drivers/net/ethernet/neterion/vxge/vxge-config.c 		uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
ring             2131 drivers/net/ethernet/neterion/vxge/vxge-config.c 		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
ring             2136 drivers/net/ethernet/neterion/vxge/vxge-config.c 		__vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
ring             2145 drivers/net/ethernet/neterion/vxge/vxge-config.c vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
ring             2151 drivers/net/ethernet/neterion/vxge/vxge-config.c 	channel = &ring->channel;
ring             2155 drivers/net/ethernet/neterion/vxge/vxge-config.c 		status = vxge_hw_ring_rxd_reserve(ring, &rxd);
ring             2159 drivers/net/ethernet/neterion/vxge/vxge-config.c 		if (ring->rxd_init) {
ring             2160 drivers/net/ethernet/neterion/vxge/vxge-config.c 			status = ring->rxd_init(rxd, channel->userdata);
ring             2162 drivers/net/ethernet/neterion/vxge/vxge-config.c 				vxge_hw_ring_rxd_free(ring, rxd);
ring             2167 drivers/net/ethernet/neterion/vxge/vxge-config.c 		vxge_hw_ring_rxd_post(ring, rxd);
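vxge_hw_ring_replenish() above (vxge-config.c:2145-2167) reserves a descriptor, lets the client callback initialize it, and posts it, returning the descriptor to the free pool when init fails. A generic sketch of that reserve/init/post contract; every name here is a hypothetical stand-in for the vxge_hw_ring_rxd_* calls:

#include <stdio.h>

enum status { OK, NO_SPACE, INIT_FAIL };

static int free_slots = 4;

static enum status rxd_reserve(void **rxd)
{
    static int ids[16]; static int n;
    if (!free_slots)
        return NO_SPACE;
    free_slots--;
    *rxd = &ids[n++ % 16];
    return OK;
}
static void rxd_free(void *rxd)        { (void)rxd; free_slots++; }
static void rxd_post(void *rxd)        { (void)rxd; }
static enum status rxd_init(void *rxd) { (void)rxd; return OK; }

/* Replenish until reservation fails; back out a descriptor whose
 * init step (e.g. skb allocation) fails, as the excerpt does. */
static void replenish(void)
{
    void *rxd;
    while (rxd_reserve(&rxd) == OK) {
        if (rxd_init(rxd) != OK) {
            rxd_free(rxd);
            break;
        }
        rxd_post(rxd);
    }
}

int main(void) { replenish(); printf("free=%d\n", free_slots); return 0; }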
ring             2716 drivers/net/ethernet/neterion/vxge/vxge-config.c static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
ring             2721 drivers/net/ethernet/neterion/vxge/vxge-config.c 	channel = &ring->channel;
ring             2731 drivers/net/ethernet/neterion/vxge/vxge-config.c 		if (ring->rxd_term)
ring             2732 drivers/net/ethernet/neterion/vxge/vxge-config.c 			ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
ring             2745 drivers/net/ethernet/neterion/vxge/vxge-config.c static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
ring             2750 drivers/net/ethernet/neterion/vxge/vxge-config.c 	channel = &ring->channel;
ring             2752 drivers/net/ethernet/neterion/vxge/vxge-config.c 	__vxge_hw_ring_abort(ring);
ring             2759 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring->rxd_init) {
ring             2760 drivers/net/ethernet/neterion/vxge/vxge-config.c 		status = vxge_hw_ring_replenish(ring);
ring             2775 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct __vxge_hw_ring *ring = vp->vpath->ringh;
ring             2777 drivers/net/ethernet/neterion/vxge/vxge-config.c 	__vxge_hw_ring_abort(ring);
ring             2779 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring->mempool)
ring             2780 drivers/net/ethernet/neterion/vxge/vxge-config.c 		__vxge_hw_mempool_destroy(ring->mempool);
ring             2783 drivers/net/ethernet/neterion/vxge/vxge-config.c 	__vxge_hw_channel_free(&ring->channel);
ring             2797 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct __vxge_hw_ring *ring;
ring             2814 drivers/net/ethernet/neterion/vxge/vxge-config.c 	config = &hldev->config.vp_config[vp_id].ring;
ring             2819 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
ring             2824 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring == NULL) {
ring             2829 drivers/net/ethernet/neterion/vxge/vxge-config.c 	vp->vpath->ringh = ring;
ring             2830 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->vp_id = vp_id;
ring             2831 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->vp_reg = vp->vpath->vp_reg;
ring             2832 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->common_reg = hldev->common_reg;
ring             2833 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->stats = &vp->vpath->sw_stats->ring_stats;
ring             2834 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->config = config;
ring             2835 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->callback = attr->callback;
ring             2836 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxd_init = attr->rxd_init;
ring             2837 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxd_term = attr->rxd_term;
ring             2838 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->buffer_mode = config->buffer_mode;
ring             2839 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
ring             2840 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
ring             2841 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxds_limit = config->rxds_limit;
ring             2843 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
ring             2844 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxd_priv_size =
ring             2846 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->per_rxd_space = attr->per_rxd_space;
ring             2848 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxd_priv_size =
ring             2849 drivers/net/ethernet/neterion/vxge/vxge-config.c 		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
ring             2854 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxds_per_block =
ring             2858 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
ring             2859 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->mempool = __vxge_hw_mempool_create(hldev,
ring             2862 drivers/net/ethernet/neterion/vxge/vxge-config.c 				ring->rxdblock_priv_size,
ring             2863 drivers/net/ethernet/neterion/vxge/vxge-config.c 				ring->config->ring_blocks,
ring             2864 drivers/net/ethernet/neterion/vxge/vxge-config.c 				ring->config->ring_blocks,
ring             2866 drivers/net/ethernet/neterion/vxge/vxge-config.c 				ring);
ring             2867 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring->mempool == NULL) {
ring             2872 drivers/net/ethernet/neterion/vxge/vxge-config.c 	status = __vxge_hw_channel_initialize(&ring->channel);
ring             2885 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring->rxd_init) {
ring             2886 drivers/net/ethernet/neterion/vxge/vxge-config.c 		status = vxge_hw_ring_replenish(ring);
ring             2895 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->stats->common_stats.usage_cnt = 0;
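The ring-create excerpts above (vxge-config.c:2843-2858) round the per-RxD private area up to a whole number of cache lines and derive how many RxDs fit in one memory block. A sketch of that arithmetic; CACHE_LINE, BLOCK_SIZE, and the 8-byte next-block link reserved at the block end are all assumptions, not values read out of the driver:

#include <stdio.h>

#define CACHE_LINE 64     /* assumed; vxge uses VXGE_CACHE_LINE_SIZE */
#define BLOCK_SIZE 4096   /* assumed; vxge uses VXGE_HW_BLOCK_SIZE */

int main(void)
{
    unsigned int rxd_size  = 32;  /* per-descriptor size (assumed) */
    unsigned int priv_size = 40;  /* per-descriptor private area (assumed) */

    /* Round the private area up to a cache-line multiple, as in the
     * __vxge_hw_ring_create() excerpt above. */
    priv_size = ((priv_size + CACHE_LINE - 1) / CACHE_LINE) * CACHE_LINE;

    /* Descriptors per block, reserving 8 bytes for the link to the
     * next block (an assumption about the block layout). */
    unsigned int rxds_per_block = (BLOCK_SIZE - 8) / rxd_size;

    printf("priv=%u rxds/block=%u\n", priv_size, rxds_per_block);
    return 0;
}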
ring             2924 drivers/net/ethernet/neterion/vxge/vxge-config.c 		device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
ring             2926 drivers/net/ethernet/neterion/vxge/vxge-config.c 		device_config->vp_config[i].ring.ring_blocks =
ring             2929 drivers/net/ethernet/neterion/vxge/vxge-config.c 		device_config->vp_config[i].ring.buffer_mode =
ring             2932 drivers/net/ethernet/neterion/vxge/vxge-config.c 		device_config->vp_config[i].ring.scatter_mode =
ring             2935 drivers/net/ethernet/neterion/vxge/vxge-config.c 		device_config->vp_config[i].ring.rxds_limit =
ring             3939 drivers/net/ethernet/neterion/vxge/vxge-config.c vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
ring             3944 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring == NULL)
ring             3947 drivers/net/ethernet/neterion/vxge/vxge-config.c 	rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
ring             3948 drivers/net/ethernet/neterion/vxge/vxge-config.c 	rxd_spat = readq(&ring->vp_reg->prc_cfg6);
ring             4077 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
ring             4090 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (vpath->vp_config->ring.scatter_mode !=
ring             4095 drivers/net/ethernet/neterion/vxge/vxge-config.c 		switch (vpath->vp_config->ring.scatter_mode) {
ring             4225 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
ring             4294 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (config->ring.enable == VXGE_HW_RING_ENABLE)
ring             4410 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
ring             4829 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
ring             4903 drivers/net/ethernet/neterion/vxge/vxge-config.c 	struct __vxge_hw_ring *ring = vpath->ringh;
ring             4911 drivers/net/ethernet/neterion/vxge/vxge-config.c 		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
ring             4930 drivers/net/ethernet/neterion/vxge/vxge-config.c 	ring->rxds_limit = min(ring->rxds_limit, val64);
ring             4931 drivers/net/ethernet/neterion/vxge/vxge-config.c 	if (ring->rxds_limit < 4)
ring             4932 drivers/net/ethernet/neterion/vxge/vxge-config.c 		ring->rxds_limit = 4;
ring              372 drivers/net/ethernet/neterion/vxge/vxge-config.h 	struct vxge_hw_ring_config		ring;
ring              584 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 		*(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
ring              585 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 		*(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
ring              586 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 		*(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
ring              587 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 		*(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
ring              589 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 				vdev->vpaths[k].ring.stats.pci_map_fail;
ring              590 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c 		*(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
ring              133 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring;
ring              137 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring = &vdev->vpaths[i].ring;
ring              138 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_hw_vpath_poll_rx(ring->handle);
ring              194 drivers/net/ethernet/neterion/vxge/vxge-main.c vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
ring              200 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dev = ring->ndev;
ring              202 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__);
ring              212 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->stats.skb_alloc_fail++;
ring              217 drivers/net/ethernet/neterion/vxge/vxge-main.c 		"%s: %s:%d  Skb : 0x%p", ring->ndev->name,
ring              226 drivers/net/ethernet/neterion/vxge/vxge-main.c 		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
ring              234 drivers/net/ethernet/neterion/vxge/vxge-main.c static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
ring              240 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__);
ring              244 drivers/net/ethernet/neterion/vxge/vxge-main.c 	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
ring              247 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
ring              248 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->stats.pci_map_fail++;
ring              253 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__,
ring              259 drivers/net/ethernet/neterion/vxge/vxge-main.c 		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
ring              271 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring = (struct vxge_ring *)userdata;
ring              275 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__);
ring              276 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (vxge_rx_alloc(dtrh, ring,
ring              277 drivers/net/ethernet/neterion/vxge/vxge-main.c 			  VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
ring              280 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (vxge_rx_map(dtrh, ring)) {
ring              287 drivers/net/ethernet/neterion/vxge/vxge-main.c 		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
ring              293 drivers/net/ethernet/neterion/vxge/vxge-main.c vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
ring              298 drivers/net/ethernet/neterion/vxge/vxge-main.c 			ring->ndev->name, __func__, __LINE__);
ring              299 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb_record_rx_queue(skb, ring->driver_id);
ring              300 drivers/net/ethernet/neterion/vxge/vxge-main.c 	skb->protocol = eth_type_trans(skb, ring->ndev);
ring              302 drivers/net/ethernet/neterion/vxge/vxge-main.c 	u64_stats_update_begin(&ring->stats.syncp);
ring              303 drivers/net/ethernet/neterion/vxge/vxge-main.c 	ring->stats.rx_frms++;
ring              304 drivers/net/ethernet/neterion/vxge/vxge-main.c 	ring->stats.rx_bytes += pkt_length;
ring              307 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->stats.rx_mcast++;
ring              308 drivers/net/ethernet/neterion/vxge/vxge-main.c 	u64_stats_update_end(&ring->stats.syncp);
ring              312 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__, skb->protocol);
ring              315 drivers/net/ethernet/neterion/vxge/vxge-main.c 	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
ring              317 drivers/net/ethernet/neterion/vxge/vxge-main.c 	napi_gro_receive(ring->napi_p, skb);
ring              320 drivers/net/ethernet/neterion/vxge/vxge-main.c 		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
ring              323 drivers/net/ethernet/neterion/vxge/vxge-main.c static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
ring              326 drivers/net/ethernet/neterion/vxge/vxge-main.c 	pci_dma_sync_single_for_device(ring->pdev,
ring              330 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
ring              357 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring = (struct vxge_ring *)userdata;
ring              358 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct net_device *dev = ring->ndev;
ring              369 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__);
ring              371 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (ring->budget <= 0)
ring              384 drivers/net/ethernet/neterion/vxge/vxge-main.c 			ring->ndev->name, __func__, __LINE__, skb);
ring              393 drivers/net/ethernet/neterion/vxge/vxge-main.c 			ring->ndev->name, __func__, __LINE__, pkt_length);
ring              405 drivers/net/ethernet/neterion/vxge/vxge-main.c 				ring->stats.rx_errors++;
ring              408 drivers/net/ethernet/neterion/vxge/vxge-main.c 					ring->ndev->name, __func__,
ring              415 drivers/net/ethernet/neterion/vxge/vxge-main.c 				vxge_re_pre_post(dtr, ring, rx_priv);
ring              418 drivers/net/ethernet/neterion/vxge/vxge-main.c 				ring->stats.rx_dropped++;
ring              424 drivers/net/ethernet/neterion/vxge/vxge-main.c 			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
ring              425 drivers/net/ethernet/neterion/vxge/vxge-main.c 				if (!vxge_rx_map(dtr, ring)) {
ring              428 drivers/net/ethernet/neterion/vxge/vxge-main.c 					pci_unmap_single(ring->pdev, data_dma,
ring              438 drivers/net/ethernet/neterion/vxge/vxge-main.c 					vxge_re_pre_post(dtr, ring, rx_priv);
ring              442 drivers/net/ethernet/neterion/vxge/vxge-main.c 					ring->stats.rx_dropped++;
ring              446 drivers/net/ethernet/neterion/vxge/vxge-main.c 				vxge_re_pre_post(dtr, ring, rx_priv);
ring              449 drivers/net/ethernet/neterion/vxge/vxge-main.c 				ring->stats.rx_dropped++;
ring              461 drivers/net/ethernet/neterion/vxge/vxge-main.c 				pci_dma_sync_single_for_cpu(ring->pdev,
ring              467 drivers/net/ethernet/neterion/vxge/vxge-main.c 					ring->ndev->name, __func__,
ring              471 drivers/net/ethernet/neterion/vxge/vxge-main.c 				vxge_re_pre_post(dtr, ring, rx_priv);
ring              479 drivers/net/ethernet/neterion/vxge/vxge-main.c 				vxge_re_pre_post(dtr, ring, rx_priv);
ring              485 drivers/net/ethernet/neterion/vxge/vxge-main.c 				ring->stats.skb_alloc_fail++;
ring              500 drivers/net/ethernet/neterion/vxge/vxge-main.c 		if (ring->rx_hwts) {
ring              516 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_rx_complete(ring, skb, ext_info.vlan,
ring              519 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->budget--;
ring              520 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->pkts_processed++;
ring              521 drivers/net/ethernet/neterion/vxge/vxge-main.c 		if (!ring->budget)
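The rx completion excerpts above (vxge-main.c:357-521) either hand the filled buffer up and arm a fresh one, or, when allocation fails, re-post the old buffer via vxge_re_pre_post() and count a drop so the ring never runs empty. A toy model of that keep-the-ring-full policy; the allocation failure is simulated with rand():

#include <stdio.h>
#include <stdlib.h>

struct stats { unsigned long rx_frms, rx_dropped; };

static void *alloc_buf(void) { return rand() % 8 ? malloc(64) : NULL; }

int main(void)
{
    struct stats st = { 0, 0 };
    void *slot = malloc(64);        /* buffer currently posted in the slot */

    for (int completed = 0; completed < 100; completed++) {
        void *fresh = alloc_buf();
        if (!fresh) {
            /* Allocation failed: keep the old buffer posted; the
             * received packet is dropped, as in the excerpt. */
            st.rx_dropped++;
            continue;
        }
        free(slot);                  /* "deliver" the old buffer upstream */
        st.rx_frms++;
        slot = fresh;                /* re-arm the slot */
    }
    printf("frms=%lu dropped=%lu\n", st.rx_frms, st.rx_dropped);
    free(slot);
    return 0;
}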
ring             1006 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring = (struct vxge_ring *)userdata;
ring             1011 drivers/net/ethernet/neterion/vxge/vxge-main.c 			ring->ndev->name, __func__, __LINE__);
ring             1015 drivers/net/ethernet/neterion/vxge/vxge-main.c 	pci_unmap_single(ring->pdev, rx_priv->data_dma,
ring             1023 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->ndev->name, __func__, __LINE__);
ring             1563 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vpath->ring.last_status = VXGE_HW_OK;
ring             1585 drivers/net/ethernet/neterion/vxge/vxge-main.c 			hw_ring = vdev->vpaths[i].ring.handle;
ring             1813 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
ring             1817 drivers/net/ethernet/neterion/vxge/vxge-main.c 	ring->budget = budget;
ring             1818 drivers/net/ethernet/neterion/vxge/vxge-main.c 	ring->pkts_processed = 0;
ring             1819 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vxge_hw_vpath_poll_rx(ring->handle);
ring             1820 drivers/net/ethernet/neterion/vxge/vxge-main.c 	pkts_processed = ring->pkts_processed;
ring             1827 drivers/net/ethernet/neterion/vxge/vxge-main.c 				(struct __vxge_hw_channel *)ring->handle,
ring             1828 drivers/net/ethernet/neterion/vxge/vxge-main.c 				ring->rx_vector_no);
ring             1843 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring;
ring             1848 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring = &vdev->vpaths[i].ring;
ring             1849 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->budget = budget;
ring             1850 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->pkts_processed = 0;
ring             1851 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vxge_hw_vpath_poll_rx(ring->handle);
ring             1852 drivers/net/ethernet/neterion/vxge/vxge-main.c 		pkts_processed += ring->pkts_processed;
ring             1853 drivers/net/ethernet/neterion/vxge/vxge-main.c 		budget -= ring->pkts_processed;
ring             2057 drivers/net/ethernet/neterion/vxge/vxge-main.c 		attr.ring_attr.userdata = &vpath->ring;
ring             2059 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vpath->ring.ndev = vdev->ndev;
ring             2060 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vpath->ring.pdev = vdev->pdev;
ring             2066 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.handle =
ring             2074 drivers/net/ethernet/neterion/vxge/vxge-main.c 			u64_stats_init(&vpath->ring.stats.syncp);
ring             2085 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.rx_vector_no = 0;
ring             2086 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.rx_hwts = vdev->rx_hwts;
ring             2089 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
ring             2144 drivers/net/ethernet/neterion/vxge/vxge-main.c static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
ring             2146 drivers/net/ethernet/neterion/vxge/vxge-main.c 	ring->interrupt_count++;
ring             2147 drivers/net/ethernet/neterion/vxge/vxge-main.c 	if (time_before(ring->jiffies + HZ / 100, jiffies)) {
ring             2148 drivers/net/ethernet/neterion/vxge/vxge-main.c 		struct __vxge_hw_ring *hw_ring = ring->handle;
ring             2150 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->jiffies = jiffies;
ring             2151 drivers/net/ethernet/neterion/vxge/vxge-main.c 		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
ring             2159 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->interrupt_count = 0;
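adaptive_coalesce_rx_interrupts() above (vxge-main.c:2144-2159) counts interrupts and, roughly every 10 ms (HZ/100), compares the count against a threshold to retune the ring's interrupt timer. A userspace sketch of that rate-sampling skeleton using a monotonic clock; the THRESHOLD value is made up (the driver uses VXGE_T1A_MAX_INTERRUPT_COUNT):

#include <stdio.h>
#include <time.h>

#define WINDOW_NS 10000000LL   /* ~10 ms, like HZ/100 in the excerpt */
#define THRESHOLD 100          /* made-up interrupts-per-window threshold */

static long long now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
    long long window_start = now_ns();
    unsigned int count = 0;
    int windows = 0;

    for (;;) {                      /* simulated interrupt stream */
        count++;
        if (now_ns() - window_start > WINDOW_NS) {
            /* High rate -> lengthen the coalescing timer; low rate ->
             * shorten it. The driver writes the result into TIM_CFG. */
            printf("%s rate: %u irqs/window\n",
                   count > THRESHOLD ? "high" : "low", count);
            if (++windows == 3)
                break;
            count = 0;
            window_start = now_ns();
        }
    }
    return 0;
}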
ring             2241 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring = (struct vxge_ring *)dev_id;
ring             2243 drivers/net/ethernet/neterion/vxge/vxge-main.c 	adaptive_coalesce_rx_interrupts(ring);
ring             2245 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
ring             2246 drivers/net/ethernet/neterion/vxge/vxge-main.c 				  ring->rx_vector_no);
ring             2248 drivers/net/ethernet/neterion/vxge/vxge-main.c 	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
ring             2249 drivers/net/ethernet/neterion/vxge/vxge-main.c 				   ring->rx_vector_no);
ring             2251 drivers/net/ethernet/neterion/vxge/vxge-main.c 	napi_schedule(&ring->napi);
ring             2396 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.rx_vector_no = (vpath->device_id *
ring             2495 drivers/net/ethernet/neterion/vxge/vxge-main.c 					&vdev->vpaths[vp_idx].ring);
ring             2497 drivers/net/ethernet/neterion/vxge/vxge-main.c 						&vdev->vpaths[vp_idx].ring;
ring             2612 drivers/net/ethernet/neterion/vxge/vxge-main.c 	struct vxge_ring *ring;
ring             2617 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring = &vdev->vpaths[i].ring;
ring             2620 drivers/net/ethernet/neterion/vxge/vxge-main.c 		rx_frms = READ_ONCE(ring->stats.rx_frms);
ring             2623 drivers/net/ethernet/neterion/vxge/vxge-main.c 		if (ring->stats.prev_rx_frms == rx_frms) {
ring             2624 drivers/net/ethernet/neterion/vxge/vxge-main.c 			status = vxge_hw_vpath_check_leak(ring->handle);
ring             2628 drivers/net/ethernet/neterion/vxge/vxge-main.c 				(VXGE_HW_FAIL == ring->last_status)) {
ring             2643 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->stats.prev_rx_frms = rx_frms;
ring             2644 drivers/net/ethernet/neterion/vxge/vxge-main.c 		ring->last_status = status;
ring             2741 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.napi_p = &vdev->napi;
ring             2746 drivers/net/ethernet/neterion/vxge/vxge-main.c 			netif_napi_add(dev, &vpath->ring.napi,
ring             2748 drivers/net/ethernet/neterion/vxge/vxge-main.c 			napi_enable(&vpath->ring.napi);
ring             2749 drivers/net/ethernet/neterion/vxge/vxge-main.c 			vpath->ring.napi_p = &vpath->ring.napi;
ring             2893 drivers/net/ethernet/neterion/vxge/vxge-main.c 			napi_disable(&vdev->vpaths[i].ring.napi);
ring             2926 drivers/net/ethernet/neterion/vxge/vxge-main.c 			netif_napi_del(&vdev->vpaths[i].ring.napi);
ring             3005 drivers/net/ethernet/neterion/vxge/vxge-main.c 			napi_disable(&vdev->vpaths[i].ring.napi);
ring             3109 drivers/net/ethernet/neterion/vxge/vxge-main.c 		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
ring             3221 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
ring             3797 drivers/net/ethernet/neterion/vxge/vxge-main.c 		device_config->vp_config[i].ring.enable  =
ring             3800 drivers/net/ethernet/neterion/vxge/vxge-main.c 		device_config->vp_config[i].ring.ring_blocks  =
ring             3803 drivers/net/ethernet/neterion/vxge/vxge-main.c 		device_config->vp_config[i].ring.buffer_mode =
ring             3806 drivers/net/ethernet/neterion/vxge/vxge-main.c 		device_config->vp_config[i].ring.rxds_limit  =
ring             3809 drivers/net/ethernet/neterion/vxge/vxge-main.c 		device_config->vp_config[i].ring.scatter_mode =
ring             4607 drivers/net/ethernet/neterion/vxge/vxge-main.c 		vdev->vpaths[j].ring.driver_id = j;
ring              301 drivers/net/ethernet/neterion/vxge/vxge-main.h 	struct vxge_ring ring;
ring              244 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
ring              246 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	u64 val64 = ring->tim_rti_cfg1_saved;
ring              249 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	ring->tim_rti_cfg1_saved = val64;
ring              250 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
ring              269 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
ring              271 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	u64 val64 = ring->tim_rti_cfg3_saved;
ring              272 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	u64 timer = (ring->rtimer * 1000) / 272;
ring              279 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
ring             1133 drivers/net/ethernet/neterion/vxge/vxge-traffic.c enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
ring             1139 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	channel = &ring->channel;
ring             1177 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
ring             1181 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	channel = &ring->channel;
ring             1194 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
ring             1198 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	channel = &ring->channel;
ring             1210 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
ring             1216 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	if (ring->stats->common_stats.usage_cnt > 0)
ring             1217 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		ring->stats->common_stats.usage_cnt--;
ring             1230 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
ring             1235 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	channel = &ring->channel;
ring             1242 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	if (ring->stats->common_stats.usage_cnt > 0)
ring             1243 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		ring->stats->common_stats.usage_cnt--;
ring             1253 drivers/net/ethernet/neterion/vxge/vxge-traffic.c void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
ring             1256 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	vxge_hw_ring_rxd_post_post(ring, rxdh);
ring             1293 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
ring             1300 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	channel = &ring->channel;
ring             1320 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		++ring->cmpl_cnt;
ring             1325 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		ring->stats->common_stats.usage_cnt++;
ring             1326 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		if (ring->stats->common_stats.usage_max <
ring             1327 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 				ring->stats->common_stats.usage_cnt)
ring             1328 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			ring->stats->common_stats.usage_max =
ring             1329 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 				ring->stats->common_stats.usage_cnt;
ring             1358 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
ring             1378 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	ring->stats->rxd_t_code_err_cnt[t_code]++;
ring             2373 drivers/net/ethernet/neterion/vxge/vxge-traffic.c enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
ring             2381 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	ring->cmpl_cnt = 0;
ring             2383 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
ring             2385 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		ring->callback(ring, first_rxdh,
ring             2386 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			t_code, ring->channel.userdata);
ring             2388 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 	if (ring->cmpl_cnt != 0) {
ring             2389 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		ring->doorbell_cnt += ring->cmpl_cnt;
ring             2390 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 		if (ring->doorbell_cnt >= ring->rxds_limit) {
ring             2395 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			new_count = (ring->doorbell_cnt * 4);
ring             2398 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			ring->total_db_cnt += ring->doorbell_cnt;
ring             2399 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			if (ring->total_db_cnt >= ring->rxds_per_block) {
ring             2402 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 				ring->total_db_cnt %= ring->rxds_per_block;
ring             2405 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 				&ring->vp_reg->prc_rxd_doorbell);
ring             2407 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			  readl(&ring->common_reg->titan_general_int_status);
ring             2408 drivers/net/ethernet/neterion/vxge/vxge-traffic.c 			ring->doorbell_cnt = 0;
ring             2142 drivers/net/ethernet/neterion/vxge/vxge-traffic.h void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
ring             2288 drivers/net/ethernet/neterion/vxge/vxge-traffic.h void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
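vxge_hw_vpath_poll_rx() above (vxge-traffic.c:2388-2408) accumulates completions in doorbell_cnt and writes the hardware doorbell only once the count reaches rxds_limit, amortizing MMIO cost over many descriptors. A sketch of that batching; the register write is simulated and RXDS_LIMIT is an assumed batch size:

#include <stdio.h>

#define RXDS_LIMIT 16   /* assumed batch size before ringing the doorbell */

static unsigned int doorbell_cnt, total_writes;

static void write_doorbell(unsigned int n)
{
    total_writes++;      /* stands in for the writeq() to prc_rxd_doorbell */
    (void)n;
}

/* Called once per batch of completed descriptors. */
static void completions(unsigned int cmpl)
{
    doorbell_cnt += cmpl;
    if (doorbell_cnt >= RXDS_LIMIT) {
        write_doorbell(doorbell_cnt * 4);   /* the excerpt scales by 4 */
        doorbell_cnt = 0;
    }
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        completions(3);              /* 300 completions total */
    printf("doorbell writes: %u (vs 100 unbatched)\n", total_writes);
    return 0;
}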
ring               42 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	struct circ_buf *ring;
ring               44 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring = &priv->stats_ids.free_list;
ring               46 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	if (!CIRC_SPACE(ring->head, ring->tail,
ring               51 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
ring               52 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
ring               62 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	struct circ_buf *ring;
ring               64 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring = &priv->stats_ids.free_list;
ring               83 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	if (ring->head == ring->tail) {
ring               88 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
ring               90 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
ring               91 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
ring              137 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	struct circ_buf *ring;
ring              139 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring = &priv->mask_ids.mask_id_free_list;
ring              141 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
ring              144 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
ring              145 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
ring              157 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	struct circ_buf *ring;
ring              160 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring = &priv->mask_ids.mask_id_free_list;
ring              170 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	if (ring->head == ring->tail)
ring              173 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
ring              182 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
ring              183 drivers/net/ethernet/netronome/nfp/flower/metadata.c 	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
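The nfp flower excerpts above (metadata.c:42-183) recycle stats and mask IDs through a struct circ_buf used as a FIFO free list: release copies the ID in at head, allocate pulls the oldest one from tail, and both advance modulo the buffer size. A self-contained sketch with one-byte IDs; the buffer size is made up (the driver stores NFP_FL_STATS_ELEM_RS-sized elements), and the CIRC_SPACE/CIRC_CNT macros match the kernel's linux/circ_buf.h definitions:

#include <stdio.h>

#define RING_SZ 8   /* must be a power of two for CIRC_SPACE's masking */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

static unsigned char buf[RING_SZ];
static int head, tail;

static int release_id(unsigned char id)   /* like nfp_release_stats_entry */
{
    if (!CIRC_SPACE(head, tail, RING_SZ))
        return -1;                          /* free list full */
    buf[head] = id;
    head = (head + 1) % RING_SZ;
    return 0;
}

static int get_id(unsigned char *id)       /* like nfp_get_stats_entry */
{
    if (head == tail)
        return -1;                          /* nothing to reuse */
    *id = buf[tail];
    tail = (tail + 1) % RING_SZ;
    return 0;
}

int main(void)
{
    unsigned char id;
    release_id(5);
    release_id(9);
    while (!get_id(&id))
        printf("reused id %u\n", id);
    return 0;
}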
ring              113 drivers/net/ethernet/netronome/nfp/nfp_net.h #define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
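The D_IDX macro above masks a free-running index with cnt - 1, which equals idx % cnt only when cnt is a power of two; indices can then increment forever with no explicit wrap test. A quick demonstration (adapted to take the count directly instead of a ring pointer):

#include <stdio.h>
#include <stdint.h>

#define D_IDX(cnt, idx) ((idx) & ((cnt) - 1))   /* cnt must be 2^n */

int main(void)
{
    uint32_t cnt = 512;
    for (uint32_t idx = 510; idx < 515; idx++)
        printf("idx=%u -> slot %u (mod: %u)\n",
               idx, D_IDX(cnt, idx), idx % cnt);
    return 0;
}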
ring              361 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 				  struct ethtool_ringparam *ring)
ring              365 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
ring              366 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
ring              367 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	ring->rx_pending = nn->dp.rxd_cnt;
ring              368 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	ring->tx_pending = nn->dp.txd_cnt;
ring              386 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 				 struct ethtool_ringparam *ring)
ring              392 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring              396 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
ring              397 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c 	txd_cnt = roundup_pow_of_two(ring->tx_pending);
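nfp_net_set_ringparam() above rounds the requested counts up to the next power of two before applying them, so the masking scheme in D_IDX stays valid. A portable stand-in for the kernel's roundup_pow_of_two() (the kernel version is fls-based; this loop is equivalent for inputs >= 1):

#include <stdio.h>
#include <stdint.h>

/* Smallest power of two >= n, for n >= 1. */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
    uint32_t p = 1;
    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    uint32_t requests[] = { 1, 100, 512, 1000, 4096 };
    for (unsigned i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
        printf("%u -> %u\n", requests[i], roundup_pow_of_two32(requests[i]));
    return 0;
}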
ring             4620 drivers/net/ethernet/nvidia/forcedeth.c static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
ring             4624 drivers/net/ethernet/nvidia/forcedeth.c 	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring             4625 drivers/net/ethernet/nvidia/forcedeth.c 	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
ring             4627 drivers/net/ethernet/nvidia/forcedeth.c 	ring->rx_pending = np->rx_ring_size;
ring             4628 drivers/net/ethernet/nvidia/forcedeth.c 	ring->tx_pending = np->tx_ring_size;
ring             4631 drivers/net/ethernet/nvidia/forcedeth.c static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
ring             4638 drivers/net/ethernet/nvidia/forcedeth.c 	if (ring->rx_pending < RX_RING_MIN ||
ring             4639 drivers/net/ethernet/nvidia/forcedeth.c 	    ring->tx_pending < TX_RING_MIN ||
ring             4640 drivers/net/ethernet/nvidia/forcedeth.c 	    ring->rx_mini_pending != 0 ||
ring             4641 drivers/net/ethernet/nvidia/forcedeth.c 	    ring->rx_jumbo_pending != 0 ||
ring             4643 drivers/net/ethernet/nvidia/forcedeth.c 	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
ring             4644 drivers/net/ethernet/nvidia/forcedeth.c 	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
ring             4646 drivers/net/ethernet/nvidia/forcedeth.c 	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
ring             4647 drivers/net/ethernet/nvidia/forcedeth.c 	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
ring             4655 drivers/net/ethernet/nvidia/forcedeth.c 					       (ring->rx_pending +
ring             4656 drivers/net/ethernet/nvidia/forcedeth.c 					       ring->tx_pending),
ring             4661 drivers/net/ethernet/nvidia/forcedeth.c 					       (ring->rx_pending +
ring             4662 drivers/net/ethernet/nvidia/forcedeth.c 					       ring->tx_pending),
ring             4665 drivers/net/ethernet/nvidia/forcedeth.c 	rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
ring             4667 drivers/net/ethernet/nvidia/forcedeth.c 	tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
ring             4675 drivers/net/ethernet/nvidia/forcedeth.c 						  (ring->rx_pending +
ring             4676 drivers/net/ethernet/nvidia/forcedeth.c 						  ring->tx_pending),
ring             4682 drivers/net/ethernet/nvidia/forcedeth.c 						  (ring->rx_pending +
ring             4683 drivers/net/ethernet/nvidia/forcedeth.c 						  ring->tx_pending),
ring             4708 drivers/net/ethernet/nvidia/forcedeth.c 	np->rx_ring_size = ring->rx_pending;
ring             4709 drivers/net/ethernet/nvidia/forcedeth.c 	np->tx_ring_size = ring->tx_pending;
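nv_set_ringparam() above (forcedeth.c:4638-4709) validates the requested sizes, allocates the new descriptor and skb-map arrays first, and only then commits the new ring sizes, so a failed allocation leaves the old rings intact. A sketch of that allocate-before-swap pattern; all names and the descriptor size are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct ring { unsigned int size; void *descs; };

/* Resize without losing the old ring on allocation failure. */
static int ring_resize(struct ring *r, unsigned int new_size)
{
    void *new_descs = calloc(new_size, 16); /* 16-byte descriptors, assumed */
    if (!new_descs)
        return -1;          /* old ring untouched */

    free(r->descs);         /* a driver would also quiesce/drain first */
    r->descs = new_descs;
    r->size = new_size;
    return 0;
}

int main(void)
{
    struct ring r = { 0, NULL };
    if (ring_resize(&r, 1024) == 0)
        printf("ring now %u entries\n", r.size);
    free(r.descs);
    return 0;
}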
ring              273 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 					struct ethtool_ringparam *ring)
ring              279 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	ring->rx_max_pending = PCH_GBE_MAX_RXD;
ring              280 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	ring->tx_max_pending = PCH_GBE_MAX_TXD;
ring              281 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	ring->rx_pending = rxdr->count;
ring              282 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	ring->tx_pending = txdr->count;
ring              294 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 					struct ethtool_ringparam *ring)
ring              302 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
ring              326 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
ring              330 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c 		clamp_val(ring->tx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
ring              267 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_csring *ring;
ring              272 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
ring              275 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring) {
ring              280 drivers/net/ethernet/pasemi/pasemi_mac.c 	chno = ring->chan.chno;
ring              282 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->size = CS_RING_SIZE;
ring              283 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->next_to_fill = 0;
ring              286 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
ring              290 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
ring              291 drivers/net/ethernet/pasemi/pasemi_mac.c 	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
ring              296 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->events[0] = pasemi_dma_alloc_flag();
ring              297 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->events[1] = pasemi_dma_alloc_flag();
ring              298 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (ring->events[0] < 0 || ring->events[1] < 0)
ring              301 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_clear_flag(ring->events[0]);
ring              302 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_clear_flag(ring->events[1]);
ring              304 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->fun = pasemi_dma_alloc_fun();
ring              305 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (ring->fun < 0)
ring              309 drivers/net/ethernet/pasemi/pasemi_mac.c 	      PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) |
ring              318 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
ring              323 drivers/net/ethernet/pasemi/pasemi_mac.c 	return ring;
ring              327 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (ring->events[0] >= 0)
ring              328 drivers/net/ethernet/pasemi/pasemi_mac.c 		pasemi_dma_free_flag(ring->events[0]);
ring              329 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (ring->events[1] >= 0)
ring              330 drivers/net/ethernet/pasemi/pasemi_mac.c 		pasemi_dma_free_flag(ring->events[1]);
ring              331 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_free_ring(&ring->chan);
ring              333 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_free_chan(&ring->chan);
ring              365 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_rxring *ring;
ring              370 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
ring              373 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring) {
ring              377 drivers/net/ethernet/pasemi/pasemi_mac.c 	chno = ring->chan.chno;
ring              379 drivers/net/ethernet/pasemi/pasemi_mac.c 	spin_lock_init(&ring->lock);
ring              381 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->size = RX_RING_SIZE;
ring              382 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->ring_info = kcalloc(RX_RING_SIZE,
ring              386 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring->ring_info)
ring              390 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
ring              393 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
ring              395 drivers/net/ethernet/pasemi/pasemi_mac.c 					   &ring->buf_dma, GFP_KERNEL);
ring              396 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring->buffers)
ring              400 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
ring              403 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
ring              414 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));
ring              417 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
ring              429 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->next_to_fill = 0;
ring              430 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->next_to_clean = 0;
ring              431 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->mac = mac;
ring              432 drivers/net/ethernet/pasemi/pasemi_mac.c 	mac->rx = ring;
ring              437 drivers/net/ethernet/pasemi/pasemi_mac.c 	kfree(ring->ring_info);
ring              439 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_free_chan(&ring->chan);
ring              449 drivers/net/ethernet/pasemi/pasemi_mac.c 	struct pasemi_mac_txring *ring;
ring              453 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
ring              456 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring) {
ring              461 drivers/net/ethernet/pasemi/pasemi_mac.c 	chno = ring->chan.chno;
ring              463 drivers/net/ethernet/pasemi/pasemi_mac.c 	spin_lock_init(&ring->lock);
ring              465 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->size = TX_RING_SIZE;
ring              466 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->ring_info = kcalloc(TX_RING_SIZE,
ring              469 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (!ring->ring_info)
ring              473 drivers/net/ethernet/pasemi/pasemi_mac.c 	if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
ring              477 drivers/net/ethernet/pasemi/pasemi_mac.c 		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
ring              478 drivers/net/ethernet/pasemi/pasemi_mac.c 	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
ring              493 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->next_to_fill = 0;
ring              494 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->next_to_clean = 0;
ring              495 drivers/net/ethernet/pasemi/pasemi_mac.c 	ring->mac = mac;
ring              497 drivers/net/ethernet/pasemi/pasemi_mac.c 	return ring;
ring              500 drivers/net/ethernet/pasemi/pasemi_mac.c 	kfree(ring->ring_info);
ring              502 drivers/net/ethernet/pasemi/pasemi_mac.c 	pasemi_dma_free_chan(&ring->chan);
ring              101 drivers/net/ethernet/pasemi/pasemi_mac.h #define RING_USED(ring)	(((ring)->next_to_fill - (ring)->next_to_clean) \
ring              102 drivers/net/ethernet/pasemi/pasemi_mac.h 				& ((ring)->size - 1))
ring              103 drivers/net/ethernet/pasemi/pasemi_mac.h #define RING_AVAIL(ring)	((ring->size) - RING_USED(ring))
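The pasemi macros above compute occupancy from free-running fill/clean counters masked by size - 1, which stays correct across unsigned wrap-around as long as the ring size is a power of two and usage never exceeds the size. A demonstration near the 32-bit wrap point (macros adapted to take the counters directly):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 256u      /* power of two, as pasemi assumes */
#define RING_USED(fill, clean)  (((fill) - (clean)) & (RING_SIZE - 1))
#define RING_AVAIL(fill, clean) (RING_SIZE - RING_USED(fill, clean))

int main(void)
{
    /* Counters just past the 32-bit wrap: fill wrapped, clean has not,
     * yet unsigned subtraction still yields the true distance. */
    uint32_t clean = 0xfffffff0u;
    uint32_t fill  = clean + 20;    /* wraps to 0x00000004 */

    printf("used=%u avail=%u\n",
           RING_USED(fill, clean), RING_AVAIL(fill, clean));
    return 0;
}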
ring              441 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 				struct ethtool_ringparam *ring)
ring              445 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	ring->tx_max_pending = IONIC_MAX_TXRX_DESC;
ring              446 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	ring->tx_pending = lif->ntxq_descs;
ring              447 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	ring->rx_max_pending = IONIC_MAX_TXRX_DESC;
ring              448 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	ring->rx_pending = lif->nrxq_descs;
ring              452 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 			       struct ethtool_ringparam *ring)
ring              457 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
ring              462 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	if (!is_power_of_2(ring->tx_pending) ||
ring              463 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	    !is_power_of_2(ring->rx_pending)) {
ring              469 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	if (ring->tx_pending == lif->ntxq_descs &&
ring              470 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	    ring->rx_pending == lif->nrxq_descs)
ring              480 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	lif->ntxq_descs = ring->tx_pending;
ring              481 drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 	lif->nrxq_descs = ring->rx_pending;
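ionic_set_ringparam() above rejects descriptor counts that are not powers of two and short-circuits when nothing changed. The usual constant-time power-of-two test is n && !(n & (n - 1)), which is what the kernel's is_power_of_2() helper computes; a standalone version:

#include <stdio.h>

static int is_power_of_2(unsigned long n)
{
    /* A power of two has exactly one bit set, so clearing the lowest
     * set bit (n & (n - 1)) must yield zero; reject n == 0 explicitly. */
    return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
    unsigned long vals[] = { 0, 1, 24, 64, 1000, 1024 };
    for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
        printf("%lu: %s\n", vals[i],
               is_power_of_2(vals[i]) ? "ok" : "rejected");
    return 0;
}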
ring             2449 drivers/net/ethernet/pensando/ionic/ionic_if.h 	u8     ring;
ring              684 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	int ring;
ring              697 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              698 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              700 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		hwctx->rcv_rings[ring].addr =
ring              702 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		hwctx->rcv_rings[ring].size =
ring              706 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              707 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              709 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		if (ring == 0) {
ring              713 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
ring              714 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
ring              715 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
ring              735 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	int ring;
ring              777 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              778 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              785 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				netdev->name, ring);
ring              794 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 			recv_crb_registers[port].crb_rcv_producer[ring]);
ring              797 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              798 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              806 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				netdev->name, ring);
ring              815 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				recv_crb_registers[port].crb_sts_consumer[ring]);
ring              819 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 				recv_crb_registers[port].sw_int_mask[ring]);
ring              853 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	int ring;
ring              893 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              894 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              905 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              906 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              290 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	int ring, i = 0;
ring              346 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              347 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		sds_ring = &(recv_ctx->sds_rings[ring]);
ring              395 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		struct ethtool_ringparam *ring)
ring              399 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	ring->rx_pending = adapter->num_rxd;
ring              400 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring              401 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	ring->rx_jumbo_pending += adapter->num_lro_rxd;
ring              402 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	ring->tx_pending = adapter->num_txd;
ring              405 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
ring              406 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
ring              408 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
ring              409 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
ring              412 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring              433 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 		struct ethtool_ringparam *ring)
ring              443 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	if (ring->rx_mini_pending)
ring              451 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	num_rxd = netxen_validate_ringparam(ring->rx_pending,
ring              454 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending,
ring              457 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c 	num_txd = netxen_validate_ringparam(ring->tx_pending,
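netxen_validate_ringparam() is referenced three times above but its body is not part of this excerpt. A plausible sketch of such a validator, assuming (as the per-speed MAX_*_DESCRIPTORS constants suggest) that ring depths are clamped into a window and kept to power-of-two values so indices can wrap with a mask; the real helper may differ:

#include <linux/types.h>
#include <linux/log2.h>

static u32 validate_ringparam(u32 val, u32 min, u32 max)
{
	/* Clamp into the advertised window first ... */
	if (val < min)
		val = min;
	else if (val > max)
		val = max;
	/* ... then round down to a power of two (this assumes min
	 * itself is a power of two, so the result stays in range). */
	return rounddown_pow_of_two(val);
}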
ring               96 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	int i, ring;
ring               99 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              100 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              154 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	int ring;
ring              161 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              162 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              185 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	int ring, i;
ring              214 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              215 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              216 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		switch (ring) {
ring              280 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              281 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              282 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		sds_ring->irq = adapter->msix_entries[ring].vector;
ring             1517 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		int ring, u64 sts_data0)
ring             1526 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1529 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1571 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		int ring, u64 sts_data0, u64 sts_data1)
ring             1587 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1590 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1660 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	int opcode, ring = 0, desc_cnt;
ring             1677 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			ring = netxen_get_sts_type(sts_data0);
ring             1679 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 					ring, sts_data0);
ring             1682 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			ring = netxen_get_lro_sts_type(sts_data0);
ring             1685 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 					ring, sts_data0, sts_data1);
ring             1696 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
ring             1708 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1710 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			&adapter->recv_ctx.rds_rings[ring];
ring             1712 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 		if (!list_empty(&sds_ring->free_list[ring])) {
ring             1713 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			list_for_each(cur, &sds_ring->free_list[ring]) {
ring             1719 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 			netxen_merge_rx_buffers(&sds_ring->free_list[ring],
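A detail worth noticing in the receive paths above: the ring index is decoded out of a status word written by hardware, so netxen (and, further down, qlcnic) bounds-checks it before indexing rds_rings. A stripped-down sketch of that guard, with hypothetical type names:

#include <linux/types.h>
#include <linux/compiler.h>

struct rds_ring_s { u32 num_desc; /* ... */ };

struct recv_ctx_s { struct rds_ring_s rds_rings[4]; };

struct adapter_s {
	struct recv_ctx_s recv_ctx;
	int max_rds_rings;
};

static struct rds_ring_s *lookup_rds_ring(struct adapter_s *a, int ring)
{
	/* 'ring' came off the NIC's status ring: treat it as untrusted. */
	if (unlikely(ring >= a->max_rds_rings))
		return NULL;
	return &a->recv_ctx.rds_rings[ring];
}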
ring              167 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int ring;
ring              174 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              175 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              186 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int ring;
ring              190 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              191 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              201 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int ring;
ring              205 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              206 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              215 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int ring;
ring              219 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring              220 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1041 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int err, ring;
ring             1057 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring             1058 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1059 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
ring             1072 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int ring;
ring             1077 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
ring             1078 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1202 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	int err, ring;
ring             1253 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1254 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		rds_ring = &adapter->recv_ctx.rds_rings[ring];
ring             1255 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c 		netxen_post_rx_buffers(adapter, ring, rds_ring);
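The netxen_nic_main.c excerpt shows the usual per-ring IRQ bring-up: give each sds ring a name derived from the netdev ("%s[%d]") and register one vector per ring. A self-contained sketch of that loop, including the unwind a driver needs on partial failure (handler and field names hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct sds_ring_s {
	char name[IFNAMSIZ + 8];	/* must outlive request_irq() */
	int irq;
};

static irqreturn_t sds_intr(int irq, void *data)
{
	/* would schedule NAPI for the ring handed back in 'data' */
	return IRQ_HANDLED;
}

static int request_ring_irqs(struct net_device *netdev,
			     struct sds_ring_s *rings, int count)
{
	int ring, err;

	for (ring = 0; ring < count; ring++) {
		snprintf(rings[ring].name, sizeof(rings[ring].name),
			 "%s[%d]", netdev->name, ring);
		err = request_irq(rings[ring].irq, sds_intr, 0,
				  rings[ring].name, &rings[ring]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--ring >= 0)
		free_irq(rings[ring].irq, &rings[ring]);
	return err;
}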
ring             1955 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 					      int ring)
ring             1957 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 	return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
ring             1315 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			      struct qlcnic_host_tx_ring *tx, int ring)
ring             1341 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			msix_vector = adapter->drv_sds_rings + ring;
ring             1382 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
ring             1400 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	u8 ring;
ring             1431 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1432 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		rds_ring = &adapter->recv_ctx->rds_rings[ring];
ring             1433 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		qlcnic_post_rx_buffers(adapter, rds_ring, ring);
ring             1437 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1438 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
ring             1457 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 	int ring;
ring             1461 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1462 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
ring              554 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h 				  u16 vlan, struct qlcnic_host_tx_ring *ring);
ring              415 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 				     int ring)
ring              465 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		index = temp_nsds_rings + ring;
ring              501 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			index = adapter->drv_sds_rings + ring;
ring              560 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	int err, ring;
ring              571 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring              572 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring = &adapter->tx_ring[ring];
ring              594 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              595 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              607 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring              608 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              629 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	int i, err, ring;
ring              655 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < dev->drv_tx_rings; ring++) {
ring              657 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						  &dev->tx_ring[ring],
ring              658 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						  ring);
ring              661 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			if (ring == 0)
ring              664 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 			for (i = 0; i < ring; i++)
ring              690 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	int ring;
ring              694 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++)
ring              696 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 						 &adapter->tx_ring[ring]);
ring              720 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	int ring;
ring              724 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring              725 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		tx_ring = &adapter->tx_ring[ring];
ring              743 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              744 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              755 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring              756 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c 		sds_ring = &recv_ctx->sds_rings[ring];
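Two patterns in the qlcnic_ctx.c lines deserve a note. First, TX MSI-X vectors sit after the SDS vectors (index = adapter->drv_sds_rings + ring). Second, per-ring firmware contexts are created in a loop that tears down every context already created when a later one fails; the ring == 0 special case above reflects that there is nothing to unwind when the very first create fails. A condensed sketch of the second pattern, with stubs standing in for the real firmware mailbox calls:

struct tx_ring_s { int ctx_id; };

/* Stubs for the firmware create/destroy hooks; assumed, not real names. */
static int fw_create_tx_ctx(struct tx_ring_s *tx, int ring) { return 0; }
static void fw_destroy_tx_ctx(struct tx_ring_s *tx) { }

static int create_all_tx_ctx(struct tx_ring_s *rings, int count)
{
	int ring, i, err;

	for (ring = 0; ring < count; ring++) {
		err = fw_create_tx_ctx(&rings[ring], ring);
		if (err) {
			for (i = 0; i < ring; i++)	/* unwind earlier rings */
				fw_destroy_tx_ctx(&rings[i]);
			return err;
		}
	}
	return 0;
}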
ring              538 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	int ring, i = 0;
ring              563 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring              564 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_ring = &adapter->tx_ring[ring];
ring              576 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              577 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		rds_rings = &recv_ctx->rds_rings[ring];
ring              583 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring              584 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		sds_ring = &(recv_ctx->sds_rings[ring]);
ring              636 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		struct ethtool_ringparam *ring)
ring              640 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->rx_pending = adapter->num_rxd;
ring              641 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring              642 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->tx_pending = adapter->num_txd;
ring              644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->rx_max_pending = adapter->max_rxd;
ring              645 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring              646 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring              667 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		struct ethtool_ringparam *ring)
ring              672 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	if (ring->rx_mini_pending)
ring              675 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
ring              678 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
ring              682 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	num_txd = qlcnic_validate_ringparam(ring->tx_pending,
ring             1308 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	int ring;
ring             1311 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1312 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 		tx_ring = &adapter->tx_ring[ring];
ring             1349 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	int index, ret, length, size, ring;
ring             1354 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 	for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1356 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c 			tx_ring = &adapter->tx_ring[ring];
ring               88 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	int i, ring;
ring               91 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring               92 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              113 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	int i, ring;
ring              116 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              117 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              170 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	int ring;
ring              177 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              178 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              191 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	int ring, i;
ring              202 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring              203 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring              204 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		switch (ring) {
ring              243 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring              244 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring              245 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 		sds_ring->irq = adapter->msix_entries[ring].vector;
ring              251 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c 				sds_ring->tx_ring = &adapter->tx_ring[ring];
ring             1139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 					    struct qlcnic_host_rds_ring *ring,
ring             1145 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	buffer = &ring->rx_buf_arr[index];
ring             1151 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
ring             1195 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		   struct qlcnic_host_sds_ring *sds_ring, int ring,
ring             1206 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1209 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1263 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		   int ring, u64 sts_data0, u64 sts_data1)
ring             1278 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1281 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1366 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u8 ring;
ring             1382 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			ring = qlcnic_get_sts_type(sts_data0);
ring             1383 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
ring             1387 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			ring = qlcnic_get_lro_sts_type(sts_data0);
ring             1389 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
ring             1400 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
ring             1412 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1413 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		rds_ring = &adapter->recv_ctx->rds_rings[ring];
ring             1414 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (!list_empty(&sds_ring->free_list[ring])) {
ring             1415 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_for_each(cur, &sds_ring->free_list[ring]) {
ring             1421 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_splice_tail_init(&sds_ring->free_list[ring],
ring             1426 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
ring             1491 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
ring             1499 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1502 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1541 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring, opcode, desc_cnt;
ring             1558 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		ring = qlcnic_get_sts_type(sts_data0);
ring             1559 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
ring             1576 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             1584 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1585 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1591 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			if (ring == (adapter->drv_sds_rings - 1))
ring             1608 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1609 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             1620 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             1625 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1626 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1633 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1634 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             1644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             1652 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1653 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1661 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1662 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             1671 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             1679 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1680 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             1689 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             1690 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             1719 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			u8 ring, u64 sts_data[])
ring             1730 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1733 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1785 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			u8 ring, u64 sts_data[])
ring             1802 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             1805 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             1888 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	u8 ring;
ring             1900 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
ring             1905 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 							ring, sts_data);
ring             1908 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
ring             1918 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
ring             1928 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1929 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		rds_ring = &adapter->recv_ctx->rds_rings[ring];
ring             1930 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		if (!list_empty(&sds_ring->free_list[ring])) {
ring             1931 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_for_each(cur, &sds_ring->free_list[ring]) {
ring             1937 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			list_splice_tail_init(&sds_ring->free_list[ring],
ring             1941 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
ring             2047 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             2055 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2056 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             2064 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2065 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             2074 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             2082 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2083 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             2092 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2093 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             2104 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             2112 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2113 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             2138 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             2151 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring;
ring             2156 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2157 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		sds_ring = &recv_ctx->sds_rings[ring];
ring             2165 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2166 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 			tx_ring = &adapter->tx_ring[ring];
ring             2175 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 					 int ring, u64 sts_data[])
ring             2182 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	if (unlikely(ring >= adapter->max_rds_rings))
ring             2185 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	rds_ring = &recv_ctx->rds_rings[ring];
ring             2215 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	int ring, opcode;
ring             2225 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
ring             2226 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c 	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
ring             1724 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int err, ring, num_sds_rings;
ring             1758 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			for (ring = 0; ring < num_sds_rings; ring++) {
ring             1759 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				sds_ring = &recv_ctx->sds_rings[ring];
ring             1762 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				    (ring == (num_sds_rings - 1))) {
ring             1772 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 							 netdev->name, ring);
ring             1777 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 						 netdev->name, ring);
ring             1791 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			for (ring = 0; ring < adapter->drv_tx_rings;
ring             1792 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			     ring++) {
ring             1793 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				tx_ring = &adapter->tx_ring[ring];
ring             1795 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 					 "%s-tx-%d", netdev->name, ring);
ring             1809 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             1819 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             1820 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				sds_ring = &recv_ctx->sds_rings[ring];
ring             1828 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			for (ring = 0; ring < adapter->drv_tx_rings;
ring             1829 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			     ring++) {
ring             1830 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				tx_ring = &adapter->tx_ring[ring];
ring             1882 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             1899 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             1900 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		rds_ring = &adapter->recv_ctx->rds_rings[ring];
ring             1901 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		qlcnic_post_rx_buffers(adapter, rds_ring, ring);
ring             1941 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             1970 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++)
ring             1971 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
ring             2055 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             2059 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2060 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
ring             2139 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             2166 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             2167 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		rds_ring = &adapter->recv_ctx->rds_rings[ring];
ring             2168 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		qlcnic_post_rx_buffers(adapter, rds_ring, ring);
ring             2172 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             2173 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			sds_ring = &adapter->recv_ctx->sds_rings[ring];
ring             2383 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             2386 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2387 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
ring             2399 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring, vector, index;
ring             2410 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2411 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
ring             2413 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring->txq = netdev_get_tx_queue(netdev, ring);
ring             2425 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             2426 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			tx_ring = &adapter->tx_ring[ring];
ring             2429 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 				index = adapter->drv_sds_rings + ring;
ring             3013 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	int ring;
ring             3018 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
ring             3019 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		rds_ring = &recv_ctx->rds_rings[ring];
ring             3024 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			     ring, readl(rds_ring->crb_rcv_producer),
ring             3028 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
ring             3029 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		sds_ring = &(recv_ctx->sds_rings[ring]);
ring             3034 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    ring, readl(sds_ring->crb_sts_consumer),
ring             3039 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
ring             3040 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 		tx_ring = &adapter->tx_ring[ring];
ring             3044 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 			    ring, tx_ring->ctx_id);
ring              136 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 			       struct ethtool_ringparam *ring)
ring              140 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 	ring->rx_max_pending = EMAC_MAX_RX_DESCS;
ring              141 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 	ring->tx_max_pending = EMAC_MAX_TX_DESCS;
ring              142 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 	ring->rx_pending = adpt->rx_desc_cnt;
ring              143 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 	ring->tx_pending = adpt->tx_desc_cnt;
ring              147 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 			      struct ethtool_ringparam *ring)
ring              154 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring              158 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 		clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS);
ring              161 drivers/net/ethernet/qualcomm/emac/emac-ethtool.c 		clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS);
ring              248 drivers/net/ethernet/qualcomm/qca_debug.c qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
ring              252 drivers/net/ethernet/qualcomm/qca_debug.c 	ring->rx_max_pending = 4;
ring              253 drivers/net/ethernet/qualcomm/qca_debug.c 	ring->tx_max_pending = TX_RING_MAX_LEN;
ring              254 drivers/net/ethernet/qualcomm/qca_debug.c 	ring->rx_pending = 4;
ring              255 drivers/net/ethernet/qualcomm/qca_debug.c 	ring->tx_pending = qca->txr.count;
ring              259 drivers/net/ethernet/qualcomm/qca_debug.c qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
ring              264 drivers/net/ethernet/qualcomm/qca_debug.c 	if ((ring->rx_pending) ||
ring              265 drivers/net/ethernet/qualcomm/qca_debug.c 	    (ring->rx_mini_pending) ||
ring              266 drivers/net/ethernet/qualcomm/qca_debug.c 	    (ring->rx_jumbo_pending))
ring              272 drivers/net/ethernet/qualcomm/qca_debug.c 	qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
ring             1391 drivers/net/ethernet/realtek/8139cp.c 				struct ethtool_ringparam *ring)
ring             1393 drivers/net/ethernet/realtek/8139cp.c 	ring->rx_max_pending = CP_RX_RING_SIZE;
ring             1394 drivers/net/ethernet/realtek/8139cp.c 	ring->tx_max_pending = CP_TX_RING_SIZE;
ring             1395 drivers/net/ethernet/realtek/8139cp.c 	ring->rx_pending = CP_RX_RING_SIZE;
ring             1396 drivers/net/ethernet/realtek/8139cp.c 	ring->tx_pending = CP_TX_RING_SIZE;
ring             1915 drivers/net/ethernet/realtek/8139too.c static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
ring             1921 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data(skb, ring + offset, left);
ring             1922 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data_offset(skb, left, ring, size - left);
ring             1924 drivers/net/ethernet/realtek/8139too.c 		skb_copy_to_linear_data(skb, ring + offset, size);
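wrap_copy() in 8139too.c handles the one awkward case of a contiguous receive ring: a frame whose payload runs past the end of the RX_BUF_LEN-byte buffer continues at offset zero, so it must be copied in two pieces. The same logic in generic form (buffer length parameterized; the original copies into an skb with skb_copy_to_linear_data() rather than memcpy()):

#include <linux/types.h>
#include <linux/string.h>

static void ring_wrap_copy(void *dst, const unsigned char *ring,
			   size_t ring_len, size_t offset, size_t size)
{
	size_t left = ring_len - offset;	/* bytes before the wrap point */

	if (size > left) {
		memcpy(dst, ring + offset, left);		/* tail of the buffer */
		memcpy((char *)dst + left, ring, size - left);	/* wrapped head */
	} else {
		memcpy(dst, ring + offset, size);		/* no wrap needed */
	}
}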
ring             1193 drivers/net/ethernet/renesas/ravb_main.c 			       struct ethtool_ringparam *ring)
ring             1197 drivers/net/ethernet/renesas/ravb_main.c 	ring->rx_max_pending = BE_RX_RING_MAX;
ring             1198 drivers/net/ethernet/renesas/ravb_main.c 	ring->tx_max_pending = BE_TX_RING_MAX;
ring             1199 drivers/net/ethernet/renesas/ravb_main.c 	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
ring             1200 drivers/net/ethernet/renesas/ravb_main.c 	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
ring             1204 drivers/net/ethernet/renesas/ravb_main.c 			      struct ethtool_ringparam *ring)
ring             1209 drivers/net/ethernet/renesas/ravb_main.c 	if (ring->tx_pending > BE_TX_RING_MAX ||
ring             1210 drivers/net/ethernet/renesas/ravb_main.c 	    ring->rx_pending > BE_RX_RING_MAX ||
ring             1211 drivers/net/ethernet/renesas/ravb_main.c 	    ring->tx_pending < BE_TX_RING_MIN ||
ring             1212 drivers/net/ethernet/renesas/ravb_main.c 	    ring->rx_pending < BE_RX_RING_MIN)
ring             1214 drivers/net/ethernet/renesas/ravb_main.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring             1237 drivers/net/ethernet/renesas/ravb_main.c 	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
ring             1238 drivers/net/ethernet/renesas/ravb_main.c 	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
ring             2325 drivers/net/ethernet/renesas/sh_eth.c 				 struct ethtool_ringparam *ring)
ring             2329 drivers/net/ethernet/renesas/sh_eth.c 	ring->rx_max_pending = RX_RING_MAX;
ring             2330 drivers/net/ethernet/renesas/sh_eth.c 	ring->tx_max_pending = TX_RING_MAX;
ring             2331 drivers/net/ethernet/renesas/sh_eth.c 	ring->rx_pending = mdp->num_rx_ring;
ring             2332 drivers/net/ethernet/renesas/sh_eth.c 	ring->tx_pending = mdp->num_tx_ring;
ring             2336 drivers/net/ethernet/renesas/sh_eth.c 				struct ethtool_ringparam *ring)
ring             2341 drivers/net/ethernet/renesas/sh_eth.c 	if (ring->tx_pending > TX_RING_MAX ||
ring             2342 drivers/net/ethernet/renesas/sh_eth.c 	    ring->rx_pending > RX_RING_MAX ||
ring             2343 drivers/net/ethernet/renesas/sh_eth.c 	    ring->tx_pending < TX_RING_MIN ||
ring             2344 drivers/net/ethernet/renesas/sh_eth.c 	    ring->rx_pending < RX_RING_MIN)
ring             2346 drivers/net/ethernet/renesas/sh_eth.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
ring             2370 drivers/net/ethernet/renesas/sh_eth.c 	mdp->num_rx_ring = ring->rx_pending;
ring             2371 drivers/net/ethernet/renesas/sh_eth.c 	mdp->num_tx_ring = ring->tx_pending;
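ravb and sh_eth implement set_ringparam the same way: validate the requested depths against fixed min/max bounds, reject mini/jumbo requests they do not support, and commit the new counts only between a full stop and restart of the DMA engine (the stop/realloc/restart lines fall outside this excerpt). The ordering is the point, so here it is as a sketch with stubbed device hooks:

#include <linux/types.h>

struct ring_dev_s { unsigned int num_rx_ring, num_tx_ring; };

/* Stubs for the device-specific steps; assumed, not the drivers' names. */
static int dev_stop_dma(struct ring_dev_s *d) { return 0; }
static void dev_free_rings(struct ring_dev_s *d) { }
static int dev_alloc_rings(struct ring_dev_s *d) { return 0; }
static int dev_start_dma(struct ring_dev_s *d) { return 0; }

static int resize_rings(struct ring_dev_s *d, u32 rx, u32 tx)
{
	int err;

	err = dev_stop_dma(d);		/* quiesce before touching descriptors */
	if (err)
		return err;
	dev_free_rings(d);		/* old rings sized with the old counts */

	d->num_rx_ring = rx;		/* commit new sizes only while stopped */
	d->num_tx_ring = tx;

	err = dev_alloc_rings(d);
	if (err)
		return err;
	return dev_start_dma(d);
}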
ring              662 drivers/net/ethernet/sfc/ethtool.c 				      struct ethtool_ringparam *ring)
ring              666 drivers/net/ethernet/sfc/ethtool.c 	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
ring              667 drivers/net/ethernet/sfc/ethtool.c 	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
ring              668 drivers/net/ethernet/sfc/ethtool.c 	ring->rx_pending = efx->rxq_entries;
ring              669 drivers/net/ethernet/sfc/ethtool.c 	ring->tx_pending = efx->txq_entries;
ring              673 drivers/net/ethernet/sfc/ethtool.c 				     struct ethtool_ringparam *ring)
ring              678 drivers/net/ethernet/sfc/ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring              679 drivers/net/ethernet/sfc/ethtool.c 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring              680 drivers/net/ethernet/sfc/ethtool.c 	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
ring              683 drivers/net/ethernet/sfc/ethtool.c 	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
ring              690 drivers/net/ethernet/sfc/ethtool.c 	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
ring              691 drivers/net/ethernet/sfc/ethtool.c 	if (txq_entries != ring->tx_pending)
ring              696 drivers/net/ethernet/sfc/ethtool.c 	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
ring              640 drivers/net/ethernet/sfc/falcon/ethtool.c 				      struct ethtool_ringparam *ring)
ring              644 drivers/net/ethernet/sfc/falcon/ethtool.c 	ring->rx_max_pending = EF4_MAX_DMAQ_SIZE;
ring              645 drivers/net/ethernet/sfc/falcon/ethtool.c 	ring->tx_max_pending = EF4_MAX_DMAQ_SIZE;
ring              646 drivers/net/ethernet/sfc/falcon/ethtool.c 	ring->rx_pending = efx->rxq_entries;
ring              647 drivers/net/ethernet/sfc/falcon/ethtool.c 	ring->tx_pending = efx->txq_entries;
ring              651 drivers/net/ethernet/sfc/falcon/ethtool.c 				     struct ethtool_ringparam *ring)
ring              656 drivers/net/ethernet/sfc/falcon/ethtool.c 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring              657 drivers/net/ethernet/sfc/falcon/ethtool.c 	    ring->rx_pending > EF4_MAX_DMAQ_SIZE ||
ring              658 drivers/net/ethernet/sfc/falcon/ethtool.c 	    ring->tx_pending > EF4_MAX_DMAQ_SIZE)
ring              661 drivers/net/ethernet/sfc/falcon/ethtool.c 	if (ring->rx_pending < EF4_RXQ_MIN_ENT) {
ring              668 drivers/net/ethernet/sfc/falcon/ethtool.c 	txq_entries = max(ring->tx_pending, EF4_TXQ_MIN_ENT(efx));
ring              669 drivers/net/ethernet/sfc/falcon/ethtool.c 	if (txq_entries != ring->tx_pending)
ring              674 drivers/net/ethernet/sfc/falcon/ethtool.c 	return ef4_realloc_channels(efx, ring->rx_pending, txq_entries);
ring              935 drivers/net/ethernet/sgi/ioc3-eth.c 	unsigned long ring;
ring              938 drivers/net/ethernet/sgi/ioc3-eth.c 	ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
ring              939 drivers/net/ethernet/sgi/ioc3-eth.c 	writel(ring >> 32, &regs->erbr_h);
ring              940 drivers/net/ethernet/sgi/ioc3-eth.c 	writel(ring & 0xffffffff, &regs->erbr_l);
ring              944 drivers/net/ethernet/sgi/ioc3-eth.c 	ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);
ring              949 drivers/net/ethernet/sgi/ioc3-eth.c 	writel(ring >> 32, &regs->etbr_h);
ring              950 drivers/net/ethernet/sgi/ioc3-eth.c 	writel(ring & 0xffffffff, &regs->etbr_l);
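The ioc3 lines split a 64-bit ring base across a pair of 32-bit registers, high word first. The kernel's upper_32_bits()/lower_32_bits() helpers express the same split without the open-coded shift and mask; the register pointers here are hypothetical:

#include <linux/io.h>
#include <linux/kernel.h>

static void program_ring_base(void __iomem *base_hi, void __iomem *base_lo,
			      u64 ring_dma)
{
	/* High word first, matching the ioc3 sequence above. */
	writel(upper_32_bits(ring_dma), base_hi);
	writel(lower_32_bits(ring_dma), base_lo);
}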
ring              282 drivers/net/ethernet/sun/cassini.c static void cas_disable_irq(struct cas *cp, const int ring)
ring              285 drivers/net/ethernet/sun/cassini.c 	if (ring == 0) {
ring              292 drivers/net/ethernet/sun/cassini.c 		switch (ring) {
ring              304 drivers/net/ethernet/sun/cassini.c 			       cp->regs + REG_PLUS_INTRN_MASK(ring));
ring              309 drivers/net/ethernet/sun/cassini.c 			       REG_PLUS_INTRN_MASK(ring));
ring              323 drivers/net/ethernet/sun/cassini.c static void cas_enable_irq(struct cas *cp, const int ring)
ring              325 drivers/net/ethernet/sun/cassini.c 	if (ring == 0) { /* all but TX_DONE */
ring              331 drivers/net/ethernet/sun/cassini.c 		switch (ring) {
ring              343 drivers/net/ethernet/sun/cassini.c 			       REG_PLUS_INTRN_MASK(ring));
ring             1377 drivers/net/ethernet/sun/cassini.c static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
ring             1843 drivers/net/ethernet/sun/cassini.c static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
ring             1850 drivers/net/ethernet/sun/cassini.c 	spin_lock(&cp->tx_lock[ring]);
ring             1851 drivers/net/ethernet/sun/cassini.c 	txds = cp->init_txds[ring];
ring             1852 drivers/net/ethernet/sun/cassini.c 	skbs = cp->tx_skbs[ring];
ring             1853 drivers/net/ethernet/sun/cassini.c 	entry = cp->tx_old[ring];
ring             1855 drivers/net/ethernet/sun/cassini.c 	count = TX_BUFF_COUNT(ring, entry, limit);
ring             1864 drivers/net/ethernet/sun/cassini.c 			entry = TX_DESC_NEXT(ring, entry);
ring             1870 drivers/net/ethernet/sun/cassini.c 			+ cp->tx_tiny_use[ring][entry].nbufs + 1;
ring             1875 drivers/net/ethernet/sun/cassini.c 			     "tx[%d] done, slot %d\n", ring, entry);
ring             1878 drivers/net/ethernet/sun/cassini.c 		cp->tx_tiny_use[ring][entry].nbufs = 0;
ring             1888 drivers/net/ethernet/sun/cassini.c 			entry = TX_DESC_NEXT(ring, entry);
ring             1891 drivers/net/ethernet/sun/cassini.c 			if (cp->tx_tiny_use[ring][entry].used) {
ring             1892 drivers/net/ethernet/sun/cassini.c 				cp->tx_tiny_use[ring][entry].used = 0;
ring             1893 drivers/net/ethernet/sun/cassini.c 				entry = TX_DESC_NEXT(ring, entry);
ring             1897 drivers/net/ethernet/sun/cassini.c 		spin_lock(&cp->stat_lock[ring]);
ring             1898 drivers/net/ethernet/sun/cassini.c 		cp->net_stats[ring].tx_packets++;
ring             1899 drivers/net/ethernet/sun/cassini.c 		cp->net_stats[ring].tx_bytes += skb->len;
ring             1900 drivers/net/ethernet/sun/cassini.c 		spin_unlock(&cp->stat_lock[ring]);
ring             1903 drivers/net/ethernet/sun/cassini.c 	cp->tx_old[ring] = entry;
ring             1910 drivers/net/ethernet/sun/cassini.c 	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
ring             1912 drivers/net/ethernet/sun/cassini.c 	spin_unlock(&cp->tx_lock[ring]);
ring             1918 drivers/net/ethernet/sun/cassini.c         int limit, ring;
ring             1926 drivers/net/ethernet/sun/cassini.c 	for (ring = 0; ring < N_TX_RINGS; ring++) {
ring             1933 drivers/net/ethernet/sun/cassini.c 		limit = readl(cp->regs + REG_TX_COMPN(ring));
ring             1935 drivers/net/ethernet/sun/cassini.c 		if (cp->tx_old[ring] != limit)
ring             1936 drivers/net/ethernet/sun/cassini.c 			cas_tx_ringN(cp, ring, limit);
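cas_tx_ringN() is a standard TX-completion reclaim: the MAC publishes its consumer index in REG_TX_COMPN(ring), and the driver walks tx_old forward to it, freeing each completed skb and waking the queue once space returns. A single-ring sketch of the walk (the real loop also unmaps every fragment, handles the tiny-buffer bounce slots, and holds tx_lock; all omitted, and the names are hypothetical):

#include <linux/skbuff.h>

struct tx_ring_state {
	struct sk_buff **skbs;	/* one slot per descriptor */
	unsigned int old;	/* oldest un-reclaimed slot */
	unsigned int size;	/* descriptor count, a power of two */
};

static void tx_reclaim(struct tx_ring_state *r, unsigned int hw_done)
{
	while (r->old != hw_done) {
		struct sk_buff *skb = r->skbs[r->old];

		/* Slots covered by a multi-descriptor skb stay NULL. */
		if (skb) {
			r->skbs[r->old] = NULL;
			dev_kfree_skb_any(skb);	/* safe in irq context */
		}
		r->old = (r->old + 1) & (r->size - 1);
	}
}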
ring             2178 drivers/net/ethernet/sun/cassini.c static void cas_post_page(struct cas *cp, const int ring, const int index)
ring             2183 drivers/net/ethernet/sun/cassini.c 	entry = cp->rx_old[ring];
ring             2185 drivers/net/ethernet/sun/cassini.c 	new = cas_page_swap(cp, ring, index);
ring             2186 drivers/net/ethernet/sun/cassini.c 	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
ring             2187 drivers/net/ethernet/sun/cassini.c 	cp->init_rxds[ring][entry].index  =
ring             2189 drivers/net/ethernet/sun/cassini.c 			    CAS_BASE(RX_INDEX_RING, ring));
ring             2191 drivers/net/ethernet/sun/cassini.c 	entry = RX_DESC_ENTRY(ring, entry + 1);
ring             2192 drivers/net/ethernet/sun/cassini.c 	cp->rx_old[ring] = entry;
ring             2197 drivers/net/ethernet/sun/cassini.c 	if (ring == 0)
ring             2206 drivers/net/ethernet/sun/cassini.c static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
ring             2210 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
ring             2212 drivers/net/ethernet/sun/cassini.c 	entry = cp->rx_old[ring];
ring             2215 drivers/net/ethernet/sun/cassini.c 		     "rxd[%d] interrupt, done: %d\n", ring, entry);
ring             2219 drivers/net/ethernet/sun/cassini.c 	last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
ring             2229 drivers/net/ethernet/sun/cassini.c 				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
ring             2233 drivers/net/ethernet/sun/cassini.c 				cp->rx_old[ring]  = entry;
ring             2234 drivers/net/ethernet/sun/cassini.c 				cp->rx_last[ring] = num ? num - released : 0;
ring             2240 drivers/net/ethernet/sun/cassini.c 			cp->init_rxds[ring][entry].buffer =
ring             2251 drivers/net/ethernet/sun/cassini.c 		entry = RX_DESC_ENTRY(ring, entry + 1);
ring             2253 drivers/net/ethernet/sun/cassini.c 	cp->rx_old[ring] = entry;
ring             2258 drivers/net/ethernet/sun/cassini.c 	if (ring == 0)
ring             2279 drivers/net/ethernet/sun/cassini.c static int cas_rx_ringN(struct cas *cp, int ring, int budget)
ring             2281 drivers/net/ethernet/sun/cassini.c 	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
ring             2287 drivers/net/ethernet/sun/cassini.c 		     ring,
ring             2288 drivers/net/ethernet/sun/cassini.c 		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
ring             2290 drivers/net/ethernet/sun/cassini.c 	entry = cp->rx_new[ring];
ring             2316 drivers/net/ethernet/sun/cassini.c 			spin_lock(&cp->stat_lock[ring]);
ring             2317 drivers/net/ethernet/sun/cassini.c 			cp->net_stats[ring].rx_errors++;
ring             2319 drivers/net/ethernet/sun/cassini.c 				cp->net_stats[ring].rx_length_errors++;
ring             2321 drivers/net/ethernet/sun/cassini.c 				cp->net_stats[ring].rx_crc_errors++;
ring             2322 drivers/net/ethernet/sun/cassini.c 			spin_unlock(&cp->stat_lock[ring]);
ring             2326 drivers/net/ethernet/sun/cassini.c 			spin_lock(&cp->stat_lock[ring]);
ring             2327 drivers/net/ethernet/sun/cassini.c 			++cp->net_stats[ring].rx_dropped;
ring             2328 drivers/net/ethernet/sun/cassini.c 			spin_unlock(&cp->stat_lock[ring]);
ring             2348 drivers/net/ethernet/sun/cassini.c 		spin_lock(&cp->stat_lock[ring]);
ring             2349 drivers/net/ethernet/sun/cassini.c 		cp->net_stats[ring].rx_packets++;
ring             2350 drivers/net/ethernet/sun/cassini.c 		cp->net_stats[ring].rx_bytes += len;
ring             2351 drivers/net/ethernet/sun/cassini.c 		spin_unlock(&cp->stat_lock[ring]);
ring             2379 drivers/net/ethernet/sun/cassini.c 		entry = RX_COMP_ENTRY(ring, entry + 1 +
ring             2386 drivers/net/ethernet/sun/cassini.c 	cp->rx_new[ring] = entry;
ring             2396 drivers/net/ethernet/sun/cassini.c 				struct cas *cp, int ring)
ring             2398 drivers/net/ethernet/sun/cassini.c 	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
ring             2401 drivers/net/ethernet/sun/cassini.c 	last = cp->rx_cur[ring];
ring             2402 drivers/net/ethernet/sun/cassini.c 	entry = cp->rx_new[ring];
ring             2405 drivers/net/ethernet/sun/cassini.c 		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
ring             2410 drivers/net/ethernet/sun/cassini.c 		last = RX_COMP_ENTRY(ring, last + 1);
ring             2412 drivers/net/ethernet/sun/cassini.c 	cp->rx_cur[ring] = last;
ring             2414 drivers/net/ethernet/sun/cassini.c 	if (ring == 0)
ring             2417 drivers/net/ethernet/sun/cassini.c 		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
ring             2428 drivers/net/ethernet/sun/cassini.c 				   const int ring)
ring             2431 drivers/net/ethernet/sun/cassini.c 		cas_post_rxcs_ringN(dev, cp, ring);
ring             2439 drivers/net/ethernet/sun/cassini.c 	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
ring             2440 drivers/net/ethernet/sun/cassini.c 	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
ring             2452 drivers/net/ethernet/sun/cassini.c 		cas_rx_ringN(cp, ring, 0);
ring             2458 drivers/net/ethernet/sun/cassini.c 		cas_handle_irqN(dev, cp, status, ring);
ring             2716 drivers/net/ethernet/sun/cassini.c static inline int cas_intme(int ring, int entry)
ring             2719 drivers/net/ethernet/sun/cassini.c 	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
ring             2725 drivers/net/ethernet/sun/cassini.c static void cas_write_txd(struct cas *cp, int ring, int entry,
ring             2728 drivers/net/ethernet/sun/cassini.c 	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
ring             2731 drivers/net/ethernet/sun/cassini.c 	if (cas_intme(ring, entry))
ring             2739 drivers/net/ethernet/sun/cassini.c static inline void *tx_tiny_buf(struct cas *cp, const int ring,
ring             2742 drivers/net/ethernet/sun/cassini.c 	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
ring             2745 drivers/net/ethernet/sun/cassini.c static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
ring             2748 drivers/net/ethernet/sun/cassini.c 	cp->tx_tiny_use[ring][tentry].nbufs++;
ring             2749 drivers/net/ethernet/sun/cassini.c 	cp->tx_tiny_use[ring][entry].used = 1;
ring             2750 drivers/net/ethernet/sun/cassini.c 	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
ring             2753 drivers/net/ethernet/sun/cassini.c static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
ring             2763 drivers/net/ethernet/sun/cassini.c 	spin_lock_irqsave(&cp->tx_lock[ring], flags);
ring             2766 drivers/net/ethernet/sun/cassini.c 	if (TX_BUFFS_AVAIL(cp, ring) <=
ring             2769 drivers/net/ethernet/sun/cassini.c 		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
ring             2784 drivers/net/ethernet/sun/cassini.c 	entry = cp->tx_new[ring];
ring             2785 drivers/net/ethernet/sun/cassini.c 	cp->tx_skbs[ring][entry] = skb;
ring             2797 drivers/net/ethernet/sun/cassini.c 		cas_write_txd(cp, ring, entry, mapping, len - tabort,
ring             2799 drivers/net/ethernet/sun/cassini.c 		entry = TX_DESC_NEXT(ring, entry);
ring             2802 drivers/net/ethernet/sun/cassini.c 			      tx_tiny_buf(cp, ring, entry), tabort);
ring             2803 drivers/net/ethernet/sun/cassini.c 		mapping = tx_tiny_map(cp, ring, entry, tentry);
ring             2804 drivers/net/ethernet/sun/cassini.c 		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
ring             2807 drivers/net/ethernet/sun/cassini.c 		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
ring             2810 drivers/net/ethernet/sun/cassini.c 	entry = TX_DESC_NEXT(ring, entry);
ring             2824 drivers/net/ethernet/sun/cassini.c 			cas_write_txd(cp, ring, entry, mapping, len - tabort,
ring             2826 drivers/net/ethernet/sun/cassini.c 			entry = TX_DESC_NEXT(ring, entry);
ring             2829 drivers/net/ethernet/sun/cassini.c 			memcpy(tx_tiny_buf(cp, ring, entry),
ring             2833 drivers/net/ethernet/sun/cassini.c 			mapping = tx_tiny_map(cp, ring, entry, tentry);
ring             2837 drivers/net/ethernet/sun/cassini.c 		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
ring             2839 drivers/net/ethernet/sun/cassini.c 		entry = TX_DESC_NEXT(ring, entry);
ring             2842 drivers/net/ethernet/sun/cassini.c 	cp->tx_new[ring] = entry;
ring             2843 drivers/net/ethernet/sun/cassini.c 	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
ring             2848 drivers/net/ethernet/sun/cassini.c 		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
ring             2849 drivers/net/ethernet/sun/cassini.c 	writel(entry, cp->regs + REG_TX_KICKN(ring));
ring             2850 drivers/net/ethernet/sun/cassini.c 	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
ring             2861 drivers/net/ethernet/sun/cassini.c 	static int ring;
ring             2869 drivers/net/ethernet/sun/cassini.c 	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
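The transmit entry point above picks a ring with ring++ & N_TX_RINGS_MASK: a free-running counter masked by a power-of-two ring count gives cheap round-robin spreading with no modulo. The counter in the original is a function-local static int, so concurrent senders race on it; the race is benign (any ring is a valid answer), but a per-device counter, as sketched below, keeps the idiom tidy:

struct xmit_state {
	unsigned int next_ring;		/* free-running, wraps naturally */
};

#define N_RINGS		4		/* must be a power of two */
#define N_RINGS_MASK	(N_RINGS - 1)

static unsigned int pick_tx_ring(struct xmit_state *s)
{
	return s->next_ring++ & N_RINGS_MASK;
}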
ring             3879 drivers/net/ethernet/sun/cassini.c static void cas_clean_txd(struct cas *cp, int ring)
ring             3881 drivers/net/ethernet/sun/cassini.c 	struct cas_tx_desc *txd = cp->init_txds[ring];
ring             3882 drivers/net/ethernet/sun/cassini.c 	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
ring             3886 drivers/net/ethernet/sun/cassini.c 	size = TX_DESC_RINGN_SIZE(ring);
ring             3915 drivers/net/ethernet/sun/cassini.c 				if (cp->tx_tiny_use[ring][ent].used)
ring             3923 drivers/net/ethernet/sun/cassini.c 	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
ring             3927 drivers/net/ethernet/sun/cassini.c static inline void cas_free_rx_desc(struct cas *cp, int ring)
ring             3929 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
ring             3932 drivers/net/ethernet/sun/cassini.c 	size = RX_DESC_RINGN_SIZE(ring);
ring             3967 drivers/net/ethernet/sun/cassini.c static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
ring             3969 drivers/net/ethernet/sun/cassini.c 	cas_page_t **page = cp->rx_pages[ring];
ring             3972 drivers/net/ethernet/sun/cassini.c 	size = RX_DESC_RINGN_SIZE(ring);
ring              285 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 			 struct xlgmac_ring *ring,
ring              294 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
ring              312 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 			 struct xlgmac_ring *ring,
ring              318 drivers/net/ethernet/synopsys/dwc-xlgmac-common.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, idx);
ring               75 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			     struct xlgmac_ring *ring)
ring               80 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring)
ring               83 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (ring->desc_data_head) {
ring               84 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		for (i = 0; i < ring->dma_desc_count; i++) {
ring               85 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, i);
ring               89 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		kfree(ring->desc_data_head);
ring               90 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->desc_data_head = NULL;
ring               93 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (ring->rx_hdr_pa.pages) {
ring               94 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
ring               95 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
ring               96 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		put_page(ring->rx_hdr_pa.pages);
ring               98 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_hdr_pa.pages = NULL;
ring               99 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_hdr_pa.pages_len = 0;
ring              100 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_hdr_pa.pages_offset = 0;
ring              101 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_hdr_pa.pages_dma = 0;
ring              104 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (ring->rx_buf_pa.pages) {
ring              105 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
ring              106 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
ring              107 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		put_page(ring->rx_buf_pa.pages);
ring              109 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_buf_pa.pages = NULL;
ring              110 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_buf_pa.pages_len = 0;
ring              111 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_buf_pa.pages_offset = 0;
ring              112 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->rx_buf_pa.pages_dma = 0;
ring              115 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (ring->dma_desc_head) {
ring              118 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 				  ring->dma_desc_count),
ring              119 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 				  ring->dma_desc_head,
ring              120 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 				  ring->dma_desc_head_addr);
ring              121 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->dma_desc_head = NULL;
ring              126 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			    struct xlgmac_ring *ring,
ring              129 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring)
ring              133 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	ring->dma_desc_count = dma_desc_count;
ring              134 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
ring              137 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 					&ring->dma_desc_head_addr,
ring              139 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring->dma_desc_head)
ring              143 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	ring->desc_data_head = kcalloc(dma_desc_count,
ring              146 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring->desc_data_head)
ring              151 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->dma_desc_head,
ring              152 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		&ring->dma_desc_head_addr,
ring              153 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->desc_data_head);
ring              390 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 				struct xlgmac_ring *ring,
ring              395 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring->rx_hdr_pa.pages) {
ring              396 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
ring              402 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if (!ring->rx_buf_pa.pages) {
ring              404 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
ring              411 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
ring              415 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
ring              427 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct xlgmac_ring *ring;
ring              433 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring = channel->tx_ring;
ring              434 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		if (!ring)
ring              437 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_desc = ring->dma_desc_head;
ring              438 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_desc_addr = ring->dma_desc_head_addr;
ring              440 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		for (j = 0; j < ring->dma_desc_count; j++) {
ring              441 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
ring              450 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->cur = 0;
ring              451 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->dirty = 0;
ring              452 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		memset(&ring->tx, 0, sizeof(ring->tx));
ring              464 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct xlgmac_ring *ring;
ring              470 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring = channel->rx_ring;
ring              471 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		if (!ring)
ring              474 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_desc = ring->dma_desc_head;
ring              475 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		dma_desc_addr = ring->dma_desc_head_addr;
ring              477 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		for (j = 0; j < ring->dma_desc_count; j++) {
ring              478 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
ring              483 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
ring              490 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->cur = 0;
ring              491 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		ring->dirty = 0;
ring              501 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	struct xlgmac_ring *ring = channel->tx_ring;
ring              512 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	start_index = ring->cur;
ring              513 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	cur_index = ring->cur;
ring              515 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	pkt_info = &ring->pkt_info;
ring              527 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
ring              528 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	    (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
ring              530 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              551 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              576 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              610 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              618 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
ring              628 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
ring              677 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 				 struct xlgmac_ring *ring)
ring              688 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
ring              699 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	ring->tx.xmit_more = 0;
ring              705 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->tx_ring;
ring              711 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	int start_index = ring->cur;
ring              712 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	int cur_index = ring->cur;
ring              716 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	pkt_info = &ring->pkt_info;
ring              727 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	if (tso && (pkt_info->mss != ring->tx.cur_mss))
ring              732 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))
ring              747 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	ring->coalesce_count += pkt_info->tx_packets;
ring              752 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	else if ((ring->coalesce_count % pdata->tx_frames) <
ring              758 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              789 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			ring->tx.cur_mss = pkt_info->mss;
ring              818 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 			ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag;
ring              822 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              922 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
ring              983 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
ring              991 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		xlgmac_dump_tx_desc(pdata, ring, start_index,
ring              997 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	ring->cur = cur_index + 1;
ring             1001 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		xlgmac_tx_start_xmit(channel, ring);
ring             1003 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		ring->tx.xmit_more = 1;
ring             1006 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		  channel->name, start_index & (ring->dma_desc_count - 1),
ring             1007 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		  (ring->cur - 1) & (ring->dma_desc_count - 1));
ring             1058 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->tx_ring;
ring             1060 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	int start_index = ring->cur;
ring             1064 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	for (i = 0; i < ring->dma_desc_count; i++) {
ring             1065 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, i);
ring             1072 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_TDRLR));
ring             1075 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
ring             1142 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->rx_ring;
ring             1143 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	unsigned int start_index = ring->cur;
ring             1148 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	for (i = 0; i < ring->dma_desc_count; i++) {
ring             1149 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, i);
ring             1156 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	writel(ring->dma_desc_count - 1, XLGMAC_DMA_REG(channel, DMA_CH_RDRLR));
ring             1159 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index);
ring             1166 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, start_index +
ring             1167 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 					  ring->dma_desc_count - 1);
ring             2641 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	struct xlgmac_ring *ring = channel->rx_ring;
ring             2648 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
ring             2650 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 	pkt_info = &ring->pkt_info;
ring             2662 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		xlgmac_dump_rx_desc(pdata, ring, ring->cur);
ring             2811 drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c 		  ring->cur & (ring->dma_desc_count - 1), ring->cur);
ring               28 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
ring               30 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	return (ring->dma_desc_count - (ring->cur - ring->dirty));
ring               33 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
ring               35 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	return (ring->cur - ring->dirty);
ring               40 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			struct xlgmac_ring *ring,
ring               45 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (count > xlgmac_tx_avail_desc(ring)) {
ring               49 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring->tx.queue_stopped = 1;
ring               54 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (ring->tx.xmit_more)
ring               55 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			pdata->hw_ops.tx_start_xmit(channel, ring);
ring              115 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			       struct xlgmac_ring *ring,
ring              134 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
ring              161 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
ring              524 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring;
ring              529 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring = channel->tx_ring;
ring              530 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!ring)
ring              533 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		for (j = 0; j < ring->dma_desc_count; j++) {
ring              534 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
ring              545 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring;
ring              550 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring = channel->rx_ring;
ring              551 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (!ring)
ring              554 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		for (j = 0; j < ring->dma_desc_count; j++) {
ring              555 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			desc_data = XLGMAC_GET_DESC_DATA(ring, j);
ring              708 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring;
ring              718 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	ring = channel->tx_ring;
ring              719 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	tx_pkt_info = &ring->pkt_info;
ring              730 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
ring              733 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	ret = xlgmac_maybe_stop_tx_queue(channel, ring,
ring              762 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
ring              954 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring = channel->rx_ring;
ring              962 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	while (ring->dirty != ring->cur) {
ring              963 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
ring              968 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
ring              971 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
ring              973 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring->dirty++;
ring              982 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
ring             1037 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring = channel->tx_ring;
ring             1052 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (!ring)
ring             1055 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	cur = ring->cur;
ring             1063 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	       (ring->dirty != cur)) {
ring             1064 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
ring             1076 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 			xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
ring             1088 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring->dirty++;
ring             1096 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if ((ring->tx.queue_stopped == 1) &&
ring             1097 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	    (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
ring             1098 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring->tx.queue_stopped = 0;
ring             1110 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	struct xlgmac_ring *ring = channel->rx_ring;
ring             1126 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	if (!ring)
ring             1134 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
ring             1135 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 	pkt_info = &ring->pkt_info;
ring             1150 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
ring             1152 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
ring             1159 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		ring->cur++;
ring             1268 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c 		desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
ring               94 drivers/net/ethernet/synopsys/dwc-xlgmac.h #define XLGMAC_GET_DESC_DATA(ring, idx) ({				\
ring               95 drivers/net/ethernet/synopsys/dwc-xlgmac.h 	typeof(ring) _ring = (ring);					\
ring              387 drivers/net/ethernet/synopsys/dwc-xlgmac.h 			     struct xlgmac_ring *ring,
ring              434 drivers/net/ethernet/synopsys/dwc-xlgmac.h 			      struct xlgmac_ring *ring);
ring              637 drivers/net/ethernet/synopsys/dwc-xlgmac.h 			 struct xlgmac_ring *ring,
ring              642 drivers/net/ethernet/synopsys/dwc-xlgmac.h 			 struct xlgmac_ring *ring,
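
The xlgmac ring above keeps free-running cur/dirty counters: availability is dma_desc_count - (cur - dirty), and XLGMAC_GET_DESC_DATA() masks the free-running index down to a slot. Because the counters are unsigned and the ring size is a power of two, cur - dirty stays correct across wraparound. A compilable sketch of the same arithmetic, assuming a power-of-two ring size (names illustrative):

    #include <stdint.h>
    #include <assert.h>

    #define RING_SIZE 256u  /* must be a power of two */

    struct toy_ring {
            uint32_t cur;    /* free-running producer count */
            uint32_t dirty;  /* free-running consumer count */
    };

    static inline uint32_t slot(uint32_t idx)
    {
            return idx & (RING_SIZE - 1); /* mask, like XLGMAC_GET_DESC_DATA */
    }

    static inline uint32_t tx_avail(const struct toy_ring *r)
    {
            return RING_SIZE - (r->cur - r->dirty); /* wrap-safe unsigned math */
    }

    int main(void)
    {
            struct toy_ring r = { .cur = UINT32_MAX, .dirty = UINT32_MAX - 3 };

            assert(tx_avail(&r) == RING_SIZE - 3); /* holds across wraparound */
            r.cur++;                               /* wraps to 0 */
            assert(tx_avail(&r) == RING_SIZE - 4);
            assert(slot(r.cur) == 0);
            return 0;
    }
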
ring             2259 drivers/net/ethernet/tehuti/tehuti.c bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring             2264 drivers/net/ethernet/tehuti/tehuti.c 	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
ring             2265 drivers/net/ethernet/tehuti/tehuti.c 	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
ring             2266 drivers/net/ethernet/tehuti/tehuti.c 	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
ring             2267 drivers/net/ethernet/tehuti/tehuti.c 	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
ring             2276 drivers/net/ethernet/tehuti/tehuti.c bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ring             2283 drivers/net/ethernet/tehuti/tehuti.c 		if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
ring             2290 drivers/net/ethernet/tehuti/tehuti.c 		if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
ring              830 drivers/net/ethernet/ti/cpmac.c 						struct ethtool_ringparam *ring)
ring              834 drivers/net/ethernet/ti/cpmac.c 	ring->rx_max_pending = 1024;
ring              835 drivers/net/ethernet/ti/cpmac.c 	ring->rx_mini_max_pending = 1;
ring              836 drivers/net/ethernet/ti/cpmac.c 	ring->rx_jumbo_max_pending = 1;
ring              837 drivers/net/ethernet/ti/cpmac.c 	ring->tx_max_pending = 1;
ring              839 drivers/net/ethernet/ti/cpmac.c 	ring->rx_pending = priv->ring_size;
ring              840 drivers/net/ethernet/ti/cpmac.c 	ring->rx_mini_pending = 1;
ring              841 drivers/net/ethernet/ti/cpmac.c 	ring->rx_jumbo_pending = 1;
ring              842 drivers/net/ethernet/ti/cpmac.c 	ring->tx_pending = 1;
ring              846 drivers/net/ethernet/ti/cpmac.c 						struct ethtool_ringparam *ring)
ring              852 drivers/net/ethernet/ti/cpmac.c 	priv->ring_size = ring->rx_pending;
ring              279 drivers/net/ethernet/toshiba/spider_net.c 	descr = chain->ring;
ring              284 drivers/net/ethernet/toshiba/spider_net.c 	} while (descr != chain->ring);
ring              317 drivers/net/ethernet/toshiba/spider_net.c 	memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
ring              320 drivers/net/ethernet/toshiba/spider_net.c 	descr = chain->ring;
ring              335 drivers/net/ethernet/toshiba/spider_net.c 	(descr-1)->next = chain->ring;
ring              336 drivers/net/ethernet/toshiba/spider_net.c 	chain->ring->prev = descr-1;
ring              339 drivers/net/ethernet/toshiba/spider_net.c 	chain->head = chain->ring;
ring              340 drivers/net/ethernet/toshiba/spider_net.c 	chain->tail = chain->ring;
ring              990 drivers/net/ethernet/toshiba/spider_net.c 	int off = start - chain->ring;
ring             1005 drivers/net/ethernet/toshiba/spider_net.c 		off = descr - chain->ring;
ring             1047 drivers/net/ethernet/toshiba/spider_net.c 		cnt = descr - chain->ring;
ring             1202 drivers/net/ethernet/toshiba/spider_net.c 		pr_err("which=%ld\n", descr - card->rx_chain.ring);
ring             2328 drivers/net/ethernet/toshiba/spider_net.c 	card->rx_chain.ring = card->darray;
ring             2330 drivers/net/ethernet/toshiba/spider_net.c 	card->tx_chain.ring = card->darray + rx_descriptors;
ring              396 drivers/net/ethernet/toshiba/spider_net.h 	struct spider_net_descr *ring;
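
spider_net links its flat descriptor array into a circular doubly linked chain: each descr points at the next, the last wraps back to chain->ring, and head and tail both start at the ring base. A minimal sketch of that wiring (toy types, illustrative only):

    struct toy_descr {
            struct toy_descr *next, *prev;
    };

    struct toy_chain {
            struct toy_descr *ring, *head, *tail;
            int num_desc;
    };

    /* Wire a flat array into a circular doubly linked list, as the
     * spider_net descriptor-chain init above does. */
    static void toy_chain_init(struct toy_chain *chain,
                               struct toy_descr *arr, int num)
    {
            int i;

            chain->ring = arr;
            chain->num_desc = num;
            for (i = 0; i < num; i++) {
                    arr[i].next = &arr[(i + 1) % num];       /* last -> first */
                    arr[i].prev = &arr[(i + num - 1) % num]; /* first -> last */
            }
            chain->head = chain->ring;
            chain->tail = chain->ring;
    }
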
ring             1156 drivers/net/ethernet/via/via-rhine.c 	void *ring;
ring             1159 drivers/net/ethernet/via/via-rhine.c 	ring = dma_alloc_coherent(hwdev,
ring             1164 drivers/net/ethernet/via/via-rhine.c 	if (!ring) {
ring             1177 drivers/net/ethernet/via/via-rhine.c 					  ring, ring_dma);
ring             1182 drivers/net/ethernet/via/via-rhine.c 	rp->rx_ring = ring;
ring             1183 drivers/net/ethernet/via/via-rhine.c 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
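
via-rhine makes a single dma_alloc_coherent() call and carves it in two: RX descriptors sit at the start and TX descriptors begin at ring + RX_RING_SIZE * sizeof(struct rx_desc). One allocation gives one contiguous DMA region and a single free on teardown. A hedged kernel-style sketch (struct toy_priv, toy_rx_desc, toy_tx_desc, RX_COUNT and TX_COUNT are illustrative stand-ins):

    #include <linux/dma-mapping.h>

    static int toy_alloc_rings(struct device *hwdev, struct toy_priv *rp)
    {
            size_t rx_sz = RX_COUNT * sizeof(struct toy_rx_desc);
            size_t tx_sz = TX_COUNT * sizeof(struct toy_tx_desc);
            dma_addr_t ring_dma;
            void *ring;

            /* One coherent block for both rings. */
            ring = dma_alloc_coherent(hwdev, rx_sz + tx_sz, &ring_dma,
                                      GFP_KERNEL);
            if (!ring)
                    return -ENOMEM;

            /* RX descriptors first, TX descriptors right behind them. */
            rp->rx_ring = ring;
            rp->tx_ring = ring + rx_sz;        /* void * arithmetic, GNU C */
            rp->rx_ring_dma = ring_dma;
            rp->tx_ring_dma = ring_dma + rx_sz;
            return 0;
    }
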
ring              565 drivers/net/ethernet/via/via-velocity.c 		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
ring             1446 drivers/net/ethernet/via/via-velocity.c 		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
ring             1483 drivers/net/ethernet/via/via-velocity.c 	vptr->rx.ring = pool;
ring             1516 drivers/net/ethernet/via/via-velocity.c 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
ring             1549 drivers/net/ethernet/via/via-velocity.c 		struct rx_desc *rd = vptr->rx.ring + dirty;
ring             1587 drivers/net/ethernet/via/via-velocity.c 		struct rx_desc *rd = vptr->rx.ring + i;
ring             1675 drivers/net/ethernet/via/via-velocity.c 	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
ring             2033 drivers/net/ethernet/via/via-velocity.c 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
ring             2095 drivers/net/ethernet/via/via-velocity.c 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
ring             1459 drivers/net/ethernet/via/via-velocity.h 		struct rx_desc *ring;
ring              289 drivers/net/fddi/defza.c 	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
ring              301 drivers/net/fddi/defza.c 	buf = fp->mmio + readl_u(&ring->buffer);
ring              303 drivers/net/fddi/defza.c 	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
ring              366 drivers/net/fddi/defza.c 	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
ring              374 drivers/net/fddi/defza.c 	return ring;
ring              381 drivers/net/fddi/defza.c 	struct fza_ring_cmd __iomem *ring;
ring              388 drivers/net/fddi/defza.c 	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
ring              390 drivers/net/fddi/defza.c 	if (!ring)
ring              403 drivers/net/fddi/defza.c 	stat = readl_u(&ring->stat);
ring              414 drivers/net/fddi/defza.c 		*init = fp->mmio + readl_u(&ring->buffer);
ring             1150 drivers/net/fddi/defza.c 	struct fza_ring_cmd __iomem *ring;
ring             1195 drivers/net/fddi/defza.c 	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
ring             1197 drivers/net/fddi/defza.c 	if (!ring)
ring             1206 drivers/net/fddi/defza.c 	stat = readl_u(&ring->stat);
ring              125 drivers/net/fjes/fjes_hw.c 	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
ring              137 drivers/net/fjes/fjes_hw.c 	epbh->ring = NULL;
ring              913 drivers/net/fjes/fjes_hw.c 	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
ring              944 drivers/net/fjes/fjes_hw.c 	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
ring              251 drivers/net/fjes/fjes_hw.h 		u8 *ring;
ring              677 drivers/net/hippi/rrunner.c 	cmd.ring = 0;
ring              360 drivers/net/hippi/rrunner.h 	u8      ring;
ring              364 drivers/net/hippi/rrunner.h 	u8      ring;
ring              461 drivers/net/hippi/rrunner.h 	u8      ring;
ring              465 drivers/net/hippi/rrunner.h 	u8      ring;
ring             1709 drivers/net/hyperv/netvsc_drv.c 				   struct ethtool_ringparam *ring)
ring             1713 drivers/net/hyperv/netvsc_drv.c 	ring->rx_pending = nvdev->recv_section_cnt;
ring             1714 drivers/net/hyperv/netvsc_drv.c 	ring->tx_pending = nvdev->send_section_cnt;
ring             1721 drivers/net/hyperv/netvsc_drv.c 	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
ring             1722 drivers/net/hyperv/netvsc_drv.c 	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
ring             1727 drivers/net/hyperv/netvsc_drv.c 				 struct ethtool_ringparam *ring)
ring             1735 drivers/net/hyperv/netvsc_drv.c 	__netvsc_get_ringparam(nvdev, ring);
ring             1739 drivers/net/hyperv/netvsc_drv.c 				struct ethtool_ringparam *ring)
ring             1754 drivers/net/hyperv/netvsc_drv.c 	new_tx = clamp_t(u32, ring->tx_pending,
ring             1756 drivers/net/hyperv/netvsc_drv.c 	new_rx = clamp_t(u32, ring->rx_pending,
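
The netvsc handlers above follow the usual ethtool ringparam contract: get_ringparam reports both current (*_pending) and maximum (*_max_pending) depths, and set_ringparam clamps the request into the supported range with clamp_t() before resizing. A minimal hedged sketch of that pattern, using the legacy two-argument form seen in these excerpts (toy_* names and limits hypothetical):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static void toy_get_ringparam(struct net_device *ndev,
                                  struct ethtool_ringparam *ring)
    {
            struct toy_priv *p = netdev_priv(ndev);

            ring->rx_max_pending = TOY_RX_MAX;
            ring->tx_max_pending = TOY_TX_MAX;
            ring->rx_pending = p->rx_cnt;
            ring->tx_pending = p->tx_cnt;
    }

    static int toy_set_ringparam(struct net_device *ndev,
                                 struct ethtool_ringparam *ring)
    {
            struct toy_priv *p = netdev_priv(ndev);

            /* Clamp into range, as netvsc does, rather than rejecting. */
            p->rx_cnt = clamp_t(u32, ring->rx_pending, TOY_RX_MIN, TOY_RX_MAX);
            p->tx_cnt = clamp_t(u32, ring->tx_pending, TOY_TX_MIN, TOY_TX_MAX);

            return toy_rebuild_rings(p); /* hypothetical resize helper */
    }
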
ring              349 drivers/net/tap.c 			if (ptr_ring_produce(&q->ring, skb))
ring              359 drivers/net/tap.c 			if (ptr_ring_produce(&q->ring, segs)) {
ring              376 drivers/net/tap.c 		if (ptr_ring_produce(&q->ring, skb))
ring              498 drivers/net/tap.c 	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
ring              518 drivers/net/tap.c 	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
ring              583 drivers/net/tap.c 	if (!ptr_ring_empty(&q->ring))
ring              846 drivers/net/tap.c 		skb = ptr_ring_consume(&q->ring);
ring             1252 drivers/net/tap.c 	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
ring             1287 drivers/net/tap.c 	return &q->ring;
ring             1304 drivers/net/tap.c 		rings[i++] = &q->ring;
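
tap.c stores queued skbs in a ptr_ring, a FIFO of pointers: ptr_ring_init() sizes it, ptr_ring_produce() enqueues and returns non-zero when full (at which point tap drops the packet), ptr_ring_consume() dequeues, and ptr_ring_cleanup() drains leftovers through a destructor. A hedged kernel-style sketch of that lifecycle, mirroring the tap.c calls above:

    #include <linux/ptr_ring.h>
    #include <linux/skb_array.h>

    static int toy_queue_init(struct ptr_ring *r, unsigned int len)
    {
            return ptr_ring_init(r, len, GFP_KERNEL); /* 0 on success */
    }

    static int toy_enqueue(struct ptr_ring *r, struct sk_buff *skb)
    {
            if (ptr_ring_produce(r, skb))  /* non-zero: ring is full */
                    return -ENOBUFS;       /* caller frees/drops the skb */
            return 0;
    }

    static struct sk_buff *toy_dequeue(struct ptr_ring *r)
    {
            return ptr_ring_consume(r);    /* NULL when empty */
    }

    static void toy_queue_destroy(struct ptr_ring *r)
    {
            /* Free any skbs still queued, as tap.c does above. */
            ptr_ring_cleanup(r, __skb_array_destroy_skb);
    }
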
ring              141 drivers/net/thunderbolt.c 	struct tb_ring *ring;
ring              321 drivers/net/thunderbolt.c static void tbnet_free_buffers(struct tbnet_ring *ring)
ring              326 drivers/net/thunderbolt.c 		struct device *dma_dev = tb_ring_dma_device(ring->ring);
ring              327 drivers/net/thunderbolt.c 		struct tbnet_frame *tf = &ring->frames[i];
ring              335 drivers/net/thunderbolt.c 		if (ring->ring->is_tx) {
ring              353 drivers/net/thunderbolt.c 	ring->cons = 0;
ring              354 drivers/net/thunderbolt.c 	ring->prod = 0;
ring              375 drivers/net/thunderbolt.c 		tb_ring_stop(net->rx_ring.ring);
ring              376 drivers/net/thunderbolt.c 		tb_ring_stop(net->tx_ring.ring);
ring              461 drivers/net/thunderbolt.c static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
ring              463 drivers/net/thunderbolt.c 	return ring->prod - ring->cons;
ring              468 drivers/net/thunderbolt.c 	struct tbnet_ring *ring = &net->rx_ring;
ring              472 drivers/net/thunderbolt.c 		struct device *dma_dev = tb_ring_dma_device(ring->ring);
ring              473 drivers/net/thunderbolt.c 		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
ring              474 drivers/net/thunderbolt.c 		struct tbnet_frame *tf = &ring->frames[index];
ring              500 drivers/net/thunderbolt.c 		tb_ring_rx(ring->ring, &tf->frame);
ring              502 drivers/net/thunderbolt.c 		ring->prod++;
ring              508 drivers/net/thunderbolt.c 	tbnet_free_buffers(ring);
ring              514 drivers/net/thunderbolt.c 	struct tbnet_ring *ring = &net->tx_ring;
ring              515 drivers/net/thunderbolt.c 	struct device *dma_dev = tb_ring_dma_device(ring->ring);
ring              519 drivers/net/thunderbolt.c 	if (!tbnet_available_buffers(ring))
ring              522 drivers/net/thunderbolt.c 	index = ring->cons++ & (TBNET_RING_SIZE - 1);
ring              524 drivers/net/thunderbolt.c 	tf = &ring->frames[index];
ring              533 drivers/net/thunderbolt.c static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
ring              548 drivers/net/thunderbolt.c 	struct tbnet_ring *ring = &net->tx_ring;
ring              549 drivers/net/thunderbolt.c 	struct device *dma_dev = tb_ring_dma_device(ring->ring);
ring              553 drivers/net/thunderbolt.c 		struct tbnet_frame *tf = &ring->frames[i];
ring              558 drivers/net/thunderbolt.c 			tbnet_free_buffers(ring);
ring              567 drivers/net/thunderbolt.c 			tbnet_free_buffers(ring);
ring              578 drivers/net/thunderbolt.c 	ring->cons = 0;
ring              579 drivers/net/thunderbolt.c 	ring->prod = TBNET_RING_SIZE - 1;
ring              604 drivers/net/thunderbolt.c 				      net->rx_ring.ring->hop,
ring              606 drivers/net/thunderbolt.c 				      net->tx_ring.ring->hop);
ring              612 drivers/net/thunderbolt.c 	tb_ring_start(net->tx_ring.ring);
ring              613 drivers/net/thunderbolt.c 	tb_ring_start(net->rx_ring.ring);
ring              630 drivers/net/thunderbolt.c 	tb_ring_stop(net->rx_ring.ring);
ring              631 drivers/net/thunderbolt.c 	tb_ring_stop(net->tx_ring.ring);
ring              744 drivers/net/thunderbolt.c 	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
ring              766 drivers/net/thunderbolt.c 		frame = tb_ring_poll(net->rx_ring.ring);
ring              836 drivers/net/thunderbolt.c 	tb_ring_poll_complete(net->rx_ring.ring);
ring              853 drivers/net/thunderbolt.c 	struct tb_ring *ring;
ring              857 drivers/net/thunderbolt.c 	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
ring              859 drivers/net/thunderbolt.c 	if (!ring) {
ring              863 drivers/net/thunderbolt.c 	net->tx_ring.ring = ring;
ring              868 drivers/net/thunderbolt.c 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
ring              871 drivers/net/thunderbolt.c 	if (!ring) {
ring              873 drivers/net/thunderbolt.c 		tb_ring_free(net->tx_ring.ring);
ring              874 drivers/net/thunderbolt.c 		net->tx_ring.ring = NULL;
ring              877 drivers/net/thunderbolt.c 	net->rx_ring.ring = ring;
ring              894 drivers/net/thunderbolt.c 	tb_ring_free(net->rx_ring.ring);
ring              895 drivers/net/thunderbolt.c 	net->rx_ring.ring = NULL;
ring              896 drivers/net/thunderbolt.c 	tb_ring_free(net->tx_ring.ring);
ring              897 drivers/net/thunderbolt.c 	net->tx_ring.ring = NULL;
ring              906 drivers/net/thunderbolt.c 	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
ring             1125 drivers/net/thunderbolt.c 		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
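
The thunderbolt open path above allocates the TX and RX rings as a pair and unwinds on partial failure: if the RX allocation fails, the already-allocated TX ring is freed and its pointer cleared before returning. A hedged sketch of that shape (toy_alloc_tx, toy_alloc_rx and toy_free stand in for tb_ring_alloc_tx/tb_ring_alloc_rx/tb_ring_free; struct toy_net is illustrative):

    static int toy_open_rings(struct toy_net *net)
    {
            net->tx_ring.ring = toy_alloc_tx(net->nhi, TBNET_RING_SIZE);
            if (!net->tx_ring.ring)
                    return -ENOMEM;

            net->rx_ring.ring = toy_alloc_rx(net->nhi, TBNET_RING_SIZE);
            if (!net->rx_ring.ring) {
                    /* Unwind the partial init and clear the stale pointer. */
                    toy_free(net->tx_ring.ring);
                    net->tx_ring.ring = NULL;
                    return -ENOMEM;
            }
            return 0;
    }
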
ring             5339 drivers/net/usb/r8152.c 				  struct ethtool_ringparam *ring)
ring             5343 drivers/net/usb/r8152.c 	ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
ring             5344 drivers/net/usb/r8152.c 	ring->rx_pending = tp->rx_pending;
ring             5348 drivers/net/usb/r8152.c 				 struct ethtool_ringparam *ring)
ring             5352 drivers/net/usb/r8152.c 	if (ring->rx_pending < (RTL8152_MAX_RX * 2))
ring             5355 drivers/net/usb/r8152.c 	if (tp->rx_pending != ring->rx_pending) {
ring             5359 drivers/net/usb/r8152.c 			tp->rx_pending = ring->rx_pending;
ring             5363 drivers/net/usb/r8152.c 			tp->rx_pending = ring->rx_pending;
ring             2021 drivers/net/virtio_net.c 				struct ethtool_ringparam *ring)
ring             2025 drivers/net/virtio_net.c 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
ring             2026 drivers/net/virtio_net.c 	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
ring             2027 drivers/net/virtio_net.c 	ring->rx_pending = ring->rx_max_pending;
ring             2028 drivers/net/virtio_net.c 	ring->tx_pending = ring->tx_max_pending;
ring              571 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
ring              578 drivers/net/vmxnet3/vmxnet3_drv.c 		rbi = rbi_base + ring->next2fill;
ring              579 drivers/net/vmxnet3/vmxnet3_drv.c 		gd = ring->base + ring->next2fill;
ring              632 drivers/net/vmxnet3/vmxnet3_drv.c 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
ring              640 drivers/net/vmxnet3/vmxnet3_drv.c 		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
ring              642 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_cmd_ring_adv_next2fill(ring);
ring              647 drivers/net/vmxnet3/vmxnet3_drv.c 		num_allocated, ring->next2fill, ring->next2comp);
ring              650 drivers/net/vmxnet3/vmxnet3_drv.c 	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
ring             1303 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_cmd_ring	*ring = NULL;
ring             1320 drivers/net/vmxnet3/vmxnet3_drv.c 		ring = rq->rx_ring + ring_idx;
ring             1543 drivers/net/vmxnet3/vmxnet3_drv.c 		ring->next2comp = idx;
ring             1544 drivers/net/vmxnet3/vmxnet3_drv.c 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring             1545 drivers/net/vmxnet3/vmxnet3_drv.c 		ring = rq->rx_ring + ring_idx;
ring             1553 drivers/net/vmxnet3/vmxnet3_drv.c 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
ring             1558 drivers/net/vmxnet3/vmxnet3_drv.c 			rxd->gen = ring->gen;
ring             1559 drivers/net/vmxnet3/vmxnet3_drv.c 			vmxnet3_cmd_ring_adv_next2fill(ring);
ring             1567 drivers/net/vmxnet3/vmxnet3_drv.c 					       ring->next2fill);
ring              139 drivers/net/vmxnet3/vmxnet3_int.h vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
ring              141 drivers/net/vmxnet3/vmxnet3_int.h 	ring->next2fill++;
ring              142 drivers/net/vmxnet3/vmxnet3_int.h 	if (unlikely(ring->next2fill == ring->size)) {
ring              143 drivers/net/vmxnet3/vmxnet3_int.h 		ring->next2fill = 0;
ring              144 drivers/net/vmxnet3/vmxnet3_int.h 		VMXNET3_FLIP_RING_GEN(ring->gen);
ring              149 drivers/net/vmxnet3/vmxnet3_int.h vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
ring              151 drivers/net/vmxnet3/vmxnet3_int.h 	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
ring              155 drivers/net/vmxnet3/vmxnet3_int.h vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
ring              157 drivers/net/vmxnet3/vmxnet3_int.h 	return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
ring              158 drivers/net/vmxnet3/vmxnet3_int.h 		ring->next2comp - ring->next2fill - 1;
ring              171 drivers/net/vmxnet3/vmxnet3_int.h vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
ring              173 drivers/net/vmxnet3/vmxnet3_int.h 	ring->next2proc++;
ring              174 drivers/net/vmxnet3/vmxnet3_int.h 	if (unlikely(ring->next2proc == ring->size)) {
ring              175 drivers/net/vmxnet3/vmxnet3_int.h 		ring->next2proc = 0;
ring              176 drivers/net/vmxnet3/vmxnet3_int.h 		VMXNET3_FLIP_RING_GEN(ring->gen);
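
vmxnet3 uses a generation bit instead of comparing head and tail: the driver flips ring->gen each time next2fill (or next2proc) wraps past size, and a descriptor belongs to the device only while its gen flag matches the ring's current generation. The avail formula (next2comp > next2fill ? 0 : size) + next2comp - next2fill - 1 also keeps one slot unused so full and empty stay distinguishable. A compilable sketch of the gen-flip advance and the avail arithmetic (toy types, illustrative):

    #include <stdint.h>
    #include <assert.h>

    struct toy_cmd_ring {
            uint32_t next2fill;  /* producer index, 0..size-1 */
            uint32_t next2comp;  /* consumer index, 0..size-1 */
            uint32_t size;
            uint8_t  gen;        /* current generation bit */
    };

    /* Advance the producer; flip the generation bit on wrap, as
     * vmxnet3_cmd_ring_adv_next2fill() does above. */
    static void adv_next2fill(struct toy_cmd_ring *r)
    {
            if (++r->next2fill == r->size) {
                    r->next2fill = 0;
                    r->gen ^= 1;
            }
    }

    /* One slot stays unused so a full ring != an empty ring. */
    static uint32_t desc_avail(const struct toy_cmd_ring *r)
    {
            return (r->next2comp > r->next2fill ? 0 : r->size) +
                   r->next2comp - r->next2fill - 1;
    }

    int main(void)
    {
            struct toy_cmd_ring r = { .size = 8, .gen = 1 };

            assert(desc_avail(&r) == 7); /* empty: size - 1 usable slots */
            adv_next2fill(&r);
            assert(desc_avail(&r) == 6);
            return 0;
    }
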
ring              753 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_rx_ring_setup_ring32 *ring =
ring              757 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring              758 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring              759 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring              760 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring              761 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring              762 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring              763 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring              764 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring              765 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring              766 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
ring              772 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_rx_ring_setup_ring64 *ring =
ring              776 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring              777 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring              778 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring              779 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring              780 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring              781 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring              782 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring              783 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring              784 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring              785 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
ring              794 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_rx_ring_setup_ring32 *ring;
ring              809 drivers/net/wireless/ath/ath10k/htt_tx.c 	    + (sizeof(*ring) * num_rx_ring);
ring              817 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring = &cmd->rx_setup_32.rings[0];
ring              843 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->fw_idx_shadow_reg_paddr =
ring              845 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
ring              846 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring              847 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring              848 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->flags = __cpu_to_le16(flags);
ring              849 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
ring              851 drivers/net/wireless/ath/ath10k/htt_tx.c 	ath10k_htt_fill_rx_desc_offset_32(ring);
ring              866 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_rx_ring_setup_ring64 *ring;
ring              880 drivers/net/wireless/ath/ath10k/htt_tx.c 	    + (sizeof(*ring) * num_rx_ring);
ring              888 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring = &cmd->rx_setup_64.rings[0];
ring              913 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
ring              914 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
ring              915 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring              916 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring              917 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->flags = __cpu_to_le16(flags);
ring              918 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
ring              920 drivers/net/wireless/ath/ath10k/htt_tx.c 	ath10k_htt_fill_rx_desc_offset_64(ring);
ring              935 drivers/net/wireless/ath/ath10k/htt_tx.c 	struct htt_rx_ring_setup_ring32 *ring;
ring              949 drivers/net/wireless/ath/ath10k/htt_tx.c 	    + (sizeof(*ring) * num_rx_ring);
ring              957 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring = &cmd->rx_setup_32.rings[0];
ring              967 drivers/net/wireless/ath/ath10k/htt_tx.c 	memset(ring, 0, sizeof(*ring));
ring              968 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
ring              969 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring              970 drivers/net/wireless/ath/ath10k/htt_tx.c 	ring->flags = __cpu_to_le16(flags);
ring              585 drivers/net/wireless/ath/carl9170/debug.c 		ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
ring              586 drivers/net/wireless/ath/carl9170/debug.c 		ar->debug.ring[ar->debug.ring_tail].value = tmp;
ring              602 drivers/net/wireless/ath/carl9170/debug.c 		    ar->debug.ring[ar->debug.ring_head].reg,
ring              603 drivers/net/wireless/ath/carl9170/debug.c 		    ar->debug.ring[ar->debug.ring_head].value);
ring              124 drivers/net/wireless/ath/carl9170/debug.h 	struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE];
ring               54 drivers/net/wireless/ath/wil6210/debugfs.c 				struct wil_ring *ring,
ring               60 drivers/net/wireless/ath/wil6210/debugfs.c 	if (ring->is_rx) {
ring               63 drivers/net/wireless/ath/wil6210/debugfs.c 			&ring->va[idx].rx.enhanced;
ring               73 drivers/net/wireless/ath/wil6210/debugfs.c 			&ring->va[idx].tx.enhanced;
ring               76 drivers/net/wireless/ath/wil6210/debugfs.c 		has_skb = ring->ctx && ring->ctx[idx].skb;
ring               86 drivers/net/wireless/ath/wil6210/debugfs.c 			   const char *name, struct wil_ring *ring,
ring               93 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  pa     = %pad\n", &ring->pa);
ring               94 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  va     = 0x%p\n", ring->va);
ring               95 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  size   = %d\n", ring->size);
ring               96 drivers/net/wireless/ath/wil6210/debugfs.c 	if (wil->use_enhanced_dma_hw && ring->is_rx)
ring               97 drivers/net/wireless/ath/wil6210/debugfs.c 		seq_printf(s, "  swtail = %u\n", *ring->edma_rx_swtail.va);
ring               99 drivers/net/wireless/ath/wil6210/debugfs.c 		seq_printf(s, "  swtail = %d\n", ring->swtail);
ring              100 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  swhead = %d\n", ring->swhead);
ring              102 drivers/net/wireless/ath/wil6210/debugfs.c 		int ring_id = ring->is_rx ?
ring              103 drivers/net/wireless/ath/wil6210/debugfs.c 			WIL_RX_DESC_RING_ID : ring - wil->ring_tx;
ring              114 drivers/net/wireless/ath/wil6210/debugfs.c 	seq_printf(s, "  hwtail = [0x%08x] -> ", ring->hwtail);
ring              115 drivers/net/wireless/ath/wil6210/debugfs.c 	x = wmi_addr(wil, ring->hwtail);
ring              123 drivers/net/wireless/ath/wil6210/debugfs.c 	if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
ring              126 drivers/net/wireless/ath/wil6210/debugfs.c 		for (i = 0; i < ring->size; i++) {
ring              130 drivers/net/wireless/ath/wil6210/debugfs.c 				wil_print_desc_edma(s, wil, ring, _s, _h, i);
ring              133 drivers/net/wireless/ath/wil6210/debugfs.c 					&ring->va[i].tx.legacy;
ring              135 drivers/net/wireless/ath/wil6210/debugfs.c 					   _s : (ring->ctx[i].skb ? _h : 'h'));
ring              151 drivers/net/wireless/ath/wil6210/debugfs.c 		struct wil_ring *ring = &wil->ring_tx[i];
ring              154 drivers/net/wireless/ath/wil6210/debugfs.c 		if (ring->va) {
ring              157 drivers/net/wireless/ath/wil6210/debugfs.c 			u32 swhead = ring->swhead;
ring              158 drivers/net/wireless/ath/wil6210/debugfs.c 			u32 swtail = ring->swtail;
ring              159 drivers/net/wireless/ath/wil6210/debugfs.c 			int used = (ring->size + swhead - swtail)
ring              160 drivers/net/wireless/ath/wil6210/debugfs.c 				   % ring->size;
ring              161 drivers/net/wireless/ath/wil6210/debugfs.c 			int avail = ring->size - used - 1;
ring              196 drivers/net/wireless/ath/wil6210/debugfs.c 			wil_print_ring(s, wil, name, ring, '_', 'H');
ring              202 drivers/net/wireless/ath/wil6210/debugfs.c DEFINE_SHOW_ATTRIBUTE(ring);
ring             1082 drivers/net/wireless/ath/wil6210/debugfs.c 	struct wil_ring *ring;
ring             1106 drivers/net/wireless/ath/wil6210/debugfs.c 	ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
ring             1108 drivers/net/wireless/ath/wil6210/debugfs.c 	if (!ring->va) {
ring             1116 drivers/net/wireless/ath/wil6210/debugfs.c 	if (txdesc_idx >= ring->size) {
ring             1119 drivers/net/wireless/ath/wil6210/debugfs.c 				   ring_idx, txdesc_idx, ring->size);
ring             1122 drivers/net/wireless/ath/wil6210/debugfs.c 				   txdesc_idx, ring->size);
ring             1129 drivers/net/wireless/ath/wil6210/debugfs.c 	d = &ring->va[txdesc_idx].tx.legacy;
ring             1135 drivers/net/wireless/ath/wil6210/debugfs.c 			skb = ring->ctx ? ring->ctx[txdesc_idx].skb : NULL;
ring             1139 drivers/net/wireless/ath/wil6210/debugfs.c 				&ring->va[txdesc_idx].rx.enhanced;
ring             1149 drivers/net/wireless/ath/wil6210/debugfs.c 		skb = ring->ctx[txdesc_idx].skb;
ring              211 drivers/net/wireless/ath/wil6210/main.c 	struct wil_ring *ring = &wil->ring_tx[id];
ring              216 drivers/net/wireless/ath/wil6210/main.c 	if (!ring->va)
ring              237 drivers/net/wireless/ath/wil6210/main.c 	wil->txrx_ops.ring_fini_tx(wil, ring);
ring              153 drivers/net/wireless/ath/wil6210/netdev.c 		struct wil_ring *ring = &wil->ring_tx[i];
ring              157 drivers/net/wireless/ath/wil6210/netdev.c 		if (!ring->va || !txdata->enabled ||
ring               50 drivers/net/wireless/ath/wil6210/txrx.c static inline int wil_ring_wmark_low(struct wil_ring *ring)
ring               52 drivers/net/wireless/ath/wil6210/txrx.c 	return ring->size / 8;
ring               56 drivers/net/wireless/ath/wil6210/txrx.c static inline int wil_ring_wmark_high(struct wil_ring *ring)
ring               58 drivers/net/wireless/ath/wil6210/txrx.c 	return ring->size / 4;
ring               62 drivers/net/wireless/ath/wil6210/txrx.c static inline int wil_ring_avail_low(struct wil_ring *ring)
ring               64 drivers/net/wireless/ath/wil6210/txrx.c 	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
ring               68 drivers/net/wireless/ath/wil6210/txrx.c static inline int wil_ring_avail_high(struct wil_ring *ring)
ring               70 drivers/net/wireless/ath/wil6210/txrx.c 	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
ring              366 drivers/net/wireless/ath/wil6210/txrx.c 	struct wil_ring *ring = &wil->ring_rx;
ring              368 drivers/net/wireless/ath/wil6210/txrx.c 	_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
ring             1471 drivers/net/wireless/ath/wil6210/txrx.c 		       struct wil_ring *ring, struct sk_buff *skb);
ring             1477 drivers/net/wireless/ath/wil6210/txrx.c 	struct wil_ring *ring;
ring             1488 drivers/net/wireless/ath/wil6210/txrx.c 		ring = &wil->ring_tx[i];
ring             1490 drivers/net/wireless/ath/wil6210/txrx.c 		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
ring             1503 drivers/net/wireless/ath/wil6210/txrx.c 		return ring;
ring             2034 drivers/net/wireless/ath/wil6210/txrx.c 			 struct wil_ring *ring, struct sk_buff *skb)
ring             2039 drivers/net/wireless/ath/wil6210/txrx.c 	u32 swhead = ring->swhead;
ring             2040 drivers/net/wireless/ath/wil6210/txrx.c 	int avail = wil_ring_avail_tx(ring);
ring             2043 drivers/net/wireless/ath/wil6210/txrx.c 	int ring_index = ring - wil->ring_tx;
ring             2063 drivers/net/wireless/ath/wil6210/txrx.c 	_d = &ring->va[i].tx.legacy;
ring             2074 drivers/net/wireless/ath/wil6210/txrx.c 	ring->ctx[i].mapped_as = wil_mapped_as_single;
ring             2090 drivers/net/wireless/ath/wil6210/txrx.c 	ring->ctx[i].nr_frags = nr_frags;
ring             2102 drivers/net/wireless/ath/wil6210/txrx.c 		i = (swhead + f + 1) % ring->size;
ring             2103 drivers/net/wireless/ath/wil6210/txrx.c 		_d = &ring->va[i].tx.legacy;
ring             2111 drivers/net/wireless/ath/wil6210/txrx.c 		ring->ctx[i].mapped_as = wil_mapped_as_page;
ring             2133 drivers/net/wireless/ath/wil6210/txrx.c 	ring->ctx[i].skb = skb_get(skb);
ring             2136 drivers/net/wireless/ath/wil6210/txrx.c 	used = wil_ring_used_tx(ring);
ring             2152 drivers/net/wireless/ath/wil6210/txrx.c 	wil_ring_advance_head(ring, nr_frags + 1);
ring             2154 drivers/net/wireless/ath/wil6210/txrx.c 		     ring->swhead);
ring             2167 drivers/net/wireless/ath/wil6210/txrx.c 	wil_w(wil, ring->hwtail, ring->swhead);
ring             2176 drivers/net/wireless/ath/wil6210/txrx.c 		i = (swhead + f) % ring->size;
ring             2177 drivers/net/wireless/ath/wil6210/txrx.c 		ctx = &ring->ctx[i];
ring             2178 drivers/net/wireless/ath/wil6210/txrx.c 		_d = &ring->va[i].tx.legacy;
ring             2192 drivers/net/wireless/ath/wil6210/txrx.c 		       struct wil_ring *ring, struct sk_buff *skb)
ring             2194 drivers/net/wireless/ath/wil6210/txrx.c 	int ring_index = ring - wil->ring_tx;
ring             2210 drivers/net/wireless/ath/wil6210/txrx.c 	     (wil, vif, ring, skb);
ring             2236 drivers/net/wireless/ath/wil6210/txrx.c 					   struct wil_ring *ring,
ring             2245 drivers/net/wireless/ath/wil6210/txrx.c 	if (ring)
ring             2247 drivers/net/wireless/ath/wil6210/txrx.c 			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
ring             2253 drivers/net/wireless/ath/wil6210/txrx.c 	if (ring && drop_if_ring_full)
ring             2262 drivers/net/wireless/ath/wil6210/txrx.c 		if (!ring || unlikely(wil_ring_avail_low(ring))) {
ring             2282 drivers/net/wireless/ath/wil6210/txrx.c 		    !txdata->enabled || cur_ring == ring)
ring             2292 drivers/net/wireless/ath/wil6210/txrx.c 	if (!ring || wil_ring_avail_high(ring)) {
ring             2301 drivers/net/wireless/ath/wil6210/txrx.c 			   struct wil_ring *ring, bool check_stop)
ring             2304 drivers/net/wireless/ath/wil6210/txrx.c 	__wil_update_net_queues(wil, vif, ring, check_stop);
ring             2309 drivers/net/wireless/ath/wil6210/txrx.c 			      struct wil_ring *ring, bool check_stop)
ring             2312 drivers/net/wireless/ath/wil6210/txrx.c 	__wil_update_net_queues(wil, vif, ring, check_stop);
ring             2322 drivers/net/wireless/ath/wil6210/txrx.c 	struct wil_ring *ring;
ring             2348 drivers/net/wireless/ath/wil6210/txrx.c 		ring = wil_find_tx_ring_sta(wil, vif, skb);
ring             2354 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_2(wil, vif, skb);
ring             2357 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_1(wil, vif, skb);
ring             2362 drivers/net/wireless/ath/wil6210/txrx.c 			ring = wil_find_tx_bcast_2(wil, vif, skb);
ring             2365 drivers/net/wireless/ath/wil6210/txrx.c 		ring = wil_find_tx_ucast(wil, vif, skb);
ring             2367 drivers/net/wireless/ath/wil6210/txrx.c 	if (unlikely(!ring)) {
ring             2372 drivers/net/wireless/ath/wil6210/txrx.c 	rc = wil_tx_ring(wil, vif, ring, skb);
ring             2377 drivers/net/wireless/ath/wil6210/txrx.c 		wil_update_net_queues_bh(wil, vif, ring, true);
ring              590 drivers/net/wireless/ath/wil6210/txrx.h static inline int wil_ring_is_empty(struct wil_ring *ring)
ring              592 drivers/net/wireless/ath/wil6210/txrx.h 	return ring->swhead == ring->swtail;
ring              595 drivers/net/wireless/ath/wil6210/txrx.h static inline u32 wil_ring_next_tail(struct wil_ring *ring)
ring              597 drivers/net/wireless/ath/wil6210/txrx.h 	return (ring->swtail + 1) % ring->size;
ring              600 drivers/net/wireless/ath/wil6210/txrx.h static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
ring              602 drivers/net/wireless/ath/wil6210/txrx.h 	ring->swhead = (ring->swhead + n) % ring->size;
ring              605 drivers/net/wireless/ath/wil6210/txrx.h static inline int wil_ring_is_full(struct wil_ring *ring)
ring              607 drivers/net/wireless/ath/wil6210/txrx.h 	return wil_ring_next_tail(ring) == ring->swhead;
ring              641 drivers/net/wireless/ath/wil6210/txrx.h static inline int wil_ring_used_tx(struct wil_ring *ring)
ring              643 drivers/net/wireless/ath/wil6210/txrx.h 	u32 swhead = ring->swhead;
ring              644 drivers/net/wireless/ath/wil6210/txrx.h 	u32 swtail = ring->swtail;
ring              646 drivers/net/wireless/ath/wil6210/txrx.h 	return (ring->size + swhead - swtail) % ring->size;
ring              650 drivers/net/wireless/ath/wil6210/txrx.h static inline int wil_ring_avail_tx(struct wil_ring *ring)
ring              652 drivers/net/wireless/ath/wil6210/txrx.h 	return ring->size - wil_ring_used_tx(ring) - 1;
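
wil6210 keeps swhead/swtail already wrapped into [0, size) and computes used entries as (size + swhead - swtail) % size, with avail = size - used - 1, again sacrificing one slot to tell full from empty. Unlike the masking schemes above, this works for any ring size, not just powers of two, at the cost of a modulo per query. A compilable sketch with a worked case:

    #include <stdint.h>
    #include <assert.h>

    /* Head/tail kept in [0, size); valid for non-power-of-two sizes. */
    static uint32_t ring_used(uint32_t head, uint32_t tail, uint32_t size)
    {
            return (size + head - tail) % size;
    }

    static uint32_t ring_avail(uint32_t head, uint32_t tail, uint32_t size)
    {
            return size - ring_used(head, tail, size) - 1;
    }

    int main(void)
    {
            /* Head wrapped past tail: 3 entries in flight on 100 slots. */
            assert(ring_used(2, 99, 100) == 3);
            assert(ring_avail(2, 99, 100) == 96);
            return 0;
    }
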
ring              165 drivers/net/wireless/ath/wil6210/txrx_edma.c 				   struct wil_ring *ring, u32 i)
ring              178 drivers/net/wireless/ath/wil6210/txrx_edma.c 		&ring->va[i].rx.enhanced;
ring              246 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring              249 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->swtail = *ring->edma_rx_swtail.va;
ring              251 drivers/net/wireless/ath/wil6210/txrx_edma.c 	for (; next_head = wil_ring_next_head(ring),
ring              252 drivers/net/wireless/ath/wil6210/txrx_edma.c 	     (next_head != ring->swtail);
ring              253 drivers/net/wireless/ath/wil6210/txrx_edma.c 	     ring->swhead = next_head) {
ring              254 drivers/net/wireless/ath/wil6210/txrx_edma.c 		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
ring              261 drivers/net/wireless/ath/wil6210/txrx_edma.c 						    rc, ring->swhead);
ring              271 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_w(wil, ring->hwtail, ring->swhead);
ring              277 drivers/net/wireless/ath/wil6210/txrx_edma.c 					      struct wil_ring *ring)
ring              308 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring              316 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_move_all_rx_buff_to_free_list(wil, ring);
ring              387 drivers/net/wireless/ath/wil6210/txrx_edma.c 				    struct wil_ring *ring)
ring              390 drivers/net/wireless/ath/wil6210/txrx_edma.c 	size_t sz = ring->size * sizeof(ring->va[0]);
ring              394 drivers/net/wireless/ath/wil6210/txrx_edma.c 	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);
ring              396 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->swhead = 0;
ring              397 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->swtail = 0;
ring              398 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
ring              399 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (!ring->ctx)
ring              402 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
ring              403 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (!ring->va)
ring              406 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (ring->is_rx) {
ring              407 drivers/net/wireless/ath/wil6210/txrx_edma.c 		sz = sizeof(*ring->edma_rx_swtail.va);
ring              408 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->edma_rx_swtail.va =
ring              409 drivers/net/wireless/ath/wil6210/txrx_edma.c 			dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
ring              411 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (!ring->edma_rx_swtail.va)
ring              416 drivers/net/wireless/ath/wil6210/txrx_edma.c 		     ring->is_rx ? "RX" : "TX",
ring              417 drivers/net/wireless/ath/wil6210/txrx_edma.c 		     ring->size, ring->va, &ring->pa, ring->ctx);
ring              421 drivers/net/wireless/ath/wil6210/txrx_edma.c 	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
ring              422 drivers/net/wireless/ath/wil6210/txrx_edma.c 			  (void *)ring->va, ring->pa);
ring              423 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->va = NULL;
ring              425 drivers/net/wireless/ath/wil6210/txrx_edma.c 	kfree(ring->ctx);
ring              426 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->ctx = NULL;
ring              431 drivers/net/wireless/ath/wil6210/txrx_edma.c static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
ring              437 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (!ring->va)
ring              440 drivers/net/wireless/ath/wil6210/txrx_edma.c 	sz = ring->size * sizeof(ring->va[0]);
ring              443 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (ring->is_rx) {
ring              445 drivers/net/wireless/ath/wil6210/txrx_edma.c 			     ring->size, ring->va,
ring              446 drivers/net/wireless/ath/wil6210/txrx_edma.c 			     &ring->pa, ring->ctx);
ring              448 drivers/net/wireless/ath/wil6210/txrx_edma.c 		wil_move_all_rx_buff_to_free_list(wil, ring);
ring              449 drivers/net/wireless/ath/wil6210/txrx_edma.c 		dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
ring              450 drivers/net/wireless/ath/wil6210/txrx_edma.c 				  ring->edma_rx_swtail.va,
ring              451 drivers/net/wireless/ath/wil6210/txrx_edma.c 				  ring->edma_rx_swtail.pa);
ring              456 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring_index = ring - wil->ring_tx;
ring              459 drivers/net/wireless/ath/wil6210/txrx_edma.c 		     ring_index, ring->size, ring->va,
ring              460 drivers/net/wireless/ath/wil6210/txrx_edma.c 		     &ring->pa, ring->ctx);
ring              462 drivers/net/wireless/ath/wil6210/txrx_edma.c 	while (!wil_ring_is_empty(ring)) {
ring              468 drivers/net/wireless/ath/wil6210/txrx_edma.c 			&ring->va[ring->swtail].tx.enhanced;
ring              470 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ctx = &ring->ctx[ring->swtail];
ring              474 drivers/net/wireless/ath/wil6210/txrx_edma.c 				     ring->swtail);
ring              475 drivers/net/wireless/ath/wil6210/txrx_edma.c 			ring->swtail = wil_ring_next_tail(ring);
ring              482 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->swtail = wil_ring_next_tail(ring);
ring              486 drivers/net/wireless/ath/wil6210/txrx_edma.c 	dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
ring              487 drivers/net/wireless/ath/wil6210/txrx_edma.c 	kfree(ring->ctx);
ring              488 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->pa = 0;
ring              489 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->va = NULL;
ring              490 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->ctx = NULL;
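
wil_ring_alloc_desc_ring() and wil_ring_free_edma(), indexed above, pair a dma_alloc_coherent() descriptor array with a kcalloc()'d per-slot context array and unwind in reverse order on failure. A condensed sketch of that pairing, with illustrative demo_* types rather than the driver's structures:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_desc { u8 raw[32]; };     /* fixed-size HW descriptor */

    struct demo_ring {
        int size;
        struct demo_desc *va;             /* CPU view of the ring */
        dma_addr_t pa;                    /* device view of the ring */
        void **ctx;                       /* per-slot driver context */
    };

    static int demo_ring_alloc(struct device *dev, struct demo_ring *ring)
    {
        size_t sz = ring->size * sizeof(ring->va[0]);

        ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
        if (!ring->ctx)
            return -ENOMEM;

        ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
        if (!ring->va) {
            kfree(ring->ctx);             /* unwind in reverse order */
            ring->ctx = NULL;
            return -ENOMEM;
        }
        return 0;
    }

    static void demo_ring_free(struct device *dev, struct demo_ring *ring)
    {
        if (!ring->va)                    /* ring was never allocated */
            return;
        dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
                          ring->va, ring->pa);
        ring->va = NULL;
        kfree(ring->ctx);
        ring->ctx = NULL;
    }
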
ring              496 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring              501 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->size = desc_ring_size;
ring              502 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->is_rx = true;
ring              503 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rc = wil_ring_alloc_desc_ring(wil, ring);
ring              513 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_free_edma(wil, ring);
ring              617 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring              701 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_free_edma(wil, ring);
ring              714 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_tx[ring_id];
ring              724 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->size = size;
ring              725 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rc = wil_ring_alloc_desc_ring(wil, ring);
ring              749 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_free_edma(wil, ring);
ring             1070 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring             1075 drivers/net/wireless/ath/wil6210/txrx_edma.c 	if (unlikely(!ring->va)) {
ring             1165 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = NULL;
ring             1198 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring = &wil->ring_tx[ring_id];
ring             1199 drivers/net/wireless/ath/wil6210/txrx_edma.c 		if (unlikely(!ring->va)) {
ring             1226 drivers/net/wireless/ath/wil6210/txrx_edma.c 		used_before_complete = wil_ring_used_tx(ring);
ring             1229 drivers/net/wireless/ath/wil6210/txrx_edma.c 			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
ring             1235 drivers/net/wireless/ath/wil6210/txrx_edma.c 				&ring->va[ring->swtail].tx.enhanced;
ring             1239 drivers/net/wireless/ath/wil6210/txrx_edma.c 			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
ring             1242 drivers/net/wireless/ath/wil6210/txrx_edma.c 				     ring_id, ring->swtail, dmalen,
ring             1282 drivers/net/wireless/ath/wil6210/txrx_edma.c 			ring->swtail = wil_ring_next_tail(ring);
ring             1288 drivers/net/wireless/ath/wil6210/txrx_edma.c 		used_new = wil_ring_used_tx(ring);
ring             1356 drivers/net/wireless/ath/wil6210/txrx_edma.c 			       skb_frag_t *frag, struct wil_ring *ring,
ring             1363 drivers/net/wireless/ath/wil6210/txrx_edma.c 		&ring->va[i].tx.enhanced;
ring             1365 drivers/net/wireless/ath/wil6210/txrx_edma.c 	int ring_index = ring - wil->ring_tx;
ring             1373 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->ctx[i].mapped_as = wil_mapped_as_single;
ring             1376 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->ctx[i].mapped_as = wil_mapped_as_page;
ring             1394 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ring->ctx[i].skb = skb_get(skb);
ring             1407 drivers/net/wireless/ath/wil6210/txrx_edma.c 				  struct wil_ring *ring,
ring             1410 drivers/net/wireless/ath/wil6210/txrx_edma.c 	int ring_index = ring - wil->ring_tx;
ring             1414 drivers/net/wireless/ath/wil6210/txrx_edma.c 	int used, avail = wil_ring_avail_tx(ring);
ring             1418 drivers/net/wireless/ath/wil6210/txrx_edma.c 	u32 swhead = ring->swhead;
ring             1466 drivers/net/wireless/ath/wil6210/txrx_edma.c 				 wil_tso_type_hdr, NULL, ring, skb,
ring             1476 drivers/net/wireless/ath/wil6210/txrx_edma.c 				 (swhead + descs_used) % ring->size,
ring             1478 drivers/net/wireless/ath/wil6210/txrx_edma.c 				 wil_tso_type_lst, NULL, ring, skb,
ring             1493 drivers/net/wireless/ath/wil6210/txrx_edma.c 					 (swhead + descs_used) % ring->size,
ring             1496 drivers/net/wireless/ath/wil6210/txrx_edma.c 					 frag, ring, skb, is_ipv4,
ring             1504 drivers/net/wireless/ath/wil6210/txrx_edma.c 	used = wil_ring_used_tx(ring);
ring             1513 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_advance_head(ring, descs_used);
ring             1514 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);
ring             1526 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_w(wil, ring->hwtail, ring->swhead);
ring             1534 drivers/net/wireless/ath/wil6210/txrx_edma.c 		int i = (swhead + descs_used - 1) % ring->size;
ring             1538 drivers/net/wireless/ath/wil6210/txrx_edma.c 			&ring->va[i].tx.enhanced;
ring             1541 drivers/net/wireless/ath/wil6210/txrx_edma.c 		ctx = &ring->ctx[i];
ring             1553 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_tx[ring_id];
ring             1563 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->size = size;
ring             1564 drivers/net/wireless/ath/wil6210/txrx_edma.c 	ring->is_rx = false;
ring             1565 drivers/net/wireless/ath/wil6210/txrx_edma.c 	rc = wil_ring_alloc_desc_ring(wil, ring);
ring             1585 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_free_edma(wil, ring);
ring             1611 drivers/net/wireless/ath/wil6210/txrx_edma.c 	struct wil_ring *ring = &wil->ring_rx;
ring             1616 drivers/net/wireless/ath/wil6210/txrx_edma.c 	wil_ring_free_edma(wil, ring);
ring              576 drivers/net/wireless/ath/wil6210/txrx_edma.h static inline u32 wil_ring_next_head(struct wil_ring *ring)
ring              578 drivers/net/wireless/ath/wil6210/txrx_edma.h 	return (ring->swhead + 1) % ring->size;
ring              607 drivers/net/wireless/ath/wil6210/wil6210.h 	void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
ring              617 drivers/net/wireless/ath/wil6210/wil6210.h 			   struct wil_ring *ring, struct sk_buff *skb);
ring             1378 drivers/net/wireless/ath/wil6210/wil6210.h 			   struct wil_ring *ring, bool should_stop);
ring             1380 drivers/net/wireless/ath/wil6210/wil6210.h 			      struct wil_ring *ring, bool check_stop);
ring             1595 drivers/net/wireless/ath/wil6210/wmi.c 	struct wil_ring *ring;
ring             1612 drivers/net/wireless/ath/wil6210/wmi.c 		ring = &wil->ring_tx[i];
ring             1614 drivers/net/wireless/ath/wil6210/wmi.c 		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
ring             3824 drivers/net/wireless/ath/wil6210/wmi.c 	struct wil_ring *ring = &wil->ring_rx;
ring             3828 drivers/net/wireless/ath/wil6210/wmi.c 			.ring_size = cpu_to_le16(ring->size),
ring             3841 drivers/net/wireless/ath/wil6210/wmi.c 	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
ring             3842 drivers/net/wireless/ath/wil6210/wmi.c 	cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
ring             3857 drivers/net/wireless/ath/wil6210/wmi.c 	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
ring             3868 drivers/net/wireless/ath/wil6210/wmi.c 	struct wil_ring *ring = &wil->ring_tx[ring_id];
ring             3872 drivers/net/wireless/ath/wil6210/wmi.c 			.ring_size = cpu_to_le16(ring->size),
ring             3892 drivers/net/wireless/ath/wil6210/wmi.c 	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
ring             3908 drivers/net/wireless/ath/wil6210/wmi.c 	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
ring             3919 drivers/net/wireless/ath/wil6210/wmi.c 	struct wil_ring *ring = &wil->ring_tx[ring_id];
ring             3923 drivers/net/wireless/ath/wil6210/wmi.c 			.ring_size = cpu_to_le16(ring->size),
ring             3938 drivers/net/wireless/ath/wil6210/wmi.c 	cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
ring             3954 drivers/net/wireless/ath/wil6210/wmi.c 	ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
ring               72 drivers/net/wireless/broadcom/b43/dma.c struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
ring               78 drivers/net/wireless/broadcom/b43/dma.c 	*meta = &(ring->meta[slot]);
ring               79 drivers/net/wireless/broadcom/b43/dma.c 	desc = ring->descbase;
ring               85 drivers/net/wireless/broadcom/b43/dma.c static void op32_fill_descriptor(struct b43_dmaring *ring,
ring               90 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmadesc32 *descbase = ring->descbase;
ring               97 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
ring               99 drivers/net/wireless/broadcom/b43/dma.c 	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
ring              100 drivers/net/wireless/broadcom/b43/dma.c 	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
ring              103 drivers/net/wireless/broadcom/b43/dma.c 	if (slot == ring->nr_slots - 1)
ring              118 drivers/net/wireless/broadcom/b43/dma.c static void op32_poke_tx(struct b43_dmaring *ring, int slot)
ring              120 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA32_TXINDEX,
ring              124 drivers/net/wireless/broadcom/b43/dma.c static void op32_tx_suspend(struct b43_dmaring *ring)
ring              126 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
ring              130 drivers/net/wireless/broadcom/b43/dma.c static void op32_tx_resume(struct b43_dmaring *ring)
ring              132 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
ring              136 drivers/net/wireless/broadcom/b43/dma.c static int op32_get_current_rxslot(struct b43_dmaring *ring)
ring              140 drivers/net/wireless/broadcom/b43/dma.c 	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
ring              146 drivers/net/wireless/broadcom/b43/dma.c static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
ring              148 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA32_RXINDEX,
ring              164 drivers/net/wireless/broadcom/b43/dma.c struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
ring              170 drivers/net/wireless/broadcom/b43/dma.c 	*meta = &(ring->meta[slot]);
ring              171 drivers/net/wireless/broadcom/b43/dma.c 	desc = ring->descbase;
ring              177 drivers/net/wireless/broadcom/b43/dma.c static void op64_fill_descriptor(struct b43_dmaring *ring,
ring              182 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmadesc64 *descbase = ring->descbase;
ring              189 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
ring              191 drivers/net/wireless/broadcom/b43/dma.c 	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
ring              192 drivers/net/wireless/broadcom/b43/dma.c 	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
ring              193 drivers/net/wireless/broadcom/b43/dma.c 	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);
ring              195 drivers/net/wireless/broadcom/b43/dma.c 	if (slot == ring->nr_slots - 1)
ring              213 drivers/net/wireless/broadcom/b43/dma.c static void op64_poke_tx(struct b43_dmaring *ring, int slot)
ring              215 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA64_TXINDEX,
ring              219 drivers/net/wireless/broadcom/b43/dma.c static void op64_tx_suspend(struct b43_dmaring *ring)
ring              221 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
ring              225 drivers/net/wireless/broadcom/b43/dma.c static void op64_tx_resume(struct b43_dmaring *ring)
ring              227 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
ring              231 drivers/net/wireless/broadcom/b43/dma.c static int op64_get_current_rxslot(struct b43_dmaring *ring)
ring              235 drivers/net/wireless/broadcom/b43/dma.c 	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
ring              241 drivers/net/wireless/broadcom/b43/dma.c static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
ring              243 drivers/net/wireless/broadcom/b43/dma.c 	b43_dma_write(ring, B43_DMA64_RXINDEX,
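
op32_tx_suspend()/op32_tx_resume() and their 64-bit counterparts above are read-modify-write toggles of a suspend bit in the TX control register, addressed through the ring's MMIO window via b43_dma_read()/b43_dma_write(). A standalone sketch of that pattern; the register offset, bit value, and demo_dma type are placeholders, not b43 definitions:

    #include <stdint.h>

    #define DEMO_TXCTL    0x00u     /* illustrative register offset */
    #define DEMO_SUSPEND  0x0002u   /* illustrative suspend bit */

    struct demo_dma {
        volatile uint32_t *regs;    /* mapped MMIO window */
    };

    static void demo_tx_set_suspend(struct demo_dma *d, int suspend)
    {
        uint32_t ctl = d->regs[DEMO_TXCTL / 4];   /* read */

        if (suspend)
            ctl |= DEMO_SUSPEND;                  /* modify */
        else
            ctl &= ~DEMO_SUSPEND;
        d->regs[DEMO_TXCTL / 4] = ctl;            /* write back */
    }
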
ring              257 drivers/net/wireless/broadcom/b43/dma.c static inline int free_slots(struct b43_dmaring *ring)
ring              259 drivers/net/wireless/broadcom/b43/dma.c 	return (ring->nr_slots - ring->used_slots);
ring              262 drivers/net/wireless/broadcom/b43/dma.c static inline int next_slot(struct b43_dmaring *ring, int slot)
ring              264 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
ring              265 drivers/net/wireless/broadcom/b43/dma.c 	if (slot == ring->nr_slots - 1)
ring              270 drivers/net/wireless/broadcom/b43/dma.c static inline int prev_slot(struct b43_dmaring *ring, int slot)
ring              272 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
ring              274 drivers/net/wireless/broadcom/b43/dma.c 		return ring->nr_slots - 1;
ring              279 drivers/net/wireless/broadcom/b43/dma.c static void update_max_used_slots(struct b43_dmaring *ring,
ring              282 drivers/net/wireless/broadcom/b43/dma.c 	if (current_used_slots <= ring->max_used_slots)
ring              284 drivers/net/wireless/broadcom/b43/dma.c 	ring->max_used_slots = current_used_slots;
ring              285 drivers/net/wireless/broadcom/b43/dma.c 	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
ring              286 drivers/net/wireless/broadcom/b43/dma.c 		b43dbg(ring->dev->wl,
ring              288 drivers/net/wireless/broadcom/b43/dma.c 		       ring->max_used_slots,
ring              289 drivers/net/wireless/broadcom/b43/dma.c 		       ring->tx ? "TX" : "RX", ring->index);
ring              294 drivers/net/wireless/broadcom/b43/dma.c     void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
ring              300 drivers/net/wireless/broadcom/b43/dma.c static inline int request_slot(struct b43_dmaring *ring)
ring              304 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!ring->tx);
ring              305 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->stopped);
ring              306 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(free_slots(ring) == 0);
ring              308 drivers/net/wireless/broadcom/b43/dma.c 	slot = next_slot(ring, ring->current_slot);
ring              309 drivers/net/wireless/broadcom/b43/dma.c 	ring->current_slot = slot;
ring              310 drivers/net/wireless/broadcom/b43/dma.c 	ring->used_slots++;
ring              312 drivers/net/wireless/broadcom/b43/dma.c 	update_max_used_slots(ring, ring->used_slots);
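
free_slots(), next_slot(), prev_slot() and request_slot() above show b43's slot bookkeeping: wraparound is a compare-and-branch rather than a modulo, which avoids a division when nr_slots is not a power of two, and request_slot() simply claims the successor of current_slot and bumps used_slots. The wraparound helpers in isolation, as a sketch with illustrative names:

    static inline int demo_next_slot(int nr_slots, int slot)
    {
        return (slot == nr_slots - 1) ? 0 : slot + 1;
    }

    static inline int demo_prev_slot(int nr_slots, int slot)
    {
        return (slot == 0) ? nr_slots - 1 : slot - 1;
    }
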
ring              347 drivers/net/wireless/broadcom/b43/dma.c     dma_addr_t map_descbuffer(struct b43_dmaring *ring,
ring              353 drivers/net/wireless/broadcom/b43/dma.c 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
ring              356 drivers/net/wireless/broadcom/b43/dma.c 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
ring              364 drivers/net/wireless/broadcom/b43/dma.c     void unmap_descbuffer(struct b43_dmaring *ring,
ring              368 drivers/net/wireless/broadcom/b43/dma.c 		dma_unmap_single(ring->dev->dev->dma_dev,
ring              371 drivers/net/wireless/broadcom/b43/dma.c 		dma_unmap_single(ring->dev->dev->dma_dev,
ring              377 drivers/net/wireless/broadcom/b43/dma.c     void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
ring              380 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->tx);
ring              381 drivers/net/wireless/broadcom/b43/dma.c 	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
ring              386 drivers/net/wireless/broadcom/b43/dma.c     void sync_descbuffer_for_device(struct b43_dmaring *ring,
ring              389 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->tx);
ring              390 drivers/net/wireless/broadcom/b43/dma.c 	dma_sync_single_for_device(ring->dev->dev->dma_dev,
ring              395 drivers/net/wireless/broadcom/b43/dma.c     void free_descriptor_buffer(struct b43_dmaring *ring,
ring              399 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->tx)
ring              400 drivers/net/wireless/broadcom/b43/dma.c 			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
ring              407 drivers/net/wireless/broadcom/b43/dma.c static int alloc_ringmemory(struct b43_dmaring *ring)
ring              418 drivers/net/wireless/broadcom/b43/dma.c 	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
ring              421 drivers/net/wireless/broadcom/b43/dma.c 	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
ring              422 drivers/net/wireless/broadcom/b43/dma.c 					    ring_mem_size, &(ring->dmabase),
ring              424 drivers/net/wireless/broadcom/b43/dma.c 	if (!ring->descbase)
ring              430 drivers/net/wireless/broadcom/b43/dma.c static void free_ringmemory(struct b43_dmaring *ring)
ring              432 drivers/net/wireless/broadcom/b43/dma.c 	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
ring              434 drivers/net/wireless/broadcom/b43/dma.c 	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
ring              435 drivers/net/wireless/broadcom/b43/dma.c 			  ring->descbase, ring->dmabase);
ring              538 drivers/net/wireless/broadcom/b43/dma.c static bool b43_dma_mapping_error(struct b43_dmaring *ring,
ring              542 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
ring              545 drivers/net/wireless/broadcom/b43/dma.c 	switch (ring->type) {
ring              565 drivers/net/wireless/broadcom/b43/dma.c 	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
ring              570 drivers/net/wireless/broadcom/b43/dma.c static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
ring              572 drivers/net/wireless/broadcom/b43/dma.c 	unsigned char *f = skb->data + ring->frameoffset;
ring              577 drivers/net/wireless/broadcom/b43/dma.c static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
ring              587 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
ring              588 drivers/net/wireless/broadcom/b43/dma.c 	frame = skb->data + ring->frameoffset;
ring              592 drivers/net/wireless/broadcom/b43/dma.c static int setup_rx_descbuffer(struct b43_dmaring *ring,
ring              599 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->tx);
ring              601 drivers/net/wireless/broadcom/b43/dma.c 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
ring              604 drivers/net/wireless/broadcom/b43/dma.c 	b43_poison_rx_buffer(ring, skb);
ring              605 drivers/net/wireless/broadcom/b43/dma.c 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
ring              606 drivers/net/wireless/broadcom/b43/dma.c 	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
ring              612 drivers/net/wireless/broadcom/b43/dma.c 		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
ring              615 drivers/net/wireless/broadcom/b43/dma.c 		b43_poison_rx_buffer(ring, skb);
ring              616 drivers/net/wireless/broadcom/b43/dma.c 		dmaaddr = map_descbuffer(ring, skb->data,
ring              617 drivers/net/wireless/broadcom/b43/dma.c 					 ring->rx_buffersize, 0);
ring              618 drivers/net/wireless/broadcom/b43/dma.c 		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
ring              619 drivers/net/wireless/broadcom/b43/dma.c 			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
ring              627 drivers/net/wireless/broadcom/b43/dma.c 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring              628 drivers/net/wireless/broadcom/b43/dma.c 				   ring->rx_buffersize, 0, 0, 0);
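
setup_rx_descbuffer() above allocates an RX skb, poisons it so stale buffers can later be detected, maps it for DMA, and on a mapping failure frees the skb and retries the allocation with gfp_flags widened by GFP_DMA so the buffer lands in the device-addressable zone. A sketch of that allocate-map-retry shape; note the real b43_dma_mapping_error() also range-checks the address against the ring's 30/32/64-bit type, which this sketch does not model:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static struct sk_buff *demo_alloc_rx_buf(struct device *dev, size_t len,
                                             gfp_t gfp, dma_addr_t *addr)
    {
        struct sk_buff *skb = __dev_alloc_skb(len, gfp);

        if (!skb)
            return NULL;

        *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
            dev_kfree_skb_any(skb);
            /* Retry from the DMA zone, as the driver does when the
             * first buffer is not reachable by the DMA engine. */
            skb = __dev_alloc_skb(len, gfp | GFP_DMA);
            if (!skb)
                return NULL;
            *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *addr)) {
                dev_kfree_skb_any(skb);
                return NULL;
            }
        }
        return skb;
    }
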
ring              636 drivers/net/wireless/broadcom/b43/dma.c static int alloc_initial_descbuffers(struct b43_dmaring *ring)
ring              642 drivers/net/wireless/broadcom/b43/dma.c 	for (i = 0; i < ring->nr_slots; i++) {
ring              643 drivers/net/wireless/broadcom/b43/dma.c 		desc = ring->ops->idx2desc(ring, i, &meta);
ring              645 drivers/net/wireless/broadcom/b43/dma.c 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
ring              647 drivers/net/wireless/broadcom/b43/dma.c 			b43err(ring->dev->wl,
ring              653 drivers/net/wireless/broadcom/b43/dma.c 	ring->used_slots = ring->nr_slots;
ring              660 drivers/net/wireless/broadcom/b43/dma.c 		desc = ring->ops->idx2desc(ring, i, &meta);
ring              662 drivers/net/wireless/broadcom/b43/dma.c 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
ring              672 drivers/net/wireless/broadcom/b43/dma.c static int dmacontroller_setup(struct b43_dmaring *ring)
ring              677 drivers/net/wireless/broadcom/b43/dma.c 	bool parity = ring->dev->dma.parity;
ring              681 drivers/net/wireless/broadcom/b43/dma.c 	if (ring->tx) {
ring              682 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->type == B43_DMA_64BIT) {
ring              683 drivers/net/wireless/broadcom/b43/dma.c 			u64 ringbase = (u64) (ring->dmabase);
ring              684 drivers/net/wireless/broadcom/b43/dma.c 			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
ring              685 drivers/net/wireless/broadcom/b43/dma.c 			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
ring              686 drivers/net/wireless/broadcom/b43/dma.c 			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
ring              693 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_TXCTL, value);
ring              694 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
ring              695 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
ring              697 drivers/net/wireless/broadcom/b43/dma.c 			u32 ringbase = (u32) (ring->dmabase);
ring              698 drivers/net/wireless/broadcom/b43/dma.c 			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
ring              699 drivers/net/wireless/broadcom/b43/dma.c 			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
ring              706 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_TXCTL, value);
ring              707 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
ring              710 drivers/net/wireless/broadcom/b43/dma.c 		err = alloc_initial_descbuffers(ring);
ring              713 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->type == B43_DMA_64BIT) {
ring              714 drivers/net/wireless/broadcom/b43/dma.c 			u64 ringbase = (u64) (ring->dmabase);
ring              715 drivers/net/wireless/broadcom/b43/dma.c 			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
ring              716 drivers/net/wireless/broadcom/b43/dma.c 			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
ring              717 drivers/net/wireless/broadcom/b43/dma.c 			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);
ring              719 drivers/net/wireless/broadcom/b43/dma.c 			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
ring              725 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXCTL, value);
ring              726 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
ring              727 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
ring              728 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
ring              731 drivers/net/wireless/broadcom/b43/dma.c 			u32 ringbase = (u32) (ring->dmabase);
ring              732 drivers/net/wireless/broadcom/b43/dma.c 			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
ring              733 drivers/net/wireless/broadcom/b43/dma.c 			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
ring              735 drivers/net/wireless/broadcom/b43/dma.c 			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
ring              741 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_RXCTL, value);
ring              742 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
ring              743 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
ring              753 drivers/net/wireless/broadcom/b43/dma.c static void dmacontroller_cleanup(struct b43_dmaring *ring)
ring              755 drivers/net/wireless/broadcom/b43/dma.c 	if (ring->tx) {
ring              756 drivers/net/wireless/broadcom/b43/dma.c 		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring              757 drivers/net/wireless/broadcom/b43/dma.c 					   ring->type);
ring              758 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->type == B43_DMA_64BIT) {
ring              759 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
ring              760 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
ring              762 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_TXRING, 0);
ring              764 drivers/net/wireless/broadcom/b43/dma.c 		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring              765 drivers/net/wireless/broadcom/b43/dma.c 					   ring->type);
ring              766 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->type == B43_DMA_64BIT) {
ring              767 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
ring              768 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
ring              770 drivers/net/wireless/broadcom/b43/dma.c 			b43_dma_write(ring, B43_DMA32_RXRING, 0);
ring              774 drivers/net/wireless/broadcom/b43/dma.c static void free_all_descbuffers(struct b43_dmaring *ring)
ring              779 drivers/net/wireless/broadcom/b43/dma.c 	if (!ring->used_slots)
ring              781 drivers/net/wireless/broadcom/b43/dma.c 	for (i = 0; i < ring->nr_slots; i++) {
ring              783 drivers/net/wireless/broadcom/b43/dma.c 		ring->ops->idx2desc(ring, i, &meta);
ring              786 drivers/net/wireless/broadcom/b43/dma.c 			B43_WARN_ON(!ring->tx);
ring              789 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->tx) {
ring              790 drivers/net/wireless/broadcom/b43/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring              793 drivers/net/wireless/broadcom/b43/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring              794 drivers/net/wireless/broadcom/b43/dma.c 					 ring->rx_buffersize, 0);
ring              796 drivers/net/wireless/broadcom/b43/dma.c 		free_descriptor_buffer(ring, meta);
ring              837 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmaring *ring;
ring              841 drivers/net/wireless/broadcom/b43/dma.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring              842 drivers/net/wireless/broadcom/b43/dma.c 	if (!ring)
ring              845 drivers/net/wireless/broadcom/b43/dma.c 	ring->nr_slots = B43_RXRING_SLOTS;
ring              847 drivers/net/wireless/broadcom/b43/dma.c 		ring->nr_slots = B43_TXRING_SLOTS;
ring              849 drivers/net/wireless/broadcom/b43/dma.c 	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
ring              851 drivers/net/wireless/broadcom/b43/dma.c 	if (!ring->meta)
ring              853 drivers/net/wireless/broadcom/b43/dma.c 	for (i = 0; i < ring->nr_slots; i++)
ring              854 drivers/net/wireless/broadcom/b43/dma.c 		ring->meta->skb = B43_DMA_PTR_POISON;
ring              856 drivers/net/wireless/broadcom/b43/dma.c 	ring->type = type;
ring              857 drivers/net/wireless/broadcom/b43/dma.c 	ring->dev = dev;
ring              858 drivers/net/wireless/broadcom/b43/dma.c 	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
ring              859 drivers/net/wireless/broadcom/b43/dma.c 	ring->index = controller_index;
ring              861 drivers/net/wireless/broadcom/b43/dma.c 		ring->ops = &dma64_ops;
ring              863 drivers/net/wireless/broadcom/b43/dma.c 		ring->ops = &dma32_ops;
ring              865 drivers/net/wireless/broadcom/b43/dma.c 		ring->tx = true;
ring              866 drivers/net/wireless/broadcom/b43/dma.c 		ring->current_slot = -1;
ring              868 drivers/net/wireless/broadcom/b43/dma.c 		if (ring->index == 0) {
ring              871 drivers/net/wireless/broadcom/b43/dma.c 				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
ring              872 drivers/net/wireless/broadcom/b43/dma.c 				ring->frameoffset = B43_DMA0_RX_FW598_FO;
ring              876 drivers/net/wireless/broadcom/b43/dma.c 				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
ring              877 drivers/net/wireless/broadcom/b43/dma.c 				ring->frameoffset = B43_DMA0_RX_FW351_FO;
ring              884 drivers/net/wireless/broadcom/b43/dma.c 	ring->last_injected_overflow = jiffies;
ring              891 drivers/net/wireless/broadcom/b43/dma.c 		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
ring              894 drivers/net/wireless/broadcom/b43/dma.c 		if (!ring->txhdr_cache)
ring              899 drivers/net/wireless/broadcom/b43/dma.c 					  ring->txhdr_cache,
ring              903 drivers/net/wireless/broadcom/b43/dma.c 		if (b43_dma_mapping_error(ring, dma_test,
ring              906 drivers/net/wireless/broadcom/b43/dma.c 			kfree(ring->txhdr_cache);
ring              907 drivers/net/wireless/broadcom/b43/dma.c 			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
ring              910 drivers/net/wireless/broadcom/b43/dma.c 			if (!ring->txhdr_cache)
ring              914 drivers/net/wireless/broadcom/b43/dma.c 						  ring->txhdr_cache,
ring              918 drivers/net/wireless/broadcom/b43/dma.c 			if (b43_dma_mapping_error(ring, dma_test,
ring              932 drivers/net/wireless/broadcom/b43/dma.c 	err = alloc_ringmemory(ring);
ring              935 drivers/net/wireless/broadcom/b43/dma.c 	err = dmacontroller_setup(ring);
ring              940 drivers/net/wireless/broadcom/b43/dma.c 	return ring;
ring              943 drivers/net/wireless/broadcom/b43/dma.c 	free_ringmemory(ring);
ring              945 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring->txhdr_cache);
ring              947 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring->meta);
ring              949 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring);
ring              950 drivers/net/wireless/broadcom/b43/dma.c 	ring = NULL;
ring              966 drivers/net/wireless/broadcom/b43/dma.c static void b43_destroy_dmaring(struct b43_dmaring *ring,
ring              969 drivers/net/wireless/broadcom/b43/dma.c 	if (!ring)
ring              975 drivers/net/wireless/broadcom/b43/dma.c 		u64 failed_packets = ring->nr_failed_tx_packets;
ring              976 drivers/net/wireless/broadcom/b43/dma.c 		u64 succeed_packets = ring->nr_succeed_tx_packets;
ring              983 drivers/net/wireless/broadcom/b43/dma.c 			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
ring              985 drivers/net/wireless/broadcom/b43/dma.c 		b43dbg(ring->dev->wl, "DMA-%u %s: "
ring              988 drivers/net/wireless/broadcom/b43/dma.c 		       (unsigned int)(ring->type), ringname,
ring              989 drivers/net/wireless/broadcom/b43/dma.c 		       ring->max_used_slots,
ring              990 drivers/net/wireless/broadcom/b43/dma.c 		       ring->nr_slots,
ring             1003 drivers/net/wireless/broadcom/b43/dma.c 	dmacontroller_cleanup(ring);
ring             1004 drivers/net/wireless/broadcom/b43/dma.c 	free_all_descbuffers(ring);
ring             1005 drivers/net/wireless/broadcom/b43/dma.c 	free_ringmemory(ring);
ring             1007 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring->txhdr_cache);
ring             1008 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring->meta);
ring             1009 drivers/net/wireless/broadcom/b43/dma.c 	kfree(ring);
ring             1012 drivers/net/wireless/broadcom/b43/dma.c #define destroy_ring(dma, ring) do {				\
ring             1013 drivers/net/wireless/broadcom/b43/dma.c 	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
ring             1014 drivers/net/wireless/broadcom/b43/dma.c 	(dma)->ring = NULL;					\
ring             1136 drivers/net/wireless/broadcom/b43/dma.c static u16 generate_cookie(struct b43_dmaring *ring, int slot)
ring             1148 drivers/net/wireless/broadcom/b43/dma.c 	cookie = (((u16)ring->index + 1) << 12);
ring             1160 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmaring *ring = NULL;
ring             1164 drivers/net/wireless/broadcom/b43/dma.c 		ring = dma->tx_ring_AC_BK;
ring             1167 drivers/net/wireless/broadcom/b43/dma.c 		ring = dma->tx_ring_AC_BE;
ring             1170 drivers/net/wireless/broadcom/b43/dma.c 		ring = dma->tx_ring_AC_VI;
ring             1173 drivers/net/wireless/broadcom/b43/dma.c 		ring = dma->tx_ring_AC_VO;
ring             1176 drivers/net/wireless/broadcom/b43/dma.c 		ring = dma->tx_ring_mcast;
ring             1180 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
ring             1186 drivers/net/wireless/broadcom/b43/dma.c 	return ring;
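
generate_cookie() and parse_cookie() above encode a (ring, slot) pair into the 16-bit cookie the firmware echoes back in TX status reports: the top four bits carry ring index + 1 (so 0x0000 is never a valid cookie, visible in the `(ring->index + 1) << 12` line above) and the low twelve bits carry the slot. A round-trip sketch of that packing, with illustrative names:

    #include <stdint.h>

    static uint16_t demo_make_cookie(unsigned int ring_index,
                                     unsigned int slot)
    {
        return (uint16_t)(((ring_index + 1) << 12) | (slot & 0x0FFF));
    }

    static void demo_parse_cookie(uint16_t cookie, unsigned int *ring_index,
                                  unsigned int *slot)
    {
        *ring_index = (cookie >> 12) - 1;   /* undo the +1 bias */
        *slot = cookie & 0x0FFF;
    }
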
ring             1189 drivers/net/wireless/broadcom/b43/dma.c static int dma_tx_fragment(struct b43_dmaring *ring,
ring             1192 drivers/net/wireless/broadcom/b43/dma.c 	const struct b43_dma_ops *ops = ring->ops;
ring             1202 drivers/net/wireless/broadcom/b43/dma.c 	size_t hdrsize = b43_txhdr_size(ring->dev);
ring             1209 drivers/net/wireless/broadcom/b43/dma.c 	old_top_slot = ring->current_slot;
ring             1210 drivers/net/wireless/broadcom/b43/dma.c 	old_used_slots = ring->used_slots;
ring             1213 drivers/net/wireless/broadcom/b43/dma.c 	slot = request_slot(ring);
ring             1214 drivers/net/wireless/broadcom/b43/dma.c 	desc = ops->idx2desc(ring, slot, &meta_hdr);
ring             1217 drivers/net/wireless/broadcom/b43/dma.c 	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
ring             1218 drivers/net/wireless/broadcom/b43/dma.c 	cookie = generate_cookie(ring, slot);
ring             1219 drivers/net/wireless/broadcom/b43/dma.c 	err = b43_generate_txhdr(ring->dev, header,
ring             1222 drivers/net/wireless/broadcom/b43/dma.c 		ring->current_slot = old_top_slot;
ring             1223 drivers/net/wireless/broadcom/b43/dma.c 		ring->used_slots = old_used_slots;
ring             1227 drivers/net/wireless/broadcom/b43/dma.c 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
ring             1229 drivers/net/wireless/broadcom/b43/dma.c 	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
ring             1230 drivers/net/wireless/broadcom/b43/dma.c 		ring->current_slot = old_top_slot;
ring             1231 drivers/net/wireless/broadcom/b43/dma.c 		ring->used_slots = old_used_slots;
ring             1234 drivers/net/wireless/broadcom/b43/dma.c 	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
ring             1238 drivers/net/wireless/broadcom/b43/dma.c 	slot = request_slot(ring);
ring             1239 drivers/net/wireless/broadcom/b43/dma.c 	desc = ops->idx2desc(ring, slot, &meta);
ring             1246 drivers/net/wireless/broadcom/b43/dma.c 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
ring             1248 drivers/net/wireless/broadcom/b43/dma.c 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
ring             1252 drivers/net/wireless/broadcom/b43/dma.c 			ring->current_slot = old_top_slot;
ring             1253 drivers/net/wireless/broadcom/b43/dma.c 			ring->used_slots = old_used_slots;
ring             1258 drivers/net/wireless/broadcom/b43/dma.c 		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
ring             1259 drivers/net/wireless/broadcom/b43/dma.c 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
ring             1262 drivers/net/wireless/broadcom/b43/dma.c 			ring->current_slot = old_top_slot;
ring             1263 drivers/net/wireless/broadcom/b43/dma.c 			ring->used_slots = old_used_slots;
ring             1269 drivers/net/wireless/broadcom/b43/dma.c 	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);
ring             1274 drivers/net/wireless/broadcom/b43/dma.c 		b43_shm_write16(ring->dev, B43_SHM_SHARED,
ring             1279 drivers/net/wireless/broadcom/b43/dma.c 	ops->poke_tx(ring, next_slot(ring, slot));
ring             1283 drivers/net/wireless/broadcom/b43/dma.c 	unmap_descbuffer(ring, meta_hdr->dmaaddr,
ring             1288 drivers/net/wireless/broadcom/b43/dma.c static inline int should_inject_overflow(struct b43_dmaring *ring)
ring             1291 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
ring             1296 drivers/net/wireless/broadcom/b43/dma.c 		next_overflow = ring->last_injected_overflow + HZ;
ring             1298 drivers/net/wireless/broadcom/b43/dma.c 			ring->last_injected_overflow = jiffies;
ring             1299 drivers/net/wireless/broadcom/b43/dma.c 			b43dbg(ring->dev->wl,
ring             1301 drivers/net/wireless/broadcom/b43/dma.c 			       "DMA controller %d\n", ring->index);
ring             1313 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmaring *ring;
ring             1322 drivers/net/wireless/broadcom/b43/dma.c 			ring = dev->dma.tx_ring_AC_VO;
ring             1325 drivers/net/wireless/broadcom/b43/dma.c 			ring = dev->dma.tx_ring_AC_VI;
ring             1328 drivers/net/wireless/broadcom/b43/dma.c 			ring = dev->dma.tx_ring_AC_BE;
ring             1331 drivers/net/wireless/broadcom/b43/dma.c 			ring = dev->dma.tx_ring_AC_BK;
ring             1335 drivers/net/wireless/broadcom/b43/dma.c 		ring = dev->dma.tx_ring_AC_BE;
ring             1337 drivers/net/wireless/broadcom/b43/dma.c 	return ring;
ring             1342 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmaring *ring;
ring             1350 drivers/net/wireless/broadcom/b43/dma.c 		ring = dev->dma.tx_ring_mcast;
ring             1356 drivers/net/wireless/broadcom/b43/dma.c 		ring = select_ring_by_priority(
ring             1360 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!ring->tx);
ring             1362 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(ring->stopped)) {
ring             1373 drivers/net/wireless/broadcom/b43/dma.c 	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
ring             1384 drivers/net/wireless/broadcom/b43/dma.c 	ring->queue_prio = skb_get_queue_mapping(skb);
ring             1386 drivers/net/wireless/broadcom/b43/dma.c 	err = dma_tx_fragment(ring, skb);
ring             1398 drivers/net/wireless/broadcom/b43/dma.c 	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
ring             1399 drivers/net/wireless/broadcom/b43/dma.c 	    should_inject_overflow(ring)) {
ring             1404 drivers/net/wireless/broadcom/b43/dma.c 		ring->stopped = true;
ring             1406 drivers/net/wireless/broadcom/b43/dma.c 			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
ring             1418 drivers/net/wireless/broadcom/b43/dma.c 	struct b43_dmaring *ring;
ring             1427 drivers/net/wireless/broadcom/b43/dma.c 	ring = parse_cookie(dev, status->cookie, &slot);
ring             1428 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(!ring))
ring             1430 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!ring->tx);
ring             1435 drivers/net/wireless/broadcom/b43/dma.c 	firstused = ring->current_slot - ring->used_slots + 1;
ring             1437 drivers/net/wireless/broadcom/b43/dma.c 		firstused = ring->nr_slots + firstused;
ring             1444 drivers/net/wireless/broadcom/b43/dma.c 		if (slot == next_slot(ring, next_slot(ring, firstused))) {
ring             1454 drivers/net/wireless/broadcom/b43/dma.c 				       ring->index, slot);
ring             1465 drivers/net/wireless/broadcom/b43/dma.c 			       ring->index, firstused, slot);
ring             1472 drivers/net/wireless/broadcom/b43/dma.c 	ops = ring->ops;
ring             1474 drivers/net/wireless/broadcom/b43/dma.c 		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
ring             1476 drivers/net/wireless/broadcom/b43/dma.c 		ops->idx2desc(ring, slot, &meta);
ring             1481 drivers/net/wireless/broadcom/b43/dma.c 			       slot, firstused, ring->index);
ring             1489 drivers/net/wireless/broadcom/b43/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring             1494 drivers/net/wireless/broadcom/b43/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring             1507 drivers/net/wireless/broadcom/b43/dma.c 				       slot, firstused, ring->index);
ring             1529 drivers/net/wireless/broadcom/b43/dma.c 				ring->nr_succeed_tx_packets++;
ring             1531 drivers/net/wireless/broadcom/b43/dma.c 				ring->nr_failed_tx_packets++;
ring             1532 drivers/net/wireless/broadcom/b43/dma.c 			ring->nr_total_packet_tries += status->frame_count;
ring             1546 drivers/net/wireless/broadcom/b43/dma.c 				       slot, firstused, ring->index);
ring             1552 drivers/net/wireless/broadcom/b43/dma.c 		ring->used_slots--;
ring             1559 drivers/net/wireless/broadcom/b43/dma.c 		slot = next_slot(ring, slot);
ring             1563 drivers/net/wireless/broadcom/b43/dma.c 	if (ring->stopped) {
ring             1564 drivers/net/wireless/broadcom/b43/dma.c 		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
ring             1565 drivers/net/wireless/broadcom/b43/dma.c 		ring->stopped = false;
ring             1568 drivers/net/wireless/broadcom/b43/dma.c 	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
ring             1569 drivers/net/wireless/broadcom/b43/dma.c 		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
ring             1573 drivers/net/wireless/broadcom/b43/dma.c 		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
ring             1575 drivers/net/wireless/broadcom/b43/dma.c 			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
ring             1582 drivers/net/wireless/broadcom/b43/dma.c static void dma_rx(struct b43_dmaring *ring, int *slot)
ring             1584 drivers/net/wireless/broadcom/b43/dma.c 	const struct b43_dma_ops *ops = ring->ops;
ring             1593 drivers/net/wireless/broadcom/b43/dma.c 	desc = ops->idx2desc(ring, *slot, &meta);
ring             1595 drivers/net/wireless/broadcom/b43/dma.c 	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
ring             1613 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
ring             1616 drivers/net/wireless/broadcom/b43/dma.c 		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
ring             1620 drivers/net/wireless/broadcom/b43/dma.c 	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
ring             1630 drivers/net/wireless/broadcom/b43/dma.c 			desc = ops->idx2desc(ring, *slot, &meta);
ring             1632 drivers/net/wireless/broadcom/b43/dma.c 			b43_poison_rx_buffer(ring, meta->skb);
ring             1633 drivers/net/wireless/broadcom/b43/dma.c 			sync_descbuffer_for_device(ring, meta->dmaaddr,
ring             1634 drivers/net/wireless/broadcom/b43/dma.c 						   ring->rx_buffersize);
ring             1635 drivers/net/wireless/broadcom/b43/dma.c 			*slot = next_slot(ring, *slot);
ring             1637 drivers/net/wireless/broadcom/b43/dma.c 			tmp -= ring->rx_buffersize;
ring             1641 drivers/net/wireless/broadcom/b43/dma.c 		b43err(ring->dev->wl, "DMA RX buffer too small "
ring             1643 drivers/net/wireless/broadcom/b43/dma.c 		       len, ring->rx_buffersize, cnt);
ring             1648 drivers/net/wireless/broadcom/b43/dma.c 	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
ring             1650 drivers/net/wireless/broadcom/b43/dma.c 		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
ring             1654 drivers/net/wireless/broadcom/b43/dma.c 	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
ring             1655 drivers/net/wireless/broadcom/b43/dma.c 	skb_put(skb, len + ring->frameoffset);
ring             1656 drivers/net/wireless/broadcom/b43/dma.c 	skb_pull(skb, ring->frameoffset);
ring             1658 drivers/net/wireless/broadcom/b43/dma.c 	b43_rx(ring->dev, skb, rxhdr);
ring             1664 drivers/net/wireless/broadcom/b43/dma.c 	b43_poison_rx_buffer(ring, skb);
ring             1665 drivers/net/wireless/broadcom/b43/dma.c 	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
ring             1668 drivers/net/wireless/broadcom/b43/dma.c void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
ring             1672 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->tx);
ring             1682 drivers/net/wireless/broadcom/b43/dma.c 	current_slot = ring->ops->get_current_rxslot(ring);
ring             1683 drivers/net/wireless/broadcom/b43/dma.c 	previous_slot = prev_slot(ring, current_slot);
ring             1684 drivers/net/wireless/broadcom/b43/dma.c 	ring->ops->set_current_rxslot(ring, previous_slot);
ring             1687 drivers/net/wireless/broadcom/b43/dma.c void b43_dma_rx(struct b43_dmaring *ring)
ring             1689 drivers/net/wireless/broadcom/b43/dma.c 	const struct b43_dma_ops *ops = ring->ops;
ring             1693 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(ring->tx);
ring             1694 drivers/net/wireless/broadcom/b43/dma.c 	current_slot = ops->get_current_rxslot(ring);
ring             1695 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));
ring             1697 drivers/net/wireless/broadcom/b43/dma.c 	slot = ring->current_slot;
ring             1698 drivers/net/wireless/broadcom/b43/dma.c 	for (; slot != current_slot; slot = next_slot(ring, slot)) {
ring             1699 drivers/net/wireless/broadcom/b43/dma.c 		dma_rx(ring, &slot);
ring             1700 drivers/net/wireless/broadcom/b43/dma.c 		update_max_used_slots(ring, ++used_slots);
ring             1703 drivers/net/wireless/broadcom/b43/dma.c 	ops->set_current_rxslot(ring, slot);
ring             1704 drivers/net/wireless/broadcom/b43/dma.c 	ring->current_slot = slot;
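
b43_dma_rx() above is the consumer catch-up loop: it reads the hardware's current RX slot, processes every slot from the driver's cursor up to it (dma_rx() may advance the slot further on its own when one frame spans several buffers), then writes the cursor back so the hardware can reuse the freed slots. A standalone sketch of that loop shape with illustrative callbacks:

    struct rxpoll_ring {
        int nr_slots;
        int current_slot;   /* driver-side cursor */
    };

    static int rxpoll_next(const struct rxpoll_ring *r, int slot)
    {
        return (slot == r->nr_slots - 1) ? 0 : slot + 1;
    }

    static void demo_rx_poll(struct rxpoll_ring *r,
                             int (*hw_get_slot)(struct rxpoll_ring *),
                             void (*hw_set_slot)(struct rxpoll_ring *, int),
                             void (*rx_one)(struct rxpoll_ring *, int *slot))
    {
        int hw_slot = hw_get_slot(r);
        int slot = r->current_slot;

        for (; slot != hw_slot; slot = rxpoll_next(r, slot))
            rx_one(r, &slot);    /* may consume extra slots itself */

        hw_set_slot(r, slot);    /* release processed slots to HW */
        r->current_slot = slot;
    }
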
ring             1707 drivers/net/wireless/broadcom/b43/dma.c static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
ring             1709 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!ring->tx);
ring             1710 drivers/net/wireless/broadcom/b43/dma.c 	ring->ops->tx_suspend(ring);
ring             1713 drivers/net/wireless/broadcom/b43/dma.c static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
ring             1715 drivers/net/wireless/broadcom/b43/dma.c 	B43_WARN_ON(!ring->tx);
ring             1716 drivers/net/wireless/broadcom/b43/dma.c 	ring->ops->tx_resume(ring);
ring              199 drivers/net/wireless/broadcom/b43/dma.h 	struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
ring              203 drivers/net/wireless/broadcom/b43/dma.h 	void (*fill_descriptor) (struct b43_dmaring * ring,
ring              207 drivers/net/wireless/broadcom/b43/dma.h 	void (*poke_tx) (struct b43_dmaring * ring, int slot);
ring              208 drivers/net/wireless/broadcom/b43/dma.h 	void (*tx_suspend) (struct b43_dmaring * ring);
ring              209 drivers/net/wireless/broadcom/b43/dma.h 	void (*tx_resume) (struct b43_dmaring * ring);
ring              210 drivers/net/wireless/broadcom/b43/dma.h 	int (*get_current_rxslot) (struct b43_dmaring * ring);
ring              211 drivers/net/wireless/broadcom/b43/dma.h 	void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
ring              278 drivers/net/wireless/broadcom/b43/dma.h static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
ring              280 drivers/net/wireless/broadcom/b43/dma.h 	return b43_read32(ring->dev, ring->mmio_base + offset);
ring              283 drivers/net/wireless/broadcom/b43/dma.h static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
ring              285 drivers/net/wireless/broadcom/b43/dma.h 	b43_write32(ring->dev, ring->mmio_base + offset, value);
ring              299 drivers/net/wireless/broadcom/b43/dma.h void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
ring              301 drivers/net/wireless/broadcom/b43/dma.h void b43_dma_rx(struct b43_dmaring *ring);
ring               32 drivers/net/wireless/broadcom/b43legacy/dma.c struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
ring               38 drivers/net/wireless/broadcom/b43legacy/dma.c 	*meta = &(ring->meta[slot]);
ring               39 drivers/net/wireless/broadcom/b43legacy/dma.c 	desc = ring->descbase;
ring               45 drivers/net/wireless/broadcom/b43legacy/dma.c static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
ring               50 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmadesc32 *descbase = ring->descbase;
ring               57 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
ring               62 drivers/net/wireless/broadcom/b43legacy/dma.c 	addr |= ring->dev->dma.translation;
ring               63 drivers/net/wireless/broadcom/b43legacy/dma.c 	ctl = (bufsize - ring->frameoffset)
ring               65 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (slot == ring->nr_slots - 1)
ring               80 drivers/net/wireless/broadcom/b43legacy/dma.c static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
ring               82 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
ring               86 drivers/net/wireless/broadcom/b43legacy/dma.c static void op32_tx_suspend(struct b43legacy_dmaring *ring)
ring               88 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
ring               89 drivers/net/wireless/broadcom/b43legacy/dma.c 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
ring               93 drivers/net/wireless/broadcom/b43legacy/dma.c static void op32_tx_resume(struct b43legacy_dmaring *ring)
ring               95 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
ring               96 drivers/net/wireless/broadcom/b43legacy/dma.c 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
ring              100 drivers/net/wireless/broadcom/b43legacy/dma.c static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
ring              104 drivers/net/wireless/broadcom/b43legacy/dma.c 	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
ring              110 drivers/net/wireless/broadcom/b43legacy/dma.c static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
ring              113 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
ring              117 drivers/net/wireless/broadcom/b43legacy/dma.c static inline int free_slots(struct b43legacy_dmaring *ring)
ring              119 drivers/net/wireless/broadcom/b43legacy/dma.c 	return (ring->nr_slots - ring->used_slots);
ring              122 drivers/net/wireless/broadcom/b43legacy/dma.c static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
ring              124 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
ring              125 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (slot == ring->nr_slots - 1)
ring              130 drivers/net/wireless/broadcom/b43legacy/dma.c static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
ring              132 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
ring              134 drivers/net/wireless/broadcom/b43legacy/dma.c 		return ring->nr_slots - 1;
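free_slots/next_slot/prev_slot above treat the descriptor array as circular. Note the asymmetric assertions: next_slot() accepts -1 because an empty TX ring parks current_slot at -1 and advances to slot 0 on the first transmission, while prev_slot() only ever sees valid slots. A runnable sketch of the wrap arithmetic (NR_SLOTS is an illustrative size):

#include <assert.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Circular-index helpers in the style of the b43legacy entries above. */
static int next_slot(int slot)
{
    assert(slot >= -1 && slot <= NR_SLOTS - 1);
    return (slot == NR_SLOTS - 1) ? 0 : slot + 1;
}

static int prev_slot(int slot)
{
    assert(slot >= 0 && slot <= NR_SLOTS - 1);
    return (slot == 0) ? NR_SLOTS - 1 : slot - 1;
}

int main(void)
{
    int slot = -1;

    for (int i = 0; i < NR_SLOTS + 2; i++) {
        slot = next_slot(slot);
        printf("%d ", slot);               /* 0 1 2 ... 7 0 1 */
    }
    printf("\nprev of 0: %d\n", prev_slot(0));   /* 7 */
    return 0;
}
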
ring              139 drivers/net/wireless/broadcom/b43legacy/dma.c static void update_max_used_slots(struct b43legacy_dmaring *ring,
ring              142 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (current_used_slots <= ring->max_used_slots)
ring              144 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->max_used_slots = current_used_slots;
ring              145 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
ring              146 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacydbg(ring->dev->wl,
ring              148 drivers/net/wireless/broadcom/b43legacy/dma.c 		       ring->max_used_slots,
ring              149 drivers/net/wireless/broadcom/b43legacy/dma.c 		       ring->tx ? "TX" : "RX",
ring              150 drivers/net/wireless/broadcom/b43legacy/dma.c 		       ring->index);
ring              154 drivers/net/wireless/broadcom/b43legacy/dma.c void update_max_used_slots(struct b43legacy_dmaring *ring,
ring              161 drivers/net/wireless/broadcom/b43legacy/dma.c int request_slot(struct b43legacy_dmaring *ring)
ring              165 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!ring->tx);
ring              166 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(ring->stopped);
ring              167 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(free_slots(ring) == 0);
ring              169 drivers/net/wireless/broadcom/b43legacy/dma.c 	slot = next_slot(ring, ring->current_slot);
ring              170 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->current_slot = slot;
ring              171 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->used_slots++;
ring              173 drivers/net/wireless/broadcom/b43legacy/dma.c 	update_max_used_slots(ring, ring->used_slots);
ring              183 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring;
ring              194 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring3;
ring              197 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring2;
ring              200 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring1;
ring              203 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring0;
ring              206 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring4;
ring              209 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dev->dma.tx_ring5;
ring              213 drivers/net/wireless/broadcom/b43legacy/dma.c 	return ring;
ring              217 drivers/net/wireless/broadcom/b43legacy/dma.c static inline int txring_to_priority(struct b43legacy_dmaring *ring)
ring              225 drivers/net/wireless/broadcom/b43legacy/dma.c 	return idx_to_prio[ring->index];
ring              247 drivers/net/wireless/broadcom/b43legacy/dma.c dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
ring              255 drivers/net/wireless/broadcom/b43legacy/dma.c 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
ring              259 drivers/net/wireless/broadcom/b43legacy/dma.c 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
ring              267 drivers/net/wireless/broadcom/b43legacy/dma.c void unmap_descbuffer(struct b43legacy_dmaring *ring,
ring              273 drivers/net/wireless/broadcom/b43legacy/dma.c 		dma_unmap_single(ring->dev->dev->dma_dev,
ring              277 drivers/net/wireless/broadcom/b43legacy/dma.c 		dma_unmap_single(ring->dev->dev->dma_dev,
ring              283 drivers/net/wireless/broadcom/b43legacy/dma.c void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
ring              287 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(ring->tx);
ring              289 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
ring              294 drivers/net/wireless/broadcom/b43legacy/dma.c void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
ring              298 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(ring->tx);
ring              300 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma_sync_single_for_device(ring->dev->dev->dma_dev,
ring              305 drivers/net/wireless/broadcom/b43legacy/dma.c void free_descriptor_buffer(struct b43legacy_dmaring *ring,
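The map/unmap/sync entries above encode DMA direction from context: map_descbuffer and unmap_descbuffer take a tx flag and pick the direction from it, while the sync helpers are RX-only, hence the B43legacy_WARN_ON(ring->tx). A hedged kernel-context sketch of the same direction handling using the standard DMA API (this is a fragment assuming a valid struct device *dev, not a standalone program and not the driver's exact code):

#include <linux/dma-mapping.h>

/* TX buffers are mapped towards the device, RX buffers away from it. */
static dma_addr_t map_buf(struct device *dev, void *buf, size_t len, bool tx)
{
    return dma_map_single(dev, buf, len,
                          tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static void unmap_buf(struct device *dev, dma_addr_t addr, size_t len, bool tx)
{
    dma_unmap_single(dev, addr, len,
                     tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

/* Before the CPU inspects a received frame, and again before handing
 * the buffer back to the hardware, ownership must be synced explicitly. */
static void rx_sync_for_cpu(struct device *dev, dma_addr_t addr, size_t len)
{
    dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
}

static void rx_sync_for_device(struct device *dev, dma_addr_t addr, size_t len)
{
    dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}
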
ring              318 drivers/net/wireless/broadcom/b43legacy/dma.c static int alloc_ringmemory(struct b43legacy_dmaring *ring)
ring              321 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
ring              323 drivers/net/wireless/broadcom/b43legacy/dma.c 					    &(ring->dmabase), GFP_KERNEL);
ring              324 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring->descbase)
ring              330 drivers/net/wireless/broadcom/b43legacy/dma.c static void free_ringmemory(struct b43legacy_dmaring *ring)
ring              332 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
ring              333 drivers/net/wireless/broadcom/b43legacy/dma.c 			  ring->descbase, ring->dmabase);
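alloc_ringmemory/free_ringmemory pair a coherent DMA allocation with its release: the descriptor ring must live in memory that CPU and device view coherently, and the returned dma_addr_t handle is what later gets programmed into the controller's ring-base register. A sketch under the same assumptions (RINGMEM_SIZE and the struct are illustrative stand-ins for the driver's constants):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define RINGMEM_SIZE 4096   /* illustrative; one page of descriptors */

struct dmaring_mem {
    void       *descbase;   /* CPU address of the descriptor array */
    dma_addr_t  dmabase;    /* bus address programmed into the HW */
};

static int alloc_ringmemory(struct device *dev, struct dmaring_mem *ring)
{
    ring->descbase = dma_alloc_coherent(dev, RINGMEM_SIZE,
                                        &ring->dmabase, GFP_KERNEL);
    if (!ring->descbase)
        return -ENOMEM;
    return 0;
}

static void free_ringmemory(struct device *dev, struct dmaring_mem *ring)
{
    dma_free_coherent(dev, RINGMEM_SIZE, ring->descbase, ring->dmabase);
}
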
ring              411 drivers/net/wireless/broadcom/b43legacy/dma.c static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
ring              416 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
ring              419 drivers/net/wireless/broadcom/b43legacy/dma.c 	switch (ring->type) {
ring              435 drivers/net/wireless/broadcom/b43legacy/dma.c 	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
ring              440 drivers/net/wireless/broadcom/b43legacy/dma.c static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
ring              450 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(ring->tx);
ring              452 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
ring              455 drivers/net/wireless/broadcom/b43legacy/dma.c 	dmaaddr = map_descbuffer(ring, skb->data,
ring              456 drivers/net/wireless/broadcom/b43legacy/dma.c 				 ring->rx_buffersize, 0);
ring              457 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
ring              463 drivers/net/wireless/broadcom/b43legacy/dma.c 		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
ring              466 drivers/net/wireless/broadcom/b43legacy/dma.c 		dmaaddr = map_descbuffer(ring, skb->data,
ring              467 drivers/net/wireless/broadcom/b43legacy/dma.c 					 ring->rx_buffersize, 0);
ring              470 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
ring              477 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
ring              490 drivers/net/wireless/broadcom/b43legacy/dma.c static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
ring              497 drivers/net/wireless/broadcom/b43legacy/dma.c 	for (i = 0; i < ring->nr_slots; i++) {
ring              498 drivers/net/wireless/broadcom/b43legacy/dma.c 		desc = op32_idx2desc(ring, i, &meta);
ring              500 drivers/net/wireless/broadcom/b43legacy/dma.c 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
ring              502 drivers/net/wireless/broadcom/b43legacy/dma.c 			b43legacyerr(ring->dev->wl,
ring              508 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->used_slots = ring->nr_slots;
ring              515 drivers/net/wireless/broadcom/b43legacy/dma.c 		desc = op32_idx2desc(ring, i, &meta);
ring              517 drivers/net/wireless/broadcom/b43legacy/dma.c 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
ring              527 drivers/net/wireless/broadcom/b43legacy/dma.c static int dmacontroller_setup(struct b43legacy_dmaring *ring)
ring              532 drivers/net/wireless/broadcom/b43legacy/dma.c 	u32 trans = ring->dev->dma.translation;
ring              533 drivers/net/wireless/broadcom/b43legacy/dma.c 	u32 ringbase = (u32)(ring->dmabase);
ring              535 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (ring->tx) {
ring              541 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
ring              542 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
ring              546 drivers/net/wireless/broadcom/b43legacy/dma.c 		err = alloc_initial_descbuffers(ring);
ring              552 drivers/net/wireless/broadcom/b43legacy/dma.c 		value = (ring->frameoffset <<
ring              557 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
ring              558 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
ring              561 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
ring              569 drivers/net/wireless/broadcom/b43legacy/dma.c static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
ring              571 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (ring->tx) {
ring              572 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
ring              573 drivers/net/wireless/broadcom/b43legacy/dma.c 						 ring->type);
ring              574 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
ring              576 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
ring              577 drivers/net/wireless/broadcom/b43legacy/dma.c 						 ring->type);
ring              578 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
ring              582 drivers/net/wireless/broadcom/b43legacy/dma.c static void free_all_descbuffers(struct b43legacy_dmaring *ring)
ring              587 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring->used_slots)
ring              589 drivers/net/wireless/broadcom/b43legacy/dma.c 	for (i = 0; i < ring->nr_slots; i++) {
ring              590 drivers/net/wireless/broadcom/b43legacy/dma.c 		op32_idx2desc(ring, i, &meta);
ring              593 drivers/net/wireless/broadcom/b43legacy/dma.c 			B43legacy_WARN_ON(!ring->tx);
ring              596 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (ring->tx)
ring              597 drivers/net/wireless/broadcom/b43legacy/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring              600 drivers/net/wireless/broadcom/b43legacy/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring              601 drivers/net/wireless/broadcom/b43legacy/dma.c 					 ring->rx_buffersize, 0);
ring              602 drivers/net/wireless/broadcom/b43legacy/dma.c 		free_descriptor_buffer(ring, meta, 0);
ring              629 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring;
ring              634 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring              635 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              637 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->type = type;
ring              638 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->dev = dev;
ring              644 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
ring              646 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring->meta)
ring              649 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->txhdr_cache = kcalloc(nr_slots,
ring              652 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (!ring->txhdr_cache)
ring              656 drivers/net/wireless/broadcom/b43legacy/dma.c 		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
ring              660 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (b43legacy_dma_mapping_error(ring, dma_test,
ring              663 drivers/net/wireless/broadcom/b43legacy/dma.c 			kfree(ring->txhdr_cache);
ring              664 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->txhdr_cache = kcalloc(nr_slots,
ring              667 drivers/net/wireless/broadcom/b43legacy/dma.c 			if (!ring->txhdr_cache)
ring              671 drivers/net/wireless/broadcom/b43legacy/dma.c 					ring->txhdr_cache,
ring              675 drivers/net/wireless/broadcom/b43legacy/dma.c 			if (b43legacy_dma_mapping_error(ring, dma_test,
ring              685 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->nr_slots = nr_slots;
ring              686 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring              687 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->index = controller_index;
ring              689 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->tx = true;
ring              690 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->current_slot = -1;
ring              692 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (ring->index == 0) {
ring              693 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
ring              694 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
ring              695 drivers/net/wireless/broadcom/b43legacy/dma.c 		} else if (ring->index == 3) {
ring              696 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
ring              697 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
ring              702 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->last_injected_overflow = jiffies;
ring              705 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = alloc_ringmemory(ring);
ring              708 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = dmacontroller_setup(ring);
ring              713 drivers/net/wireless/broadcom/b43legacy/dma.c 	return ring;
ring              716 drivers/net/wireless/broadcom/b43legacy/dma.c 	free_ringmemory(ring);
ring              718 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring->txhdr_cache);
ring              720 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring->meta);
ring              722 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring);
ring              723 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = NULL;
ring              728 drivers/net/wireless/broadcom/b43legacy/dma.c static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
ring              730 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              733 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
ring              734 drivers/net/wireless/broadcom/b43legacy/dma.c 		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
ring              735 drivers/net/wireless/broadcom/b43legacy/dma.c 		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
ring              736 drivers/net/wireless/broadcom/b43legacy/dma.c 		     ring->nr_slots);
ring              740 drivers/net/wireless/broadcom/b43legacy/dma.c 	dmacontroller_cleanup(ring);
ring              741 drivers/net/wireless/broadcom/b43legacy/dma.c 	free_all_descbuffers(ring);
ring              742 drivers/net/wireless/broadcom/b43legacy/dma.c 	free_ringmemory(ring);
ring              744 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring->txhdr_cache);
ring              745 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring->meta);
ring              746 drivers/net/wireless/broadcom/b43legacy/dma.c 	kfree(ring);
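b43legacy_setup_dmaring and b43legacy_destroy_dmaring above (dma.c lines 629-746) follow the classic kernel error-unwind shape: each successful allocation gets a matching cleanup label, and any later failure jumps to the label that frees everything allocated so far, in reverse order, while destroy frees the same resources unconditionally. A compact runnable model of that structure (the field names and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct ring {
    void *meta;
    void *txhdr_cache;
    void *descbase;
};

static struct ring *setup_ring(void)
{
    struct ring *ring = calloc(1, sizeof(*ring));
    if (!ring)
        goto out;

    ring->meta = calloc(16, 32);
    if (!ring->meta)
        goto err_free_ring;

    ring->txhdr_cache = calloc(16, 64);
    if (!ring->txhdr_cache)
        goto err_free_meta;

    ring->descbase = calloc(16, 16);
    if (!ring->descbase)
        goto err_free_txhdr;

    return ring;

    /* Unwind in reverse order of allocation. */
err_free_txhdr:
    free(ring->txhdr_cache);
err_free_meta:
    free(ring->meta);
err_free_ring:
    free(ring);
out:
    return NULL;
}

int main(void)
{
    struct ring *r = setup_ring();

    printf("%s\n", r ? "ok" : "failed");
    if (r) {   /* destroy path: free everything unconditionally */
        free(r->descbase);
        free(r->txhdr_cache);
        free(r->meta);
        free(r);
    }
    return 0;
}
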
ring              779 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring;
ring              800 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
ring              801 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              803 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring0 = ring;
ring              805 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
ring              806 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              808 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring1 = ring;
ring              810 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
ring              811 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              813 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring2 = ring;
ring              815 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
ring              816 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              818 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring3 = ring;
ring              820 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
ring              821 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              823 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring4 = ring;
ring              825 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
ring              826 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              828 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->tx_ring5 = ring;
ring              831 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
ring              832 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (!ring)
ring              834 drivers/net/wireless/broadcom/b43legacy/dma.c 	dma->rx_ring0 = ring;
ring              837 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
ring              838 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (!ring)
ring              840 drivers/net/wireless/broadcom/b43legacy/dma.c 		dma->rx_ring3 = ring;
ring              873 drivers/net/wireless/broadcom/b43legacy/dma.c static u16 generate_cookie(struct b43legacy_dmaring *ring,
ring              884 drivers/net/wireless/broadcom/b43legacy/dma.c 	switch (ring->index) {
ring              916 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring = NULL;
ring              920 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring0;
ring              923 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring1;
ring              926 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring2;
ring              929 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring3;
ring              932 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring4;
ring              935 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring = dma->tx_ring5;
ring              941 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
ring              943 drivers/net/wireless/broadcom/b43legacy/dma.c 	return ring;
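generate_cookie/parse_cookie round-trip a (ring, slot) pair through a 16-bit cookie that the firmware echoes back in TX status reports: the ring identity goes into the high bits, the slot into the low bits, and parse_cookie recovers both and sanity-checks the slot range. A runnable sketch of one plausible encoding (this exact bit layout is illustrative, not necessarily the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: ring index in the top nibble, slot in the rest. */
static uint16_t generate_cookie(unsigned ring_index, int slot)
{
    assert(ring_index <= 0xF && slot >= 0 && slot <= 0x0FFF);
    return (uint16_t)((ring_index << 12) | (unsigned)slot);
}

static void parse_cookie(uint16_t cookie, unsigned *ring_index, int *slot)
{
    *ring_index = cookie >> 12;
    *slot = cookie & 0x0FFF;
}

int main(void)
{
    unsigned ring;
    int slot;

    parse_cookie(generate_cookie(3, 42), &ring, &slot);
    printf("ring %u slot %d\n", ring, slot);   /* ring 3 slot 42 */
    return 0;
}
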
ring              946 drivers/net/wireless/broadcom/b43legacy/dma.c static int dma_tx_fragment(struct b43legacy_dmaring *ring,
ring              962 drivers/net/wireless/broadcom/b43legacy/dma.c 	old_top_slot = ring->current_slot;
ring              963 drivers/net/wireless/broadcom/b43legacy/dma.c 	old_used_slots = ring->used_slots;
ring              966 drivers/net/wireless/broadcom/b43legacy/dma.c 	slot = request_slot(ring);
ring              967 drivers/net/wireless/broadcom/b43legacy/dma.c 	desc = op32_idx2desc(ring, slot, &meta_hdr);
ring              970 drivers/net/wireless/broadcom/b43legacy/dma.c 	header = &(ring->txhdr_cache[slot * sizeof(
ring              972 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = b43legacy_generate_txhdr(ring->dev, header,
ring              974 drivers/net/wireless/broadcom/b43legacy/dma.c 				 generate_cookie(ring, slot));
ring              976 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->current_slot = old_top_slot;
ring              977 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->used_slots = old_used_slots;
ring              981 drivers/net/wireless/broadcom/b43legacy/dma.c 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
ring              983 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
ring              985 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->current_slot = old_top_slot;
ring              986 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->used_slots = old_used_slots;
ring              989 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
ring              993 drivers/net/wireless/broadcom/b43legacy/dma.c 	slot = request_slot(ring);
ring              994 drivers/net/wireless/broadcom/b43legacy/dma.c 	desc = op32_idx2desc(ring, slot, &meta);
ring             1000 drivers/net/wireless/broadcom/b43legacy/dma.c 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
ring             1002 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
ring             1005 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->current_slot = old_top_slot;
ring             1006 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->used_slots = old_used_slots;
ring             1021 drivers/net/wireless/broadcom/b43legacy/dma.c 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
ring             1022 drivers/net/wireless/broadcom/b43legacy/dma.c 		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
ring             1023 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->current_slot = old_top_slot;
ring             1024 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->used_slots = old_used_slots;
ring             1030 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_fill_descriptor(ring, desc, meta->dmaaddr,
ring             1035 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_poke_tx(ring, next_slot(ring, slot));
ring             1041 drivers/net/wireless/broadcom/b43legacy/dma.c 	unmap_descbuffer(ring, meta_hdr->dmaaddr,
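dma_tx_fragment above consumes two slots per packet (one descriptor for the generated TX header, one for the frame body) and snapshots current_slot/used_slots before touching the ring; any failure afterwards, whether header generation or either DMA mapping, restores the snapshot so the ring looks untouched. A small runnable model of that rollback discipline (request_slot and tx_fragment are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

struct txring {
    int current_slot;   /* -1 when empty */
    int used_slots;
};

static int request_slot(struct txring *r)
{
    r->current_slot = (r->current_slot + 1) % NR_SLOTS;
    r->used_slots++;
    return r->current_slot;
}

/* Claim two slots (header + body); roll back on failure so the ring
 * state is exactly as before, mirroring the entries above. */
static int tx_fragment(struct txring *r, bool map_ok)
{
    int old_top = r->current_slot;
    int old_used = r->used_slots;

    request_slot(r);    /* header descriptor */
    request_slot(r);    /* body descriptor */

    if (!map_ok) {      /* e.g. a DMA mapping failed */
        r->current_slot = old_top;
        r->used_slots = old_used;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct txring r = { .current_slot = -1, .used_slots = 0 };

    tx_fragment(&r, false);
    printf("after failed tx: used=%d\n", r.used_slots);   /* 0 */
    tx_fragment(&r, true);
    printf("after good tx:   used=%d\n", r.used_slots);   /* 2 */
    return 0;
}
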
ring             1047 drivers/net/wireless/broadcom/b43legacy/dma.c int should_inject_overflow(struct b43legacy_dmaring *ring)
ring             1050 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(b43legacy_debug(ring->dev,
ring             1056 drivers/net/wireless/broadcom/b43legacy/dma.c 		next_overflow = ring->last_injected_overflow + HZ;
ring             1058 drivers/net/wireless/broadcom/b43legacy/dma.c 			ring->last_injected_overflow = jiffies;
ring             1059 drivers/net/wireless/broadcom/b43legacy/dma.c 			b43legacydbg(ring->dev->wl,
ring             1061 drivers/net/wireless/broadcom/b43legacy/dma.c 			       "DMA controller %d\n", ring->index);
ring             1072 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring;
ring             1075 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
ring             1076 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!ring->tx);
ring             1078 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(ring->stopped)) {
ring             1088 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
ring             1097 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = dma_tx_fragment(ring, &skb);
ring             1108 drivers/net/wireless/broadcom/b43legacy/dma.c 	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
ring             1109 drivers/net/wireless/broadcom/b43legacy/dma.c 	    should_inject_overflow(ring)) {
ring             1114 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->stopped = true;
ring             1117 drivers/net/wireless/broadcom/b43legacy/dma.c 			       ring->index);
ring             1125 drivers/net/wireless/broadcom/b43legacy/dma.c 	struct b43legacy_dmaring *ring;
ring             1131 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring = parse_cookie(dev, status->cookie, &slot);
ring             1132 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(!ring))
ring             1134 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!ring->tx);
ring             1139 drivers/net/wireless/broadcom/b43legacy/dma.c 	firstused = ring->current_slot - ring->used_slots + 1;
ring             1141 drivers/net/wireless/broadcom/b43legacy/dma.c 		firstused = ring->nr_slots + firstused;
ring             1148 drivers/net/wireless/broadcom/b43legacy/dma.c 			     ring->index, firstused, slot);
ring             1153 drivers/net/wireless/broadcom/b43legacy/dma.c 		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
ring             1154 drivers/net/wireless/broadcom/b43legacy/dma.c 		op32_idx2desc(ring, slot, &meta);
ring             1157 drivers/net/wireless/broadcom/b43legacy/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring             1160 drivers/net/wireless/broadcom/b43legacy/dma.c 			unmap_descbuffer(ring, meta->dmaaddr,
ring             1215 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->used_slots--;
ring             1219 drivers/net/wireless/broadcom/b43legacy/dma.c 		slot = next_slot(ring, slot);
ring             1222 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (ring->stopped) {
ring             1223 drivers/net/wireless/broadcom/b43legacy/dma.c 		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
ring             1224 drivers/net/wireless/broadcom/b43legacy/dma.c 		ring->stopped = false;
ring             1227 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
ring             1228 drivers/net/wireless/broadcom/b43legacy/dma.c 		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
ring             1232 drivers/net/wireless/broadcom/b43legacy/dma.c 		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
ring             1235 drivers/net/wireless/broadcom/b43legacy/dma.c 				     ring->index);
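The TX-status handler above derives the oldest in-flight slot from the ring's top pointer and its occupancy, firstused = current_slot - used_slots + 1, correcting for wrap when the result goes negative, and then walks forward freeing completed slots. A two-case runnable check of that arithmetic:

#include <stdio.h>

#define NR_SLOTS 8

/* Oldest in-flight slot, as computed in the TX-status path above. */
static int first_used(int current_slot, int used_slots)
{
    int first = current_slot - used_slots + 1;

    if (first < 0)
        first += NR_SLOTS;   /* occupancy wrapped past slot 0 */
    return first;
}

int main(void)
{
    printf("%d\n", first_used(5, 3));   /* slots 3,4,5 in flight -> 3 */
    printf("%d\n", first_used(1, 4));   /* slots 6,7,0,1 in flight -> 6 */
    return 0;
}
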
ring             1241 drivers/net/wireless/broadcom/b43legacy/dma.c static void dma_rx(struct b43legacy_dmaring *ring,
ring             1252 drivers/net/wireless/broadcom/b43legacy/dma.c 	desc = op32_idx2desc(ring, *slot, &meta);
ring             1254 drivers/net/wireless/broadcom/b43legacy/dma.c 	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
ring             1257 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (ring->index == 3) {
ring             1270 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacy_handle_hwtxstatus(ring->dev, hw);
ring             1272 drivers/net/wireless/broadcom/b43legacy/dma.c 		sync_descbuffer_for_device(ring, meta->dmaaddr,
ring             1273 drivers/net/wireless/broadcom/b43legacy/dma.c 					   ring->rx_buffersize);
ring             1289 drivers/net/wireless/broadcom/b43legacy/dma.c 			sync_descbuffer_for_device(ring, meta->dmaaddr,
ring             1290 drivers/net/wireless/broadcom/b43legacy/dma.c 						   ring->rx_buffersize);
ring             1294 drivers/net/wireless/broadcom/b43legacy/dma.c 	if (unlikely(len > ring->rx_buffersize)) {
ring             1304 drivers/net/wireless/broadcom/b43legacy/dma.c 			desc = op32_idx2desc(ring, *slot, &meta);
ring             1306 drivers/net/wireless/broadcom/b43legacy/dma.c 			sync_descbuffer_for_device(ring, meta->dmaaddr,
ring             1307 drivers/net/wireless/broadcom/b43legacy/dma.c 						   ring->rx_buffersize);
ring             1308 drivers/net/wireless/broadcom/b43legacy/dma.c 			*slot = next_slot(ring, *slot);
ring             1310 drivers/net/wireless/broadcom/b43legacy/dma.c 			tmp -= ring->rx_buffersize;
ring             1314 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
ring             1316 drivers/net/wireless/broadcom/b43legacy/dma.c 		       len, ring->rx_buffersize, cnt);
ring             1321 drivers/net/wireless/broadcom/b43legacy/dma.c 	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
ring             1323 drivers/net/wireless/broadcom/b43legacy/dma.c 		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
ring             1325 drivers/net/wireless/broadcom/b43legacy/dma.c 		sync_descbuffer_for_device(ring, dmaaddr,
ring             1326 drivers/net/wireless/broadcom/b43legacy/dma.c 					   ring->rx_buffersize);
ring             1330 drivers/net/wireless/broadcom/b43legacy/dma.c 	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
ring             1331 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb_put(skb, len + ring->frameoffset);
ring             1332 drivers/net/wireless/broadcom/b43legacy/dma.c 	skb_pull(skb, ring->frameoffset);
ring             1334 drivers/net/wireless/broadcom/b43legacy/dma.c 	b43legacy_rx(ring->dev, skb, rxhdr);
ring             1339 drivers/net/wireless/broadcom/b43legacy/dma.c void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
ring             1345 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(ring->tx);
ring             1346 drivers/net/wireless/broadcom/b43legacy/dma.c 	current_slot = op32_get_current_rxslot(ring);
ring             1348 drivers/net/wireless/broadcom/b43legacy/dma.c 			   ring->nr_slots));
ring             1350 drivers/net/wireless/broadcom/b43legacy/dma.c 	slot = ring->current_slot;
ring             1351 drivers/net/wireless/broadcom/b43legacy/dma.c 	for (; slot != current_slot; slot = next_slot(ring, slot)) {
ring             1352 drivers/net/wireless/broadcom/b43legacy/dma.c 		dma_rx(ring, &slot);
ring             1353 drivers/net/wireless/broadcom/b43legacy/dma.c 		update_max_used_slots(ring, ++used_slots);
ring             1355 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_set_current_rxslot(ring, slot);
ring             1356 drivers/net/wireless/broadcom/b43legacy/dma.c 	ring->current_slot = slot;
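b43legacy_dma_rx above walks from the driver's software read slot up to, but not including, the slot the hardware reports as current, handles one received frame per slot, then writes the new index back via op32_set_current_rxslot so the device can reuse the drained buffers. A runnable skeleton of that loop (rx_drain and the printout are illustrative):

#include <stdio.h>

#define NR_SLOTS 8

static int next_slot(int slot)
{
    return (slot + 1) % NR_SLOTS;
}

/* Drain frames from sw_slot up to, but not including, the slot the
 * hardware will fill next; return the new software read index. */
static int rx_drain(int sw_slot, int hw_slot)
{
    int slot;

    for (slot = sw_slot; slot != hw_slot; slot = next_slot(slot))
        printf("handle frame in slot %d\n", slot);
    return slot;   /* written back to the device as the read index */
}

int main(void)
{
    int sw = rx_drain(6, 2);   /* handles slots 6, 7, 0, 1 */

    printf("new read index: %d\n", sw);
    return 0;
}
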
ring             1359 drivers/net/wireless/broadcom/b43legacy/dma.c static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
ring             1361 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!ring->tx);
ring             1362 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_tx_suspend(ring);
ring             1365 drivers/net/wireless/broadcom/b43legacy/dma.c static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
ring             1367 drivers/net/wireless/broadcom/b43legacy/dma.c 	B43legacy_WARN_ON(!ring->tx);
ring             1368 drivers/net/wireless/broadcom/b43legacy/dma.c 	op32_tx_resume(ring);
ring              168 drivers/net/wireless/broadcom/b43legacy/dma.h u32 b43legacy_dma_read(struct b43legacy_dmaring *ring,
ring              171 drivers/net/wireless/broadcom/b43legacy/dma.h 	return b43legacy_read32(ring->dev, ring->mmio_base + offset);
ring              175 drivers/net/wireless/broadcom/b43legacy/dma.h void b43legacy_dma_write(struct b43legacy_dmaring *ring,
ring              178 drivers/net/wireless/broadcom/b43legacy/dma.h 	b43legacy_write32(ring->dev, ring->mmio_base + offset, value);
ring              193 drivers/net/wireless/broadcom/b43legacy/dma.h void b43legacy_dma_rx(struct b43legacy_dmaring *ring);
ring              219 drivers/net/wireless/broadcom/b43legacy/dma.h void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
ring              106 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              148 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
ring              149 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		if (!ring)
ring              157 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		ring->hash_id = hash_idx;
ring              158 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		ring->status = RING_CLOSED;
ring              159 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		skb_queue_head_init(&ring->skblist);
ring              160 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		flow->rings[i] = ring;
ring              170 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              172 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              174 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	return flow->hash[ring->hash_id].fifo;
ring              181 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              192 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              193 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (ring->blocked == blocked) {
ring              202 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 			ring = flow->rings[i];
ring              203 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 			if ((ring->status == RING_OPEN) &&
ring              205 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 				if (ring->blocked) {
ring              230 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              237 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (!ring)
ring              244 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	hash_idx = ring->hash_id;
ring              249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb = skb_dequeue(&ring->skblist);
ring              252 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		skb = skb_dequeue(&ring->skblist);
ring              255 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	kfree(ring);
ring              262 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              264 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              266 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb_queue_tail(&ring->skblist, skb);
ring              268 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (!ring->blocked &&
ring              269 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	    (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
ring              278 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
ring              281 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	return skb_queue_len(&ring->skblist);
ring              287 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              290 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              291 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (ring->status != RING_OPEN)
ring              294 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb = skb_dequeue(&ring->skblist);
ring              296 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (ring->blocked &&
ring              297 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	    (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
ring              309 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              311 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              313 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	skb_queue_head(&ring->skblist, skb);
ring              319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              321 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              322 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (!ring)
ring              325 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (ring->status != RING_OPEN)
ring              328 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	return skb_queue_len(&ring->skblist);
ring              334 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              336 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              337 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	if (!ring) {
ring              342 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring->status = RING_OPEN;
ring              348 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	struct brcmf_flowring_ring *ring;
ring              351 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	ring = flow->rings[flowid];
ring              352 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	hash_idx = ring->hash_id;
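The brcmfmac flowring entries above implement hysteresis-based flow control: enqueue blocks the ring once the backlog exceeds BRCMF_FLOWRING_HIGH, and dequeue unblocks it only after the backlog falls below BRCMF_FLOWRING_LOW, so the blocked state cannot flap around a single threshold. A runnable model with illustrative watermark values:

#include <stdbool.h>
#include <stdio.h>

#define HIGH_WM 10   /* illustrative stand-ins for BRCMF_FLOWRING_HIGH/LOW */
#define LOW_WM   5

struct flowring {
    int qlen;
    bool blocked;
};

static void enqueue(struct flowring *r)
{
    r->qlen++;
    if (!r->blocked && r->qlen > HIGH_WM) {
        r->blocked = true;   /* tell the stack to stop feeding this ring */
        printf("blocked at qlen=%d\n", r->qlen);
    }
}

static void dequeue(struct flowring *r)
{
    if (r->qlen)
        r->qlen--;
    if (r->blocked && r->qlen < LOW_WM) {
        r->blocked = false;  /* resume only below the low watermark */
        printf("unblocked at qlen=%d\n", r->qlen);
    }
}

int main(void)
{
    struct flowring r = { 0, false };
    int i;

    for (i = 0; i < 12; i++)
        enqueue(&r);   /* blocks once qlen exceeds HIGH_WM */
    for (i = 0; i < 12; i++)
        dequeue(&r);   /* unblocks once qlen drops below LOW_WM */
    return 0;
}

The gap between the two watermarks is the point: with a single threshold, every enqueue/dequeue pair near the limit would toggle the queue state and thrash the stack.
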
ring             1216 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);
ring             1218 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
ring             1449 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	struct brcmf_flowring_ring *ring;
ring             1474 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		ring = msgbuf->flow->rings[i];
ring             1475 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		if (ring->status != RING_OPEN)
ring             1478 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 		hash = &msgbuf->flow->hash[ring->hash_id];
ring             1482 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 				skb_queue_len(&ring->skblist), ring->blocked,
ring              923 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
ring              924 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
ring              925 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_commonring *commonring = &ring->commonring;
ring              931 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		  commonring->w_ptr, ring->id);
ring              933 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
ring              941 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
ring              942 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
ring              943 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_commonring *commonring = &ring->commonring;
ring              949 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		  commonring->r_ptr, ring->id);
ring              951 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
ring              959 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
ring              960 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
ring              975 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
ring              976 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
ring              977 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_commonring *commonring = &ring->commonring;
ring              982 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
ring              985 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		  commonring->w_ptr, ring->id);
ring              993 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
ring              994 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pciedev_info *devinfo = ring->devinfo;
ring              995 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_commonring *commonring = &ring->commonring;
ring             1000 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
ring             1003 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		  commonring->r_ptr, ring->id);
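The brcmfmac PCIe callbacks above exchange producer/consumer indices with the device: the host keeps commonring->w_ptr and r_ptr, the write_*ptr callbacks publish the host-side index to a device-visible word (ring->w_idx_addr / r_idx_addr), and the read_*ptr callbacks fetch the device's side back. A minimal single-producer/single-consumer model of that index exchange (plain variables stand in for the shared index words, and ring_free_entries is my own illustrative helper):

#include <stdio.h>

#define DEPTH 8   /* must be a power of two for the modulo below */

/* Device-visible index words; in the real driver these live in shared
 * memory or device registers behind read_ptr/write_ptr accessors. */
static unsigned shared_w_idx;
static unsigned shared_r_idx;

struct commonring {
    unsigned w_ptr;   /* host-side producer index */
    unsigned r_ptr;   /* host-side consumer index */
};

static void ring_write_wptr(struct commonring *cr)
{
    shared_w_idx = cr->w_ptr;   /* publish: entries up to here are valid */
}

static void ring_read_rptr(struct commonring *cr)
{
    cr->r_ptr = shared_r_idx;   /* fetch: how far the device has consumed */
}

static unsigned ring_free_entries(const struct commonring *cr)
{
    /* One slot stays empty so full and empty are distinguishable. */
    return DEPTH - ((cr->w_ptr - cr->r_ptr) % DEPTH) - 1;
}

int main(void)
{
    struct commonring cr = { 0, 0 };

    cr.w_ptr = 3;        /* host produced 3 entries */
    ring_write_wptr(&cr);
    shared_r_idx = 2;    /* device consumed 2 of them */
    ring_read_rptr(&cr);
    printf("free entries: %u\n", ring_free_entries(&cr));   /* 6 */
    return 0;
}
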
ring             1014 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	void *ring;
ring             1017 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
ring             1019 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	if (!ring)
ring             1027 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	memset(ring, 0, size);
ring             1029 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	return (ring);
ring             1039 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring;
ring             1061 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring             1062 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	if (!ring) {
ring             1067 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
ring             1069 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	ring->dma_handle = dma_handle;
ring             1070 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	ring->devinfo = devinfo;
ring             1071 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	brcmf_commonring_register_cb(&ring->commonring,
ring             1076 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 				     brcmf_pcie_ring_mb_write_wptr, ring);
ring             1078 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	return (ring);
ring             1083 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 					  struct brcmf_pcie_ringbuf *ring)
ring             1088 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	if (!ring)
ring             1091 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	dma_buf = ring->commonring.buf_addr;
ring             1093 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		size = ring->commonring.depth * ring->commonring.item_len;
ring             1094 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
ring             1096 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	kfree(ring);
ring             1124 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	struct brcmf_pcie_ringbuf *ring;
ring             1218 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
ring             1219 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		if (!ring)
ring             1221 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->w_idx_addr = h2d_w_idx_ptr;
ring             1222 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->r_idx_addr = h2d_r_idx_ptr;
ring             1223 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->id = i;
ring             1224 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		devinfo->shared.commonrings[i] = ring;
ring             1233 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
ring             1234 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		if (!ring)
ring             1236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->w_idx_addr = d2h_w_idx_ptr;
ring             1237 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->r_idx_addr = d2h_r_idx_ptr;
ring             1238 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->id = i;
ring             1239 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		devinfo->shared.commonrings[i] = ring;
ring             1249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 	rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
ring             1256 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring = &rings[i];
ring             1257 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->devinfo = devinfo;
ring             1258 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
ring             1259 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		brcmf_commonring_register_cb(&ring->commonring,
ring             1265 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 					     ring);
ring             1266 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->w_idx_addr = h2d_w_idx_ptr;
ring             1267 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c 		ring->r_idx_addr = h2d_r_idx_ptr;
ring              133 drivers/net/wireless/intersil/p54/p54pci.c 	int ring_index, struct p54p_desc *ring, u32 ring_limit,
ring              147 drivers/net/wireless/intersil/p54/p54pci.c 		struct p54p_desc *desc = &ring[i];
ring              185 drivers/net/wireless/intersil/p54/p54pci.c 	int ring_index, struct p54p_desc *ring, u32 ring_limit,
ring              200 drivers/net/wireless/intersil/p54/p54pci.c 		desc = &ring[i];
ring              238 drivers/net/wireless/intersil/p54/p54pci.c 	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
ring              242 drivers/net/wireless/intersil/p54/p54pci.c 	int ring_index, struct p54p_desc *ring, u32 ring_limit,
ring              256 drivers/net/wireless/intersil/p54/p54pci.c 		desc = &ring[i];
ring              345 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
ring              347 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	while (skb_queue_len(&ring->queue)) {
ring              348 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
ring              356 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring              357 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		skb = __skb_dequeue(&ring->queue);
ring              371 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		if (ring->entries - skb_queue_len(&ring->queue) == 2)
ring              461 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_ring *ring;
ring              474 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	ring = &priv->tx_ring[prio];
ring              544 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
ring              545 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	entry = &ring->desc[idx];
ring              573 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	__skb_queue_tail(&ring->queue, skb);
ring              574 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	if (ring->entries - skb_queue_len(&ring->queue) < 2)
ring             1072 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_desc *ring;
ring             1076 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries,
ring             1078 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	if (!ring || (unsigned long)ring & 0xFF) {
ring             1084 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	priv->tx_ring[prio].desc = ring;
ring             1091 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		ring[i].next_tx_desc =
ring             1092 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring));
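The rtl8180 allocation above rejects a descriptor ring that is not 256-byte aligned ((unsigned long)ring & 0xFF) and then links each descriptor's next_tx_desc field to the bus address of its successor, wrapping the last entry back to the first so the hardware can follow the chain indefinitely. A runnable model of the circular chaining (aligned_alloc stands in for the PCI coherent allocator, and the bus address is faked as the CPU address):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 8

struct tx_desc {
    uint32_t next_tx_desc;   /* bus address of the next descriptor */
    uint8_t  pad[28];        /* pad to a plausible descriptor size */
};

int main(void)
{
    /* The hardware requires a 256-byte-aligned ring base; 8 * 32 bytes
     * keeps the allocation size a multiple of the alignment. */
    struct tx_desc *ring = aligned_alloc(256, ENTRIES * sizeof(*ring));
    if (!ring || ((uintptr_t)ring & 0xFF))
        return 1;

    /* Pretend the bus address equals the (truncated) CPU address. */
    uint32_t dma = (uint32_t)(uintptr_t)ring;

    for (int i = 0; i < ENTRIES; i++)
        ring[i].next_tx_desc =
            dma + ((i + 1) % ENTRIES) * (uint32_t)sizeof(*ring);

    printf("last desc points back to ring base: %s\n",
           ring[ENTRIES - 1].next_tx_desc == dma ? "yes" : "no");
    free(ring);
    return 0;
}
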
ring             1100 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
ring             1102 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	while (skb_queue_len(&ring->queue)) {
ring             1103 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
ring             1104 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring             1109 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring             1112 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
ring             1113 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 			    ring->desc, ring->dma);
ring             1114 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c 	ring->desc = NULL;
ring             1850 drivers/net/wireless/realtek/rtlwifi/core.c 	struct rtl8192_tx_ring *ring;
ring             1855 drivers/net/wireless/realtek/rtlwifi/core.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring             1858 drivers/net/wireless/realtek/rtlwifi/core.c 	pskb = __skb_dequeue(&ring->queue);
ring             1863 drivers/net/wireless/realtek/rtlwifi/core.c 	pdesc = &ring->desc[0];
ring             1867 drivers/net/wireless/realtek/rtlwifi/core.c 	__skb_queue_tail(&ring->queue, skb);
ring              494 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              504 drivers/net/wireless/realtek/rtlwifi/pci.c 			    (ring->entries - skb_queue_len(&ring->queue) >
ring              531 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
ring              533 drivers/net/wireless/realtek/rtlwifi/pci.c 	while (skb_queue_len(&ring->queue)) {
ring              541 drivers/net/wireless/realtek/rtlwifi/pci.c 			entry = (u8 *)(&ring->buffer_desc[ring->idx]);
ring              543 drivers/net/wireless/realtek/rtlwifi/pci.c 			entry = (u8 *)(&ring->desc[ring->idx]);
ring              545 drivers/net/wireless/realtek/rtlwifi/pci.c 		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
ring              547 drivers/net/wireless/realtek/rtlwifi/pci.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring              549 drivers/net/wireless/realtek/rtlwifi/pci.c 		skb = __skb_dequeue(&ring->queue);
ring              562 drivers/net/wireless/realtek/rtlwifi/pci.c 			 ring->idx,
ring              563 drivers/net/wireless/realtek/rtlwifi/pci.c 			 skb_queue_len(&ring->queue),
ring              610 drivers/net/wireless/realtek/rtlwifi/pci.c 		if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
ring              613 drivers/net/wireless/realtek/rtlwifi/pci.c 				 prio, ring->idx,
ring              614 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb_queue_len(&ring->queue));
ring             1076 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring = NULL;
ring             1088 drivers/net/wireless/realtek/rtlwifi/pci.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring             1089 drivers/net/wireless/realtek/rtlwifi/pci.c 	pskb = __skb_dequeue(&ring->queue);
ring             1091 drivers/net/wireless/realtek/rtlwifi/pci.c 		entry = (u8 *)(&ring->buffer_desc[ring->idx]);
ring             1093 drivers/net/wireless/realtek/rtlwifi/pci.c 		entry = (u8 *)(&ring->desc[ring->idx]);
ring             1108 drivers/net/wireless/realtek/rtlwifi/pci.c 	pdesc = &ring->desc[0];
ring             1110 drivers/net/wireless/realtek/rtlwifi/pci.c 		pbuffer_desc = &ring->buffer_desc[0];
ring             1116 drivers/net/wireless/realtek/rtlwifi/pci.c 	__skb_queue_tail(&ring->queue, pskb);
ring             1338 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
ring             1341 drivers/net/wireless/realtek/rtlwifi/pci.c 	while (skb_queue_len(&ring->queue)) {
ring             1343 drivers/net/wireless/realtek/rtlwifi/pci.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring             1346 drivers/net/wireless/realtek/rtlwifi/pci.c 			entry = (u8 *)(&ring->buffer_desc[ring->idx]);
ring             1348 drivers/net/wireless/realtek/rtlwifi/pci.c 			entry = (u8 *)(&ring->desc[ring->idx]);
ring             1356 drivers/net/wireless/realtek/rtlwifi/pci.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring             1361 drivers/net/wireless/realtek/rtlwifi/pci.c 			    sizeof(*ring->desc) * ring->entries,
ring             1362 drivers/net/wireless/realtek/rtlwifi/pci.c 			    ring->desc, ring->dma);
ring             1363 drivers/net/wireless/realtek/rtlwifi/pci.c 	ring->desc = NULL;
ring             1366 drivers/net/wireless/realtek/rtlwifi/pci.c 				    sizeof(*ring->buffer_desc) * ring->entries,
ring             1367 drivers/net/wireless/realtek/rtlwifi/pci.c 				    ring->buffer_desc, ring->buffer_desc_dma);
ring             1368 drivers/net/wireless/realtek/rtlwifi/pci.c 		ring->buffer_desc = NULL;
ring             1518 drivers/net/wireless/realtek/rtlwifi/pci.c 			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
ring             1520 drivers/net/wireless/realtek/rtlwifi/pci.c 			while (skb_queue_len(&ring->queue)) {
ring             1523 drivers/net/wireless/realtek/rtlwifi/pci.c 					__skb_dequeue(&ring->queue);
ring             1525 drivers/net/wireless/realtek/rtlwifi/pci.c 					entry = (u8 *)(&ring->buffer_desc
ring             1526 drivers/net/wireless/realtek/rtlwifi/pci.c 								[ring->idx]);
ring             1528 drivers/net/wireless/realtek/rtlwifi/pci.c 					entry = (u8 *)(&ring->desc[ring->idx]);
ring             1538 drivers/net/wireless/realtek/rtlwifi/pci.c 				ring->idx = (ring->idx + 1) % ring->entries;
ring             1546 drivers/net/wireless/realtek/rtlwifi/pci.c 			ring->idx = 0;
ring             1547 drivers/net/wireless/realtek/rtlwifi/pci.c 			ring->entries = rtlpci->txringcount[i];
ring             1601 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring;
ring             1633 drivers/net/wireless/realtek/rtlwifi/pci.c 	ring = &rtlpci->tx_ring[hw_queue];
ring             1636 drivers/net/wireless/realtek/rtlwifi/pci.c 			idx = ring->cur_tx_wp;
ring             1638 drivers/net/wireless/realtek/rtlwifi/pci.c 			idx = (ring->idx + skb_queue_len(&ring->queue)) %
ring             1639 drivers/net/wireless/realtek/rtlwifi/pci.c 			      ring->entries;
ring             1644 drivers/net/wireless/realtek/rtlwifi/pci.c 	pdesc = &ring->desc[idx];
ring             1646 drivers/net/wireless/realtek/rtlwifi/pci.c 		ptx_bd_desc = &ring->buffer_desc[idx];
ring             1654 drivers/net/wireless/realtek/rtlwifi/pci.c 				 hw_queue, ring->idx, idx,
ring             1655 drivers/net/wireless/realtek/rtlwifi/pci.c 				 skb_queue_len(&ring->queue));
ring             1677 drivers/net/wireless/realtek/rtlwifi/pci.c 	__skb_queue_tail(&ring->queue, skb);
ring             1687 drivers/net/wireless/realtek/rtlwifi/pci.c 	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
ring             1691 drivers/net/wireless/realtek/rtlwifi/pci.c 			 hw_queue, ring->idx, idx,
ring             1692 drivers/net/wireless/realtek/rtlwifi/pci.c 			 skb_queue_len(&ring->queue));
ring             1712 drivers/net/wireless/realtek/rtlwifi/pci.c 	struct rtl8192_tx_ring *ring;
ring             1724 drivers/net/wireless/realtek/rtlwifi/pci.c 		ring = &pcipriv->dev.tx_ring[queue_id];
ring             1725 drivers/net/wireless/realtek/rtlwifi/pci.c 		queue_len = skb_queue_len(&ring->queue);
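The rtlwifi pci.c entries above revolve around one piece of index arithmetic: the next free descriptor slot is the consumer index plus the number of queued frames, modulo the ring size, and completion advances the consumer index the same way. A minimal user-space sketch of that arithmetic follows; the names (tx_ring, queued, entries) and the "keep one slot free" threshold are illustrative stand-ins, not the driver's actual structures.

/*
 * Standalone model of the rtlwifi TX index arithmetic:
 * producer slot = (idx + queue length) % entries,
 * completion:     idx = (idx + 1) % entries.
 */
#include <assert.h>
#include <stdio.h>

struct tx_ring {
	unsigned int idx;     /* consumer (cleanup) index */
	unsigned int queued;  /* frames in flight, i.e. skb_queue_len() */
	unsigned int entries; /* ring size */
};

/* Mirrors (ring->idx + skb_queue_len(&ring->queue)) % ring->entries */
static unsigned int tx_slot(const struct tx_ring *r)
{
	return (r->idx + r->queued) % r->entries;
}

static int tx_queue(struct tx_ring *r)
{
	if (r->queued >= r->entries - 1)
		return -1;	/* keep one slot free, as the "< 2" check does */
	r->queued++;
	return 0;
}

/* Mirrors ring->idx = (ring->idx + 1) % ring->entries on completion */
static void tx_complete(struct tx_ring *r)
{
	assert(r->queued > 0);
	r->queued--;
	r->idx = (r->idx + 1) % r->entries;
}

int main(void)
{
	struct tx_ring r = { .idx = 0, .queued = 0, .entries = 8 };

	for (int i = 0; i < 10; i++) {
		unsigned int slot = tx_slot(&r);

		if (tx_queue(&r) == 0)
			printf("queued frame %d in slot %u\n", i, slot);
		if (i % 2)
			tx_complete(&r);	/* pretend the hardware finished one */
	}
	return 0;
}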
ring               70 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring               74 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	while (skb_queue_len(&ring->queue)) {
ring               75 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
ring               76 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring               84 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring              170 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 	struct rtl8192_tx_ring *ring;
ring              186 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		ring = &rtlpci->tx_ring[queue];
ring              187 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c 		if (skb_queue_len(&ring->queue)) {
ring             2187 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             2225 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             2227 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 			    skb_queue_len(&ring->queue) == 0) {
ring             2234 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 					 skb_queue_len(&ring->queue));
ring             2244 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 					  skb_queue_len(&ring->queue));
ring             2269 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
ring             2270 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 				if (skb_queue_len(&ring->queue) == 0) {
ring             2277 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 						 skb_queue_len(&ring->queue));
ring             2287 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c 						 skb_queue_len(&ring->queue));
ring              813 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              814 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring              438 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring              495 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
ring              497 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 				    skb_queue_len(&ring->queue) == 0) {
ring              504 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 						 skb_queue_len(&ring->queue));
ring              514 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c 						 skb_queue_len(&ring->queue));
ring              711 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              712 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring              382 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring              418 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring              419 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			if (skb_queue_len(&ring->queue) == 0 ||
ring              428 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
ring              437 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
ring              461 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring              462 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
ring              469 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
ring              478 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c 					 skb_queue_len(&ring->queue));
ring              459 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	struct rtl8192_tx_ring *ring;
ring              465 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring              466 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	pskb = __skb_dequeue(&ring->queue);
ring              469 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	pdesc = &ring->desc[idx];
ring              473 drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c 	__skb_queue_tail(&ring->queue, skb);
ring             3058 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             3115 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             3116 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 			if (skb_queue_len(&ring->queue) == 0 ||
ring             3129 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 					 skb_queue_len(&ring->queue));
ring             3138 drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c 					 skb_queue_len(&ring->queue));
ring              830 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              831 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring              147 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 	struct rtl8192_tx_ring *ring;
ring              164 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 		ring = &rtlpci->tx_ring[queue];
ring              165 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c 		if (skb_queue_len(&ring->queue)) {
ring             3063 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             3095 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             3097 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			    skb_queue_len(&ring->queue) == 0) {
ring             3104 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					 skb_queue_len(&ring->queue));
ring             3114 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					  skb_queue_len(&ring->queue));
ring             3139 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             3140 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
ring             3147 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					 skb_queue_len(&ring->queue));
ring             3156 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c 					  skb_queue_len(&ring->queue));
ring              909 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];
ring              910 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			u16 max_tx_desc = ring->entries;
ring              913 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 				ring->cur_tx_wp = 0;
ring              914 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 				ring->cur_tx_rp = 0;
ring              920 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 			ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
ring              924 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 				       ring->cur_tx_wp);
ring             1010 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring             1023 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 		ring->cur_tx_rp = cur_tx_rp;
ring             1026 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	read_point = ring->cur_tx_rp;
ring             1027 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c 	write_point = ring->cur_tx_wp;
ring              114 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	struct rtl8192_tx_ring *ring;
ring              119 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	ring = &rtlpci->tx_ring[TXCMD_QUEUE];
ring              123 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
ring              124 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	pdesc = &ring->desc[idx];
ring              126 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c 	__skb_queue_tail(&ring->queue, skb);
ring              519 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring              583 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 				ring = &pcipriv->dev.tx_ring[queue_id];
ring              584 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 				if (skb_queue_len(&ring->queue) == 0 ||
ring              592 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 						 skb_queue_len(&ring->queue));
ring              603 drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c 						 skb_queue_len(&ring->queue));
ring              218 drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              219 drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring             1574 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             1629 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             1631 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 			    skb_queue_len(&ring->queue) == 0) {
ring             1638 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 					 skb_queue_len(&ring->queue));
ring             1648 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c 					  skb_queue_len(&ring->queue));
ring              672 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              673 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring               32 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring               36 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	while (skb_queue_len(&ring->queue)) {
ring               37 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
ring               38 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring               46 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring              173 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 	struct rtl8192_tx_ring *ring;
ring              189 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		ring = &rtlpci->tx_ring[queue];
ring              190 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c 		if (skb_queue_len(&ring->queue)) {
ring             2521 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             2554 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             2560 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			    skb_queue_len(&ring->queue) == 0) {
ring             2567 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
ring             2577 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					  skb_queue_len(&ring->queue));
ring             2603 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             2604 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 			if (skb_queue_len(&ring->queue) == 0) {
ring             2611 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
ring             2621 drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c 					 skb_queue_len(&ring->queue));
ring              725 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              726 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
ring              223 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	struct rtl8192_tx_ring *ring;
ring              229 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring              231 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	pskb = __skb_dequeue(&ring->queue);
ring              235 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	pdesc = &ring->desc[0];
ring              241 drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c 	__skb_queue_tail(&ring->queue, skb);
ring               28 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
ring               32 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	while (skb_queue_len(&ring->queue)) {
ring               33 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
ring               34 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring               42 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring              171 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 	struct rtl8192_tx_ring *ring;
ring              187 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		ring = &rtlpci->tx_ring[queue];
ring              188 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c 		if (skb_queue_len(&ring->queue)) {
ring             4732 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 	struct rtl8192_tx_ring *ring = NULL;
ring             4769 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 			ring = &pcipriv->dev.tx_ring[queue_id];
ring             4771 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 			    skb_queue_len(&ring->queue) == 0) {
ring             4778 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 					 skb_queue_len(&ring->queue));
ring             4788 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c 					  skb_queue_len(&ring->queue));
ring              965 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
ring              966 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c 	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
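The per-chip phy.c entries (rtl8188ee through rtl8821ae) all show the same gate: repeatedly test skb_queue_len(&ring->queue) == 0 across every TX queue before touching calibration registers, giving up after a bounded number of polls. A hedged sketch of that drain-then-proceed pattern; queue counts and the poll limit are invented for illustration.

/* Bounded poll until all TX queues drain, modelled in user space. */
#include <stdbool.h>
#include <stdio.h>

#define NQUEUES   4
#define MAX_POLLS 100

static unsigned int queue_len[NQUEUES];	/* stands in for skb_queue_len() */

static bool all_queues_empty(void)
{
	for (int q = 0; q < NQUEUES; q++)
		if (queue_len[q] != 0)
			return false;
	return true;
}

int main(void)
{
	queue_len[2] = 3;	/* pretend one queue still holds frames */

	for (int poll = 0; poll < MAX_POLLS; poll++) {
		if (all_queues_empty()) {
			printf("safe to calibrate after %d polls\n", poll);
			return 0;
		}
		if (queue_len[2])
			queue_len[2]--;	/* frames complete while we wait */
	}
	printf("timed out; skip calibration this round\n");
	return 1;
}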
ring              566 drivers/net/wireless/realtek/rtw88/pci.c 				      struct rtw_pci_tx_ring *ring)
ring              568 drivers/net/wireless/realtek/rtw88/pci.c 	struct sk_buff *prev = skb_dequeue(&ring->queue);
ring              609 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *ring;
ring              620 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->tx_rings[queue];
ring              625 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_release_rsvd_page(rtwpci, ring);
ring              626 drivers/net/wireless/realtek/rtw88/pci.c 	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
ring              639 drivers/net/wireless/realtek/rtw88/pci.c 	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
ring              654 drivers/net/wireless/realtek/rtw88/pci.c 	skb_queue_tail(&ring->queue, skb);
ring              658 drivers/net/wireless/realtek/rtw88/pci.c 		if (++ring->r.wp >= ring->r.len)
ring              659 drivers/net/wireless/realtek/rtw88/pci.c 			ring->r.wp = 0;
ring              661 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
ring              722 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *ring;
ring              730 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->tx_rings[queue];
ring              731 drivers/net/wireless/realtek/rtw88/pci.c 	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
ring              733 drivers/net/wireless/realtek/rtw88/pci.c 		ring->queue_stopped = true;
ring              744 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_tx_ring *ring;
ring              752 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->tx_rings[hw_queue];
ring              758 drivers/net/wireless/realtek/rtw88/pci.c 	if (cur_rp >= ring->r.rp)
ring              759 drivers/net/wireless/realtek/rtw88/pci.c 		count = cur_rp - ring->r.rp;
ring              761 drivers/net/wireless/realtek/rtw88/pci.c 		count = ring->r.len - (ring->r.rp - cur_rp);
ring              764 drivers/net/wireless/realtek/rtw88/pci.c 		skb = skb_dequeue(&ring->queue);
ring              767 drivers/net/wireless/realtek/rtw88/pci.c 				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
ring              780 drivers/net/wireless/realtek/rtw88/pci.c 		if (ring->queue_stopped &&
ring              781 drivers/net/wireless/realtek/rtw88/pci.c 		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
ring              784 drivers/net/wireless/realtek/rtw88/pci.c 			ring->queue_stopped = false;
ring              807 drivers/net/wireless/realtek/rtw88/pci.c 	ring->r.rp = cur_rp;
ring              814 drivers/net/wireless/realtek/rtw88/pci.c 	struct rtw_pci_rx_ring *ring;
ring              827 drivers/net/wireless/realtek/rtw88/pci.c 	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
ring              832 drivers/net/wireless/realtek/rtw88/pci.c 	if (cur_wp >= ring->r.wp)
ring              833 drivers/net/wireless/realtek/rtw88/pci.c 		count = cur_wp - ring->r.wp;
ring              835 drivers/net/wireless/realtek/rtw88/pci.c 		count = ring->r.len - (ring->r.wp - cur_wp);
ring              837 drivers/net/wireless/realtek/rtw88/pci.c 	cur_rp = ring->r.rp;
ring              839 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_dma_check(rtwdev, ring, cur_rp);
ring              840 drivers/net/wireless/realtek/rtw88/pci.c 		skb = ring->buf[cur_rp];
ring              875 drivers/net/wireless/realtek/rtw88/pci.c 		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
ring              879 drivers/net/wireless/realtek/rtw88/pci.c 		if (++cur_rp >= ring->r.len)
ring              883 drivers/net/wireless/realtek/rtw88/pci.c 	ring->r.rp = cur_rp;
ring              884 drivers/net/wireless/realtek/rtw88/pci.c 	ring->r.wp = cur_wp;
ring              885 drivers/net/wireless/realtek/rtw88/pci.c 	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
ring              228 drivers/net/wireless/realtek/rtw88/pci.h struct rtw_pci_tx_buffer_desc *get_tx_buffer_desc(struct rtw_pci_tx_ring *ring,
ring              233 drivers/net/wireless/realtek/rtw88/pci.h 	buf_desc = ring->r.head + ring->r.wp * size;
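The rtw88 pci.c entries use explicit read/write pointers (r.wp, r.rp, r.len) instead of a queue length: completions are counted by comparing the hardware read pointer against the cached one with a wrap case, and the queue is stopped or woken against avail_desc() thresholds ("< 2", "> 4"). The completion count below mirrors the listed comparison; the avail_desc() formulation is an assumption consistent with those thresholds, not a copy of the driver.

#include <stdio.h>

struct ring_ptrs { unsigned int wp, rp, len; };

/* Free slots, keeping one sacrificial slot so full != empty (assumption). */
static unsigned int avail_desc(unsigned int wp, unsigned int rp, unsigned int len)
{
	return rp > wp ? rp - wp - 1 : len - wp + rp - 1;
}

/* Completed descriptors since last service: mirrors the listed
 * "if (cur_rp >= ring->r.rp)" comparison and its wrap branch. */
static unsigned int completed(const struct ring_ptrs *r, unsigned int cur_rp)
{
	if (cur_rp >= r->rp)
		return cur_rp - r->rp;
	return r->len - (r->rp - cur_rp);
}

int main(void)
{
	struct ring_ptrs r = { .wp = 5, .rp = 2, .len = 8 };

	printf("free slots: %u\n", avail_desc(r.wp, r.rp, r.len));
	printf("completed (hw rp=7): %u\n", completed(&r, 7));
	printf("completed (hw rp=1, wrapped): %u\n", completed(&r, 1));
	return 0;
}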
ring              195 drivers/nvme/host/rdma.c 		struct nvme_rdma_qe *ring, size_t ib_queue_size,
ring              201 drivers/nvme/host/rdma.c 		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
ring              202 drivers/nvme/host/rdma.c 	kfree(ring);
ring              209 drivers/nvme/host/rdma.c 	struct nvme_rdma_qe *ring;
ring              212 drivers/nvme/host/rdma.c 	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
ring              213 drivers/nvme/host/rdma.c 	if (!ring)
ring              222 drivers/nvme/host/rdma.c 		if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
ring              226 drivers/nvme/host/rdma.c 	return ring;
ring              229 drivers/nvme/host/rdma.c 	nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
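The nvme-rdma entries show an allocate-then-unwind shape: the ring is a kcalloc'd array of queue elements, each element gets its own buffer, and a mid-loop failure frees exactly the first i elements through the same helper normal teardown uses. A plain-C sketch of that shape; malloc/free stand in for the kernel allocators and struct qe is illustrative.

#include <stdlib.h>

struct qe { void *buf; };

static void ring_free(struct qe *ring, size_t n)
{
	for (size_t i = 0; i < n; i++)
		free(ring[i].buf);
	free(ring);
}

static struct qe *ring_alloc(size_t n, size_t bufsz)
{
	struct qe *ring = calloc(n, sizeof(*ring));
	size_t i;

	if (!ring)
		return NULL;

	for (i = 0; i < n; i++) {
		ring[i].buf = malloc(bufsz);
		if (!ring[i].buf)
			goto out_free;	/* free only the i elements that exist */
	}
	return ring;

out_free:
	ring_free(ring, i);
	return NULL;
}

int main(void)
{
	struct qe *ring = ring_alloc(32, 256);

	if (ring)
		ring_free(ring, 32);
	return 0;
}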
ring              298 drivers/platform/mellanox/mlxbf-tmfifo.c 	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
ring              317 drivers/platform/mellanox/mlxbf-tmfifo.c 	vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
ring              318 drivers/platform/mellanox/mlxbf-tmfifo.c 	vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
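The mlxbf-tmfifo entries are the device half of a split virtqueue: a descriptor head is read from avail->ring[] and a completed {id, len} pair is written into used->ring[]. The plain-C model below keeps only that bookkeeping; byte-order conversion (virtio16_to_cpu and friends), barriers, and the real vring layout are elided, and the structure names here are stand-ins.

#include <stdint.h>
#include <stdio.h>

#define QSZ 8

struct used_elem { uint32_t id; uint32_t len; };

struct vring_model {
	uint16_t avail_idx;		/* driver's producer index */
	uint16_t avail_ring[QSZ];	/* heads of available chains */
	uint16_t used_idx;		/* device's producer index */
	struct used_elem used_ring[QSZ];
};

/* Device side: take the next available head (mirrors avail->ring[idx]). */
static uint16_t get_next_head(struct vring_model *vr, uint16_t *last_seen)
{
	return vr->avail_ring[(*last_seen)++ % QSZ];
}

/* Device side: report a completed chain (mirrors used->ring[idx].id/.len). */
static void put_used(struct vring_model *vr, uint16_t head, uint32_t len)
{
	uint16_t idx = vr->used_idx % QSZ;

	vr->used_ring[idx].id = head;
	vr->used_ring[idx].len = len;
	vr->used_idx++;	/* real code publishes this behind a barrier */
}

int main(void)
{
	struct vring_model vr = { 0 };
	uint16_t last_seen = 0;

	vr.avail_ring[0] = 3;	/* driver made descriptor chain 3 available */
	vr.avail_idx = 1;

	put_used(&vr, get_next_head(&vr, &last_seen), 128);
	printf("completed head %u, len %u\n", vr.used_ring[0].id, vr.used_ring[0].len);
	return 0;
}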
ring              154 drivers/scsi/fnic/fnic_scsi.c 		wq->ring.desc_avail += (fnic->fw_ack_index[0]
ring              157 drivers/scsi/fnic/fnic_scsi.c 		wq->ring.desc_avail += (wq->ring.desc_count
ring              167 drivers/scsi/fnic/fnic_scsi.c 		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
ring               26 drivers/scsi/fnic/vnic_cq.c 	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
ring               45 drivers/scsi/fnic/vnic_cq.c 	err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
ring               60 drivers/scsi/fnic/vnic_cq.c 	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
ring               62 drivers/scsi/fnic/vnic_cq.c 	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
ring               84 drivers/scsi/fnic/vnic_cq.c 	vnic_dev_clear_desc_ring(&cq->ring);
ring               65 drivers/scsi/fnic/vnic_cq.h 	struct vnic_dev_ring ring;
ring               81 drivers/scsi/fnic/vnic_cq.h 	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring               82 drivers/scsi/fnic/vnic_cq.h 		cq->ring.desc_size * cq->to_clean);
ring               93 drivers/scsi/fnic/vnic_cq.h 		if (cq->to_clean == cq->ring.desc_count) {
ring               98 drivers/scsi/fnic/vnic_cq.h 		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring               99 drivers/scsi/fnic/vnic_cq.h 			cq->ring.desc_size * cq->to_clean);
ring               35 drivers/scsi/fnic/vnic_cq_copy.h 	desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
ring               36 drivers/scsi/fnic/vnic_cq_copy.h 		cq->ring.desc_size * cq->to_clean);
ring               45 drivers/scsi/fnic/vnic_cq_copy.h 		if (cq->to_clean == cq->ring.desc_count) {
ring               50 drivers/scsi/fnic/vnic_cq_copy.h 		desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
ring               51 drivers/scsi/fnic/vnic_cq_copy.h 			cq->ring.desc_size * cq->to_clean);
ring              185 drivers/scsi/fnic/vnic_dev.c unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring              198 drivers/scsi/fnic/vnic_dev.c 	ring->base_align = 512;
ring              203 drivers/scsi/fnic/vnic_dev.c 	ring->desc_count = ALIGN(desc_count, count_align);
ring              205 drivers/scsi/fnic/vnic_dev.c 	ring->desc_size = ALIGN(desc_size, desc_align);
ring              207 drivers/scsi/fnic/vnic_dev.c 	ring->size = ring->desc_count * ring->desc_size;
ring              208 drivers/scsi/fnic/vnic_dev.c 	ring->size_unaligned = ring->size + ring->base_align;
ring              210 drivers/scsi/fnic/vnic_dev.c 	return ring->size_unaligned;
ring              213 drivers/scsi/fnic/vnic_dev.c void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
ring              215 drivers/scsi/fnic/vnic_dev.c 	memset(ring->descs, 0, ring->size);
ring              218 drivers/scsi/fnic/vnic_dev.c int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring              221 drivers/scsi/fnic/vnic_dev.c 	vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring              223 drivers/scsi/fnic/vnic_dev.c 	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring              224 drivers/scsi/fnic/vnic_dev.c 		ring->size_unaligned,
ring              225 drivers/scsi/fnic/vnic_dev.c 		&ring->base_addr_unaligned, GFP_KERNEL);
ring              227 drivers/scsi/fnic/vnic_dev.c 	if (!ring->descs_unaligned) {
ring              230 drivers/scsi/fnic/vnic_dev.c 			(int)ring->size);
ring              234 drivers/scsi/fnic/vnic_dev.c 	ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring              235 drivers/scsi/fnic/vnic_dev.c 		ring->base_align);
ring              236 drivers/scsi/fnic/vnic_dev.c 	ring->descs = (u8 *)ring->descs_unaligned +
ring              237 drivers/scsi/fnic/vnic_dev.c 		(ring->base_addr - ring->base_addr_unaligned);
ring              239 drivers/scsi/fnic/vnic_dev.c 	vnic_dev_clear_desc_ring(ring);
ring              241 drivers/scsi/fnic/vnic_dev.c 	ring->desc_avail = ring->desc_count - 1;
ring              246 drivers/scsi/fnic/vnic_dev.c void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
ring              248 drivers/scsi/fnic/vnic_dev.c 	if (ring->descs) {
ring              250 drivers/scsi/fnic/vnic_dev.c 			ring->size_unaligned,
ring              251 drivers/scsi/fnic/vnic_dev.c 			ring->descs_unaligned,
ring              252 drivers/scsi/fnic/vnic_dev.c 			ring->base_addr_unaligned);
ring              253 drivers/scsi/fnic/vnic_dev.c 		ring->descs = NULL;
ring              468 drivers/scsi/fnic/vnic_dev.c 		(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
ring              117 drivers/scsi/fnic/vnic_dev.h unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring              120 drivers/scsi/fnic/vnic_dev.h void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
ring              121 drivers/scsi/fnic/vnic_dev.h int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring              124 drivers/scsi/fnic/vnic_dev.h 			     struct vnic_dev_ring *ring);
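The fnic vnic_dev entries size the ring as desc_count * desc_size, then allocate base_align (512) extra bytes and round the base address up, so the adapter always sees an aligned ring regardless of what the allocator returned; descs is the aligned alias into descs_unaligned. A user-space sketch of that over-allocate-and-align technique, with malloc standing in for dma_alloc_coherent:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct desc_ring {
	void *descs_unaligned;	/* what the allocator returned */
	void *descs;		/* aligned start actually used */
	size_t size;		/* desc_count * desc_size */
};

static int ring_alloc(struct desc_ring *r, size_t count, size_t size, size_t align)
{
	r->size = count * size;
	r->descs_unaligned = malloc(r->size + align);	/* size_unaligned */
	if (!r->descs_unaligned)
		return -1;
	r->descs = (void *)ALIGN((uintptr_t)r->descs_unaligned, align);
	return 0;
}

int main(void)
{
	struct desc_ring r;

	if (ring_alloc(&r, 64, 16, 512) == 0) {
		printf("allocator gave %p, hardware sees %p\n",
		       r.descs_unaligned, r.descs);
		free(r.descs_unaligned);	/* always free the raw pointer */
	}
	return 0;
}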
ring               30 drivers/scsi/fnic/vnic_rq.c 	unsigned int i, j, count = rq->ring.desc_count;
ring               45 drivers/scsi/fnic/vnic_rq.c 			buf->desc = (u8 *)rq->ring.descs +
ring               46 drivers/scsi/fnic/vnic_rq.c 				rq->ring.desc_size * buf->index;
ring               72 drivers/scsi/fnic/vnic_rq.c 	vnic_dev_free_desc_ring(vdev, &rq->ring);
ring               98 drivers/scsi/fnic/vnic_rq.c 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
ring              118 drivers/scsi/fnic/vnic_rq.c 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
ring              120 drivers/scsi/fnic/vnic_rq.c 	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
ring              180 drivers/scsi/fnic/vnic_rq.c 		rq->ring.desc_avail++;
ring              192 drivers/scsi/fnic/vnic_rq.c 	vnic_dev_clear_desc_ring(&rq->ring);
ring               96 drivers/scsi/fnic/vnic_rq.h 	struct vnic_dev_ring ring;
ring              108 drivers/scsi/fnic/vnic_rq.h 	return rq->ring.desc_avail;
ring              114 drivers/scsi/fnic/vnic_rq.h 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
ring              145 drivers/scsi/fnic/vnic_rq.h 	rq->ring.desc_avail--;
ring              172 drivers/scsi/fnic/vnic_rq.h 	rq->ring.desc_avail += count;
ring              197 drivers/scsi/fnic/vnic_rq.h 			rq->ring.desc_avail++;
ring               43 drivers/scsi/fnic/vnic_wq.c 	return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
ring               50 drivers/scsi/fnic/vnic_wq.c 	unsigned int i, j, count = wq->ring.desc_count;
ring               65 drivers/scsi/fnic/vnic_wq.c 			buf->desc = (u8 *)wq->ring.descs +
ring               66 drivers/scsi/fnic/vnic_wq.c 				wq->ring.desc_size * buf->index;
ring               91 drivers/scsi/fnic/vnic_wq.c 	vnic_dev_free_desc_ring(vdev, &wq->ring);
ring              118 drivers/scsi/fnic/vnic_wq.c 	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
ring              159 drivers/scsi/fnic/vnic_wq.c 	unsigned int count = wq->ring.desc_count;
ring              161 drivers/scsi/fnic/vnic_wq.c 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
ring              183 drivers/scsi/fnic/vnic_wq.c 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
ring              185 drivers/scsi/fnic/vnic_wq.c 	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
ring              236 drivers/scsi/fnic/vnic_wq.c 		wq->ring.desc_avail++;
ring              245 drivers/scsi/fnic/vnic_wq.c 	vnic_dev_clear_desc_ring(&wq->ring);
ring               91 drivers/scsi/fnic/vnic_wq.h 	struct vnic_dev_ring ring;
ring              101 drivers/scsi/fnic/vnic_wq.h 	return wq->ring.desc_avail;
ring              107 drivers/scsi/fnic/vnic_wq.h 	return wq->ring.desc_count - wq->ring.desc_avail - 1;
ring              138 drivers/scsi/fnic/vnic_wq.h 	wq->ring.desc_avail--;
ring              154 drivers/scsi/fnic/vnic_wq.h 		wq->ring.desc_avail++;
ring               66 drivers/scsi/fnic/vnic_wq_copy.c 	vnic_dev_clear_desc_ring(&wq->ring);
ring               74 drivers/scsi/fnic/vnic_wq_copy.c 	vnic_dev_free_desc_ring(vdev, &wq->ring);
ring               95 drivers/scsi/fnic/vnic_wq_copy.c 	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
ring              108 drivers/scsi/fnic/vnic_wq_copy.c 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
ring              110 drivers/scsi/fnic/vnic_wq_copy.c 	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
ring               31 drivers/scsi/fnic/vnic_wq_copy.h 	struct vnic_dev_ring ring;
ring               38 drivers/scsi/fnic/vnic_wq_copy.h 	return wq->ring.desc_avail;
ring               43 drivers/scsi/fnic/vnic_wq_copy.h 	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
ring               48 drivers/scsi/fnic/vnic_wq_copy.h 	struct fcpio_host_req *desc = wq->ring.descs;
ring               55 drivers/scsi/fnic/vnic_wq_copy.h 	((wq->to_use_index + 1) == wq->ring.desc_count) ?
ring               57 drivers/scsi/fnic/vnic_wq_copy.h 	wq->ring.desc_avail--;
ring               76 drivers/scsi/fnic/vnic_wq_copy.h 		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
ring               78 drivers/scsi/fnic/vnic_wq_copy.h 	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
ring               79 drivers/scsi/fnic/vnic_wq_copy.h 	wq->ring.desc_avail += cnt;
ring               88 drivers/scsi/fnic/vnic_wq_copy.h 	struct fcpio_host_req *wq_desc = wq->ring.descs;
ring               96 drivers/scsi/fnic/vnic_wq_copy.h 		wq->ring.desc_avail++;
ring              103 drivers/scsi/fnic/vnic_wq_copy.h 		((wq->to_clean_index + 1) == wq->ring.desc_count) ?
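The vnic_wq_copy.h entries account for completions by index rather than one at a time: firmware reports the index of the last finished descriptor, the driver credits back everything from to_clean_index through that index (with a wrap branch matching the listed "desc_count - to_clean_index + index + 1"), then advances to_clean_index past it. Sketch below; the straight-line branch is the natural complement of the listed wrap case and is an assumption made explicit here.

#include <stdio.h>

struct copy_wq {
	unsigned int desc_count;
	unsigned int desc_avail;
	unsigned int to_clean_index;
};

static void service_complete(struct copy_wq *wq, unsigned int index)
{
	unsigned int cnt;

	if (index >= wq->to_clean_index)
		cnt = index - wq->to_clean_index + 1;
	else	/* completed range wrapped past the end of the ring */
		cnt = wq->desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = (index + 1) % wq->desc_count;
	wq->desc_avail += cnt;
}

int main(void)
{
	struct copy_wq wq = {
		.desc_count = 16, .desc_avail = 0, .to_clean_index = 14,
	};

	service_complete(&wq, 1);	/* 14,15,0,1 -> 4 descriptors reclaimed */
	printf("avail=%u to_clean=%u\n", wq.desc_avail, wq.to_clean_index);
	return 0;
}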
ring               25 drivers/scsi/ibmvscsi_tgt/libsrp.c 			     struct srp_buf **ring)
ring               42 drivers/scsi/ibmvscsi_tgt/libsrp.c 		iue->sbuf = ring[i];
ring               61 drivers/scsi/ibmvscsi_tgt/libsrp.c 	struct srp_buf **ring;
ring               64 drivers/scsi/ibmvscsi_tgt/libsrp.c 	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
ring               65 drivers/scsi/ibmvscsi_tgt/libsrp.c 	if (!ring)
ring               69 drivers/scsi/ibmvscsi_tgt/libsrp.c 		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
ring               70 drivers/scsi/ibmvscsi_tgt/libsrp.c 		if (!ring[i])
ring               72 drivers/scsi/ibmvscsi_tgt/libsrp.c 		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
ring               74 drivers/scsi/ibmvscsi_tgt/libsrp.c 		if (!ring[i]->buf)
ring               77 drivers/scsi/ibmvscsi_tgt/libsrp.c 	return ring;
ring               80 drivers/scsi/ibmvscsi_tgt/libsrp.c 	for (i = 0; i < max && ring[i]; i++) {
ring               81 drivers/scsi/ibmvscsi_tgt/libsrp.c 		if (ring[i]->buf) {
ring               82 drivers/scsi/ibmvscsi_tgt/libsrp.c 			dma_free_coherent(dev, size, ring[i]->buf,
ring               83 drivers/scsi/ibmvscsi_tgt/libsrp.c 					  ring[i]->dma);
ring               85 drivers/scsi/ibmvscsi_tgt/libsrp.c 		kfree(ring[i]);
ring               87 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfree(ring);
ring               92 drivers/scsi/ibmvscsi_tgt/libsrp.c static void srp_ring_free(struct device *dev, struct srp_buf **ring,
ring               98 drivers/scsi/ibmvscsi_tgt/libsrp.c 		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
ring               99 drivers/scsi/ibmvscsi_tgt/libsrp.c 		kfree(ring[i]);
ring              101 drivers/scsi/ibmvscsi_tgt/libsrp.c 	kfree(ring);
ring             2380 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:4;
ring             2382 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:4;
ring             2488 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:4;
ring             2490 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:4;
ring             3486 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
ring             3488 drivers/scsi/lpfc/lpfc_hw.h 	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
ring              231 drivers/scsi/lpfc/lpfc_mbox.c 		uint32_t ring)
ring              238 drivers/scsi/lpfc/lpfc_mbox.c 	mb->un.varCfgAsyncEvent.ring = ring;
ring             1216 drivers/scsi/lpfc/lpfc_mbox.c lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
ring             1225 drivers/scsi/lpfc/lpfc_mbox.c 	mb->un.varCfgRing.ring = ring;
ring             1231 drivers/scsi/lpfc/lpfc_mbox.c 	pring = &psli->sli3_ring[ring];
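The duplicated "uint32_t ring:4;" lines in lpfc_hw.h (2380/2382, 2488/2490, 3486/3488) are the two halves of an endianness guard: each mailbox word is laid out once for big-endian and once for little-endian bitfield order so the field lands on the same wire bits either way. A sketch of the idiom; __BIG_ENDIAN_BITFIELD is the kernel's byteorder macro, and the surrounding field name (rsvd) is a placeholder.

#include <stdint.h>

struct config_ring_word {
#if defined(__BIG_ENDIAN_BITFIELD)
	uint32_t ring:4;	/* most significant bits first */
	uint32_t rsvd:28;
#else				/* little-endian bitfield order */
	uint32_t rsvd:28;
	uint32_t ring:4;	/* least significant bits first */
#endif
};

int main(void)
{
	struct config_ring_word w = { .ring = 5 };

	/* either layout must still pack into one 32-bit word */
	return sizeof(w) == sizeof(uint32_t) ? 0 : 1;
}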
ring               99 drivers/scsi/qla2xxx/qla_dbg.c 	memcpy(ptr, req->ring, req->length *
ring              104 drivers/scsi/qla2xxx/qla_dbg.c 	memcpy(ptr, rsp->ring, rsp->length  *
ring              553 drivers/scsi/qla2xxx/qla_dbg.c 		void *ring;
ring              562 drivers/scsi/qla2xxx/qla_dbg.c 	aqp->ring = ha->tgt.atio_ring;
ring              583 drivers/scsi/qla2xxx/qla_dbg.c 		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
ring              627 drivers/scsi/qla2xxx/qla_dbg.c 		memcpy(ptr, req->ring, req->length * sizeof(request_t));
ring              655 drivers/scsi/qla2xxx/qla_dbg.c 		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
ring             3437 drivers/scsi/qla2xxx/qla_def.h 	response_t *ring;
ring             3464 drivers/scsi/qla2xxx/qla_def.h 	request_t *ring;
ring             2241 drivers/scsi/qla2xxx/qla_init.c 	ha->isp_ops->get_flash_version(vha, req->ring);
ring             3757 drivers/scsi/qla2xxx/qla_init.c 	rsp->ring_ptr = rsp->ring;
ring             4063 drivers/scsi/qla2xxx/qla_init.c 		req->out_ptr = (void *)(req->ring + req->length);
ring             4071 drivers/scsi/qla2xxx/qla_init.c 		req->ring_ptr  = req->ring;
ring             4080 drivers/scsi/qla2xxx/qla_init.c 		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
ring             6795 drivers/scsi/qla2xxx/qla_init.c 		ha->isp_ops->get_flash_version(vha, req->ring);
ring             7642 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
ring             7655 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
ring             7706 drivers/scsi/qla2xxx/qla_init.c 		dcode = (void *)req->ring;
ring             7795 drivers/scsi/qla2xxx/qla_init.c 	wcode = (uint16_t *)req->ring;
ring             7910 drivers/scsi/qla2xxx/qla_init.c 	dcode = (void *)req->ring;
ring              304 drivers/scsi/qla2xxx/qla_inline.h 		req->ring_ptr = req->ring;
ring              102 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring              131 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring              424 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring              469 drivers/scsi/qla2xxx/qla_iocb.c 			req->ring_ptr = req->ring;
ring             1694 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring             1879 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring             2014 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring             2212 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring             3356 drivers/scsi/qla2xxx/qla_iocb.c 		req->ring_ptr = req->ring;
ring             2059 drivers/scsi/qla2xxx/qla_isr.c 			rsp->ring_ptr = rsp->ring;
ring             3008 drivers/scsi/qla2xxx/qla_isr.c 			rsp->ring_ptr = rsp->ring;
ring              561 drivers/scsi/qla2xxx/qla_mid.c 		sizeof(request_t), req->ring, req->dma);
ring              562 drivers/scsi/qla2xxx/qla_mid.c 	req->ring = NULL;
ring              588 drivers/scsi/qla2xxx/qla_mid.c 		sizeof(response_t), rsp->ring, rsp->dma);
ring              589 drivers/scsi/qla2xxx/qla_mid.c 	rsp->ring = NULL;
ring              702 drivers/scsi/qla2xxx/qla_mid.c 	req->ring = dma_alloc_coherent(&ha->pdev->dev,
ring              705 drivers/scsi/qla2xxx/qla_mid.c 	if (req->ring == NULL) {
ring              755 drivers/scsi/qla2xxx/qla_mid.c 	req->ring_ptr = req->ring;
ring              763 drivers/scsi/qla2xxx/qla_mid.c 	req->out_ptr = (void *)(req->ring + req->length);
ring              831 drivers/scsi/qla2xxx/qla_mid.c 	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
ring              834 drivers/scsi/qla2xxx/qla_mid.c 	if (rsp->ring == NULL) {
ring              877 drivers/scsi/qla2xxx/qla_mid.c 	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
ring              841 drivers/scsi/qla2xxx/qla_mr.c 	req->ring_fx00 = req->ring;
ring              845 drivers/scsi/qla2xxx/qla_mr.c 	rsp->ring_fx00 = rsp->ring;
ring              868 drivers/scsi/qla2xxx/qla_mr.c 	req->ring = (void __force *)ha->iobase + ha->req_que_off;
ring              870 drivers/scsi/qla2xxx/qla_mr.c 	if ((!req->ring) || (req->length == 0)) {
ring              879 drivers/scsi/qla2xxx/qla_mr.c 	    req, req->ring, req->length,
ring              883 drivers/scsi/qla2xxx/qla_mr.c 	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
ring              885 drivers/scsi/qla2xxx/qla_mr.c 	if ((!rsp->ring) || (rsp->length == 0)) {
ring              894 drivers/scsi/qla2xxx/qla_mr.c 	    rsp, rsp->ring, rsp->length,
ring             1426 drivers/scsi/qla2xxx/qla_mr.c 	rsp->ring_ptr = rsp->ring;
ring             2739 drivers/scsi/qla2xxx/qla_mr.c 			rsp->ring_ptr = rsp->ring;
ring             2980 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr = req->ring;
ring             3175 drivers/scsi/qla2xxx/qla_mr.c 		req->ring_ptr = req->ring;
ring              487 drivers/scsi/qla2xxx/qla_nvme.c 				req->ring_ptr = req->ring;
ring              511 drivers/scsi/qla2xxx/qla_nvme.c 		req->ring_ptr = req->ring;
ring             2811 drivers/scsi/qla2xxx/qla_nx.c 		req->ring_ptr = req->ring;
ring             3060 drivers/scsi/qla2xxx/qla_nx.c 		ha->isp_ops->get_flash_version(vha, req->ring);
ring             1658 drivers/scsi/qla2xxx/qla_nx2.c 		ha->isp_ops->get_flash_version(vha, vha->req->ring);
ring              459 drivers/scsi/qla2xxx/qla_os.c 	} else if (req && req->ring)
ring              462 drivers/scsi/qla2xxx/qla_os.c 		req->ring, req->dma);
ring              477 drivers/scsi/qla2xxx/qla_os.c 	} else if (rsp && rsp->ring) {
ring              480 drivers/scsi/qla2xxx/qla_os.c 		rsp->ring, rsp->dma);
ring             4157 drivers/scsi/qla2xxx/qla_os.c 	(*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
ring             4160 drivers/scsi/qla2xxx/qla_os.c 	if (!(*req)->ring) {
ring             4174 drivers/scsi/qla2xxx/qla_os.c 	(*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
ring             4177 drivers/scsi/qla2xxx/qla_os.c 	if (!(*rsp)->ring) {
ring             4187 drivers/scsi/qla2xxx/qla_os.c 	    *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
ring             4188 drivers/scsi/qla2xxx/qla_os.c 	    (*rsp)->ring);
ring             4271 drivers/scsi/qla2xxx/qla_os.c 		sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
ring             4272 drivers/scsi/qla2xxx/qla_os.c 	(*rsp)->ring = NULL;
ring             4279 drivers/scsi/qla2xxx/qla_os.c 		sizeof(request_t), (*req)->ring, (*req)->dma);
ring             4280 drivers/scsi/qla2xxx/qla_os.c 	(*req)->ring = NULL;
ring              555 drivers/scsi/qla2xxx/qla_sup.c 	struct qla_flt_location *fltl = (void *)req->ring;
ring              556 drivers/scsi/qla2xxx/qla_sup.c 	uint32_t *dcode = (void *)req->ring;
ring              557 drivers/scsi/qla2xxx/qla_sup.c 	uint8_t *buf = (void *)req->ring, *bcode,  last_image;
ring              613 drivers/scsi/qla2xxx/qla_sup.c 	wptr = (void *)req->ring;
ring              952 drivers/scsi/qla2xxx/qla_sup.c 	uint16_t *wptr = (void *)req->ring;
ring              953 drivers/scsi/qla2xxx/qla_sup.c 	struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
ring             1052 drivers/scsi/qla2xxx/qla_sup.c 	wptr = (uint32_t *)req->ring;
ring             1053 drivers/scsi/qla2xxx/qla_sup.c 	ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
ring             2504 drivers/scsi/qla2xxx/qla_target.c 		req->ring_ptr = req->ring;
ring              338 drivers/scsi/qla2xxx/qla_tmpl.c 				qla27xx_insertbuf(req ? req->ring : NULL,
ring              339 drivers/scsi/qla2xxx/qla_tmpl.c 				    length * sizeof(*req->ring), buf, len);
ring              352 drivers/scsi/qla2xxx/qla_tmpl.c 				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
ring              353 drivers/scsi/qla2xxx/qla_tmpl.c 				    length * sizeof(*rsp->ring), buf, len);
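Across the qla2xxx entries the recurring "req->ring_ptr = req->ring" is a cursor wrap: ring_ptr walks an array of fixed-size IOCB slots and snaps back to the base when ring_index reaches length (the in/out pointers placed at ring + length in qla_init.c/qla_mid.c live just past that array). A minimal standalone model; request_t is reduced to a stub and the ring_index handling is an assumption consistent with the listed resets.

#include <stdio.h>

typedef struct { unsigned char payload[64]; } request_t;

struct req_que {
	request_t *ring;
	request_t *ring_ptr;	/* next slot to fill */
	unsigned int length;	/* slots in the ring */
	unsigned int ring_index;
};

static request_t *next_iocb(struct req_que *req)
{
	request_t *pkt = req->ring_ptr;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;	/* wrap, as in the listing */
	} else {
		req->ring_ptr++;
	}
	return pkt;
}

int main(void)
{
	static request_t slots[4];
	struct req_que req = { .ring = slots, .ring_ptr = slots, .length = 4 };

	for (int i = 0; i < 6; i++)
		printf("iocb %d -> slot %td\n", i, next_iocb(&req) - slots);
	return 0;
}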
ring               26 drivers/scsi/snic/vnic_cq.c 	svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
ring               46 drivers/scsi/snic/vnic_cq.c 	err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
ring               61 drivers/scsi/snic/vnic_cq.c 	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
ring               63 drivers/scsi/snic/vnic_cq.c 	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
ring               85 drivers/scsi/snic/vnic_cq.c 	svnic_dev_clear_desc_ring(&cq->ring);
ring               55 drivers/scsi/snic/vnic_cq.h 	struct vnic_dev_ring ring;
ring               71 drivers/scsi/snic/vnic_cq.h 	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring               72 drivers/scsi/snic/vnic_cq.h 		cq->ring.desc_size * cq->to_clean);
ring               83 drivers/scsi/snic/vnic_cq.h 		if (cq->to_clean == cq->ring.desc_count) {
ring               88 drivers/scsi/snic/vnic_cq.h 		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
ring               89 drivers/scsi/snic/vnic_cq.h 			cq->ring.desc_size * cq->to_clean);
ring               35 drivers/scsi/snic/vnic_cq_fw.h 	desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
ring               36 drivers/scsi/snic/vnic_cq_fw.h 		cq->ring.desc_size * cq->to_clean);
ring               45 drivers/scsi/snic/vnic_cq_fw.h 		if (cq->to_clean == cq->ring.desc_count) {
ring               50 drivers/scsi/snic/vnic_cq_fw.h 		desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
ring               51 drivers/scsi/snic/vnic_cq_fw.h 			cq->ring.desc_size * cq->to_clean);
ring              190 drivers/scsi/snic/vnic_dev.c unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring              203 drivers/scsi/snic/vnic_dev.c 	ring->base_align = 512;
ring              208 drivers/scsi/snic/vnic_dev.c 	ring->desc_count = ALIGN(desc_count, count_align);
ring              210 drivers/scsi/snic/vnic_dev.c 	ring->desc_size = ALIGN(desc_size, desc_align);
ring              212 drivers/scsi/snic/vnic_dev.c 	ring->size = ring->desc_count * ring->desc_size;
ring              213 drivers/scsi/snic/vnic_dev.c 	ring->size_unaligned = ring->size + ring->base_align;
ring              215 drivers/scsi/snic/vnic_dev.c 	return ring->size_unaligned;
ring              218 drivers/scsi/snic/vnic_dev.c void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
ring              220 drivers/scsi/snic/vnic_dev.c 	memset(ring->descs, 0, ring->size);
ring              223 drivers/scsi/snic/vnic_dev.c int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring              226 drivers/scsi/snic/vnic_dev.c 	svnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring              228 drivers/scsi/snic/vnic_dev.c 	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring              229 drivers/scsi/snic/vnic_dev.c 			ring->size_unaligned, &ring->base_addr_unaligned,
ring              231 drivers/scsi/snic/vnic_dev.c 	if (!ring->descs_unaligned) {
ring              233 drivers/scsi/snic/vnic_dev.c 			(int)ring->size);
ring              238 drivers/scsi/snic/vnic_dev.c 	ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring              239 drivers/scsi/snic/vnic_dev.c 		ring->base_align);
ring              240 drivers/scsi/snic/vnic_dev.c 	ring->descs = (u8 *)ring->descs_unaligned +
ring              241 drivers/scsi/snic/vnic_dev.c 		(ring->base_addr - ring->base_addr_unaligned);
ring              243 drivers/scsi/snic/vnic_dev.c 	svnic_dev_clear_desc_ring(ring);
ring              245 drivers/scsi/snic/vnic_dev.c 	ring->desc_avail = ring->desc_count - 1;
ring              250 drivers/scsi/snic/vnic_dev.c void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
ring              252 drivers/scsi/snic/vnic_dev.c 	if (ring->descs) {
ring              254 drivers/scsi/snic/vnic_dev.c 			ring->size_unaligned,
ring              255 drivers/scsi/snic/vnic_dev.c 			ring->descs_unaligned,
ring              256 drivers/scsi/snic/vnic_dev.c 			ring->base_addr_unaligned);
ring              257 drivers/scsi/snic/vnic_dev.c 		ring->descs = NULL;
ring              406 drivers/scsi/snic/vnic_dev.c 	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
ring               75 drivers/scsi/snic/vnic_dev.h unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring               78 drivers/scsi/snic/vnic_dev.h void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
ring               79 drivers/scsi/snic/vnic_dev.h int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
ring               82 drivers/scsi/snic/vnic_dev.h 			     struct vnic_dev_ring *ring);
ring               39 drivers/scsi/snic/vnic_wq.c 	return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
ring               46 drivers/scsi/snic/vnic_wq.c 	unsigned int i, j, count = wq->ring.desc_count;
ring               62 drivers/scsi/snic/vnic_wq.c 			buf->desc = (u8 *)wq->ring.descs +
ring               63 drivers/scsi/snic/vnic_wq.c 				wq->ring.desc_size * buf->index;
ring               88 drivers/scsi/snic/vnic_wq.c 	svnic_dev_free_desc_ring(vdev, &wq->ring);
ring              160 drivers/scsi/snic/vnic_wq.c 	unsigned int count = wq->ring.desc_count;
ring              162 drivers/scsi/snic/vnic_wq.c 	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
ring              227 drivers/scsi/snic/vnic_wq.c 		wq->ring.desc_avail++;
ring              236 drivers/scsi/snic/vnic_wq.c 	svnic_dev_clear_desc_ring(&wq->ring);
ring               78 drivers/scsi/snic/vnic_wq.h 	struct vnic_dev_ring ring;
ring               88 drivers/scsi/snic/vnic_wq.h 	return wq->ring.desc_avail;
ring               94 drivers/scsi/snic/vnic_wq.h 	return wq->ring.desc_count - wq->ring.desc_avail - 1;
ring              125 drivers/scsi/snic/vnic_wq.h 	wq->ring.desc_avail--;
ring              141 drivers/scsi/snic/vnic_wq.h 		wq->ring.desc_avail++;
ring              648 drivers/scsi/vmw_pvscsi.c 	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
ring              652 drivers/scsi/vmw_pvscsi.c 		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
ring             1087 drivers/scsi/vmw_pvscsi.c 	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
ring             1091 drivers/scsi/vmw_pvscsi.c 		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
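The vmw_pvscsi entries index their completion and message rings as ring + (consIdx & mask): producer and consumer indices run freely and only the low bits select a slot, which requires a power-of-two ring size and makes the empty test a plain index comparison. Sketch; the mask value and entry type are illustrative.

#include <stdint.h>
#include <stdio.h>

#define RING_SLOTS 8			/* must be a power of two */
#define RING_MASK  (RING_SLOTS - 1)

struct cmp_desc { uint32_t context; };

int main(void)
{
	struct cmp_desc ring[RING_SLOTS];
	uint32_t cons_idx = 13;		/* free-running, never wrapped */
	struct cmp_desc *e = ring + (cons_idx & RING_MASK);

	printf("consumer %u maps to slot %td\n", cons_idx, e - ring);
	/* "ring empty" is prodIdx == consIdx, not a slot comparison */
	return 0;
}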
ring              113 drivers/scsi/xen-scsifront.c 	struct vscsiif_front_ring ring;
ring              181 drivers/scsi/xen-scsifront.c 	struct vscsiif_front_ring *ring = &(info->ring);
ring              187 drivers/scsi/xen-scsifront.c 	if (RING_FULL(&info->ring))
ring              197 drivers/scsi/xen-scsifront.c 	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
ring              198 drivers/scsi/xen-scsifront.c 	ring->req_prod_pvt++;
ring              220 drivers/scsi/xen-scsifront.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
ring              328 drivers/scsi/xen-scsifront.c 	rp = info->ring.sring->rsp_prod;
ring              330 drivers/scsi/xen-scsifront.c 	for (i = info->ring.rsp_cons; i != rp; i++) {
ring              331 drivers/scsi/xen-scsifront.c 		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
ring              335 drivers/scsi/xen-scsifront.c 	info->ring.rsp_cons = i;
ring              337 drivers/scsi/xen-scsifront.c 	if (i != info->ring.req_prod_pvt)
ring              338 drivers/scsi/xen-scsifront.c 		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
ring              340 drivers/scsi/xen-scsifront.c 		info->ring.sring->rsp_event = i + 1;
ring              717 drivers/scsi/xen-scsifront.c 	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
ring              756 drivers/scsi/xen-scsifront.c 				  (unsigned long)info->ring.sring);
ring              765 drivers/scsi/xen-scsifront.c 				  (unsigned long)info->ring.sring);
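The xen-scsifront entries follow the shared-ring protocol from xen's ring.h macros: requests are batched under a private producer index (req_prod_pvt), published to the shared page in one store, and the backend is notified only if its advertised event threshold fell inside the newly published window. The model below is single-threaded and elides the memory barriers the real macros insert; structure names are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct shared_ring {
	unsigned int req_prod;	/* published to the backend */
	unsigned int req_event;	/* backend: "notify me past this" */
};

struct front_ring {
	struct shared_ring *sring;
	unsigned int req_prod_pvt;	/* private, not yet published */
	unsigned int rsp_cons;
};

static bool ring_full(const struct front_ring *r)
{
	return r->req_prod_pvt - r->rsp_cons >= RING_SIZE;
}

/* Claim a slot under the private index (mirrors req_prod_pvt++). */
static unsigned int queue_request(struct front_ring *r)
{
	return r->req_prod_pvt++ % RING_SIZE;
}

/* Publish and decide whether to notify: true iff req_event lies in
 * the half-open window (old, new], computed with unsigned wraparound. */
static bool push_and_check_notify(struct front_ring *r)
{
	unsigned int old = r->sring->req_prod;
	unsigned int new = r->req_prod_pvt;

	r->sring->req_prod = new;	/* publish (barriers elided) */
	return (new - r->sring->req_event) < (new - old);
}

int main(void)
{
	struct shared_ring sring = { .req_prod = 0, .req_event = 1 };
	struct front_ring front = { .sring = &sring };

	if (!ring_full(&front)) {
		unsigned int slot = queue_request(&front);
		bool notify = push_and_check_notify(&front);

		printf("request in slot %u, notify=%d\n", slot, notify);
	}
	return 0;
}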
ring              123 drivers/soc/fsl/qbman/bman.c 	struct bm_rcr_entry *ring, *cursor;
ring              387 drivers/soc/fsl/qbman/bman.c 	rcr->ring = portal->addr.ce + BM_CL_RCR;
ring              390 drivers/soc/fsl/qbman/bman.c 	rcr->cursor = rcr->ring + pi;
ring              198 drivers/soc/fsl/qbman/qman.c 	struct qm_eqcr_entry *ring, *cursor;
ring              207 drivers/soc/fsl/qbman/qman.c 	const struct qm_dqrr_entry *ring, *cursor;
ring              217 drivers/soc/fsl/qbman/qman.c 	union qm_mr_entry *ring, *cursor;
ring              430 drivers/soc/fsl/qbman/qman.c 	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
ring              434 drivers/soc/fsl/qbman/qman.c 	eqcr->cursor = eqcr->ring + pi;
ring              619 drivers/soc/fsl/qbman/qman.c 	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
ring              622 drivers/soc/fsl/qbman/qman.c 	dqrr->cursor = dqrr->ring + dqrr->ci;
ring              634 drivers/soc/fsl/qbman/qman.c 		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
ring              680 drivers/soc/fsl/qbman/qman.c 	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
ring              706 drivers/soc/fsl/qbman/qman.c 	DPAA_ASSERT((dqrr->ring + idx) == dq);
ring              773 drivers/soc/fsl/qbman/qman.c 	mr->ring = portal->addr.ce + QM_CL_MR;
ring              776 drivers/soc/fsl/qbman/qman.c 	mr->cursor = mr->ring + mr->ci;
ring              820 drivers/soc/fsl/qbman/qman.c 	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
ring              145 drivers/spi/spi-pic32-sqi.c 	struct ring_desc	*ring;
ring              470 drivers/spi/spi-pic32-sqi.c 	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
ring              471 drivers/spi/spi-pic32-sqi.c 	if (!sqi->ring) {
ring              484 drivers/spi/spi-pic32-sqi.c 	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
ring              492 drivers/spi/spi-pic32-sqi.c 	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
ring              504 drivers/spi/spi-pic32-sqi.c 	kfree(sqi->ring);
ring              556 drivers/staging/comedi/drivers/mite.c 	writel(mite_chan->ring->dma_addr,
ring              569 drivers/staging/comedi/drivers/mite.c 						   struct mite_ring *ring,
ring              584 drivers/staging/comedi/drivers/mite.c 		if (!mite_chan->ring) {
ring              585 drivers/staging/comedi/drivers/mite.c 			mite_chan->ring = ring;
ring              601 drivers/staging/comedi/drivers/mite.c 					  struct mite_ring *ring)
ring              603 drivers/staging/comedi/drivers/mite.c 	return mite_request_channel_in_range(mite, ring, 0,
ring              619 drivers/staging/comedi/drivers/mite.c 	if (mite_chan->ring) {
ring              631 drivers/staging/comedi/drivers/mite.c 		mite_chan->ring = NULL;
ring              650 drivers/staging/comedi/drivers/mite.c int mite_init_ring_descriptors(struct mite_ring *ring,
ring              663 drivers/staging/comedi/drivers/mite.c 	if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
ring              671 drivers/staging/comedi/drivers/mite.c 		desc = &ring->descs[i];
ring              674 drivers/staging/comedi/drivers/mite.c 		desc->next = cpu_to_le32(ring->dma_addr +
ring              680 drivers/staging/comedi/drivers/mite.c 		desc = &ring->descs[i];
ring              687 drivers/staging/comedi/drivers/mite.c 	desc->next = cpu_to_le32(ring->dma_addr);
ring              698 drivers/staging/comedi/drivers/mite.c static void mite_free_dma_descs(struct mite_ring *ring)
ring              700 drivers/staging/comedi/drivers/mite.c 	struct mite_dma_desc *descs = ring->descs;
ring              703 drivers/staging/comedi/drivers/mite.c 		dma_free_coherent(ring->hw_dev,
ring              704 drivers/staging/comedi/drivers/mite.c 				  ring->n_links * sizeof(*descs),
ring              705 drivers/staging/comedi/drivers/mite.c 				  descs, ring->dma_addr);
ring              706 drivers/staging/comedi/drivers/mite.c 		ring->descs = NULL;
ring              707 drivers/staging/comedi/drivers/mite.c 		ring->dma_addr = 0;
ring              708 drivers/staging/comedi/drivers/mite.c 		ring->n_links = 0;
ring              717 drivers/staging/comedi/drivers/mite.c int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
ring              723 drivers/staging/comedi/drivers/mite.c 	mite_free_dma_descs(ring);
ring              730 drivers/staging/comedi/drivers/mite.c 	descs = dma_alloc_coherent(ring->hw_dev,
ring              732 drivers/staging/comedi/drivers/mite.c 				   &ring->dma_addr, GFP_KERNEL);
ring              738 drivers/staging/comedi/drivers/mite.c 	ring->descs = descs;
ring              739 drivers/staging/comedi/drivers/mite.c 	ring->n_links = n_links;
ring              741 drivers/staging/comedi/drivers/mite.c 	return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
ring              751 drivers/staging/comedi/drivers/mite.c 	struct mite_ring *ring;
ring              753 drivers/staging/comedi/drivers/mite.c 	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
ring              754 drivers/staging/comedi/drivers/mite.c 	if (!ring)
ring              756 drivers/staging/comedi/drivers/mite.c 	ring->hw_dev = get_device(&mite->pcidev->dev);
ring              757 drivers/staging/comedi/drivers/mite.c 	if (!ring->hw_dev) {
ring              758 drivers/staging/comedi/drivers/mite.c 		kfree(ring);
ring              761 drivers/staging/comedi/drivers/mite.c 	ring->n_links = 0;
ring              762 drivers/staging/comedi/drivers/mite.c 	ring->descs = NULL;
ring              763 drivers/staging/comedi/drivers/mite.c 	ring->dma_addr = 0;
ring              764 drivers/staging/comedi/drivers/mite.c 	return ring;
ring              772 drivers/staging/comedi/drivers/mite.c void mite_free_ring(struct mite_ring *ring)
ring              774 drivers/staging/comedi/drivers/mite.c 	if (ring) {
ring              775 drivers/staging/comedi/drivers/mite.c 		mite_free_dma_descs(ring);
ring              776 drivers/staging/comedi/drivers/mite.c 		put_device(ring->hw_dev);
ring              777 drivers/staging/comedi/drivers/mite.c 		kfree(ring);
ring               41 drivers/staging/comedi/drivers/mite.h 	struct mite_ring *ring;
ring               68 drivers/staging/comedi/drivers/mite.h 						   struct mite_ring *ring,
ring               72 drivers/staging/comedi/drivers/mite.h 					  struct mite_ring *ring);
ring               75 drivers/staging/comedi/drivers/mite.h int mite_init_ring_descriptors(struct mite_ring *ring,
ring               77 drivers/staging/comedi/drivers/mite.h int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s);
ring               80 drivers/staging/comedi/drivers/mite.h void mite_free_ring(struct mite_ring *ring);
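The mite entries build a circular chain of DMA descriptors: each link's next field holds the bus address of its successor (ring->dma_addr plus the next link's offset), and the final link points back to ring->dma_addr so the controller loops over the buffer. User-space model with a pretend bus address; the descriptor layout is illustrative, not the MITE's register format.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_desc {
	uint32_t count;	/* bytes described by this link */
	uint32_t addr;	/* buffer bus address (stubbed) */
	uint32_t next;	/* bus address of the next descriptor */
};

int main(void)
{
	const unsigned int n_links = 4;
	const uint32_t dma_addr = 0x1000;	/* pretend ring bus address */
	struct dma_desc *descs = calloc(n_links, sizeof(*descs));

	if (!descs)
		return 1;

	for (unsigned int i = 0; i < n_links; i++) {
		descs[i].count = 4096;
		descs[i].addr = 0x2000 + i * 4096;
		/* mirrors desc->next = cpu_to_le32(ring->dma_addr + ...) */
		descs[i].next = dma_addr + (i + 1) * sizeof(*descs);
	}
	descs[n_links - 1].next = dma_addr;	/* close the loop */

	for (unsigned int i = 0; i < n_links; i++)
		printf("link %u -> 0x%x\n", i, descs[i].next);
	free(descs);
	return 0;
}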
ring              260 drivers/staging/comedi/drivers/ni_660x.c 	struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
ring              342 drivers/staging/comedi/drivers/ni_660x.c 	struct mite_ring *ring;
ring              347 drivers/staging/comedi/drivers/ni_660x.c 	ring = devpriv->ring[counter->chip_index][counter->counter_index];
ring              348 drivers/staging/comedi/drivers/ni_660x.c 	mite_chan = mite_request_channel(devpriv->mite, ring);
ring              473 drivers/staging/comedi/drivers/ni_660x.c 	struct mite_ring *ring;
ring              476 drivers/staging/comedi/drivers/ni_660x.c 	ring = devpriv->ring[counter->chip_index][counter->counter_index];
ring              477 drivers/staging/comedi/drivers/ni_660x.c 	ret = mite_buf_change(ring, s);
ring              510 drivers/staging/comedi/drivers/ni_660x.c 			devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
ring              511 drivers/staging/comedi/drivers/ni_660x.c 			if (!devpriv->ring[i][j])
ring              527 drivers/staging/comedi/drivers/ni_660x.c 			mite_free_ring(devpriv->ring[i][j]);
ring             1955 drivers/staging/comedi/drivers/ni_mio_common.c static void ni_cmd_set_mite_transfer(struct mite_ring *ring,
ring             1981 drivers/staging/comedi/drivers/ni_mio_common.c 	mite_init_ring_descriptors(ring, sdev, nbytes);
ring             1415 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 	struct rtl8192_tx_ring  *ring = NULL;
ring             1479 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				ring = &priv->tx_ring[QueueID];
ring             1481 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				if (skb_queue_len(&ring->queue) == 0) {
ring             1508 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				ring = &priv->tx_ring[QueueID];
ring             1510 drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c 				if (skb_queue_len(&ring->queue) == 0) {
ring              263 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
ring              265 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	if (ring->entries - skb_queue_len(&ring->queue) >= 2)
ring              524 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = NULL;
ring              527 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[BEACON_QUEUE];
ring              528 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	pskb = __skb_dequeue(&ring->queue);
ring              543 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	pdesc = &ring->desc[0];
ring              545 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, pnewskb);
ring             1115 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring  *ring = NULL;
ring             1136 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		ring = &priv->tx_ring[QueueID];
ring             1138 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		if (skb_queue_len(&ring->queue) == 0) {
ring             1141 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			skb = __skb_peek(&ring->queue);
ring             1580 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
ring             1582 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	while (skb_queue_len(&ring->queue)) {
ring             1583 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct tx_desc *entry = &ring->desc[ring->idx];
ring             1584 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct sk_buff *skb = __skb_dequeue(&ring->queue);
ring             1589 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		ring->idx = (ring->idx + 1) % ring->entries;
ring             1592 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	pci_free_consistent(priv->pdev, sizeof(*ring->desc) * ring->entries,
ring             1593 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring->desc, ring->dma);
ring             1594 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring->desc = NULL;
ring             1667 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
ring             1669 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	while (skb_queue_len(&ring->queue)) {
ring             1670 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		struct tx_desc *entry = &ring->desc[ring->idx];
ring             1676 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			ring->idx = (ring->idx + 1) % ring->entries;
ring             1679 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		skb = __skb_dequeue(&ring->queue);
ring             1692 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring *ring;
ring             1699 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[TXCMD_QUEUE];
ring             1701 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
ring             1702 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	entry = (struct tx_desc_cmd *)&ring->desc[idx];
ring             1708 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
ring             1715 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct rtl8192_tx_ring  *ring;
ring             1749 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = &priv->tx_ring[tcb_desc->queue_index];
ring             1751 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
ring             1755 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	pdesc = &ring->desc[idx];
ring             1759 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			    tcb_desc->queue_index, ring->idx, idx, skb->len,
ring             1760 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			    skb_queue_len(&ring->queue));
ring             1770 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	__skb_queue_tail(&ring->queue, skb);
ring             1831 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	struct tx_desc *ring;
ring             1835 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
ring             1836 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	if (!ring || (unsigned long)ring & 0xFF) {
ring             1841 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 	priv->tx_ring[prio].desc = ring;
ring             1848 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 		ring[i].NextDescAddress =
ring             1850 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			sizeof(*ring);
ring             1902 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			struct rtl8192_tx_ring *ring = &priv->tx_ring[i];
ring             1904 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			while (skb_queue_len(&ring->queue)) {
ring             1905 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				struct tx_desc *entry = &ring->desc[ring->idx];
ring             1907 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 						 __skb_dequeue(&ring->queue);
ring             1913 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 				ring->idx = (ring->idx + 1) % ring->entries;
ring             1915 drivers/staging/rtl8192e/rtl8192e/rtl_core.c 			ring->idx = 0;
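
The rtl8192e TX path shows the full life of a legacy-PCI descriptor ring: pci_zalloc_consistent() provides the coherent block, the `& 0xFF` test rejects blocks not aligned to 256 bytes, each descriptor's NextDescAddress is linked to the bus address of its successor, and the modulo wraps the last entry back to the first. The setup step, condensed from the excerpt (the successor-address line is reconstructed, since the listing only shows lines containing "ring"; treat it as illustrative):

/* Condensed descriptor-ring init in the style of rtl_core.c above;
 * error handling elided.
 */
ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF)
	return -ENOMEM;			/* missing or misaligned block */

priv->tx_ring[prio].desc = ring;
for (i = 0; i < entries; i++)
	ring[i].NextDescAddress =
		cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring));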
ring              326 drivers/thunderbolt/ctl.c static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
ring              407 drivers/thunderbolt/ctl.c static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
ring               25 drivers/thunderbolt/nhi.c #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
ring               43 drivers/thunderbolt/nhi.c static int ring_interrupt_index(struct tb_ring *ring)
ring               45 drivers/thunderbolt/nhi.c 	int bit = ring->hop;
ring               46 drivers/thunderbolt/nhi.c 	if (!ring->is_tx)
ring               47 drivers/thunderbolt/nhi.c 		bit += ring->nhi->hop_count;
ring               56 drivers/thunderbolt/nhi.c static void ring_interrupt_active(struct tb_ring *ring, bool active)
ring               59 drivers/thunderbolt/nhi.c 		  ring_interrupt_index(ring) / 32 * 4;
ring               60 drivers/thunderbolt/nhi.c 	int bit = ring_interrupt_index(ring) & 31;
ring               64 drivers/thunderbolt/nhi.c 	if (ring->irq > 0) {
ring               69 drivers/thunderbolt/nhi.c 		if (ring->is_tx)
ring               70 drivers/thunderbolt/nhi.c 			index = ring->hop;
ring               72 drivers/thunderbolt/nhi.c 			index = ring->hop + ring->nhi->hop_count;
ring               78 drivers/thunderbolt/nhi.c 		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
ring               81 drivers/thunderbolt/nhi.c 			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
ring               84 drivers/thunderbolt/nhi.c 		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
ring               90 drivers/thunderbolt/nhi.c 			ivr |= ring->vector << shift;
ring               94 drivers/thunderbolt/nhi.c 	old = ioread32(ring->nhi->iobase + reg);
ring              100 drivers/thunderbolt/nhi.c 	dev_dbg(&ring->nhi->pdev->dev,
ring              105 drivers/thunderbolt/nhi.c 		dev_WARN(&ring->nhi->pdev->dev,
ring              107 drivers/thunderbolt/nhi.c 					 RING_TYPE(ring), ring->hop,
ring              109 drivers/thunderbolt/nhi.c 	iowrite32(new, ring->nhi->iobase + reg);
ring              131 drivers/thunderbolt/nhi.c static void __iomem *ring_desc_base(struct tb_ring *ring)
ring              133 drivers/thunderbolt/nhi.c 	void __iomem *io = ring->nhi->iobase;
ring              134 drivers/thunderbolt/nhi.c 	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
ring              135 drivers/thunderbolt/nhi.c 	io += ring->hop * 16;
ring              139 drivers/thunderbolt/nhi.c static void __iomem *ring_options_base(struct tb_ring *ring)
ring              141 drivers/thunderbolt/nhi.c 	void __iomem *io = ring->nhi->iobase;
ring              142 drivers/thunderbolt/nhi.c 	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
ring              143 drivers/thunderbolt/nhi.c 	io += ring->hop * 32;
ring              147 drivers/thunderbolt/nhi.c static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
ring              154 drivers/thunderbolt/nhi.c 	iowrite32(cons, ring_desc_base(ring) + 8);
ring              157 drivers/thunderbolt/nhi.c static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
ring              160 drivers/thunderbolt/nhi.c 	iowrite32(prod << 16, ring_desc_base(ring) + 8);
ring              163 drivers/thunderbolt/nhi.c static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
ring              165 drivers/thunderbolt/nhi.c 	iowrite32(value, ring_desc_base(ring) + offset);
ring              168 drivers/thunderbolt/nhi.c static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
ring              170 drivers/thunderbolt/nhi.c 	iowrite32(value, ring_desc_base(ring) + offset);
ring              171 drivers/thunderbolt/nhi.c 	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
ring              174 drivers/thunderbolt/nhi.c static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
ring              176 drivers/thunderbolt/nhi.c 	iowrite32(value, ring_options_base(ring) + offset);
ring              179 drivers/thunderbolt/nhi.c static bool ring_full(struct tb_ring *ring)
ring              181 drivers/thunderbolt/nhi.c 	return ((ring->head + 1) % ring->size) == ring->tail;
ring              184 drivers/thunderbolt/nhi.c static bool ring_empty(struct tb_ring *ring)
ring              186 drivers/thunderbolt/nhi.c 	return ring->head == ring->tail;
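
ring_full() and ring_empty() use the classic one-slot-reserved convention: head == tail means empty, so the producer must stop one short of the consumer, and a ring of size N carries at most N - 1 in-flight descriptors. A standalone illustration (plain C, not kernel code):

#include <assert.h>

#define RING_SIZE 4

static int head, tail;		/* indices into a RING_SIZE-entry array */

static int ring_full(void)  { return (head + 1) % RING_SIZE == tail; }
static int ring_empty(void) { return head == tail; }

int main(void)
{
	int produced = 0;

	while (!ring_full()) {			/* producer advances head */
		head = (head + 1) % RING_SIZE;
		produced++;
	}
	assert(produced == RING_SIZE - 1);	/* one slot stays unused */

	while (!ring_empty())			/* consumer advances tail */
		tail = (tail + 1) % RING_SIZE;
	return 0;
}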
ring              194 drivers/thunderbolt/nhi.c static void ring_write_descriptors(struct tb_ring *ring)
ring              198 drivers/thunderbolt/nhi.c 	list_for_each_entry_safe(frame, n, &ring->queue, list) {
ring              199 drivers/thunderbolt/nhi.c 		if (ring_full(ring))
ring              201 drivers/thunderbolt/nhi.c 		list_move_tail(&frame->list, &ring->in_flight);
ring              202 drivers/thunderbolt/nhi.c 		descriptor = &ring->descriptors[ring->head];
ring              206 drivers/thunderbolt/nhi.c 		if (ring->is_tx) {
ring              211 drivers/thunderbolt/nhi.c 		ring->head = (ring->head + 1) % ring->size;
ring              212 drivers/thunderbolt/nhi.c 		if (ring->is_tx)
ring              213 drivers/thunderbolt/nhi.c 			ring_iowrite_prod(ring, ring->head);
ring              215 drivers/thunderbolt/nhi.c 			ring_iowrite_cons(ring, ring->head);
ring              230 drivers/thunderbolt/nhi.c 	struct tb_ring *ring = container_of(work, typeof(*ring), work);
ring              236 drivers/thunderbolt/nhi.c 	spin_lock_irqsave(&ring->lock, flags);
ring              238 drivers/thunderbolt/nhi.c 	if (!ring->running) {
ring              240 drivers/thunderbolt/nhi.c 		list_splice_tail_init(&ring->in_flight, &done);
ring              241 drivers/thunderbolt/nhi.c 		list_splice_tail_init(&ring->queue, &done);
ring              246 drivers/thunderbolt/nhi.c 	while (!ring_empty(ring)) {
ring              247 drivers/thunderbolt/nhi.c 		if (!(ring->descriptors[ring->tail].flags
ring              250 drivers/thunderbolt/nhi.c 		frame = list_first_entry(&ring->in_flight, typeof(*frame),
ring              253 drivers/thunderbolt/nhi.c 		if (!ring->is_tx) {
ring              254 drivers/thunderbolt/nhi.c 			frame->size = ring->descriptors[ring->tail].length;
ring              255 drivers/thunderbolt/nhi.c 			frame->eof = ring->descriptors[ring->tail].eof;
ring              256 drivers/thunderbolt/nhi.c 			frame->sof = ring->descriptors[ring->tail].sof;
ring              257 drivers/thunderbolt/nhi.c 			frame->flags = ring->descriptors[ring->tail].flags;
ring              259 drivers/thunderbolt/nhi.c 		ring->tail = (ring->tail + 1) % ring->size;
ring              261 drivers/thunderbolt/nhi.c 	ring_write_descriptors(ring);
ring              265 drivers/thunderbolt/nhi.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              274 drivers/thunderbolt/nhi.c 			frame->callback(ring, frame, canceled);
ring              278 drivers/thunderbolt/nhi.c int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
ring              283 drivers/thunderbolt/nhi.c 	spin_lock_irqsave(&ring->lock, flags);
ring              284 drivers/thunderbolt/nhi.c 	if (ring->running) {
ring              285 drivers/thunderbolt/nhi.c 		list_add_tail(&frame->list, &ring->queue);
ring              286 drivers/thunderbolt/nhi.c 		ring_write_descriptors(ring);
ring              290 drivers/thunderbolt/nhi.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              304 drivers/thunderbolt/nhi.c struct ring_frame *tb_ring_poll(struct tb_ring *ring)
ring              309 drivers/thunderbolt/nhi.c 	spin_lock_irqsave(&ring->lock, flags);
ring              310 drivers/thunderbolt/nhi.c 	if (!ring->running)
ring              312 drivers/thunderbolt/nhi.c 	if (ring_empty(ring))
ring              315 drivers/thunderbolt/nhi.c 	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
ring              316 drivers/thunderbolt/nhi.c 		frame = list_first_entry(&ring->in_flight, typeof(*frame),
ring              320 drivers/thunderbolt/nhi.c 		if (!ring->is_tx) {
ring              321 drivers/thunderbolt/nhi.c 			frame->size = ring->descriptors[ring->tail].length;
ring              322 drivers/thunderbolt/nhi.c 			frame->eof = ring->descriptors[ring->tail].eof;
ring              323 drivers/thunderbolt/nhi.c 			frame->sof = ring->descriptors[ring->tail].sof;
ring              324 drivers/thunderbolt/nhi.c 			frame->flags = ring->descriptors[ring->tail].flags;
ring              327 drivers/thunderbolt/nhi.c 		ring->tail = (ring->tail + 1) % ring->size;
ring              331 drivers/thunderbolt/nhi.c 	spin_unlock_irqrestore(&ring->lock, flags);
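
tb_ring_poll() hands back one completed frame per call and returns NULL when nothing is ready, and __ring_interrupt() (below) masks the ring's interrupt before invoking start_poll; a consumer therefore drains in a loop and then calls tb_ring_poll_complete() to unmask. A sketch of such a consumer, assuming the tb_ring API exported through include/linux/thunderbolt.h; handle_frame() is a hypothetical placeholder:

/* Sketch: poll-mode RX consumer for a tb_ring.  "data" is whatever was
 * passed as poll_data when the ring was allocated.
 */
static void my_rx_poll(void *data)
{
	struct tb_ring *ring = data;
	struct ring_frame *frame;

	while ((frame = tb_ring_poll(ring)) != NULL)
		handle_frame(frame);		/* hypothetical helper */

	tb_ring_poll_complete(ring);		/* re-enable the ring IRQ */
}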
ring              336 drivers/thunderbolt/nhi.c static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
ring              338 drivers/thunderbolt/nhi.c 	int idx = ring_interrupt_index(ring);
ring              343 drivers/thunderbolt/nhi.c 	val = ioread32(ring->nhi->iobase + reg);
ring              348 drivers/thunderbolt/nhi.c 	iowrite32(val, ring->nhi->iobase + reg);
ring              352 drivers/thunderbolt/nhi.c static void __ring_interrupt(struct tb_ring *ring)
ring              354 drivers/thunderbolt/nhi.c 	if (!ring->running)
ring              357 drivers/thunderbolt/nhi.c 	if (ring->start_poll) {
ring              358 drivers/thunderbolt/nhi.c 		__ring_interrupt_mask(ring, true);
ring              359 drivers/thunderbolt/nhi.c 		ring->start_poll(ring->poll_data);
ring              361 drivers/thunderbolt/nhi.c 		schedule_work(&ring->work);
ring              372 drivers/thunderbolt/nhi.c void tb_ring_poll_complete(struct tb_ring *ring)
ring              376 drivers/thunderbolt/nhi.c 	spin_lock_irqsave(&ring->nhi->lock, flags);
ring              377 drivers/thunderbolt/nhi.c 	spin_lock(&ring->lock);
ring              378 drivers/thunderbolt/nhi.c 	if (ring->start_poll)
ring              379 drivers/thunderbolt/nhi.c 		__ring_interrupt_mask(ring, false);
ring              380 drivers/thunderbolt/nhi.c 	spin_unlock(&ring->lock);
ring              381 drivers/thunderbolt/nhi.c 	spin_unlock_irqrestore(&ring->nhi->lock, flags);
ring              387 drivers/thunderbolt/nhi.c 	struct tb_ring *ring = data;
ring              389 drivers/thunderbolt/nhi.c 	spin_lock(&ring->nhi->lock);
ring              390 drivers/thunderbolt/nhi.c 	spin_lock(&ring->lock);
ring              391 drivers/thunderbolt/nhi.c 	__ring_interrupt(ring);
ring              392 drivers/thunderbolt/nhi.c 	spin_unlock(&ring->lock);
ring              393 drivers/thunderbolt/nhi.c 	spin_unlock(&ring->nhi->lock);
ring              398 drivers/thunderbolt/nhi.c static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
ring              400 drivers/thunderbolt/nhi.c 	struct tb_nhi *nhi = ring->nhi;
ring              411 drivers/thunderbolt/nhi.c 	ring->vector = ret;
ring              413 drivers/thunderbolt/nhi.c 	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
ring              414 drivers/thunderbolt/nhi.c 	if (ring->irq < 0)
ring              415 drivers/thunderbolt/nhi.c 		return ring->irq;
ring              418 drivers/thunderbolt/nhi.c 	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
ring              421 drivers/thunderbolt/nhi.c static void ring_release_msix(struct tb_ring *ring)
ring              423 drivers/thunderbolt/nhi.c 	if (ring->irq <= 0)
ring              426 drivers/thunderbolt/nhi.c 	free_irq(ring->irq, ring);
ring              427 drivers/thunderbolt/nhi.c 	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
ring              428 drivers/thunderbolt/nhi.c 	ring->vector = 0;
ring              429 drivers/thunderbolt/nhi.c 	ring->irq = 0;
ring              432 drivers/thunderbolt/nhi.c static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
ring              438 drivers/thunderbolt/nhi.c 	if (ring->hop < 0) {
ring              446 drivers/thunderbolt/nhi.c 			if (ring->is_tx) {
ring              448 drivers/thunderbolt/nhi.c 					ring->hop = i;
ring              453 drivers/thunderbolt/nhi.c 					ring->hop = i;
ring              460 drivers/thunderbolt/nhi.c 	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
ring              461 drivers/thunderbolt/nhi.c 		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ring              465 drivers/thunderbolt/nhi.c 	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
ring              467 drivers/thunderbolt/nhi.c 			 ring->hop);
ring              470 drivers/thunderbolt/nhi.c 	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
ring              472 drivers/thunderbolt/nhi.c 			 ring->hop);
ring              477 drivers/thunderbolt/nhi.c 	if (ring->is_tx)
ring              478 drivers/thunderbolt/nhi.c 		nhi->tx_rings[ring->hop] = ring;
ring              480 drivers/thunderbolt/nhi.c 		nhi->rx_rings[ring->hop] = ring;
ring              494 drivers/thunderbolt/nhi.c 	struct tb_ring *ring = NULL;
ring              503 drivers/thunderbolt/nhi.c 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
ring              504 drivers/thunderbolt/nhi.c 	if (!ring)
ring              507 drivers/thunderbolt/nhi.c 	spin_lock_init(&ring->lock);
ring              508 drivers/thunderbolt/nhi.c 	INIT_LIST_HEAD(&ring->queue);
ring              509 drivers/thunderbolt/nhi.c 	INIT_LIST_HEAD(&ring->in_flight);
ring              510 drivers/thunderbolt/nhi.c 	INIT_WORK(&ring->work, ring_work);
ring              512 drivers/thunderbolt/nhi.c 	ring->nhi = nhi;
ring              513 drivers/thunderbolt/nhi.c 	ring->hop = hop;
ring              514 drivers/thunderbolt/nhi.c 	ring->is_tx = transmit;
ring              515 drivers/thunderbolt/nhi.c 	ring->size = size;
ring              516 drivers/thunderbolt/nhi.c 	ring->flags = flags;
ring              517 drivers/thunderbolt/nhi.c 	ring->sof_mask = sof_mask;
ring              518 drivers/thunderbolt/nhi.c 	ring->eof_mask = eof_mask;
ring              519 drivers/thunderbolt/nhi.c 	ring->head = 0;
ring              520 drivers/thunderbolt/nhi.c 	ring->tail = 0;
ring              521 drivers/thunderbolt/nhi.c 	ring->running = false;
ring              522 drivers/thunderbolt/nhi.c 	ring->start_poll = start_poll;
ring              523 drivers/thunderbolt/nhi.c 	ring->poll_data = poll_data;
ring              525 drivers/thunderbolt/nhi.c 	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
ring              526 drivers/thunderbolt/nhi.c 			size * sizeof(*ring->descriptors),
ring              527 drivers/thunderbolt/nhi.c 			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
ring              528 drivers/thunderbolt/nhi.c 	if (!ring->descriptors)
ring              531 drivers/thunderbolt/nhi.c 	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
ring              534 drivers/thunderbolt/nhi.c 	if (nhi_alloc_hop(nhi, ring))
ring              537 drivers/thunderbolt/nhi.c 	return ring;
ring              540 drivers/thunderbolt/nhi.c 	ring_release_msix(ring);
ring              542 drivers/thunderbolt/nhi.c 	dma_free_coherent(&ring->nhi->pdev->dev,
ring              543 drivers/thunderbolt/nhi.c 			  ring->size * sizeof(*ring->descriptors),
ring              544 drivers/thunderbolt/nhi.c 			  ring->descriptors, ring->descriptors_dma);
ring              546 drivers/thunderbolt/nhi.c 	kfree(ring);
ring              592 drivers/thunderbolt/nhi.c void tb_ring_start(struct tb_ring *ring)
ring              597 drivers/thunderbolt/nhi.c 	spin_lock_irq(&ring->nhi->lock);
ring              598 drivers/thunderbolt/nhi.c 	spin_lock(&ring->lock);
ring              599 drivers/thunderbolt/nhi.c 	if (ring->nhi->going_away)
ring              601 drivers/thunderbolt/nhi.c 	if (ring->running) {
ring              602 drivers/thunderbolt/nhi.c 		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
ring              605 drivers/thunderbolt/nhi.c 	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
ring              606 drivers/thunderbolt/nhi.c 		RING_TYPE(ring), ring->hop);
ring              608 drivers/thunderbolt/nhi.c 	if (ring->flags & RING_FLAG_FRAME) {
ring              617 drivers/thunderbolt/nhi.c 	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
ring              630 drivers/thunderbolt/nhi.c 	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
ring              631 drivers/thunderbolt/nhi.c 	if (ring->is_tx) {
ring              632 drivers/thunderbolt/nhi.c 		ring_iowrite32desc(ring, ring->size, 12);
ring              633 drivers/thunderbolt/nhi.c 	ring_iowrite32options(ring, 0, 4); /* time related? */
ring              634 drivers/thunderbolt/nhi.c 		ring_iowrite32options(ring, flags, 0);
ring              636 drivers/thunderbolt/nhi.c 		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
ring              638 drivers/thunderbolt/nhi.c 		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
ring              639 drivers/thunderbolt/nhi.c 		ring_iowrite32options(ring, sof_eof_mask, 4);
ring              640 drivers/thunderbolt/nhi.c 		ring_iowrite32options(ring, flags, 0);
ring              642 drivers/thunderbolt/nhi.c 	ring_interrupt_active(ring, true);
ring              643 drivers/thunderbolt/nhi.c 	ring->running = true;
ring              645 drivers/thunderbolt/nhi.c 	spin_unlock(&ring->lock);
ring              646 drivers/thunderbolt/nhi.c 	spin_unlock_irq(&ring->nhi->lock);
ring              663 drivers/thunderbolt/nhi.c void tb_ring_stop(struct tb_ring *ring)
ring              665 drivers/thunderbolt/nhi.c 	spin_lock_irq(&ring->nhi->lock);
ring              666 drivers/thunderbolt/nhi.c 	spin_lock(&ring->lock);
ring              667 drivers/thunderbolt/nhi.c 	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
ring              668 drivers/thunderbolt/nhi.c 		RING_TYPE(ring), ring->hop);
ring              669 drivers/thunderbolt/nhi.c 	if (ring->nhi->going_away)
ring              671 drivers/thunderbolt/nhi.c 	if (!ring->running) {
ring              672 drivers/thunderbolt/nhi.c 		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
ring              673 drivers/thunderbolt/nhi.c 			 RING_TYPE(ring), ring->hop);
ring              676 drivers/thunderbolt/nhi.c 	ring_interrupt_active(ring, false);
ring              678 drivers/thunderbolt/nhi.c 	ring_iowrite32options(ring, 0, 0);
ring              679 drivers/thunderbolt/nhi.c 	ring_iowrite64desc(ring, 0, 0);
ring              680 drivers/thunderbolt/nhi.c 	ring_iowrite32desc(ring, 0, 8);
ring              681 drivers/thunderbolt/nhi.c 	ring_iowrite32desc(ring, 0, 12);
ring              682 drivers/thunderbolt/nhi.c 	ring->head = 0;
ring              683 drivers/thunderbolt/nhi.c 	ring->tail = 0;
ring              684 drivers/thunderbolt/nhi.c 	ring->running = false;
ring              687 drivers/thunderbolt/nhi.c 	spin_unlock(&ring->lock);
ring              688 drivers/thunderbolt/nhi.c 	spin_unlock_irq(&ring->nhi->lock);
ring              693 drivers/thunderbolt/nhi.c 	schedule_work(&ring->work);
ring              694 drivers/thunderbolt/nhi.c 	flush_work(&ring->work);
ring              708 drivers/thunderbolt/nhi.c void tb_ring_free(struct tb_ring *ring)
ring              710 drivers/thunderbolt/nhi.c 	spin_lock_irq(&ring->nhi->lock);
ring              715 drivers/thunderbolt/nhi.c 	if (ring->is_tx)
ring              716 drivers/thunderbolt/nhi.c 		ring->nhi->tx_rings[ring->hop] = NULL;
ring              718 drivers/thunderbolt/nhi.c 		ring->nhi->rx_rings[ring->hop] = NULL;
ring              720 drivers/thunderbolt/nhi.c 	if (ring->running) {
ring              721 drivers/thunderbolt/nhi.c 		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
ring              722 drivers/thunderbolt/nhi.c 			 RING_TYPE(ring), ring->hop);
ring              724 drivers/thunderbolt/nhi.c 	spin_unlock_irq(&ring->nhi->lock);
ring              726 drivers/thunderbolt/nhi.c 	ring_release_msix(ring);
ring              728 drivers/thunderbolt/nhi.c 	dma_free_coherent(&ring->nhi->pdev->dev,
ring              729 drivers/thunderbolt/nhi.c 			  ring->size * sizeof(*ring->descriptors),
ring              730 drivers/thunderbolt/nhi.c 			  ring->descriptors, ring->descriptors_dma);
ring              732 drivers/thunderbolt/nhi.c 	ring->descriptors = NULL;
ring              733 drivers/thunderbolt/nhi.c 	ring->descriptors_dma = 0;
ring              736 drivers/thunderbolt/nhi.c 	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
ring              737 drivers/thunderbolt/nhi.c 		ring->hop);
ring              744 drivers/thunderbolt/nhi.c 	flush_work(&ring->work);
ring              745 drivers/thunderbolt/nhi.c 	kfree(ring);
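
Taken together, the nhi.c excerpts define a strict ring lifecycle: allocate (descriptor memory, MSI-X vector, hop slot), start, enqueue while running, stop (which completes everything still queued as canceled), then free. A sketch of that sequence on the TX side, assuming the tb_ring_alloc_tx()/tb_ring_tx() wrappers declared in include/linux/thunderbolt.h (tb_ring_tx is a thin wrapper around __tb_ring_enqueue() shown above); the frame setup details are illustrative:

/* Lifecycle sketch for a Thunderbolt TX ring. */
struct tb_ring *ring = tb_ring_alloc_tx(nhi, -1 /* any free hop */, 256, 0);
if (!ring)
	return -ENOMEM;

tb_ring_start(ring);

frame->buffer_phy = dma_addr;		/* DMA-mapped payload buffer */
frame->callback = my_tx_callback;	/* runs from the ring's work item */
ret = tb_ring_tx(ring, frame);		/* -ESHUTDOWN once the ring stops */

tb_ring_stop(ring);	/* outstanding frames complete as canceled */
tb_ring_free(ring);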
ring              811 drivers/thunderbolt/nhi.c 	struct tb_ring *ring;
ring              838 drivers/thunderbolt/nhi.c 			ring = nhi->tx_rings[hop];
ring              840 drivers/thunderbolt/nhi.c 			ring = nhi->rx_rings[hop];
ring              841 drivers/thunderbolt/nhi.c 		if (ring == NULL) {
ring              849 drivers/thunderbolt/nhi.c 		spin_lock(&ring->lock);
ring              850 drivers/thunderbolt/nhi.c 		__ring_interrupt(ring);
ring              851 drivers/thunderbolt/nhi.c 		spin_unlock(&ring->lock);
ring              737 drivers/tty/serial/atmel_serial.c 	struct circ_buf *ring = &atmel_port->rx_ring;
ring              740 drivers/tty/serial/atmel_serial.c 	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
ring              744 drivers/tty/serial/atmel_serial.c 	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
ring              751 drivers/tty/serial/atmel_serial.c 	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
ring             1106 drivers/tty/serial/atmel_serial.c 	struct circ_buf *ring = &atmel_port->rx_ring;
ring             1139 drivers/tty/serial/atmel_serial.c 	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
ring             1140 drivers/tty/serial/atmel_serial.c 	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
ring             1153 drivers/tty/serial/atmel_serial.c 	if (ring->head < ring->tail) {
ring             1154 drivers/tty/serial/atmel_serial.c 		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
ring             1156 drivers/tty/serial/atmel_serial.c 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
ring             1157 drivers/tty/serial/atmel_serial.c 		ring->tail = 0;
ring             1162 drivers/tty/serial/atmel_serial.c 	if (ring->tail < ring->head) {
ring             1163 drivers/tty/serial/atmel_serial.c 		count = ring->head - ring->tail;
ring             1165 drivers/tty/serial/atmel_serial.c 		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
ring             1167 drivers/tty/serial/atmel_serial.c 		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
ring             1168 drivers/tty/serial/atmel_serial.c 			ring->head = 0;
ring             1169 drivers/tty/serial/atmel_serial.c 		ring->tail = ring->head;
ring             1197 drivers/tty/serial/atmel_serial.c 	struct circ_buf		*ring;
ring             1200 drivers/tty/serial/atmel_serial.c 	ring = &atmel_port->rx_ring;
ring             1214 drivers/tty/serial/atmel_serial.c 	BUG_ON(!PAGE_ALIGNED(ring->buf));
ring             1216 drivers/tty/serial/atmel_serial.c 		    virt_to_page(ring->buf),
ring             1218 drivers/tty/serial/atmel_serial.c 		    offset_in_page(ring->buf));
ring             1230 drivers/tty/serial/atmel_serial.c 			ring->buf,
ring             1521 drivers/tty/serial/atmel_serial.c 	struct circ_buf *ring = &atmel_port->rx_ring;
ring             1525 drivers/tty/serial/atmel_serial.c 	while (ring->head != ring->tail) {
ring             1531 drivers/tty/serial/atmel_serial.c 		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring             1533 drivers/tty/serial/atmel_serial.c 		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
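
Both the byte-oriented PDC ring and the DMA path above drain the circular buffer in at most two chunks: when head has wrapped past the end, first the run from tail to the end of the buffer, then the run from 0 up to head. The same shape as a standalone helper (plain C):

#include <stddef.h>
#include <string.h>

/* Copy [tail, head) out of a circular buffer of length len into dst and
 * return the byte count; mirrors the two tty_insert_flip_string() calls
 * in the DMA receive path above.
 */
static size_t ring_drain(char *dst, const char *buf, size_t len,
			 size_t *tail, size_t head)
{
	size_t copied = 0;

	if (head < *tail) {		/* wrapped: tail..end first */
		size_t n = len - *tail;

		memcpy(dst, buf + *tail, n);
		copied += n;
		*tail = 0;
	}
	if (*tail < head) {		/* straight run up to head */
		size_t n = head - *tail;

		memcpy(dst + copied, buf + *tail, n);
		copied += n;
		*tail = head;
	}
	return copied;
}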
ring             1004 drivers/tty/serial/fsl_lpuart.c 	struct circ_buf *ring = &sport->rx_ring;
ring             1084 drivers/tty/serial/fsl_lpuart.c 	ring->head = sport->rx_sgl.length - state.residue;
ring             1085 drivers/tty/serial/fsl_lpuart.c 	BUG_ON(ring->head > sport->rx_sgl.length);
ring             1098 drivers/tty/serial/fsl_lpuart.c 	if (ring->head < ring->tail) {
ring             1099 drivers/tty/serial/fsl_lpuart.c 		count = sport->rx_sgl.length - ring->tail;
ring             1101 drivers/tty/serial/fsl_lpuart.c 		tty_insert_flip_string(port, ring->buf + ring->tail, count);
ring             1102 drivers/tty/serial/fsl_lpuart.c 		ring->tail = 0;
ring             1107 drivers/tty/serial/fsl_lpuart.c 	if (ring->tail < ring->head) {
ring             1108 drivers/tty/serial/fsl_lpuart.c 		count = ring->head - ring->tail;
ring             1109 drivers/tty/serial/fsl_lpuart.c 		tty_insert_flip_string(port, ring->buf + ring->tail, count);
ring             1111 drivers/tty/serial/fsl_lpuart.c 		if (ring->head >= sport->rx_sgl.length)
ring             1112 drivers/tty/serial/fsl_lpuart.c 			ring->head = 0;
ring             1113 drivers/tty/serial/fsl_lpuart.c 		ring->tail = ring->head;
ring             1143 drivers/tty/serial/fsl_lpuart.c 	struct circ_buf *ring = &sport->rx_ring;
ring             1165 drivers/tty/serial/fsl_lpuart.c 	ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
ring             1166 drivers/tty/serial/fsl_lpuart.c 	if (!ring->buf)
ring             1169 drivers/tty/serial/fsl_lpuart.c 	sg_init_one(&sport->rx_sgl, ring->buf, sport->rx_dma_rng_buf_len);
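
fsl_lpuart recovers the producer position the same way atmel_serial does: ask the dmaengine how much of the scatterlist is still pending and subtract. A sketch of that step, with chan and cookie assumed to come from the driver's earlier prep/submit calls (not shown in the excerpt):

/* head = bytes the DMA engine has written into the ring buffer so far */
struct dma_tx_state state;

dmaengine_tx_status(chan, cookie, &state);
ring->head = sg_dma_len(&sg_rx) - state.residue;
BUG_ON(ring->head > sg_dma_len(&sg_rx));	/* residue must be in range */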
ring              111 drivers/usb/cdns3/debug.h 				   struct cdns3_trb *ring, char *str)
ring              149 drivers/usb/cdns3/debug.h 		trb = &ring[i];
ring              370 drivers/usb/cdns3/trace.h 		__dynamic_array(u8, ring, TRB_RING_SIZE)
ring              378 drivers/usb/cdns3/trace.h 		memcpy(__get_dynamic_array(ring), priv_ep->trb_pool,
ring              384 drivers/usb/cdns3/trace.h 				 (struct cdns3_trb *)__get_str(ring),
ring              176 drivers/usb/early/xhci-dbc.c xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
ring              182 drivers/usb/early/xhci-dbc.c 	ring->segment = seg;
ring              187 drivers/usb/early/xhci-dbc.c static void __init xdbc_free_ring(struct xdbc_ring *ring)
ring              189 drivers/usb/early/xhci-dbc.c 	struct xdbc_segment *seg = ring->segment;
ring              195 drivers/usb/early/xhci-dbc.c 	ring->segment = NULL;
ring              198 drivers/usb/early/xhci-dbc.c static void xdbc_reset_ring(struct xdbc_ring *ring)
ring              200 drivers/usb/early/xhci-dbc.c 	struct xdbc_segment *seg = ring->segment;
ring              205 drivers/usb/early/xhci-dbc.c 	ring->enqueue = seg->trbs;
ring              206 drivers/usb/early/xhci-dbc.c 	ring->dequeue = seg->trbs;
ring              207 drivers/usb/early/xhci-dbc.c 	ring->cycle_state = 1;
ring              209 drivers/usb/early/xhci-dbc.c 	if (ring != &xdbc.evt_ring) {
ring              389 drivers/usb/early/xhci-dbc.c xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
ring              393 drivers/usb/early/xhci-dbc.c 	trb = ring->enqueue;
ring              399 drivers/usb/early/xhci-dbc.c 	++(ring->enqueue);
ring              400 drivers/usb/early/xhci-dbc.c 	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
ring              401 drivers/usb/early/xhci-dbc.c 		link_trb = ring->enqueue;
ring              402 drivers/usb/early/xhci-dbc.c 		if (ring->cycle_state)
ring              407 drivers/usb/early/xhci-dbc.c 		ring->enqueue = ring->segment->trbs;
ring              408 drivers/usb/early/xhci-dbc.c 		ring->cycle_state ^= 1;
ring              465 drivers/usb/early/xhci-dbc.c 	struct xdbc_ring *ring;
ring              485 drivers/usb/early/xhci-dbc.c 	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
ring              486 drivers/usb/early/xhci-dbc.c 	trb = ring->enqueue;
ring              487 drivers/usb/early/xhci-dbc.c 	cycle = ring->cycle_state;
ring              507 drivers/usb/early/xhci-dbc.c 	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);
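
xdbc_queue_trb() above implements the single-segment xHCI ring advance: slot TRBS_PER_SEGMENT - 1 is a link TRB pointing back at slot 0, its cycle bit is set or cleared to match the producer's current cycle_state, and cycle_state flips on every wrap so the consumer can tell fresh TRBs from stale ones. A standalone model of the same logic (plain C, illustrative types; the ring starts as in xdbc_reset_ring(), with enqueue at trbs[0] and cycle_state 1):

#define NTRB		16
#define TRB_CYCLE	1u

struct trb { unsigned int field[4]; };

struct trb_ring {
	struct trb trbs[NTRB];		/* trbs[NTRB - 1] is the link TRB */
	struct trb *enqueue;		/* init: trbs */
	int cycle_state;		/* init: 1 */
};

static void queue_trb(struct trb_ring *r, unsigned int f0, unsigned int f1,
		      unsigned int f2, unsigned int f3)
{
	*r->enqueue = (struct trb){ { f0, f1, f2, f3 } };

	if (++r->enqueue >= &r->trbs[NTRB - 1]) {
		struct trb *link = r->enqueue;

		if (r->cycle_state)		/* hand the link TRB over too */
			link->field[3] |= TRB_CYCLE;
		else
			link->field[3] &= ~TRB_CYCLE;
		r->enqueue = r->trbs;		/* wrap to slot 0... */
		r->cycle_state ^= 1;		/* ...with the opposite cycle */
	}
}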
ring              150 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring              185 drivers/usb/host/u132-hcd.c 	struct u132_ring ring[MAX_U132_RINGS];
ring              300 drivers/usb/host/u132-hcd.c static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
ring              305 drivers/usb/host/u132-hcd.c static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
ring              309 drivers/usb/host/u132-hcd.c 		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
ring              311 drivers/usb/host/u132-hcd.c 	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
ring              316 drivers/usb/host/u132-hcd.c static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
ring              320 drivers/usb/host/u132-hcd.c 	u132_ring_requeue_work(u132, ring, delta);
ring              323 drivers/usb/host/u132-hcd.c static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
ring              325 drivers/usb/host/u132-hcd.c 	if (cancel_delayed_work(&ring->scheduler))
ring              339 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring = endp->ring;
ring              341 drivers/usb/host/u132-hcd.c 	ring->length -= 1;
ring              342 drivers/usb/host/u132-hcd.c 	if (endp == ring->curr_endp) {
ring              344 drivers/usb/host/u132-hcd.c 			ring->curr_endp = NULL;
ring              349 drivers/usb/host/u132-hcd.c 			ring->curr_endp = next_endp;
ring              510 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring              532 drivers/usb/host/u132-hcd.c 	ring = endp->ring;
ring              533 drivers/usb/host/u132-hcd.c 	ring->in_use = 0;
ring              534 drivers/usb/host/u132-hcd.c 	u132_ring_cancel_work(u132, ring);
ring              535 drivers/usb/host/u132-hcd.c 	u132_ring_queue_work(u132, ring, 0);
ring              573 drivers/usb/host/u132-hcd.c static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
ring              579 drivers/usb/host/u132-hcd.c 	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
ring              583 drivers/usb/host/u132-hcd.c static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
ring              589 drivers/usb/host/u132-hcd.c 	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
ring              593 drivers/usb/host/u132-hcd.c static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
ring              599 drivers/usb/host/u132-hcd.c 	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
ring              603 drivers/usb/host/u132-hcd.c static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
ring              609 drivers/usb/host/u132-hcd.c 	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
ring              645 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring              662 drivers/usb/host/u132-hcd.c 				retval = edset_single(u132, ring, endp, urb,
ring              669 drivers/usb/host/u132-hcd.c 				ring->in_use = 0;
ring              673 drivers/usb/host/u132-hcd.c 				u132_ring_cancel_work(u132, ring);
ring              674 drivers/usb/host/u132-hcd.c 				u132_ring_queue_work(u132, ring, 0);
ring              744 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring              750 drivers/usb/host/u132-hcd.c 			retval = edset_output(u132, ring, endp, urb, address,
ring              796 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring              813 drivers/usb/host/u132-hcd.c 				ring->number, endp, urb, address,
ring              935 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring              950 drivers/usb/host/u132-hcd.c 				ring->number, endp, urb, address,
ring             1046 drivers/usb/host/u132-hcd.c 			struct u132_ring *ring = endp->ring;
ring             1049 drivers/usb/host/u132-hcd.c 				ring->number, endp, urb, address,
ring             1057 drivers/usb/host/u132-hcd.c 			struct u132_ring *ring = endp->ring;
ring             1060 drivers/usb/host/u132-hcd.c 				ring->number, endp, urb, address,
ring             1143 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring             1146 drivers/usb/host/u132-hcd.c 			ring->number, endp, urb, 0, endp->usb_endp, 0,
ring             1224 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring             1235 drivers/usb/host/u132-hcd.c 			ring->number, endp, urb, address, endp->usb_endp, 0x3,
ring             1276 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = endp->ring;
ring             1279 drivers/usb/host/u132-hcd.c 			ring->number, endp, urb, address, endp->usb_endp, 0,
ring             1299 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring =
ring             1301 drivers/usb/host/u132-hcd.c 	struct u132 *u132 = ring->u132;
ring             1303 drivers/usb/host/u132-hcd.c 	if (ring->in_use) {
ring             1305 drivers/usb/host/u132-hcd.c 		u132_ring_put_kref(u132, ring);
ring             1307 drivers/usb/host/u132-hcd.c 	} else if (ring->curr_endp) {
ring             1308 drivers/usb/host/u132-hcd.c 		struct u132_endp *endp, *last_endp = ring->curr_endp;
ring             1314 drivers/usb/host/u132-hcd.c 				ring->curr_endp = endp;
ring             1318 drivers/usb/host/u132-hcd.c 				u132_ring_put_kref(u132, ring);
ring             1332 drivers/usb/host/u132-hcd.c 			u132_ring_put_kref(u132, ring);
ring             1340 drivers/usb/host/u132-hcd.c 			u132_ring_requeue_work(u132, ring, wakeup);
ring             1345 drivers/usb/host/u132-hcd.c 			u132_ring_put_kref(u132, ring);
ring             1350 drivers/usb/host/u132-hcd.c 		u132_ring_put_kref(u132, ring);
ring             1357 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring             1362 drivers/usb/host/u132-hcd.c 	ring = endp->ring;
ring             1367 drivers/usb/host/u132-hcd.c 				ring->number, endp);
ring             1375 drivers/usb/host/u132-hcd.c 	} else if (ring->in_use) {
ring             1385 drivers/usb/host/u132-hcd.c 		if (ring->in_use) {
ring             1394 drivers/usb/host/u132-hcd.c 			ring->curr_endp = endp;
ring             1395 drivers/usb/host/u132-hcd.c 			ring->in_use = 1;
ring             1397 drivers/usb/host/u132-hcd.c 			retval = edset_single(u132, ring, endp, urb, address,
ring             1405 drivers/usb/host/u132-hcd.c 		if (ring->in_use) {
ring             1414 drivers/usb/host/u132-hcd.c 			ring->curr_endp = endp;
ring             1415 drivers/usb/host/u132-hcd.c 			ring->in_use = 1;
ring             1417 drivers/usb/host/u132-hcd.c 			retval = edset_setup(u132, ring, endp, urb, address,
ring             1427 drivers/usb/host/u132-hcd.c 			ring->curr_endp = endp;
ring             1428 drivers/usb/host/u132-hcd.c 			ring->in_use = 1;
ring             1430 drivers/usb/host/u132-hcd.c 			retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
ring             1441 drivers/usb/host/u132-hcd.c 			ring->curr_endp = endp;
ring             1442 drivers/usb/host/u132-hcd.c 			ring->in_use = 1;
ring             1444 drivers/usb/host/u132-hcd.c 			retval = edset_setup(u132, ring, endp, urb, address,
ring             1453 drivers/usb/host/u132-hcd.c 			if (ring->in_use) {
ring             1462 drivers/usb/host/u132-hcd.c 				ring->curr_endp = endp;
ring             1463 drivers/usb/host/u132-hcd.c 				ring->in_use = 1;
ring             1465 drivers/usb/host/u132-hcd.c 				retval = edset_input(u132, ring, endp, urb,
ring             1476 drivers/usb/host/u132-hcd.c 			if (ring->in_use) {
ring             1485 drivers/usb/host/u132-hcd.c 				ring->curr_endp = endp;
ring             1486 drivers/usb/host/u132-hcd.c 				ring->in_use = 1;
ring             1488 drivers/usb/host/u132-hcd.c 				retval = edset_output(u132, ring, endp, urb,
ring             1855 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring             1877 drivers/usb/host/u132-hcd.c 	ring = endp->ring = &u132->ring[0];
ring             1878 drivers/usb/host/u132-hcd.c 	if (ring->curr_endp) {
ring             1879 drivers/usb/host/u132-hcd.c 		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
ring             1882 drivers/usb/host/u132-hcd.c 		ring->curr_endp = endp;
ring             1884 drivers/usb/host/u132-hcd.c 	ring->length += 1;
ring             1954 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring             2002 drivers/usb/host/u132-hcd.c 	ring = endp->ring = &u132->ring[ring_number - 1];
ring             2003 drivers/usb/host/u132-hcd.c 	if (ring->curr_endp) {
ring             2004 drivers/usb/host/u132-hcd.c 		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
ring             2007 drivers/usb/host/u132-hcd.c 		ring->curr_endp = endp;
ring             2009 drivers/usb/host/u132-hcd.c 	ring->length += 1;
ring             2050 drivers/usb/host/u132-hcd.c 	struct u132_ring *ring;
ring             2072 drivers/usb/host/u132-hcd.c 	ring = endp->ring = &u132->ring[0];
ring             2073 drivers/usb/host/u132-hcd.c 	if (ring->curr_endp) {
ring             2074 drivers/usb/host/u132-hcd.c 		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
ring             2077 drivers/usb/host/u132-hcd.c 		ring->curr_endp = endp;
ring             2079 drivers/usb/host/u132-hcd.c 	ring->length += 1;
ring             2403 drivers/usb/host/u132-hcd.c 		"\n", urb, endp->endp_number, endp, endp->ring->number,
ring             2425 drivers/usb/host/u132-hcd.c 			endp->endp_number, endp, endp->ring->number,
ring             2488 drivers/usb/host/u132-hcd.c 				endp->endp_number, endp, endp->ring->number,
ring             2983 drivers/usb/host/u132-hcd.c 				struct u132_ring *ring = &u132->ring[rings];
ring             2984 drivers/usb/host/u132-hcd.c 				u132_ring_cancel_work(u132, ring);
ring             3017 drivers/usb/host/u132-hcd.c 		struct u132_ring *ring = &u132->ring[rings];
ring             3018 drivers/usb/host/u132-hcd.c 		ring->u132 = u132;
ring             3019 drivers/usb/host/u132-hcd.c 		ring->number = rings + 1;
ring             3020 drivers/usb/host/u132-hcd.c 		ring->length = 0;
ring             3021 drivers/usb/host/u132-hcd.c 		ring->curr_endp = NULL;
ring             3022 drivers/usb/host/u132-hcd.c 		INIT_DELAYED_WORK(&ring->scheduler,
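
u132-hcd embeds its rings statically in the device structure and drives each one from a delayed-work "scheduler" (the kref-balanced u132_ring_requeue_work()/u132_ring_cancel_work() helpers above). The probe-time loop the last excerpt belongs to initializes every ring before any work can be queued; reconstructed from the excerpt, with the work-handler name taken from the same file (treat it as illustrative):

for (rings = 0; rings < MAX_U132_RINGS; rings++) {
	struct u132_ring *ring = &u132->ring[rings];

	ring->u132 = u132;
	ring->number = rings + 1;	/* ring numbers are 1-based */
	ring->length = 0;
	ring->curr_endp = NULL;
	INIT_DELAYED_WORK(&ring->scheduler,
			  u132_hcd_ring_work_scheduler);
}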
ring              217 drivers/usb/host/xhci-dbgcap.c xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
ring              222 drivers/usb/host/xhci-dbgcap.c 	trb = ring->enqueue;
ring              228 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);
ring              230 drivers/usb/host/xhci-dbgcap.c 	ring->num_trbs_free--;
ring              231 drivers/usb/host/xhci-dbgcap.c 	next = ++(ring->enqueue);
ring              234 drivers/usb/host/xhci-dbgcap.c 		ring->enqueue = ring->enq_seg->trbs;
ring              235 drivers/usb/host/xhci-dbgcap.c 		ring->cycle_state ^= 1;
ring              246 drivers/usb/host/xhci-dbgcap.c 	struct xhci_ring	*ring = dep->ring;
ring              251 drivers/usb/host/xhci-dbgcap.c 	if (ring->num_trbs_free < num_trbs)
ring              255 drivers/usb/host/xhci-dbgcap.c 	trb	= ring->enqueue;
ring              256 drivers/usb/host/xhci-dbgcap.c 	cycle	= ring->cycle_state;
ring              265 drivers/usb/host/xhci-dbgcap.c 	req->trb = ring->enqueue;
ring              266 drivers/usb/host/xhci-dbgcap.c 	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
ring              267 drivers/usb/host/xhci-dbgcap.c 	xhci_dbc_queue_trb(ring,
ring              355 drivers/usb/host/xhci-dbgcap.c 	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;
ring              593 drivers/usb/host/xhci-dbgcap.c 	struct xhci_ring	*ring;
ring              605 drivers/usb/host/xhci-dbgcap.c 	ring		= dep->ring;
ring              640 drivers/usb/host/xhci-dbgcap.c 	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
ring              642 drivers/usb/host/xhci-dbgcap.c 	ring->num_trbs_free++;
ring              107 drivers/usb/host/xhci-dbgcap.h 	struct xhci_ring		*ring;
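
xhci-dbgcap keeps a simple capacity invariant: xhci_dbc_queue_trb() decrements num_trbs_free for every TRB written, and the transfer-completion path increments it again, so the check at the top of the request path can refuse a request that would not fit. A standalone model of that bookkeeping:

/* free + in-flight stays constant; queueing reserves, completion returns. */
struct dbc_ring { int num_trbs_free; };

static int dbc_reserve(struct dbc_ring *r, int num_trbs)
{
	if (r->num_trbs_free < num_trbs)
		return -1;		/* no room; caller retries later */
	r->num_trbs_free -= num_trbs;
	return 0;
}

static void dbc_complete(struct dbc_ring *r, int num_trbs)
{
	r->num_trbs_free += num_trbs;	/* capacity comes back on completion */
}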
ring              166 drivers/usb/host/xhci-debugfs.c 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
ring              168 drivers/usb/host/xhci-debugfs.c 	dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
ring              177 drivers/usb/host/xhci-debugfs.c 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
ring              179 drivers/usb/host/xhci-debugfs.c 	dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
ring              187 drivers/usb/host/xhci-debugfs.c 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
ring              189 drivers/usb/host/xhci-debugfs.c 	seq_printf(s, "%d\n", ring->cycle_state);
ring              215 drivers/usb/host/xhci-debugfs.c 	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
ring              216 drivers/usb/host/xhci-debugfs.c 	struct xhci_segment	*seg = ring->first_seg;
ring              218 drivers/usb/host/xhci-debugfs.c 	for (i = 0; i < ring->num_segs; i++) {
ring              411 drivers/usb/host/xhci-debugfs.c 						   struct xhci_ring **ring,
ring              419 drivers/usb/host/xhci-debugfs.c 				  ring, dir, &xhci_ring_fops);
ring              455 drivers/usb/host/xhci-debugfs.c 						   &dev->eps[ep_index].ring,
ring              491 drivers/usb/host/xhci-debugfs.c 	xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
ring              409 drivers/usb/host/xhci-hub.c 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
ring              472 drivers/usb/host/xhci-hub.c 		} else if (ep->ring && ep->ring->dequeue) {
ring              129 drivers/usb/host/xhci-mem.c static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring              135 drivers/usb/host/xhci-mem.c 	if (!ring || !first || !last)
ring              138 drivers/usb/host/xhci-mem.c 	next = ring->enq_seg->next;
ring              139 drivers/usb/host/xhci-mem.c 	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
ring              140 drivers/usb/host/xhci-mem.c 	xhci_link_segments(xhci, last, next, ring->type);
ring              141 drivers/usb/host/xhci-mem.c 	ring->num_segs += num_segs;
ring              142 drivers/usb/host/xhci-mem.c 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
ring              144 drivers/usb/host/xhci-mem.c 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring              145 drivers/usb/host/xhci-mem.c 		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
ring              149 drivers/usb/host/xhci-mem.c 		ring->last_seg = last;
ring              185 drivers/usb/host/xhci-mem.c 		struct xhci_ring *ring,
ring              201 drivers/usb/host/xhci-mem.c 			key, ring);
ring              218 drivers/usb/host/xhci-mem.c 		struct xhci_ring *ring,
ring              233 drivers/usb/host/xhci-mem.c 				ring, seg, mem_flags);
ring              256 drivers/usb/host/xhci-mem.c static void xhci_remove_stream_mapping(struct xhci_ring *ring)
ring              260 drivers/usb/host/xhci-mem.c 	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
ring              263 drivers/usb/host/xhci-mem.c 	seg = ring->first_seg;
ring              265 drivers/usb/host/xhci-mem.c 		xhci_remove_segment_mapping(ring->trb_address_map, seg);
ring              267 drivers/usb/host/xhci-mem.c 	} while (seg != ring->first_seg);
ring              270 drivers/usb/host/xhci-mem.c static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
ring              272 drivers/usb/host/xhci-mem.c 	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
ring              273 drivers/usb/host/xhci-mem.c 			ring->first_seg, ring->last_seg, mem_flags);
ring              277 drivers/usb/host/xhci-mem.c void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring              279 drivers/usb/host/xhci-mem.c 	if (!ring)
ring              282 drivers/usb/host/xhci-mem.c 	trace_xhci_ring_free(ring);
ring              284 drivers/usb/host/xhci-mem.c 	if (ring->first_seg) {
ring              285 drivers/usb/host/xhci-mem.c 		if (ring->type == TYPE_STREAM)
ring              286 drivers/usb/host/xhci-mem.c 			xhci_remove_stream_mapping(ring);
ring              287 drivers/usb/host/xhci-mem.c 		xhci_free_segments_for_ring(xhci, ring->first_seg);
ring              290 drivers/usb/host/xhci-mem.c 	kfree(ring);
ring              293 drivers/usb/host/xhci-mem.c static void xhci_initialize_ring_info(struct xhci_ring *ring,
ring              297 drivers/usb/host/xhci-mem.c 	ring->enqueue = ring->first_seg->trbs;
ring              298 drivers/usb/host/xhci-mem.c 	ring->enq_seg = ring->first_seg;
ring              299 drivers/usb/host/xhci-mem.c 	ring->dequeue = ring->enqueue;
ring              300 drivers/usb/host/xhci-mem.c 	ring->deq_seg = ring->first_seg;
ring              308 drivers/usb/host/xhci-mem.c 	ring->cycle_state = cycle_state;
ring              314 drivers/usb/host/xhci-mem.c 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
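
The arithmetic at the end of xhci_initialize_ring_info() accounts for two kinds of reserved slots: each segment ends in a link TRB (hence TRBS_PER_SEGMENT - 1 usable entries per segment), and one further TRB per ring is held back for software accounting. With the TRBS_PER_SEGMENT value from xhci.h:

#define TRBS_PER_SEGMENT 256	/* value from drivers/usb/host/xhci.h */

static unsigned int initial_trbs_free(unsigned int num_segs)
{
	/* e.g. two segments: 2 * (256 - 1) - 1 == 509 free TRBs */
	return num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}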
ring              366 drivers/usb/host/xhci-mem.c 	struct xhci_ring	*ring;
ring              370 drivers/usb/host/xhci-mem.c 	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
ring              371 drivers/usb/host/xhci-mem.c 	if (!ring)
ring              374 drivers/usb/host/xhci-mem.c 	ring->num_segs = num_segs;
ring              375 drivers/usb/host/xhci-mem.c 	ring->bounce_buf_len = max_packet;
ring              376 drivers/usb/host/xhci-mem.c 	INIT_LIST_HEAD(&ring->td_list);
ring              377 drivers/usb/host/xhci-mem.c 	ring->type = type;
ring              379 drivers/usb/host/xhci-mem.c 		return ring;
ring              381 drivers/usb/host/xhci-mem.c 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
ring              382 drivers/usb/host/xhci-mem.c 			&ring->last_seg, num_segs, cycle_state, type,
ring              390 drivers/usb/host/xhci-mem.c 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
ring              393 drivers/usb/host/xhci-mem.c 	xhci_initialize_ring_info(ring, cycle_state);
ring              394 drivers/usb/host/xhci-mem.c 	trace_xhci_ring_alloc(ring);
ring              395 drivers/usb/host/xhci-mem.c 	return ring;
ring              398 drivers/usb/host/xhci-mem.c 	kfree(ring);
ring              406 drivers/usb/host/xhci-mem.c 	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
ring              407 drivers/usb/host/xhci-mem.c 	virt_dev->eps[ep_index].ring = NULL;
ring              414 drivers/usb/host/xhci-mem.c int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring              427 drivers/usb/host/xhci-mem.c 	num_segs = ring->num_segs > num_segs_needed ?
ring              428 drivers/usb/host/xhci-mem.c 			ring->num_segs : num_segs_needed;
ring              431 drivers/usb/host/xhci-mem.c 			num_segs, ring->cycle_state, ring->type,
ring              432 drivers/usb/host/xhci-mem.c 			ring->bounce_buf_len, flags);
ring              436 drivers/usb/host/xhci-mem.c 	if (ring->type == TYPE_STREAM)
ring              437 drivers/usb/host/xhci-mem.c 		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring              438 drivers/usb/host/xhci-mem.c 						ring, first, last, flags);
ring              451 drivers/usb/host/xhci-mem.c 	xhci_link_rings(xhci, ring, first, last, num_segs);
ring              452 drivers/usb/host/xhci-mem.c 	trace_xhci_ring_expansion(ring);
ring              455 drivers/usb/host/xhci-mem.c 			ring->num_segs);
ring              583 drivers/usb/host/xhci-mem.c 	return ep->ring;
ring              594 drivers/usb/host/xhci-mem.c 		return ep->ring;
ring              754 drivers/usb/host/xhci-mem.c 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
ring              755 drivers/usb/host/xhci-mem.c 	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
ring              899 drivers/usb/host/xhci-mem.c 		if (dev->eps[i].ring)
ring              900 drivers/usb/host/xhci-mem.c 			xhci_ring_free(xhci, dev->eps[i].ring);
ring             1012 drivers/usb/host/xhci-mem.c 	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
ring             1013 drivers/usb/host/xhci-mem.c 	if (!dev->eps[0].ring)
ring             1050 drivers/usb/host/xhci-mem.c 	ep_ring = virt_dev->eps[0].ring;
ring             1205 drivers/usb/host/xhci-mem.c 	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
ring             1206 drivers/usb/host/xhci-mem.c 				   dev->eps[0].ring->cycle_state);
ring               95 drivers/usb/host/xhci-ring.c static bool last_trb_on_ring(struct xhci_ring *ring,
ring               98 drivers/usb/host/xhci-ring.c 	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
ring              140 drivers/usb/host/xhci-ring.c 		struct xhci_ring *ring,
ring              156 drivers/usb/host/xhci-ring.c void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring              159 drivers/usb/host/xhci-ring.c 	if (ring->type == TYPE_EVENT) {
ring              160 drivers/usb/host/xhci-ring.c 		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring              161 drivers/usb/host/xhci-ring.c 			ring->dequeue++;
ring              164 drivers/usb/host/xhci-ring.c 		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring              165 drivers/usb/host/xhci-ring.c 			ring->cycle_state ^= 1;
ring              166 drivers/usb/host/xhci-ring.c 		ring->deq_seg = ring->deq_seg->next;
ring              167 drivers/usb/host/xhci-ring.c 		ring->dequeue = ring->deq_seg->trbs;
ring              172 drivers/usb/host/xhci-ring.c 	if (!trb_is_link(ring->dequeue)) {
ring              173 drivers/usb/host/xhci-ring.c 		ring->dequeue++;
ring              174 drivers/usb/host/xhci-ring.c 		ring->num_trbs_free++;
ring              176 drivers/usb/host/xhci-ring.c 	while (trb_is_link(ring->dequeue)) {
ring              177 drivers/usb/host/xhci-ring.c 		ring->deq_seg = ring->deq_seg->next;
ring              178 drivers/usb/host/xhci-ring.c 		ring->dequeue = ring->deq_seg->trbs;
ring              182 drivers/usb/host/xhci-ring.c 	trace_xhci_inc_deq(ring);
ring              204 drivers/usb/host/xhci-ring.c static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring              210 drivers/usb/host/xhci-ring.c 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
ring              212 drivers/usb/host/xhci-ring.c 	if (!trb_is_link(ring->enqueue))
ring              213 drivers/usb/host/xhci-ring.c 		ring->num_trbs_free--;
ring              214 drivers/usb/host/xhci-ring.c 	next = ++(ring->enqueue);
ring              233 drivers/usb/host/xhci-ring.c 		if (!(ring->type == TYPE_ISOC &&
ring              245 drivers/usb/host/xhci-ring.c 			ring->cycle_state ^= 1;
ring              247 drivers/usb/host/xhci-ring.c 		ring->enq_seg = ring->enq_seg->next;
ring              248 drivers/usb/host/xhci-ring.c 		ring->enqueue = ring->enq_seg->trbs;
ring              249 drivers/usb/host/xhci-ring.c 		next = ring->enqueue;
ring              252 drivers/usb/host/xhci-ring.c 	trace_xhci_inc_enq(ring);
ring              259 drivers/usb/host/xhci-ring.c static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring              264 drivers/usb/host/xhci-ring.c 	if (ring->num_trbs_free < num_trbs)
ring              267 drivers/usb/host/xhci-ring.c 	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
ring              268 drivers/usb/host/xhci-ring.c 		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
ring              269 drivers/usb/host/xhci-ring.c 		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
ring              422 drivers/usb/host/xhci-ring.c 		if (ep->ring && !(list_empty(&ep->ring->td_list)))
ring              456 drivers/usb/host/xhci-ring.c 		return ep->ring;
ring              594 drivers/usb/host/xhci-ring.c 		if (new_deq == ep->ring->dequeue) {
ring              679 drivers/usb/host/xhci-ring.c 		struct xhci_ring *ring, struct xhci_td *td)
ring              686 drivers/usb/host/xhci-ring.c 	if (!ring || !seg || !urb)
ring              690 drivers/usb/host/xhci-ring.c 		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
ring              695 drivers/usb/host/xhci-ring.c 	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
ring              853 drivers/usb/host/xhci-ring.c static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring              858 drivers/usb/host/xhci-ring.c 	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
ring              864 drivers/usb/host/xhci-ring.c 		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
ring              878 drivers/usb/host/xhci-ring.c 	struct xhci_ring *ring;
ring              887 drivers/usb/host/xhci-ring.c 			ring = ep->stream_info->stream_rings[stream_id];
ring              888 drivers/usb/host/xhci-ring.c 			if (!ring)
ring              894 drivers/usb/host/xhci-ring.c 			xhci_kill_ring_urbs(xhci, ring);
ring              897 drivers/usb/host/xhci-ring.c 		ring = ep->ring;
ring              898 drivers/usb/host/xhci-ring.c 		if (!ring)
ring              903 drivers/usb/host/xhci-ring.c 		xhci_kill_ring_urbs(xhci, ring);
ring             2016 drivers/usb/host/xhci-ring.c static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring             2020 drivers/usb/host/xhci-ring.c 	union xhci_trb *trb = ring->dequeue;
ring             2021 drivers/usb/host/xhci-ring.c 	struct xhci_segment *seg = ring->deq_seg;
ring             2023 drivers/usb/host/xhci-ring.c 	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
ring             2911 drivers/usb/host/xhci-ring.c static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring             2917 drivers/usb/host/xhci-ring.c 	trb = &ring->enqueue->generic;
ring             2923 drivers/usb/host/xhci-ring.c 	trace_xhci_queue_trb(ring, trb);
ring             2925 drivers/usb/host/xhci-ring.c 	inc_enq(xhci, ring, more_trbs_coming);
ring             3295 drivers/usb/host/xhci-ring.c 	struct xhci_ring *ring;
ring             3310 drivers/usb/host/xhci-ring.c 	ring = xhci_urb_to_transfer_ring(xhci, urb);
ring             3311 drivers/usb/host/xhci-ring.c 	if (!ring)
ring             3346 drivers/usb/host/xhci-ring.c 	start_trb = &ring->enqueue->generic;
ring             3347 drivers/usb/host/xhci-ring.c 	start_cycle = ring->cycle_state;
ring             3368 drivers/usb/host/xhci-ring.c 			field |= ring->cycle_state;
ring             3375 drivers/usb/host/xhci-ring.c 			if (trb_is_link(ring->enqueue + 1)) {
ring             3378 drivers/usb/host/xhci-ring.c 						  ring->enq_seg)) {
ring             3379 drivers/usb/host/xhci-ring.c 					send_addr = ring->enq_seg->bounce_dma;
ring             3381 drivers/usb/host/xhci-ring.c 					td->bounce_seg = ring->enq_seg;
ring             3389 drivers/usb/host/xhci-ring.c 			td->last_trb = ring->enqueue;
ring             3411 drivers/usb/host/xhci-ring.c 		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
ring             3439 drivers/usb/host/xhci-ring.c 		urb_priv->td[1].last_trb = ring->enqueue;
ring             3440 drivers/usb/host/xhci-ring.c 		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
ring             3441 drivers/usb/host/xhci-ring.c 		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
ring             3747 drivers/usb/host/xhci-ring.c 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
ring             3937 drivers/usb/host/xhci-ring.c 	ep_ring = xdev->eps[ep_index].ring;
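The xhci-ring.c hits above all orbit one structure: a transfer ring built from linked segments, where the last TRB of a segment is a link TRB and the consumer's cycle bit flips each time the dequeue pointer wraps past the first segment (compare the inc_deq() hits). A minimal sketch of that advance, using assumed seg/trb types rather than the driver's real xhci_segment/xhci_ring definitions:

    struct trb { unsigned int ctrl; };                /* assumed TRB layout */
    struct seg { struct trb trbs[16]; struct seg *next; };
    struct xring {
        struct seg *first, *deq_seg;
        struct trb *deq;
        int cycle;
    };

    static int is_link(const struct trb *t) { return t->ctrl & 0x1; } /* stand-in */

    /* Step the dequeue pointer one TRB; at a link TRB, hop to the next
     * segment, and toggle the cycle bit when wrapping past the first one. */
    static void ring_inc_deq(struct xring *r)
    {
        if (!is_link(r->deq)) {
            r->deq++;
            return;
        }
        if (r->deq_seg->next == r->first)
            r->cycle ^= 1;
        r->deq_seg = r->deq_seg->next;
        r->deq = r->deq_seg->trbs;
    }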
ring              117 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              118 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb),
ring              127 drivers/usb/host/xhci-trace.h 		__entry->type = ring->type;
ring              140 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              141 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              145 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              146 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              150 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              151 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              155 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              156 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              160 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              161 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              165 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              166 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              170 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring, struct xhci_generic_trb *trb),
ring              171 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring, trb)
ring              455 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              456 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring),
ring              459 drivers/usb/host/xhci-trace.h 		__field(void *, ring)
ring              471 drivers/usb/host/xhci-trace.h 		__entry->ring = ring;
ring              472 drivers/usb/host/xhci-trace.h 		__entry->type = ring->type;
ring              473 drivers/usb/host/xhci-trace.h 		__entry->num_segs = ring->num_segs;
ring              474 drivers/usb/host/xhci-trace.h 		__entry->stream_id = ring->stream_id;
ring              475 drivers/usb/host/xhci-trace.h 		__entry->enq_seg = ring->enq_seg->dma;
ring              476 drivers/usb/host/xhci-trace.h 		__entry->deq_seg = ring->deq_seg->dma;
ring              477 drivers/usb/host/xhci-trace.h 		__entry->cycle_state = ring->cycle_state;
ring              478 drivers/usb/host/xhci-trace.h 		__entry->num_trbs_free = ring->num_trbs_free;
ring              479 drivers/usb/host/xhci-trace.h 		__entry->bounce_buf_len = ring->bounce_buf_len;
ring              480 drivers/usb/host/xhci-trace.h 		__entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
ring              481 drivers/usb/host/xhci-trace.h 		__entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
ring              484 drivers/usb/host/xhci-trace.h 			xhci_ring_type_string(__entry->type), __entry->ring,
ring              496 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              497 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring)
ring              501 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              502 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring)
ring              506 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              507 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring)
ring              511 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              512 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring)
ring              516 drivers/usb/host/xhci-trace.h 	TP_PROTO(struct xhci_ring *ring),
ring              517 drivers/usb/host/xhci-trace.h 	TP_ARGS(ring)
ring               41 drivers/usb/host/xhci.c static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
ring               43 drivers/usb/host/xhci.c 	struct xhci_segment *seg = ring->first_seg;
ring               51 drivers/usb/host/xhci.c 	} while (seg && seg != ring->first_seg);
ring              850 drivers/usb/host/xhci.c 	struct xhci_ring *ring;
ring              853 drivers/usb/host/xhci.c 	ring = xhci->cmd_ring;
ring              854 drivers/usb/host/xhci.c 	seg = ring->deq_seg;
ring              861 drivers/usb/host/xhci.c 	} while (seg != ring->deq_seg);
ring              864 drivers/usb/host/xhci.c 	ring->deq_seg = ring->first_seg;
ring              865 drivers/usb/host/xhci.c 	ring->dequeue = ring->first_seg->trbs;
ring              866 drivers/usb/host/xhci.c 	ring->enq_seg = ring->deq_seg;
ring              867 drivers/usb/host/xhci.c 	ring->enqueue = ring->dequeue;
ring              869 drivers/usb/host/xhci.c 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
ring              874 drivers/usb/host/xhci.c 	ring->cycle_state = 1;
ring             1776 drivers/usb/host/xhci.c 		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
ring             1863 drivers/usb/host/xhci.c 	if (virt_dev->eps[ep_index].ring &&
ring             2912 drivers/usb/host/xhci.c 		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
ring             2945 drivers/usb/host/xhci.c 		if (virt_dev->eps[i].ring) {
ring             2949 drivers/usb/host/xhci.c 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
ring             3193 drivers/usb/host/xhci.c 	if (!list_empty(&ep->ring->td_list)) {
ring             3275 drivers/usb/host/xhci.c 	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
ring             3662 drivers/usb/host/xhci.c 		if (virt_dev->eps[i].ring) {
ring             3828 drivers/usb/host/xhci.c 		if (ep->ring) {
ring              921 drivers/usb/host/xhci.h 	struct xhci_ring		*ring;
ring             2010 drivers/usb/host/xhci.h void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
ring             2011 drivers/usb/host/xhci.h int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring             2131 drivers/usb/host/xhci.h void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
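Among the xhci.c hits is the command-ring reset path: every segment is zeroed, enqueue and dequeue both return to the first TRB, the cycle state goes back to 1, and the free count is recomputed as num_segs * (TRBS_PER_SEGMENT - 1) - 1, since each segment gives up one TRB to its link and one further slot stays reserved. A worked check of that formula, with the usual 256-TRB segment size assumed:

    enum { TRBS_PER_SEGMENT = 256 };   /* assumed; matches common xHCI builds */

    static unsigned int cmd_ring_free_trbs(unsigned int num_segs)
    {
        /* one link TRB per segment, plus one reserved slot overall */
        return num_segs * (TRBS_PER_SEGMENT - 1) - 1;
    }
    /* cmd_ring_free_trbs(1) == 254, cmd_ring_free_trbs(2) == 509 */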
ring              204 drivers/usb/mtu3/mtu3_debugfs.c 	struct mtu3_gpd_ring *ring;
ring              207 drivers/usb/mtu3/mtu3_debugfs.c 	ring = &mep->gpd_ring;
ring              211 drivers/usb/mtu3/mtu3_debugfs.c 		   &ring->dma, ring->start, ring->end,
ring              212 drivers/usb/mtu3/mtu3_debugfs.c 		   ring->enqueue, ring->dequeue);
ring              222 drivers/usb/mtu3/mtu3_debugfs.c 	struct mtu3_gpd_ring *ring;
ring              229 drivers/usb/mtu3/mtu3_debugfs.c 	ring = &mep->gpd_ring;
ring              230 drivers/usb/mtu3/mtu3_debugfs.c 	gpd = ring->start;
ring              237 drivers/usb/mtu3/mtu3_debugfs.c 		dma = ring->dma + i * sizeof(*gpd);
ring              119 drivers/usb/mtu3/mtu3_qmu.c static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
ring              122 drivers/usb/mtu3/mtu3_qmu.c 	dma_addr_t dma_base = ring->dma;
ring              123 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd_head = ring->start;
ring              132 drivers/usb/mtu3/mtu3_qmu.c static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
ring              135 drivers/usb/mtu3/mtu3_qmu.c 	dma_addr_t dma_base = ring->dma;
ring              136 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd_head = ring->start;
ring              146 drivers/usb/mtu3/mtu3_qmu.c static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
ring              148 drivers/usb/mtu3/mtu3_qmu.c 	ring->start = gpd;
ring              149 drivers/usb/mtu3/mtu3_qmu.c 	ring->enqueue = gpd;
ring              150 drivers/usb/mtu3/mtu3_qmu.c 	ring->dequeue = gpd;
ring              151 drivers/usb/mtu3/mtu3_qmu.c 	ring->end = gpd + MAX_GPD_NUM - 1;
ring              156 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              157 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd = ring->start;
ring              161 drivers/usb/mtu3/mtu3_qmu.c 		gpd_ring_init(ring, gpd);
ring              168 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              171 drivers/usb/mtu3/mtu3_qmu.c 	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
ring              175 drivers/usb/mtu3/mtu3_qmu.c 	gpd_ring_init(ring, gpd);
ring              182 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              185 drivers/usb/mtu3/mtu3_qmu.c 			ring->start, ring->dma);
ring              186 drivers/usb/mtu3/mtu3_qmu.c 	memset(ring, 0, sizeof(*ring));
ring              203 drivers/usb/mtu3/mtu3_qmu.c static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
ring              205 drivers/usb/mtu3/mtu3_qmu.c 	if (ring->enqueue < ring->end)
ring              206 drivers/usb/mtu3/mtu3_qmu.c 		ring->enqueue++;
ring              208 drivers/usb/mtu3/mtu3_qmu.c 		ring->enqueue = ring->start;
ring              210 drivers/usb/mtu3/mtu3_qmu.c 	return ring->enqueue;
ring              213 drivers/usb/mtu3/mtu3_qmu.c static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
ring              215 drivers/usb/mtu3/mtu3_qmu.c 	if (ring->dequeue < ring->end)
ring              216 drivers/usb/mtu3/mtu3_qmu.c 		ring->dequeue++;
ring              218 drivers/usb/mtu3/mtu3_qmu.c 		ring->dequeue = ring->start;
ring              220 drivers/usb/mtu3/mtu3_qmu.c 	return ring->dequeue;
ring              224 drivers/usb/mtu3/mtu3_qmu.c static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
ring              226 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *enq = ring->enqueue;
ring              229 drivers/usb/mtu3/mtu3_qmu.c 	if (ring->enqueue < ring->end)
ring              232 drivers/usb/mtu3/mtu3_qmu.c 		next = ring->start;
ring              235 drivers/usb/mtu3/mtu3_qmu.c 	return next == ring->dequeue;
ring              246 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              247 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd = ring->enqueue;
ring              259 drivers/usb/mtu3/mtu3_qmu.c 	enq = advance_enq_gpd(ring);
ring              260 drivers/usb/mtu3/mtu3_qmu.c 	enq_dma = gpd_virt_to_dma(ring, enq);
ring              287 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              288 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd = ring->enqueue;
ring              300 drivers/usb/mtu3/mtu3_qmu.c 	enq = advance_enq_gpd(ring);
ring              301 drivers/usb/mtu3/mtu3_qmu.c 	enq_dma = gpd_virt_to_dma(ring, enq);
ring              330 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              335 drivers/usb/mtu3/mtu3_qmu.c 		write_txq_start_addr(mbase, epnum, ring->dma);
ring              349 drivers/usb/mtu3/mtu3_qmu.c 		write_rxq_start_addr(mbase, epnum, ring->dma);
ring              416 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              429 drivers/usb/mtu3/mtu3_qmu.c 	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
ring              467 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              469 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd = ring->dequeue;
ring              477 drivers/usb/mtu3/mtu3_qmu.c 	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
ring              480 drivers/usb/mtu3/mtu3_qmu.c 		__func__, epnum, gpd, gpd_current, ring->enqueue);
ring              496 drivers/usb/mtu3/mtu3_qmu.c 		gpd = advance_deq_gpd(ring);
ring              500 drivers/usb/mtu3/mtu3_qmu.c 		__func__, epnum, ring->dequeue, ring->enqueue);
ring              507 drivers/usb/mtu3/mtu3_qmu.c 	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
ring              509 drivers/usb/mtu3/mtu3_qmu.c 	struct qmu_gpd *gpd = ring->dequeue;
ring              516 drivers/usb/mtu3/mtu3_qmu.c 	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
ring              519 drivers/usb/mtu3/mtu3_qmu.c 		__func__, epnum, gpd, gpd_current, ring->enqueue);
ring              535 drivers/usb/mtu3/mtu3_qmu.c 		gpd = advance_deq_gpd(ring);
ring              539 drivers/usb/mtu3/mtu3_qmu.c 		__func__, epnum, ring->dequeue, ring->enqueue);
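The mtu3_qmu.c hits show the flat counterpart to xHCI's segmented rings: one contiguous GPD array bounded by start and end, where advance_enq_gpd()/advance_deq_gpd() step a pointer and wrap it back to start, and the ring counts as empty when the slot after enqueue already touches the dequeue pointer (one GPD is kept in reserve). A condensed sketch with assumed types:

    struct gpd { unsigned int flags; };               /* assumed descriptor */
    struct gpd_ring { struct gpd *start, *end, *enq, *deq; };

    static struct gpd *gpd_advance(const struct gpd_ring *r, struct gpd *p)
    {
        return (p < r->end) ? p + 1 : r->start;       /* wrap at the end */
    }

    /* One descriptor stays reserved, so "empty" means the next enqueue
     * slot is the dequeue pointer. */
    static int gpd_empty(const struct gpd_ring *r)
    {
        return gpd_advance(r, r->enq) == r->deq;
    }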
ring              375 drivers/usb/musb/musb_host.c 				head = qh->ring.prev;
ring              376 drivers/usb/musb/musb_host.c 				list_del(&qh->ring);
ring              977 drivers/usb/musb/musb_host.c 			list_move_tail(&cur_qh->ring, &musb->in_bulk);
ring              986 drivers/usb/musb/musb_host.c 			list_move_tail(&cur_qh->ring, &musb->out_bulk);
ring             2123 drivers/usb/musb/musb_host.c 		list_add_tail(&qh->ring, head);
ring             2187 drivers/usb/musb/musb_host.c 	INIT_LIST_HEAD(&qh->ring);
ring             2428 drivers/usb/musb/musb_host.c 			list_del(&qh->ring);
ring             2485 drivers/usb/musb/musb_host.c 		list_del(&qh->ring);
ring               21 drivers/usb/musb/musb_host.h 	struct list_head	ring;		/* of musb_qh */
ring               50 drivers/usb/musb/musb_host.h 	return list_entry(q->next, struct musb_qh, ring);
ring               43 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c static int vfio_platform_bcmflexrm_shutdown(void __iomem *ring)
ring               48 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	writel_relaxed(0x0, ring + RING_CONTROL);
ring               52 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring + RING_CONTROL);
ring               54 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 		if (readl_relaxed(ring + RING_FLUSH_DONE) &
ring               64 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	writel_relaxed(0x0, ring + RING_CONTROL);
ring               66 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 		if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
ring               79 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	void __iomem *ring;
ring               91 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	for (ring = reg->ioaddr;
ring               92 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 	     ring < (reg->ioaddr + reg->size); ring += RING_REGS_SIZE) {
ring               93 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 		if (readl_relaxed(ring + RING_VER) == RING_VER_MAGIC) {
ring               94 drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c 			rc = vfio_platform_bcmflexrm_shutdown(ring);
ring             1441 drivers/vhost/net.c 	struct ptr_ring *ring;
ring             1446 drivers/vhost/net.c 	ring = tun_get_tx_ring(file);
ring             1447 drivers/vhost/net.c 	if (!IS_ERR(ring))
ring             1449 drivers/vhost/net.c 	ring = tap_get_ptr_ring(file);
ring             1450 drivers/vhost/net.c 	if (!IS_ERR(ring))
ring             1452 drivers/vhost/net.c 	ring = NULL;
ring             1455 drivers/vhost/net.c 	return ring;
ring               49 drivers/vhost/vhost.c #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
ring               50 drivers/vhost/vhost.c #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
ring              436 drivers/vhost/vhost.c 	       sizeof(*vq->avail->ring) * num + event;
ring              446 drivers/vhost/vhost.c 	       sizeof(*vq->used->ring) * num + event;
ring              924 drivers/vhost/vhost.c 	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
ring              990 drivers/vhost/vhost.c 			       &vq->avail->ring[idx & (vq->num - 1)]);
ring             1548 drivers/vhost/vhost.c 				vq->num * sizeof *vq->used->ring))
ring             2254 drivers/vhost/vhost.c 		       &vq->avail->ring[last_avail_idx % vq->num]);
ring             2378 drivers/vhost/vhost.c 	used = vq->used->ring + start;
ring               56 drivers/vhost/vringh.c 	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
ring               59 drivers/vhost/vringh.c 			   *last_avail_idx, &vrh->vring.avail->ring[i]);
ring              423 drivers/vhost/vringh.c 		err = putused(&used_ring->ring[off], used, part);
ring              425 drivers/vhost/vringh.c 			err = putused(&used_ring->ring[0], used + part,
ring              428 drivers/vhost/vringh.c 		err = putused(&used_ring->ring[off], used, num_used);
ring              432 drivers/vhost/vringh.c 			   num_used, off, &used_ring->ring[off]);
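The vringh.c hits end with the classic wrap-around split: a batch of used elements that crosses the end of the ring is written in two parts, first at ring[off] up to the edge, then the remainder at ring[0]. A plain-memory sketch, with memcpy standing in for the access-checked putused helper:

    #include <string.h>

    struct used_elem { unsigned int id, len; };       /* vring-style entry */

    static void push_used(struct used_elem *ring, unsigned int size,
                          unsigned int off, const struct used_elem *src,
                          unsigned int n)
    {
        if (off + n > size) {
            unsigned int part = size - off;           /* up to the edge */
            memcpy(&ring[off], src, part * sizeof(*src));
            memcpy(&ring[0], src + part, (n - part) * sizeof(*src));
        } else {
            memcpy(&ring[off], src, n * sizeof(*src));
        }
    }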
ring              294 drivers/video/fbdev/intelfb/intelfb.h 	struct intelfb_heap_data ring;
ring              606 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.size = RINGBUFFER_SIZE;
ring              607 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring_tail_mask = dinfo->ring.size - 1;
ring              634 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.offset = offset + gtt_info.current_memory;
ring              637 drivers/video/fbdev/intelfb/intelfbdrv.c 			+ gtt_info.current_memory + (dinfo->ring.size >> 12);
ring              640 drivers/video/fbdev/intelfb/intelfbdrv.c 			+ gtt_info.current_memory + (dinfo->ring.size >> 12)
ring              668 drivers/video/fbdev/intelfb/intelfbdrv.c 		      agp_allocate_memory(bridge, dinfo->ring.size >> 12,
ring              676 drivers/video/fbdev/intelfb/intelfbdrv.c 				    dinfo->ring.offset)) {
ring              682 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.physical = dinfo->aperture.physical
ring              683 drivers/video/fbdev/intelfb/intelfbdrv.c 			+ (dinfo->ring.offset << 12);
ring              684 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.virtual  = dinfo->aperture.virtual
ring              685 drivers/video/fbdev/intelfb/intelfbdrv.c 			+ (dinfo->ring.offset << 12);
ring              753 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.physical, dinfo->ring.size,
ring              754 drivers/video/fbdev/intelfb/intelfbdrv.c 		dinfo->ring.virtual);
ring             1491 drivers/video/fbdev/intelfb/intelfbhw.c 		ring_space = dinfo->ring.size -
ring             1569 drivers/video/fbdev/intelfb/intelfbhw.c 	wait_ring(dinfo, dinfo->ring.size - RING_MIN_FREE);
ring             1570 drivers/video/fbdev/intelfb/intelfbhw.c 	dinfo->ring_space = dinfo->ring.size - RING_MIN_FREE;
ring             1648 drivers/video/fbdev/intelfb/intelfbhw.c 	OUTREG(PRI_RING_START, dinfo->ring.physical & RING_START_MASK);
ring             1650 drivers/video/fbdev/intelfb/intelfbhw.c 		((dinfo->ring.size - GTT_PAGE_SIZE) & RING_LENGTH_MASK) |
ring              535 drivers/video/fbdev/intelfb/intelfbhw.h 	writel((n), (u32 __iomem *)(dinfo->ring.virtual + dinfo->ring_tail));\
ring              547 drivers/virtio/virtio_ring.c 	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
ring              705 drivers/virtio/virtio_ring.c 			vq->split.vring.used->ring[last_used].id);
ring              707 drivers/virtio/virtio_ring.c 			vq->split.vring.used->ring[last_used].len);
ring             1566 drivers/virtio/virtio_ring.c 	struct vring_packed_desc *ring;
ring             1574 drivers/virtio/virtio_ring.c 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
ring             1577 drivers/virtio/virtio_ring.c 	if (!ring)
ring             1632 drivers/virtio/virtio_ring.c 	vq->packed.vring.desc = ring;
ring             1683 drivers/virtio/virtio_ring.c 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
ring               68 drivers/xen/evtchn.c 	evtchn_port_t *ring;
ring               90 drivers/xen/evtchn.c static void evtchn_free_ring(evtchn_port_t *ring)
ring               92 drivers/xen/evtchn.c 	kvfree(ring);
ring              104 drivers/xen/evtchn.c 	return u->ring + evtchn_ring_offset(u, idx);
ring              251 drivers/xen/evtchn.c 	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
ring              329 drivers/xen/evtchn.c 	old_ring = u->ring;
ring              349 drivers/xen/evtchn.c 	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
ring              351 drivers/xen/evtchn.c 	       u->ring_size * sizeof(*u->ring));
ring              353 drivers/xen/evtchn.c 	u->ring = new_ring;
ring              683 drivers/xen/evtchn.c 	evtchn_free_ring(u->ring);
ring               41 drivers/xen/pvcalls-back.c 	struct xen_pvcalls_back_ring ring;
ring               60 drivers/xen/pvcalls-back.c 	struct pvcalls_data_intf *ring;
ring               97 drivers/xen/pvcalls-back.c 	struct pvcalls_data_intf *intf = map->ring;
ring              168 drivers/xen/pvcalls-back.c 	struct pvcalls_data_intf *intf = map->ring;
ring              265 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              322 drivers/xen/pvcalls-back.c 	map->ring = page;
ring              323 drivers/xen/pvcalls-back.c 	map->ring_order = map->ring->ring_order;
ring              331 drivers/xen/pvcalls-back.c 	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
ring              413 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              438 drivers/xen/pvcalls-back.c 	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
ring              495 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              562 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              567 drivers/xen/pvcalls-back.c 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
ring              588 drivers/xen/pvcalls-back.c 		rsp = RING_GET_RESPONSE(&fedata->ring,
ring              589 drivers/xen/pvcalls-back.c 					fedata->ring.rsp_prod_pvt++);
ring              598 drivers/xen/pvcalls-back.c 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
ring              664 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              691 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              736 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              792 drivers/xen/pvcalls-back.c 	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              834 drivers/xen/pvcalls-back.c 				&fedata->ring, fedata->ring.rsp_prod_pvt++);
ring              851 drivers/xen/pvcalls-back.c 		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
ring              852 drivers/xen/pvcalls-back.c 			RING_COPY_REQUEST(&fedata->ring,
ring              853 drivers/xen/pvcalls-back.c 					  fedata->ring.req_cons++,
ring              858 drivers/xen/pvcalls-back.c 					&fedata->ring, notify);
ring              868 drivers/xen/pvcalls-back.c 		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
ring              949 drivers/xen/pvcalls-back.c 	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
ring               32 drivers/xen/pvcalls-front.c 	struct xen_pvcalls_front_ring ring;
ring               65 drivers/xen/pvcalls-front.c 			struct pvcalls_data_intf *ring;
ring              127 drivers/xen/pvcalls-front.c 	*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
ring              128 drivers/xen/pvcalls-front.c 	if (RING_FULL(&bedata->ring) ||
ring              136 drivers/xen/pvcalls-front.c 	struct pvcalls_data_intf *intf = map->active.ring;
ring              153 drivers/xen/pvcalls-front.c 	struct pvcalls_data_intf *intf = map->active.ring;
ring              184 drivers/xen/pvcalls-front.c 	while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
ring              185 drivers/xen/pvcalls-front.c 		rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
ring              216 drivers/xen/pvcalls-front.c 		bedata->ring.rsp_cons++;
ring              219 drivers/xen/pvcalls-front.c 	RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
ring              241 drivers/xen/pvcalls-front.c 		gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
ring              243 drivers/xen/pvcalls-front.c 	free_page((unsigned long)map->active.ring);
ring              309 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              317 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              318 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              337 drivers/xen/pvcalls-front.c 	if (!map->active.ring)
ring              341 drivers/xen/pvcalls-front.c 			map->active.ring->ring_order);
ring              342 drivers/xen/pvcalls-front.c 	free_page((unsigned long)map->active.ring);
ring              349 drivers/xen/pvcalls-front.c 	map->active.ring = (struct pvcalls_data_intf *)
ring              351 drivers/xen/pvcalls-front.c 	if (!map->active.ring)
ring              354 drivers/xen/pvcalls-front.c 	map->active.ring->ring_order = PVCALLS_RING_ORDER;
ring              381 drivers/xen/pvcalls-front.c 		map->active.ring->ref[i] = gnttab_grant_foreign_access(
ring              387 drivers/xen/pvcalls-front.c 		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
ring              450 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              462 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              463 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              557 drivers/xen/pvcalls-front.c 	sent = __write_ring(map->active.ring,
ring              645 drivers/xen/pvcalls-front.c 	ret = __read_ring(map->active.ring, &map->active.data,
ring              682 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              694 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              695 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              737 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              743 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              744 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              844 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              853 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              854 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              940 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring              945 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring              946 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring              962 drivers/xen/pvcalls-front.c 	struct pvcalls_data_intf *intf = map->active.ring;
ring             1026 drivers/xen/pvcalls-front.c 	req = RING_GET_REQUEST(&bedata->ring, req_id);
ring             1031 drivers/xen/pvcalls-front.c 	bedata->ring.req_prod_pvt++;
ring             1032 drivers/xen/pvcalls-front.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
ring             1045 drivers/xen/pvcalls-front.c 		map->active.ring->in_error = -EBADF;
ring             1100 drivers/xen/pvcalls-front.c 			map->active.ring->in_error = -EBADF;
ring             1119 drivers/xen/pvcalls-front.c 	kfree(bedata->ring.sring);
ring             1182 drivers/xen/pvcalls-front.c 	FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
ring               93 drivers/xen/xen-scsiback.c 	struct vscsiif_back_ring ring;
ring              334 drivers/xen/xen-scsiback.c 	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
ring              335 drivers/xen/xen-scsiback.c 	info->ring.rsp_prod_pvt++;
ring              353 drivers/xen/xen-scsiback.c 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
ring              590 drivers/xen/xen-scsiback.c 	xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
ring              650 drivers/xen/xen-scsiback.c static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
ring              677 drivers/xen/xen-scsiback.c 				struct vscsiif_back_ring *ring,
ring              710 drivers/xen/xen-scsiback.c 	pending_req = scsiback_get_pend_req(ring, v2p);
ring              727 drivers/xen/xen-scsiback.c 	struct vscsiif_back_ring *ring = &info->ring;
ring              734 drivers/xen/xen-scsiback.c 	rc = ring->req_cons;
ring              735 drivers/xen/xen-scsiback.c 	rp = ring->sring->req_prod;
ring              738 drivers/xen/xen-scsiback.c 	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
ring              739 drivers/xen/xen-scsiback.c 		rc = ring->rsp_prod_pvt;
ring              747 drivers/xen/xen-scsiback.c 		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
ring              750 drivers/xen/xen-scsiback.c 		RING_COPY_REQUEST(ring, rc, &ring_req);
ring              751 drivers/xen/xen-scsiback.c 		ring->req_cons = ++rc;
ring              753 drivers/xen/xen-scsiback.c 		pending_req = prepare_pending_reqs(info, ring, &ring_req);
ring              798 drivers/xen/xen-scsiback.c 	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
ring              830 drivers/xen/xen-scsiback.c 	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
ring              463 fs/aio.c       	struct aio_ring *ring;
ring              544 fs/aio.c       	ring = kmap_atomic(ctx->ring_pages[0]);
ring              545 fs/aio.c       	ring->nr = nr_events;	/* user copy */
ring              546 fs/aio.c       	ring->id = ~0U;
ring              547 fs/aio.c       	ring->head = ring->tail = 0;
ring              548 fs/aio.c       	ring->magic = AIO_RING_MAGIC;
ring              549 fs/aio.c       	ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring              550 fs/aio.c       	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring              551 fs/aio.c       	ring->header_length = sizeof(struct aio_ring);
ring              552 fs/aio.c       	kunmap_atomic(ring);
ring              638 fs/aio.c       	struct aio_ring *ring;
ring              655 fs/aio.c       					ring = kmap_atomic(ctx->ring_pages[0]);
ring              656 fs/aio.c       					ring->id = ctx->id;
ring              657 fs/aio.c       					kunmap_atomic(ring);
ring              990 fs/aio.c       		struct aio_ring *ring;
ring             1002 fs/aio.c       		ring = kmap_atomic(ctx->ring_pages[0]);
ring             1003 fs/aio.c       		head = ring->head;
ring             1004 fs/aio.c       		kunmap_atomic(ring);
ring             1050 fs/aio.c       	struct aio_ring __user *ring  = (void __user *)ctx_id;
ring             1056 fs/aio.c       	if (get_user(id, &ring->id))
ring             1092 fs/aio.c       	struct aio_ring	*ring;
ring             1129 fs/aio.c       	ring = kmap_atomic(ctx->ring_pages[0]);
ring             1130 fs/aio.c       	head = ring->head;
ring             1131 fs/aio.c       	ring->tail = tail;
ring             1132 fs/aio.c       	kunmap_atomic(ring);
ring             1177 fs/aio.c       	struct aio_ring *ring;
ring             1192 fs/aio.c       	ring = kmap_atomic(ctx->ring_pages[0]);
ring             1193 fs/aio.c       	head = ring->head;
ring             1194 fs/aio.c       	tail = ring->tail;
ring             1195 fs/aio.c       	kunmap_atomic(ring);
ring             1242 fs/aio.c       	ring = kmap_atomic(ctx->ring_pages[0]);
ring             1243 fs/aio.c       	ring->head = head;
ring             1244 fs/aio.c       	kunmap_atomic(ring);
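In the fs/aio.c hits, the ring header lives in the first mapped page: head and tail are read and written under kmap_atomic(), equal indices mean no completed events, and consumers retire events by advancing head modulo the ring size. The available-event count implied by those reads, as a small helper (the name is illustrative):

    static unsigned int aio_events_avail(unsigned int head, unsigned int tail,
                                         unsigned int nr)
    {
        /* head and tail are slot indices in [0, nr); circular distance */
        return (tail + nr - head) % nr;
    }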
ring              136 fs/verity/signature.c 	struct key *ring;
ring              139 fs/verity/signature.c 	ring = keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0),
ring              144 fs/verity/signature.c 	if (IS_ERR(ring))
ring              145 fs/verity/signature.c 		return PTR_ERR(ring);
ring              151 fs/verity/signature.c 	fsverity_keyring = ring;
ring              155 fs/verity/signature.c 	key_put(ring);
ring               72 include/linux/if_tap.h 	struct ptr_ring ring;
ring              666 include/linux/libata.h 	struct ata_ering_entry	ring[ATA_ERING_SIZE];
ring               26 include/linux/skb_array.h 	struct ptr_ring ring;
ring               34 include/linux/skb_array.h 	return __ptr_ring_full(&a->ring);
ring               39 include/linux/skb_array.h 	return ptr_ring_full(&a->ring);
ring               44 include/linux/skb_array.h 	return ptr_ring_produce(&a->ring, skb);
ring               49 include/linux/skb_array.h 	return ptr_ring_produce_irq(&a->ring, skb);
ring               54 include/linux/skb_array.h 	return ptr_ring_produce_bh(&a->ring, skb);
ring               59 include/linux/skb_array.h 	return ptr_ring_produce_any(&a->ring, skb);
ring               68 include/linux/skb_array.h 	return __ptr_ring_empty(&a->ring);
ring               73 include/linux/skb_array.h 	return __ptr_ring_peek(&a->ring);
ring               78 include/linux/skb_array.h 	return ptr_ring_empty(&a->ring);
ring               83 include/linux/skb_array.h 	return ptr_ring_empty_bh(&a->ring);
ring               88 include/linux/skb_array.h 	return ptr_ring_empty_irq(&a->ring);
ring               93 include/linux/skb_array.h 	return ptr_ring_empty_any(&a->ring);
ring               98 include/linux/skb_array.h 	return __ptr_ring_consume(&a->ring);
ring              103 include/linux/skb_array.h 	return ptr_ring_consume(&a->ring);
ring              109 include/linux/skb_array.h 	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
ring              114 include/linux/skb_array.h 	return ptr_ring_consume_irq(&a->ring);
ring              120 include/linux/skb_array.h 	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
ring              125 include/linux/skb_array.h 	return ptr_ring_consume_any(&a->ring);
ring              131 include/linux/skb_array.h 	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
ring              137 include/linux/skb_array.h 	return ptr_ring_consume_bh(&a->ring);
ring              143 include/linux/skb_array.h 	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
ring              162 include/linux/skb_array.h 	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
ring              167 include/linux/skb_array.h 	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
ring              172 include/linux/skb_array.h 	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
ring              177 include/linux/skb_array.h 	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
ring              182 include/linux/skb_array.h 	return ptr_ring_init(&a->ring, size, gfp);
ring              193 include/linux/skb_array.h 	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
ring              198 include/linux/skb_array.h 	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
ring              205 include/linux/skb_array.h 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
ring              213 include/linux/skb_array.h 	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
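skb_array.h is a pure facade: every operation forwards to the embedded ptr_ring, and the BUILD_BUG_ON(offsetof(...)) hit pins that member at offset zero so a struct skb_array pointer can double as a struct ptr_ring pointer. The idiom in miniature, over an assumed generic ring rather than the real ptr_ring API:

    struct gen_ring { void **queue; unsigned int size, prod; };

    static inline int gen_ring_produce(struct gen_ring *r, void *p)
    {
        if (r->queue[r->prod])
            return -1;                         /* slot not yet consumed */
        r->queue[r->prod] = p;
        r->prod = (r->prod + 1) % r->size;
        return 0;
    }

    struct obj_array { struct gen_ring ring; };   /* ring must stay first */

    static inline int obj_array_produce(struct obj_array *a, void *obj)
    {
        return gen_ring_produce(&a->ring, obj);   /* thin typed wrapper */
    }
    /* keeping the embedded ring at offset 0, as the BUILD_BUG_ON above
     * enforces, is what makes the cast-based tricks legal */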
ring              559 include/linux/thunderbolt.h void tb_ring_start(struct tb_ring *ring);
ring              560 include/linux/thunderbolt.h void tb_ring_stop(struct tb_ring *ring);
ring              561 include/linux/thunderbolt.h void tb_ring_free(struct tb_ring *ring);
ring              563 include/linux/thunderbolt.h int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
ring              581 include/linux/thunderbolt.h static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
ring              583 include/linux/thunderbolt.h 	WARN_ON(ring->is_tx);
ring              584 include/linux/thunderbolt.h 	return __tb_ring_enqueue(ring, frame);
ring              602 include/linux/thunderbolt.h static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
ring              604 include/linux/thunderbolt.h 	WARN_ON(!ring->is_tx);
ring              605 include/linux/thunderbolt.h 	return __tb_ring_enqueue(ring, frame);
ring              609 include/linux/thunderbolt.h struct ring_frame *tb_ring_poll(struct tb_ring *ring);
ring              610 include/linux/thunderbolt.h void tb_ring_poll_complete(struct tb_ring *ring);
ring              619 include/linux/thunderbolt.h static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
ring              621 include/linux/thunderbolt.h 	return &ring->nhi->pdev->dev;
ring              106 include/net/page_pool.h 	struct ptr_ring ring;
ring              421 include/uapi/drm/amdgpu_drm.h 	__u32 ring;
ring              439 include/uapi/drm/amdgpu_drm.h 	__u32 ring;
ring              602 include/uapi/drm/amdgpu_drm.h 	__u32 ring;
ring              608 include/uapi/drm/amdgpu_drm.h 	__u32 ring;
ring               53 include/uapi/linux/genwqe/genwqe_card.h #define IO_EXTENDED_DIAG_MAP(ring)	(0x00000500 | ((ring) << 3))
ring               55 include/uapi/linux/genwqe/genwqe_card.h #define GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace) (((ring) << 8) | (trace))
ring              104 include/uapi/linux/virtio_ring.h 	__virtio16 ring[];
ring              118 include/uapi/linux/virtio_ring.h 	struct vring_used_elem ring[];
ring              164 include/uapi/linux/virtio_ring.h #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
ring              165 include/uapi/linux/virtio_ring.h #define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
ring              173 include/uapi/linux/virtio_ring.h 	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
ring               15 include/xen/interface/io/console.h #define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
ring               31 include/xen/interface/io/ring.h 	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) /	\
ring               32 include/xen/interface/io/ring.h 		sizeof(((struct _s##_sring *)0)->ring[0])))
ring               38 include/xen/interface/io/ring.h 	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
ring               87 include/xen/interface/io/ring.h     union __name##_sring_entry ring[1]; /* variable-length */		\
ring              185 include/xen/interface/io/ring.h     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
ring              202 include/xen/interface/io/ring.h     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
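include/xen/interface/io/ring.h shows why shared-ring sizes are powers of two: RING_GET_REQUEST and RING_GET_RESPONSE turn a free-running index into a slot with a single mask, (_idx) & (RING_SIZE(_r) - 1). The same trick in isolation:

    /* Valid only when size is a power of two; a free-running 32-bit
     * index then maps onto a slot with one AND, wrap included. */
    #define RING_SLOT(idx, size)  ((idx) & ((size) - 1u))
    /* RING_SLOT(1027, 1024) == 3 */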
ring              222 kernel/bpf/cpumap.c static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
ring              231 kernel/bpf/cpumap.c 	while ((xdpf = ptr_ring_consume(ring)))
ring             1167 kernel/power/swap.c 	unsigned ring = 0, pg = 0, ring_size = 0,
ring             1293 kernel/power/swap.c 			ret = swap_read_page(handle, page[ring], &hb);
ring             1307 kernel/power/swap.c 			if (++ring >= ring_size)
ring             1308 kernel/power/swap.c 				ring = 0;
ring              127 net/9p/trans_xen.c static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
ring              131 net/9p/trans_xen.c 	cons = ring->intf->out_cons;
ring              132 net/9p/trans_xen.c 	prod = ring->intf->out_prod;
ring              145 net/9p/trans_xen.c 	struct xen_9pfs_dataring *ring;
ring              158 net/9p/trans_xen.c 	ring = &priv->rings[num];
ring              161 net/9p/trans_xen.c 	while (wait_event_killable(ring->wq,
ring              162 net/9p/trans_xen.c 				   p9_xen_write_todo(ring, size)) != 0)
ring              165 net/9p/trans_xen.c 	spin_lock_irqsave(&ring->lock, flags);
ring              166 net/9p/trans_xen.c 	cons = ring->intf->out_cons;
ring              167 net/9p/trans_xen.c 	prod = ring->intf->out_prod;
ring              172 net/9p/trans_xen.c 		spin_unlock_irqrestore(&ring->lock, flags);
ring              179 net/9p/trans_xen.c 	xen_9pfs_write_packet(ring->data.out, p9_req->tc.sdata, size,
ring              185 net/9p/trans_xen.c 	ring->intf->out_prod = prod;
ring              186 net/9p/trans_xen.c 	spin_unlock_irqrestore(&ring->lock, flags);
ring              187 net/9p/trans_xen.c 	notify_remote_via_irq(ring->irq);
ring              196 net/9p/trans_xen.c 	struct xen_9pfs_dataring *ring;
ring              202 net/9p/trans_xen.c 	ring = container_of(work, struct xen_9pfs_dataring, work);
ring              203 net/9p/trans_xen.c 	priv = ring->priv;
ring              206 net/9p/trans_xen.c 		cons = ring->intf->in_cons;
ring              207 net/9p/trans_xen.c 		prod = ring->intf->in_prod;
ring              212 net/9p/trans_xen.c 			notify_remote_via_irq(ring->irq);
ring              220 net/9p/trans_xen.c 		xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
ring              229 net/9p/trans_xen.c 			ring->intf->in_cons = cons;
ring              238 net/9p/trans_xen.c 		xen_9pfs_read_packet(req->rc.sdata, ring->data.in, h.size,
ring              244 net/9p/trans_xen.c 		ring->intf->in_cons = cons;
ring              255 net/9p/trans_xen.c 	struct xen_9pfs_dataring *ring = r;
ring              257 net/9p/trans_xen.c 	if (!ring || !ring->priv->client) {
ring              262 net/9p/trans_xen.c 	wake_up_interruptible(&ring->wq);
ring              263 net/9p/trans_xen.c 	schedule_work(&ring->work);
ring              326 net/9p/trans_xen.c 					 struct xen_9pfs_dataring *ring)
ring              332 net/9p/trans_xen.c 	init_waitqueue_head(&ring->wq);
ring              333 net/9p/trans_xen.c 	spin_lock_init(&ring->lock);
ring              334 net/9p/trans_xen.c 	INIT_WORK(&ring->work, p9_xen_response);
ring              336 net/9p/trans_xen.c 	ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
ring              337 net/9p/trans_xen.c 	if (!ring->intf)
ring              340 net/9p/trans_xen.c 					  virt_to_gfn(ring->intf), 0);
ring              343 net/9p/trans_xen.c 	ring->ref = ret;
ring              355 net/9p/trans_xen.c 		ring->intf->ref[i] = ret;
ring              357 net/9p/trans_xen.c 	ring->intf->ring_order = XEN_9PFS_RING_ORDER;
ring              358 net/9p/trans_xen.c 	ring->data.in = bytes;
ring              359 net/9p/trans_xen.c 	ring->data.out = bytes + XEN_9PFS_RING_SIZE;
ring              361 net/9p/trans_xen.c 	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
ring              364 net/9p/trans_xen.c 	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
ring              366 net/9p/trans_xen.c 					      0, "xen_9pfs-frontend", ring);
ring              367 net/9p/trans_xen.c 	if (ring->irq >= 0)
ring              370 net/9p/trans_xen.c 	xenbus_free_evtchn(dev, ring->evtchn);
ring              371 net/9p/trans_xen.c 	ret = ring->irq;
ring              375 net/9p/trans_xen.c 			gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
ring              380 net/9p/trans_xen.c 	gnttab_end_foreign_access(ring->ref, 0, 0);
ring              381 net/9p/trans_xen.c 	free_page((unsigned long)ring->intf);
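net/9p/trans_xen.c drives a byte-granular ring: out_cons and out_prod are free-running indices into a power-of-two buffer, and p9_xen_write_todo() keeps the writer asleep until the gap between them leaves room for the packet. A sketch of that space check, under the assumption that the ring size is a power of two as in the hits above:

    static unsigned int ring_free_bytes(unsigned int prod, unsigned int cons,
                                        unsigned int ring_size)
    {
        /* prod and cons are free-running; unsigned subtraction gives the
         * bytes in flight correctly even across 32-bit wrap. */
        return ring_size - (prod - cons);
    }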
ring               50 net/core/page_pool.c 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
ring               87 net/core/page_pool.c 	struct ptr_ring *r = &pool->ring;
ring              261 net/core/page_pool.c 		ret = ptr_ring_produce(&pool->ring, page);
ring              263 net/core/page_pool.c 		ret = ptr_ring_produce_bh(&pool->ring, page);
ring              329 net/core/page_pool.c 	while ((page = ptr_ring_consume_bh(&pool->ring))) {
ring              344 net/core/page_pool.c 	ptr_ring_cleanup(&pool->ring, NULL);
ring               24 net/dccp/ccids/lib/loss_interval.c 	return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;
ring               31 net/dccp/ccids/lib/loss_interval.c 	return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;
ring               39 net/dccp/ccids/lib/loss_interval.c 	if (lh->ring[LIH_INDEX(lh->counter)] == NULL)
ring               40 net/dccp/ccids/lib/loss_interval.c 		lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,
ring               42 net/dccp/ccids/lib/loss_interval.c 	return lh->ring[LIH_INDEX(lh->counter)];
ring               51 net/dccp/ccids/lib/loss_interval.c 		if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {
ring               53 net/dccp/ccids/lib/loss_interval.c 					lh->ring[LIH_INDEX(lh->counter)]);
ring               54 net/dccp/ccids/lib/loss_interval.c 			lh->ring[LIH_INDEX(lh->counter)] = NULL;
ring               42 net/dccp/ccids/lib/loss_interval.h 	struct tfrc_loss_interval	*ring[LIH_SIZE];
ring              140 net/dccp/ccids/lib/packet_history.c 	swap(h->ring[idx_a], h->ring[idx_b]);
ring              339 net/dccp/ccids/lib/packet_history.c 		h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
ring              340 net/dccp/ccids/lib/packet_history.c 		if (h->ring[i] == NULL)
ring              349 net/dccp/ccids/lib/packet_history.c 		kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
ring              350 net/dccp/ccids/lib/packet_history.c 		h->ring[i] = NULL;
ring              360 net/dccp/ccids/lib/packet_history.c 		if (h->ring[i] != NULL) {
ring              361 net/dccp/ccids/lib/packet_history.c 			kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
ring              362 net/dccp/ccids/lib/packet_history.c 			h->ring[i] = NULL;
ring              372 net/dccp/ccids/lib/packet_history.c 	return h->ring[0];
ring              381 net/dccp/ccids/lib/packet_history.c 	return h->ring[h->rtt_sample_prev];
ring               82 net/dccp/ccids/lib/packet_history.h 	struct tfrc_rx_hist_entry *ring[TFRC_NDUPACK + 1];
ring              102 net/dccp/ccids/lib/packet_history.h 	return h->ring[tfrc_rx_hist_index(h, h->loss_count)];
ring              111 net/dccp/ccids/lib/packet_history.h 	return h->ring[tfrc_rx_hist_index(h, n)];
ring              120 net/dccp/ccids/lib/packet_history.h 	return h->ring[h->loss_start];
ring              168 net/ipv6/seg6_hmac.c 	char *ring, *off;
ring              192 net/ipv6/seg6_hmac.c 	ring = this_cpu_ptr(hmac_ring);
ring              193 net/ipv6/seg6_hmac.c 	off = ring;
ring              215 net/ipv6/seg6_hmac.c 	dgsize = __do_hmac(hinfo, ring, plen, tmp_out,
ring               72 net/packet/diag.c static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
ring               77 net/packet/diag.c 	if (!ring->pg_vec)
ring               80 net/packet/diag.c 	pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
ring               81 net/packet/diag.c 	pdr.pdr_block_nr = ring->pg_vec_len;
ring               82 net/packet/diag.c 	pdr.pdr_frame_size = ring->frame_size;
ring               83 net/packet/diag.c 	pdr.pdr_frame_nr = ring->frame_max + 1;
ring               86 net/packet/diag.c 		pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
ring               87 net/packet/diag.c 		pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
ring               88 net/packet/diag.c 		pdr.pdr_features = ring->prb_bdqc.feature_req_word;
ring              418 net/rds/ib.h   void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
ring              419 net/rds/ib.h   void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
ring              420 net/rds/ib.h   u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
ring              421 net/rds/ib.h   void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
ring              422 net/rds/ib.h   void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
ring              423 net/rds/ib.h   int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
ring              424 net/rds/ib.h   int rds_ib_ring_low(struct rds_ib_work_ring *ring);
ring              425 net/rds/ib.h   u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
ring              426 net/rds/ib.h   u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
ring               66 net/rds/ib_ring.c void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr)
ring               68 net/rds/ib_ring.c 	memset(ring, 0, sizeof(*ring));
ring               69 net/rds/ib_ring.c 	ring->w_nr = nr;
ring               70 net/rds/ib_ring.c 	rdsdebug("ring %p nr %u\n", ring, ring->w_nr);
ring               73 net/rds/ib_ring.c static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring)
ring               78 net/rds/ib_ring.c 	diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
ring               79 net/rds/ib_ring.c 	BUG_ON(diff > ring->w_nr);
ring               84 net/rds/ib_ring.c void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr)
ring               88 net/rds/ib_ring.c 	BUG_ON(__rds_ib_ring_used(ring));
ring               89 net/rds/ib_ring.c 	ring->w_nr = nr;
ring               92 net/rds/ib_ring.c static int __rds_ib_ring_empty(struct rds_ib_work_ring *ring)
ring               94 net/rds/ib_ring.c 	return __rds_ib_ring_used(ring) == 0;
ring               97 net/rds/ib_ring.c u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos)
ring              101 net/rds/ib_ring.c 	avail = ring->w_nr - __rds_ib_ring_used(ring);
ring              103 net/rds/ib_ring.c 	rdsdebug("ring %p val %u next %u free %u\n", ring, val,
ring              104 net/rds/ib_ring.c 		 ring->w_alloc_ptr, avail);
ring              108 net/rds/ib_ring.c 		*pos = ring->w_alloc_ptr;
ring              110 net/rds/ib_ring.c 		ring->w_alloc_ptr = (ring->w_alloc_ptr + ret) % ring->w_nr;
ring              111 net/rds/ib_ring.c 		ring->w_alloc_ctr += ret;
ring              117 net/rds/ib_ring.c void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val)
ring              119 net/rds/ib_ring.c 	ring->w_free_ptr = (ring->w_free_ptr + val) % ring->w_nr;
ring              120 net/rds/ib_ring.c 	atomic_add(val, &ring->w_free_ctr);
ring              122 net/rds/ib_ring.c 	if (__rds_ib_ring_empty(ring) &&
ring              127 net/rds/ib_ring.c void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val)
ring              129 net/rds/ib_ring.c 	ring->w_alloc_ptr = (ring->w_alloc_ptr - val) % ring->w_nr;
ring              130 net/rds/ib_ring.c 	ring->w_alloc_ctr -= val;
ring              133 net/rds/ib_ring.c int rds_ib_ring_empty(struct rds_ib_work_ring *ring)
ring              135 net/rds/ib_ring.c 	return __rds_ib_ring_empty(ring);
ring              138 net/rds/ib_ring.c int rds_ib_ring_low(struct rds_ib_work_ring *ring)
ring              140 net/rds/ib_ring.c 	return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
ring              147 net/rds/ib_ring.c u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring)
ring              149 net/rds/ib_ring.c 	return ring->w_free_ptr;
ring              156 net/rds/ib_ring.c u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)
ring              163 net/rds/ib_ring.c 		ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;
ring              165 net/rds/ib_ring.c 	rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,
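net/rds/ib_ring.c avoids head/tail comparisons altogether: w_alloc_ctr and w_free_ctr only ever increase, and occupancy is their unsigned difference, which stays correct across 32-bit wrap as long as it never exceeds w_nr (hence the BUG_ON in the hits above). The arithmetic on its own:

    typedef unsigned int u32;

    static u32 ring_used(u32 alloc_ctr, u32 free_ctr)
    {
        return alloc_ctr - free_ctr;      /* wrap-safe while used <= w_nr */
    }

    static u32 ring_avail(u32 w_nr, u32 alloc_ctr, u32 free_ctr)
    {
        return w_nr - ring_used(alloc_ctr, free_ctr);
    }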
ring              693 net/sched/sch_generic.c 		if (!q->ring.queue)
ring              760 net/sched/sch_generic.c 		if (!q->ring.queue)
ring              765 net/sched/sch_generic.c 		ptr_ring_cleanup(&q->ring, NULL);
ring               63 net/xdp/xsk.c  	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
ring               77 net/xdp/xsk.c  		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
ring               90 net/xdp/xsk.c  	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
ring              104 net/xdp/xsk.c  		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
ring              765 net/xdp/xsk.c  			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
ring              833 net/xdp/xsk.c  static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
ring              835 net/xdp/xsk.c  	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
ring              836 net/xdp/xsk.c  	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
ring              837 net/xdp/xsk.c  	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
ring              840 net/xdp/xsk.c  static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
ring              842 net/xdp/xsk.c  	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
ring              843 net/xdp/xsk.c  	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
ring              844 net/xdp/xsk.c  	ring->desc = offsetof(struct xdp_umem_ring, desc);
ring              998 net/xdp/xsk.c  	qpg = virt_to_head_page(q->ring);
ring             1002 net/xdp/xsk.c  	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
ring               49 net/xdp/xsk_queue.c 	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
ring               51 net/xdp/xsk_queue.c 	if (!q->ring) {
ring               64 net/xdp/xsk_queue.c 	page_frag_free(q->ring);
ring               43 net/xdp/xsk_queue.h 	struct xdp_ring *ring;
ring              102 net/xdp/xsk_queue.h 		q->prod_tail = READ_ONCE(q->ring->producer);
ring              117 net/xdp/xsk_queue.h 	q->cons_tail = READ_ONCE(q->ring->consumer);
ring              129 net/xdp/xsk_queue.h 	q->prod_tail = READ_ONCE(q->ring->producer);
ring              178 net/xdp/xsk_queue.h 		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
ring              181 net/xdp/xsk_queue.h 		*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
ring              206 net/xdp/xsk_queue.h 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
ring              223 net/xdp/xsk_queue.h 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
ring              229 net/xdp/xsk_queue.h 	ring->desc[q->prod_tail++ & q->ring_mask] = addr;
ring              234 net/xdp/xsk_queue.h 	WRITE_ONCE(q->ring->producer, q->prod_tail);
ring              240 net/xdp/xsk_queue.h 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
ring              246 net/xdp/xsk_queue.h 	ring->desc[q->prod_head++ & q->ring_mask] = addr;
ring              257 net/xdp/xsk_queue.h 	WRITE_ONCE(q->ring->producer, q->prod_tail);
ring              304 net/xdp/xsk_queue.h 		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
ring              307 net/xdp/xsk_queue.h 		*desc = READ_ONCE(ring->desc[idx]);
ring              323 net/xdp/xsk_queue.h 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
ring              341 net/xdp/xsk_queue.h 	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
ring              349 net/xdp/xsk_queue.h 	ring->desc[idx].addr = addr;
ring              350 net/xdp/xsk_queue.h 	ring->desc[idx].len = len;
ring              361 net/xdp/xsk_queue.h 	WRITE_ONCE(q->ring->producer, q->prod_tail);
ring             1222 samples/mic/mpssd/mpssd.c 					vring.vr.avail->ring[avail_idx]);
ring               31 sound/xen/xen_snd_front.c 	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
ring               32 sound/xen/xen_snd_front.c 			       evtchnl->u.req.ring.req_prod_pvt);
ring               34 sound/xen/xen_snd_front_evtchnl.c 	rp = channel->u.req.ring.sring->rsp_prod;
ring               43 sound/xen/xen_snd_front_evtchnl.c 	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
ring               44 sound/xen/xen_snd_front_evtchnl.c 		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
ring               75 sound/xen/xen_snd_front_evtchnl.c 	channel->u.req.ring.rsp_cons = i;
ring               76 sound/xen/xen_snd_front_evtchnl.c 	if (i != channel->u.req.ring.req_prod_pvt) {
ring               79 sound/xen/xen_snd_front_evtchnl.c 		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
ring               84 sound/xen/xen_snd_front_evtchnl.c 		channel->u.req.ring.sring->rsp_event = i + 1;
ring              141 sound/xen/xen_snd_front_evtchnl.c 	channel->u.req.ring.req_prod_pvt++;
ring              142 sound/xen/xen_snd_front_evtchnl.c 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
ring              153 sound/xen/xen_snd_front_evtchnl.c 		page = (unsigned long)channel->u.req.ring.sring;
ring              238 sound/xen/xen_snd_front_evtchnl.c 		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);
ring              242 sound/xen/xen_snd_front_evtchnl.c 			channel->u.req.ring.sring = NULL;
ring               57 sound/xen/xen_snd_front_evtchnl.h 			struct xen_sndif_front_ring ring;
ring              196 tools/io_uring/io_uring-bench.c 	struct io_sq_ring *ring = &s->sq_ring;
ring              199 tools/io_uring/io_uring-bench.c 	next_tail = tail = *ring->tail;
ring              203 tools/io_uring/io_uring-bench.c 		if (next_tail == *ring->head)
ring              208 tools/io_uring/io_uring-bench.c 		ring->array[index] = index;
ring              213 tools/io_uring/io_uring-bench.c 	if (*ring->tail != tail) {
ring              216 tools/io_uring/io_uring-bench.c 		*ring->tail = tail;
ring              246 tools/io_uring/io_uring-bench.c 	struct io_cq_ring *ring = &s->cq_ring;
ring              250 tools/io_uring/io_uring-bench.c 	head = *ring->head;
ring              255 tools/io_uring/io_uring-bench.c 		if (head == *ring->tail)
ring              257 tools/io_uring/io_uring-bench.c 		cqe = &ring->cqes[head & cq_ring_mask];
ring              273 tools/io_uring/io_uring-bench.c 	*ring->head = head;
ring              281 tools/io_uring/io_uring-bench.c 	struct io_sq_ring *ring = &s->sq_ring;
ring              309 tools/io_uring/io_uring-bench.c 		if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
ring              314 tools/io_uring/io_uring-bench.c 			if ((*ring->flags & IORING_SQ_NEED_WAKEUP))
ring               34 tools/io_uring/io_uring-cp.c static int setup_context(unsigned entries, struct io_uring *ring)
ring               38 tools/io_uring/io_uring-cp.c 	ret = io_uring_queue_init(entries, ring, 0);
ring               69 tools/io_uring/io_uring-cp.c static void queue_prepped(struct io_uring *ring, struct io_data *data)
ring               73 tools/io_uring/io_uring-cp.c 	sqe = io_uring_get_sqe(ring);
ring               84 tools/io_uring/io_uring-cp.c static int queue_read(struct io_uring *ring, off_t size, off_t offset)
ring               93 tools/io_uring/io_uring-cp.c 	sqe = io_uring_get_sqe(ring);
ring              111 tools/io_uring/io_uring-cp.c static void queue_write(struct io_uring *ring, struct io_data *data)
ring              119 tools/io_uring/io_uring-cp.c 	queue_prepped(ring, data);
ring              120 tools/io_uring/io_uring-cp.c 	io_uring_submit(ring);
ring              123 tools/io_uring/io_uring-cp.c static int copy_file(struct io_uring *ring, off_t insize)
ring              151 tools/io_uring/io_uring-cp.c 			if (queue_read(ring, this_size, offset))
ring              160 tools/io_uring/io_uring-cp.c 			ret = io_uring_submit(ring);
ring              175 tools/io_uring/io_uring-cp.c 				ret = io_uring_wait_cqe(ring, &cqe);
ring              178 tools/io_uring/io_uring-cp.c 				ret = io_uring_peek_cqe(ring, &cqe);
ring              190 tools/io_uring/io_uring-cp.c 					queue_prepped(ring, data);
ring              191 tools/io_uring/io_uring-cp.c 					io_uring_cqe_seen(ring, cqe);
ring              202 tools/io_uring/io_uring-cp.c 				queue_prepped(ring, data);
ring              203 tools/io_uring/io_uring-cp.c 				io_uring_cqe_seen(ring, cqe);
ring              212 tools/io_uring/io_uring-cp.c 				queue_write(ring, data);
ring              220 tools/io_uring/io_uring-cp.c 			io_uring_cqe_seen(ring, cqe);
ring              229 tools/io_uring/io_uring-cp.c 	struct io_uring ring;
ring              249 tools/io_uring/io_uring-cp.c 	if (setup_context(QD, &ring))
ring              254 tools/io_uring/io_uring-cp.c 	ret = copy_file(&ring, insize);
ring              258 tools/io_uring/io_uring-cp.c 	io_uring_queue_exit(&ring);
ring               63 tools/io_uring/liburing.h extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
ring               66 tools/io_uring/liburing.h 	struct io_uring *ring);
ring               67 tools/io_uring/liburing.h extern void io_uring_queue_exit(struct io_uring *ring);
ring               68 tools/io_uring/liburing.h extern int io_uring_peek_cqe(struct io_uring *ring,
ring               70 tools/io_uring/liburing.h extern int io_uring_wait_cqe(struct io_uring *ring,
ring               72 tools/io_uring/liburing.h extern int io_uring_submit(struct io_uring *ring);
ring               73 tools/io_uring/liburing.h extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
ring               79 tools/io_uring/liburing.h static inline void io_uring_cqe_seen(struct io_uring *ring,
ring               83 tools/io_uring/liburing.h 		struct io_uring_cq *cq = &ring->cq;
ring               11 tools/io_uring/queue.c static int __io_uring_get_cqe(struct io_uring *ring,
ring               14 tools/io_uring/queue.c 	struct io_uring_cq *cq = &ring->cq;
ring               36 tools/io_uring/queue.c 		ret = io_uring_enter(ring->ring_fd, 0, 1,
ring               49 tools/io_uring/queue.c int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
ring               51 tools/io_uring/queue.c 	return __io_uring_get_cqe(ring, cqe_ptr, 0);
ring               58 tools/io_uring/queue.c int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
ring               60 tools/io_uring/queue.c 	return __io_uring_get_cqe(ring, cqe_ptr, 1);
ring               68 tools/io_uring/queue.c int io_uring_submit(struct io_uring *ring)
ring               70 tools/io_uring/queue.c 	struct io_uring_sq *sq = &ring->sq;
ring              126 tools/io_uring/queue.c 	ret = io_uring_enter(ring->ring_fd, submitted, 0,
ring              141 tools/io_uring/queue.c struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
ring              143 tools/io_uring/queue.c 	struct io_uring_sq *sq = &ring->sq;
ring               64 tools/io_uring/setup.c int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
ring               68 tools/io_uring/setup.c 	memset(ring, 0, sizeof(*ring));
ring               69 tools/io_uring/setup.c 	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
ring               71 tools/io_uring/setup.c 		ring->ring_fd = fd;
ring               79 tools/io_uring/setup.c int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
ring               91 tools/io_uring/setup.c 	ret = io_uring_queue_mmap(fd, &p, ring);
ring               98 tools/io_uring/setup.c void io_uring_queue_exit(struct io_uring *ring)
ring              100 tools/io_uring/setup.c 	struct io_uring_sq *sq = &ring->sq;
ring              101 tools/io_uring/setup.c 	struct io_uring_cq *cq = &ring->cq;
ring              106 tools/io_uring/setup.c 	close(ring->ring_fd);
ring              279 tools/lib/bpf/xsk.c 	fill->ring = map + off.fr.desc;
ring              296 tools/lib/bpf/xsk.c 	comp->ring = map + off.cr.desc;
ring              646 tools/lib/bpf/xsk.c 		rx->ring = rx_map + off.rx.desc;
ring              665 tools/lib/bpf/xsk.c 		tx->ring = tx_map + off.tx.desc;
ring              721 tools/lib/bpf/xsk.c 		munmap(umem->fill->ring - off.fr.desc,
ring              723 tools/lib/bpf/xsk.c 		munmap(umem->comp->ring - off.cr.desc,
ring              750 tools/lib/bpf/xsk.c 			munmap(xsk->rx->ring - off.rx.desc,
ring              754 tools/lib/bpf/xsk.c 			munmap(xsk->tx->ring - off.tx.desc,
ring               34 tools/lib/bpf/xsk.h 	void *ring; \
ring               51 tools/lib/bpf/xsk.h 	__u64 *addrs = (__u64 *)fill->ring;
ring               59 tools/lib/bpf/xsk.h 	const __u64 *addrs = (const __u64 *)comp->ring;
ring               67 tools/lib/bpf/xsk.h 	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
ring               75 tools/lib/bpf/xsk.h 	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
ring              184 tools/testing/selftests/net/psock_fanout.c 	char *ring;
ring              198 tools/testing/selftests/net/psock_fanout.c 	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
ring              200 tools/testing/selftests/net/psock_fanout.c 	if (ring == MAP_FAILED) {
ring              205 tools/testing/selftests/net/psock_fanout.c 	return ring;
ring              208 tools/testing/selftests/net/psock_fanout.c static int sock_fanout_read_ring(int fd, void *ring)
ring              210 tools/testing/selftests/net/psock_fanout.c 	struct tpacket2_hdr *header = ring;
ring              215 tools/testing/selftests/net/psock_fanout.c 		header = ring + (count * getpagesize());
ring               71 tools/testing/selftests/net/psock_tpacket.c 	void (*walk)(int sock, struct ring *ring);
ring              220 tools/testing/selftests/net/psock_tpacket.c static void walk_v1_v2_rx(int sock, struct ring *ring)
ring              227 tools/testing/selftests/net/psock_tpacket.c 	bug_on(ring->type != PACKET_RX_RING);
ring              239 tools/testing/selftests/net/psock_tpacket.c 		while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
ring              240 tools/testing/selftests/net/psock_tpacket.c 					       ring->version)) {
ring              241 tools/testing/selftests/net/psock_tpacket.c 			ppd.raw = ring->rd[frame_num].iov_base;
ring              243 tools/testing/selftests/net/psock_tpacket.c 			switch (ring->version) {
ring              260 tools/testing/selftests/net/psock_tpacket.c 			__v1_v2_rx_user_ready(ppd.raw, ring->version);
ring              262 tools/testing/selftests/net/psock_tpacket.c 			frame_num = (frame_num + 1) % ring->rd_num;
ring              272 tools/testing/selftests/net/psock_tpacket.c 			ring->version, total_packets, NUM_PACKETS);
ring              354 tools/testing/selftests/net/psock_tpacket.c static inline void *get_next_frame(struct ring *ring, int n)
ring              356 tools/testing/selftests/net/psock_tpacket.c 	uint8_t *f0 = ring->rd[0].iov_base;
ring              358 tools/testing/selftests/net/psock_tpacket.c 	switch (ring->version) {
ring              361 tools/testing/selftests/net/psock_tpacket.c 		return ring->rd[n].iov_base;
ring              363 tools/testing/selftests/net/psock_tpacket.c 		return f0 + (n * ring->req3.tp_frame_size);
ring              369 tools/testing/selftests/net/psock_tpacket.c static void walk_tx(int sock, struct ring *ring)
ring              387 tools/testing/selftests/net/psock_tpacket.c 	if (ring->version <= TPACKET_V2)
ring              388 tools/testing/selftests/net/psock_tpacket.c 		nframes = ring->rd_num;
ring              390 tools/testing/selftests/net/psock_tpacket.c 		nframes = ring->req3.tp_frame_nr;
ring              392 tools/testing/selftests/net/psock_tpacket.c 	bug_on(ring->type != PACKET_TX_RING);
ring              419 tools/testing/selftests/net/psock_tpacket.c 		void *next = get_next_frame(ring, frame_num);
ring              421 tools/testing/selftests/net/psock_tpacket.c 		while (__tx_kernel_ready(next, ring->version) &&
ring              425 tools/testing/selftests/net/psock_tpacket.c 			switch (ring->version) {
ring              463 tools/testing/selftests/net/psock_tpacket.c 			__tx_user_ready(next, ring->version);
ring              493 tools/testing/selftests/net/psock_tpacket.c 			ring->version, total_packets, NUM_PACKETS);
ring              500 tools/testing/selftests/net/psock_tpacket.c static void walk_v1_v2(int sock, struct ring *ring)
ring              502 tools/testing/selftests/net/psock_tpacket.c 	if (ring->type == PACKET_RX_RING)
ring              503 tools/testing/selftests/net/psock_tpacket.c 		walk_v1_v2_rx(sock, ring);
ring              505 tools/testing/selftests/net/psock_tpacket.c 		walk_tx(sock, ring);
ring              581 tools/testing/selftests/net/psock_tpacket.c static void walk_v3_rx(int sock, struct ring *ring)
ring              588 tools/testing/selftests/net/psock_tpacket.c 	bug_on(ring->type != PACKET_RX_RING);
ring              600 tools/testing/selftests/net/psock_tpacket.c 		pbd = (struct block_desc *) ring->rd[block_num].iov_base;
ring              608 tools/testing/selftests/net/psock_tpacket.c 		block_num = (block_num + 1) % ring->rd_num;
ring              622 tools/testing/selftests/net/psock_tpacket.c static void walk_v3(int sock, struct ring *ring)
ring              624 tools/testing/selftests/net/psock_tpacket.c 	if (ring->type == PACKET_RX_RING)
ring              625 tools/testing/selftests/net/psock_tpacket.c 		walk_v3_rx(sock, ring);
ring              627 tools/testing/selftests/net/psock_tpacket.c 		walk_tx(sock, ring);
ring              630 tools/testing/selftests/net/psock_tpacket.c static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
ring              632 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_block_size = getpagesize() << 2;
ring              633 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
ring              634 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_block_nr = blocks;
ring              636 tools/testing/selftests/net/psock_tpacket.c 	ring->req.tp_frame_nr = ring->req.tp_block_size /
ring              637 tools/testing/selftests/net/psock_tpacket.c 				ring->req.tp_frame_size *
ring              638 tools/testing/selftests/net/psock_tpacket.c 				ring->req.tp_block_nr;
ring              640 tools/testing/selftests/net/psock_tpacket.c 	ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
ring              641 tools/testing/selftests/net/psock_tpacket.c 	ring->walk = walk_v1_v2;
ring              642 tools/testing/selftests/net/psock_tpacket.c 	ring->rd_num = ring->req.tp_frame_nr;
ring              643 tools/testing/selftests/net/psock_tpacket.c 	ring->flen = ring->req.tp_frame_size;
ring              646 tools/testing/selftests/net/psock_tpacket.c static void __v3_fill(struct ring *ring, unsigned int blocks, int type)
ring              649 tools/testing/selftests/net/psock_tpacket.c 		ring->req3.tp_retire_blk_tov = 64;
ring              650 tools/testing/selftests/net/psock_tpacket.c 		ring->req3.tp_sizeof_priv = 0;
ring              651 tools/testing/selftests/net/psock_tpacket.c 		ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
ring              653 tools/testing/selftests/net/psock_tpacket.c 	ring->req3.tp_block_size = getpagesize() << 2;
ring              654 tools/testing/selftests/net/psock_tpacket.c 	ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
ring              655 tools/testing/selftests/net/psock_tpacket.c 	ring->req3.tp_block_nr = blocks;
ring              657 tools/testing/selftests/net/psock_tpacket.c 	ring->req3.tp_frame_nr = ring->req3.tp_block_size /
ring              658 tools/testing/selftests/net/psock_tpacket.c 				 ring->req3.tp_frame_size *
ring              659 tools/testing/selftests/net/psock_tpacket.c 				 ring->req3.tp_block_nr;
ring              661 tools/testing/selftests/net/psock_tpacket.c 	ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr;
ring              662 tools/testing/selftests/net/psock_tpacket.c 	ring->walk = walk_v3;
ring              663 tools/testing/selftests/net/psock_tpacket.c 	ring->rd_num = ring->req3.tp_block_nr;
ring              664 tools/testing/selftests/net/psock_tpacket.c 	ring->flen = ring->req3.tp_block_size;
ring              667 tools/testing/selftests/net/psock_tpacket.c static void setup_ring(int sock, struct ring *ring, int version, int type)
ring              672 tools/testing/selftests/net/psock_tpacket.c 	ring->type = type;
ring              673 tools/testing/selftests/net/psock_tpacket.c 	ring->version = version;
ring              680 tools/testing/selftests/net/psock_tpacket.c 		__v1_v2_fill(ring, blocks);
ring              681 tools/testing/selftests/net/psock_tpacket.c 		ret = setsockopt(sock, SOL_PACKET, type, &ring->req,
ring              682 tools/testing/selftests/net/psock_tpacket.c 				 sizeof(ring->req));
ring              686 tools/testing/selftests/net/psock_tpacket.c 		__v3_fill(ring, blocks, type);
ring              687 tools/testing/selftests/net/psock_tpacket.c 		ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
ring              688 tools/testing/selftests/net/psock_tpacket.c 				 sizeof(ring->req3));
ring              697 tools/testing/selftests/net/psock_tpacket.c 	ring->rd_len = ring->rd_num * sizeof(*ring->rd);
ring              698 tools/testing/selftests/net/psock_tpacket.c 	ring->rd = malloc(ring->rd_len);
ring              699 tools/testing/selftests/net/psock_tpacket.c 	if (ring->rd == NULL) {
ring              708 tools/testing/selftests/net/psock_tpacket.c static void mmap_ring(int sock, struct ring *ring)
ring              712 tools/testing/selftests/net/psock_tpacket.c 	ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE,
ring              714 tools/testing/selftests/net/psock_tpacket.c 	if (ring->mm_space == MAP_FAILED) {
ring              719 tools/testing/selftests/net/psock_tpacket.c 	memset(ring->rd, 0, ring->rd_len);
ring              720 tools/testing/selftests/net/psock_tpacket.c 	for (i = 0; i < ring->rd_num; ++i) {
ring              721 tools/testing/selftests/net/psock_tpacket.c 		ring->rd[i].iov_base = ring->mm_space + (i * ring->flen);
ring              722 tools/testing/selftests/net/psock_tpacket.c 		ring->rd[i].iov_len = ring->flen;
ring              726 tools/testing/selftests/net/psock_tpacket.c static void bind_ring(int sock, struct ring *ring)
ring              732 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_family = PF_PACKET;
ring              733 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_protocol = htons(ETH_P_ALL);
ring              734 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_ifindex = if_nametoindex("lo");
ring              735 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_hatype = 0;
ring              736 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_pkttype = 0;
ring              737 tools/testing/selftests/net/psock_tpacket.c 	ring->ll.sll_halen = 0;
ring              739 tools/testing/selftests/net/psock_tpacket.c 	ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll));
ring              746 tools/testing/selftests/net/psock_tpacket.c static void walk_ring(int sock, struct ring *ring)
ring              748 tools/testing/selftests/net/psock_tpacket.c 	ring->walk(sock, ring);
ring              751 tools/testing/selftests/net/psock_tpacket.c static void unmap_ring(int sock, struct ring *ring)
ring              753 tools/testing/selftests/net/psock_tpacket.c 	munmap(ring->mm_space, ring->mm_len);
ring              754 tools/testing/selftests/net/psock_tpacket.c 	free(ring->rd);
ring              805 tools/testing/selftests/net/psock_tpacket.c 	struct ring ring;
ring              820 tools/testing/selftests/net/psock_tpacket.c 	memset(&ring, 0, sizeof(ring));
ring              821 tools/testing/selftests/net/psock_tpacket.c 	setup_ring(sock, &ring, version, type);
ring              822 tools/testing/selftests/net/psock_tpacket.c 	mmap_ring(sock, &ring);
ring              823 tools/testing/selftests/net/psock_tpacket.c 	bind_ring(sock, &ring);
ring              824 tools/testing/selftests/net/psock_tpacket.c 	walk_ring(sock, &ring);
ring              825 tools/testing/selftests/net/psock_tpacket.c 	unmap_ring(sock, &ring);
ring               86 tools/testing/selftests/net/txring_overwrite.c static int setup_tx(char **ring)
ring              114 tools/testing/selftests/net/txring_overwrite.c 	*ring = mmap(0, req.tp_block_size * req.tp_block_nr,
ring              116 tools/testing/selftests/net/txring_overwrite.c 	if (*ring == MAP_FAILED)
ring              161 tools/testing/selftests/net/txring_overwrite.c 	char *ring;
ring              165 tools/testing/selftests/net/txring_overwrite.c 	fdt = setup_tx(&ring);
ring              167 tools/testing/selftests/net/txring_overwrite.c 	send_pkt(fdt, ring, payload_patterns[0]);
ring              168 tools/testing/selftests/net/txring_overwrite.c 	send_pkt(fdt, ring, payload_patterns[1]);
ring               56 tools/virtio/ringtest/ring.c struct desc *ring;
ring               82 tools/virtio/ringtest/ring.c 	ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
ring              101 tools/virtio/ringtest/ring.c 		ring[i] = desc;
ring              125 tools/virtio/ringtest/ring.c 	ring[head].addr = (unsigned long)(void*)buf;
ring              126 tools/virtio/ringtest/ring.c 	ring[head].len = len;
ring              133 tools/virtio/ringtest/ring.c 	index = ring[head].index;
ring              138 tools/virtio/ringtest/ring.c 	ring[head].flags = DESC_HW;
ring              149 tools/virtio/ringtest/ring.c 	if (ring[head].flags & DESC_HW)
ring              153 tools/virtio/ringtest/ring.c 	*lenp = ring[head].len;
ring              154 tools/virtio/ringtest/ring.c 	index = ring[head].index & (ring_size - 1);
ring              168 tools/virtio/ringtest/ring.c 	return (ring[head].flags & DESC_HW);
ring              223 tools/virtio/ringtest/ring.c 	return !(ring[head].flags & DESC_HW);
ring              230 tools/virtio/ringtest/ring.c 	if (!(ring[head].flags & DESC_HW))
ring              241 tools/virtio/ringtest/ring.c 	ring[head].len--;
ring              249 tools/virtio/ringtest/ring.c 	ring[head].flags = 0;
ring               22 tools/virtio/ringtest/virtio_ring_0_9.c struct vring ring;
ring               76 tools/virtio/ringtest/virtio_ring_0_9.c 	vring_init(&ring, ring_size, p, 0x1000);
ring               86 tools/virtio/ringtest/virtio_ring_0_9.c 		ring.desc[i].next = i + 1;
ring              117 tools/virtio/ringtest/virtio_ring_0_9.c 	desc = ring.desc;
ring              136 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.avail->ring[avail & (ring_size - 1)] =
ring              143 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.avail->ring[avail] = head;
ring              148 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.avail->idx = guest.avail_idx;
ring              160 tools/virtio/ringtest/virtio_ring_0_9.c 	index = ring.used->ring[head].id;
ring              167 tools/virtio/ringtest/virtio_ring_0_9.c 	if (ring.used->idx == guest.last_used_idx)
ring              176 tools/virtio/ringtest/virtio_ring_0_9.c 	index = ring.used->ring[head].id;
ring              181 tools/virtio/ringtest/virtio_ring_0_9.c 	*lenp = ring.desc[index].len;
ring              183 tools/virtio/ringtest/virtio_ring_0_9.c 	*lenp = ring.used->ring[head].len;
ring              186 tools/virtio/ringtest/virtio_ring_0_9.c 	*bufp = (void*)(unsigned long)ring.desc[index].addr;
ring              189 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.desc[index].next = guest.free_head;
ring              202 tools/virtio/ringtest/virtio_ring_0_9.c 	unsigned index = ring.used->ring[head].id;
ring              206 tools/virtio/ringtest/virtio_ring_0_9.c 	return ring.used->idx == last_used_idx;
ring              219 tools/virtio/ringtest/virtio_ring_0_9.c 	vring_used_event(&ring) = guest.last_used_idx;
ring              233 tools/virtio/ringtest/virtio_ring_0_9.c 	need = vring_need_event(vring_avail_event(&ring),
ring              252 tools/virtio/ringtest/virtio_ring_0_9.c 	vring_avail_event(&ring) = host.used_idx;
ring              262 tools/virtio/ringtest/virtio_ring_0_9.c 	unsigned index = ring.avail->ring[head & (ring_size - 1)];
ring              266 tools/virtio/ringtest/virtio_ring_0_9.c 	return head == ring.avail->idx;
ring              277 tools/virtio/ringtest/virtio_ring_0_9.c 	head = ring.avail->ring[used_idx & (ring_size - 1)];
ring              284 tools/virtio/ringtest/virtio_ring_0_9.c 	desc = &ring.desc[head & (ring_size - 1)];
ring              286 tools/virtio/ringtest/virtio_ring_0_9.c 	if (used_idx == ring.avail->idx)
ring              296 tools/virtio/ringtest/virtio_ring_0_9.c 	head = ring.avail->ring[used_idx];
ring              298 tools/virtio/ringtest/virtio_ring_0_9.c 	desc = &ring.desc[head];
ring              308 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.used->ring[used_idx].id = head;
ring              309 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.used->ring[used_idx].len = desc->len - 1;
ring              314 tools/virtio/ringtest/virtio_ring_0_9.c 	ring.used->idx = host.used_idx;
ring              326 tools/virtio/ringtest/virtio_ring_0_9.c 	need = vring_need_event(vring_used_event(&ring),
ring               29 tools/virtio/virtio_test.c 	void *ring;
ring               98 tools/virtio/virtio_test.c 	r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
ring              100 tools/virtio/virtio_test.c 	memset(info->ring, 0, vring_size(num, 4096));
ring              101 tools/virtio/virtio_test.c 	vring_init(&info->vring, num, info->ring, 4096);
ring              104 tools/virtio/virtio_test.c 				       true, false, info->ring,
ring              128 tools/virtio/vringh_test.c 	err = get_user(*head, &vrh->vring.avail->ring[i]);
ring               45 virt/kvm/coalesced_mmio.c 	struct kvm_coalesced_mmio_ring *ring;
ring               54 virt/kvm/coalesced_mmio.c 	ring = dev->kvm->coalesced_mmio_ring;
ring               55 virt/kvm/coalesced_mmio.c 	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
ring               69 virt/kvm/coalesced_mmio.c 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
ring               77 virt/kvm/coalesced_mmio.c 	insert = READ_ONCE(ring->last);
ring               86 virt/kvm/coalesced_mmio.c 	ring->coalesced_mmio[insert].phys_addr = addr;
ring               87 virt/kvm/coalesced_mmio.c 	ring->coalesced_mmio[insert].len = len;
ring               88 virt/kvm/coalesced_mmio.c 	memcpy(ring->coalesced_mmio[insert].data, val, len);
ring               89 virt/kvm/coalesced_mmio.c 	ring->coalesced_mmio[insert].pio = dev->zone.pio;
ring               91 virt/kvm/coalesced_mmio.c 	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;