Searched refs:ring (Results 1 – 200 of 511) sorted by relevance

/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring.c  49 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
59 void amdgpu_ring_free_size(struct amdgpu_ring *ring) in amdgpu_ring_free_size() argument
61 uint32_t rptr = amdgpu_ring_get_rptr(ring); in amdgpu_ring_free_size()
64 ring->ring_free_dw = rptr + (ring->ring_size / 4); in amdgpu_ring_free_size()
65 ring->ring_free_dw -= ring->wptr; in amdgpu_ring_free_size()
66 ring->ring_free_dw &= ring->ptr_mask; in amdgpu_ring_free_size()
67 if (!ring->ring_free_dw) { in amdgpu_ring_free_size()
69 ring->ring_free_dw = ring->ring_size / 4; in amdgpu_ring_free_size()
83 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) in amdgpu_ring_alloc() argument
88 if (ndw > (ring->ring_size / 4)) in amdgpu_ring_alloc()
[all …]
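
The amdgpu_ring.c hits above show how the driver derives the free space in a ring buffer from its read pointer (rptr), write pointer (wptr) and a power-of-two pointer mask. A minimal standalone sketch of that arithmetic, with hypothetical names and outside any kernel context:

#include <stdint.h>

/* Sketch only: free dwords in a ring whose size (in dwords) is a power of
 * two, mirroring the rptr/wptr/ptr_mask arithmetic visible in
 * amdgpu_ring_free_size() above. */
static uint32_t ring_free_dwords(uint32_t rptr, uint32_t wptr,
                                 uint32_t size_dw, uint32_t ptr_mask)
{
        uint32_t free_dw = (rptr + size_dw - wptr) & ptr_mask;

        /* rptr == wptr means the ring is empty, so the whole ring is free. */
        if (!free_dw)
                free_dw = size_dw;
        return free_dw;
}
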
amdgpu_fence.c  61 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) in amdgpu_fence_write() argument
63 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_write()
77 static u32 amdgpu_fence_read(struct amdgpu_ring *ring) in amdgpu_fence_read() argument
79 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_read()
100 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, in amdgpu_fence_emit() argument
103 struct amdgpu_device *adev = ring->adev; in amdgpu_fence_emit()
110 (*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx]; in amdgpu_fence_emit()
111 (*fence)->ring = ring; in amdgpu_fence_emit()
114 &ring->fence_drv.fence_queue.lock, in amdgpu_fence_emit()
115 adev->fence_context + ring->idx, in amdgpu_fence_emit()
[all …]
uvd_v5_0.c  47 static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring) in uvd_v5_0_ring_get_rptr() argument
49 struct amdgpu_device *adev = ring->adev; in uvd_v5_0_ring_get_rptr()
61 static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring) in uvd_v5_0_ring_get_wptr() argument
63 struct amdgpu_device *adev = ring->adev; in uvd_v5_0_ring_get_wptr()
75 static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) in uvd_v5_0_ring_set_wptr() argument
77 struct amdgpu_device *adev = ring->adev; in uvd_v5_0_ring_set_wptr()
79 WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); in uvd_v5_0_ring_set_wptr()
94 struct amdgpu_ring *ring; in uvd_v5_0_sw_init() local
111 ring = &adev->uvd.ring; in uvd_v5_0_sw_init()
112 sprintf(ring->name, "uvd"); in uvd_v5_0_sw_init()
[all …]
uvd_v6_0.c  47 static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) in uvd_v6_0_ring_get_rptr() argument
49 struct amdgpu_device *adev = ring->adev; in uvd_v6_0_ring_get_rptr()
61 static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) in uvd_v6_0_ring_get_wptr() argument
63 struct amdgpu_device *adev = ring->adev; in uvd_v6_0_ring_get_wptr()
75 static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) in uvd_v6_0_ring_set_wptr() argument
77 struct amdgpu_device *adev = ring->adev; in uvd_v6_0_ring_set_wptr()
79 WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); in uvd_v6_0_ring_set_wptr()
94 struct amdgpu_ring *ring; in uvd_v6_0_sw_init() local
111 ring = &adev->uvd.ring; in uvd_v6_0_sw_init()
112 sprintf(ring->name, "uvd"); in uvd_v6_0_sw_init()
[all …]
amdgpu_ib.c  58 int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, in amdgpu_ib_get() argument
61 struct amdgpu_device *adev = ring->adev; in amdgpu_ib_get()
80 ib->ring = ring; in amdgpu_ib_get()
127 struct amdgpu_ring *ring; in amdgpu_ib_schedule() local
136 ring = ibs->ring; in amdgpu_ib_schedule()
140 if (!ring->ready) { in amdgpu_ib_schedule()
149 r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs); in amdgpu_ib_schedule()
157 r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync); in amdgpu_ib_schedule()
159 amdgpu_ring_unlock_undo(ring); in amdgpu_ib_schedule()
164 r = amdgpu_sync_rings(&ibs->sync, ring); in amdgpu_ib_schedule()
[all …]
uvd_v4_2.c  51 static uint32_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring) in uvd_v4_2_ring_get_rptr() argument
53 struct amdgpu_device *adev = ring->adev; in uvd_v4_2_ring_get_rptr()
65 static uint32_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring) in uvd_v4_2_ring_get_wptr() argument
67 struct amdgpu_device *adev = ring->adev; in uvd_v4_2_ring_get_wptr()
79 static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) in uvd_v4_2_ring_set_wptr() argument
81 struct amdgpu_device *adev = ring->adev; in uvd_v4_2_ring_set_wptr()
83 WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); in uvd_v4_2_ring_set_wptr()
98 struct amdgpu_ring *ring; in uvd_v4_2_sw_init() local
115 ring = &adev->uvd.ring; in uvd_v4_2_sw_init()
116 sprintf(ring->name, "uvd"); in uvd_v4_2_sw_init()
[all …]
sdma_v2_4.c  182 static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) in sdma_v2_4_ring_get_rptr() argument
187 rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; in sdma_v2_4_ring_get_rptr()
199 static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) in sdma_v2_4_ring_get_wptr() argument
201 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_get_wptr()
202 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; in sdma_v2_4_ring_get_wptr()
215 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) in sdma_v2_4_ring_set_wptr() argument
217 struct amdgpu_device *adev = ring->adev; in sdma_v2_4_ring_set_wptr()
218 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; in sdma_v2_4_ring_set_wptr()
220 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); in sdma_v2_4_ring_set_wptr()
223 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in sdma_v2_4_ring_insert_nop() argument
[all …]
sdma_v3_0.c  278 static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring) in sdma_v3_0_ring_get_rptr() argument
283 rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; in sdma_v3_0_ring_get_rptr()
295 static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring) in sdma_v3_0_ring_get_wptr() argument
297 struct amdgpu_device *adev = ring->adev; in sdma_v3_0_ring_get_wptr()
300 if (ring->use_doorbell) { in sdma_v3_0_ring_get_wptr()
302 wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; in sdma_v3_0_ring_get_wptr()
304 int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1; in sdma_v3_0_ring_get_wptr()
319 static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) in sdma_v3_0_ring_set_wptr() argument
321 struct amdgpu_device *adev = ring->adev; in sdma_v3_0_ring_set_wptr()
323 if (ring->use_doorbell) { in sdma_v3_0_ring_set_wptr()
[all …]
cik_sdma.c  152 static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring) in cik_sdma_ring_get_rptr() argument
156 rptr = ring->adev->wb.wb[ring->rptr_offs]; in cik_sdma_ring_get_rptr()
168 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring) in cik_sdma_ring_get_wptr() argument
170 struct amdgpu_device *adev = ring->adev; in cik_sdma_ring_get_wptr()
171 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; in cik_sdma_ring_get_wptr()
183 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) in cik_sdma_ring_set_wptr() argument
185 struct amdgpu_device *adev = ring->adev; in cik_sdma_ring_set_wptr()
186 u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; in cik_sdma_ring_set_wptr()
188 WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); in cik_sdma_ring_set_wptr()
191 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) in cik_sdma_ring_insert_nop() argument
[all …]
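
The SDMA hits above (sdma_v2_4.c, sdma_v3_0.c, cik_sdma.c) keep wptr as a dword index but program the hardware register in bytes, hence the "<< 2" and the byte-range mask in the set_wptr helpers. A small sketch of that conversion, assuming a hypothetical power-of-two ring size in bytes:

#include <stdint.h>

/* Sketch only: convert a write pointer kept in dwords into the byte offset
 * written to the ring's WPTR register, as in cik_sdma_ring_set_wptr() above.
 * ring_bytes is assumed to be a power of two (0x40000 would reproduce the
 * 0x3fffc mask seen in the listing). */
static uint32_t wptr_dwords_to_bytes(uint32_t wptr_dw, uint32_t ring_bytes)
{
        return (wptr_dw << 2) & (ring_bytes - 4);
}
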
vce_v2_0.c  55 static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring) in vce_v2_0_ring_get_rptr() argument
57 struct amdgpu_device *adev = ring->adev; in vce_v2_0_ring_get_rptr()
59 if (ring == &adev->vce.ring[0]) in vce_v2_0_ring_get_rptr()
72 static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring) in vce_v2_0_ring_get_wptr() argument
74 struct amdgpu_device *adev = ring->adev; in vce_v2_0_ring_get_wptr()
76 if (ring == &adev->vce.ring[0]) in vce_v2_0_ring_get_wptr()
89 static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) in vce_v2_0_ring_set_wptr() argument
91 struct amdgpu_device *adev = ring->adev; in vce_v2_0_ring_set_wptr()
93 if (ring == &adev->vce.ring[0]) in vce_v2_0_ring_set_wptr()
94 WREG32(mmVCE_RB_WPTR, ring->wptr); in vce_v2_0_ring_set_wptr()
[all …]
vce_v3_0.c  62 static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) in vce_v3_0_ring_get_rptr() argument
64 struct amdgpu_device *adev = ring->adev; in vce_v3_0_ring_get_rptr()
66 if (ring == &adev->vce.ring[0]) in vce_v3_0_ring_get_rptr()
79 static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) in vce_v3_0_ring_get_wptr() argument
81 struct amdgpu_device *adev = ring->adev; in vce_v3_0_ring_get_wptr()
83 if (ring == &adev->vce.ring[0]) in vce_v3_0_ring_get_wptr()
96 static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) in vce_v3_0_ring_set_wptr() argument
98 struct amdgpu_device *adev = ring->adev; in vce_v3_0_ring_set_wptr()
100 if (ring == &adev->vce.ring[0]) in vce_v3_0_ring_set_wptr()
101 WREG32(mmVCE_RB_WPTR, ring->wptr); in vce_v3_0_ring_set_wptr()
[all …]
amdgpu_vce.c  195 amdgpu_ring_fini(&adev->vce.ring[0]); in amdgpu_vce_sw_fini()
196 amdgpu_ring_fini(&adev->vce.ring[1]); in amdgpu_vce_sw_fini()
281 if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) && in amdgpu_vce_idle_work_handler()
282 (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) { in amdgpu_vce_idle_work_handler()
332 struct amdgpu_ring *ring = &adev->vce.ring[0]; in amdgpu_vce_free_handles() local
341 r = amdgpu_vce_get_destroy_msg(ring, handle, NULL); in amdgpu_vce_free_handles()
368 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vce_get_create_msg() argument
374 struct amdgpu_device *adev = ring->adev; in amdgpu_vce_get_create_msg()
381 r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib); in amdgpu_vce_get_create_msg()
396 if ((ring->adev->vce.fw_version >> 24) >= 52) in amdgpu_vce_get_create_msg()
[all …]
amdgpu_trace.h  34 __field(u32, ring)
41 __entry->ring = p->ibs[i].ring->idx;
44 p->ibs[i].ring);
47 __entry->bo_list, __entry->ring, __entry->dw,
68 __entry->ring_name = job->ibs[0].ring->name;
93 __entry->ring_name = job->ibs[0].ring->name;
103 TP_PROTO(unsigned vmid, int ring),
104 TP_ARGS(vmid, ring),
107 __field(u32, ring)
112 __entry->ring = ring;
[all …]
gfx_v8_0.c  620 static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) in gfx_v8_0_ring_test_ring() argument
622 struct amdgpu_device *adev = ring->adev; in gfx_v8_0_ring_test_ring()
634 r = amdgpu_ring_lock(ring, 3); in gfx_v8_0_ring_test_ring()
637 ring->idx, r); in gfx_v8_0_ring_test_ring()
641 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); in gfx_v8_0_ring_test_ring()
642 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); in gfx_v8_0_ring_test_ring()
643 amdgpu_ring_write(ring, 0xDEADBEEF); in gfx_v8_0_ring_test_ring()
644 amdgpu_ring_unlock_commit(ring); in gfx_v8_0_ring_test_ring()
654 ring->idx, i); in gfx_v8_0_ring_test_ring()
657 ring->idx, scratch, tmp); in gfx_v8_0_ring_test_ring()
[all …]
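
The gfx_v8_0.c and gfx_v7_0.c ring tests follow the same pattern: push a write of 0xDEADBEEF to a scratch register through the ring, then poll the register until the value shows up or a timeout expires. A self-contained sketch of that pattern; the callbacks are hypothetical stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical callbacks standing in for ring submission and register I/O. */
struct ring_test_ops {
        void     (*emit_scratch_write)(void *ring, uint32_t value);
        uint32_t (*read_scratch)(void *ring);
};

/* Sketch only: the write-0xDEADBEEF-and-poll self test used by the
 * gfx_v7_0/gfx_v8_0 ring tests above. */
static bool ring_self_test(void *ring, const struct ring_test_ops *ops,
                           unsigned int timeout_iters)
{
        unsigned int i;

        ops->emit_scratch_write(ring, 0xDEADBEEF);
        for (i = 0; i < timeout_iters; i++) {
                if (ops->read_scratch(ring) == 0xDEADBEEF)
                        return true; /* command processor consumed the packet */
        }
        return false;
}
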
gfx_v7_0.c  2368 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) in gfx_v7_0_ring_test_ring() argument
2370 struct amdgpu_device *adev = ring->adev; in gfx_v7_0_ring_test_ring()
2382 r = amdgpu_ring_lock(ring, 3); in gfx_v7_0_ring_test_ring()
2384 DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); in gfx_v7_0_ring_test_ring()
2388 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); in gfx_v7_0_ring_test_ring()
2389 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); in gfx_v7_0_ring_test_ring()
2390 amdgpu_ring_write(ring, 0xDEADBEEF); in gfx_v7_0_ring_test_ring()
2391 amdgpu_ring_unlock_commit(ring); in gfx_v7_0_ring_test_ring()
2400 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); in gfx_v7_0_ring_test_ring()
2403 ring->idx, scratch, tmp); in gfx_v7_0_ring_test_ring()
[all …]
amdgpu_semaphore.c  58 bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring, in amdgpu_semaphore_emit_signal() argument
61 trace_amdgpu_semaphore_signale(ring->idx, semaphore); in amdgpu_semaphore_emit_signal()
63 if (amdgpu_ring_emit_semaphore(ring, semaphore, false)) { in amdgpu_semaphore_emit_signal()
67 ring->last_semaphore_signal_addr = semaphore->gpu_addr; in amdgpu_semaphore_emit_signal()
73 bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring, in amdgpu_semaphore_emit_wait() argument
76 trace_amdgpu_semaphore_wait(ring->idx, semaphore); in amdgpu_semaphore_emit_wait()
78 if (amdgpu_ring_emit_semaphore(ring, semaphore, true)) { in amdgpu_semaphore_emit_wait()
82 ring->last_semaphore_wait_addr = semaphore->gpu_addr; in amdgpu_semaphore_emit_wait()
amdgpu_vce.h  31 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
33 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
37 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
40 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
41 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
43 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
44 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
amdgpu_test.c  33 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; in amdgpu_do_test_moves() local
113 r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, in amdgpu_do_test_moves()
158 r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, in amdgpu_do_test_moves()
242 struct amdgpu_ring *ring, in amdgpu_test_create_and_emit_fence() argument
245 uint32_t handle = ring->idx ^ 0xdeafbeef; in amdgpu_test_create_and_emit_fence()
248 if (ring == &adev->uvd.ring) { in amdgpu_test_create_and_emit_fence()
249 r = amdgpu_uvd_get_create_msg(ring, handle, NULL); in amdgpu_test_create_and_emit_fence()
255 r = amdgpu_uvd_get_destroy_msg(ring, handle, fence); in amdgpu_test_create_and_emit_fence()
261 } else if (ring == &adev->vce.ring[0] || in amdgpu_test_create_and_emit_fence()
262 ring == &adev->vce.ring[1]) { in amdgpu_test_create_and_emit_fence()
[all …]
amdgpu_ih.c  63 (void **)&adev->irq.ih.ring); in amdgpu_ih_ring_alloc()
97 if (!adev->irq.ih.ring) { in amdgpu_ih_ring_init()
101 adev->irq.ih.ring = pci_alloc_consistent(adev->pdev, in amdgpu_ih_ring_init()
104 if (adev->irq.ih.ring == NULL) in amdgpu_ih_ring_init()
106 memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8); in amdgpu_ih_ring_init()
142 if (adev->irq.ih.ring) { in amdgpu_ih_ring_fini()
147 (void *)adev->irq.ih.ring, in amdgpu_ih_ring_fini()
149 adev->irq.ih.ring = NULL; in amdgpu_ih_ring_fini()
160 adev->irq.ih.ring = NULL; in amdgpu_ih_ring_fini()
201 (const void *) &adev->irq.ih.ring[ring_index]); in amdgpu_ih_process()
[all …]
amdgpu_cs.c  75 u32 ip_instance, u32 ring, in amdgpu_cs_get_ring() argument
89 if (ring < adev->gfx.num_gfx_rings) { in amdgpu_cs_get_ring()
90 *out_ring = &adev->gfx.gfx_ring[ring]; in amdgpu_cs_get_ring()
98 if (ring < adev->gfx.num_compute_rings) { in amdgpu_cs_get_ring()
99 *out_ring = &adev->gfx.compute_ring[ring]; in amdgpu_cs_get_ring()
107 if (ring < adev->sdma.num_instances) { in amdgpu_cs_get_ring()
108 *out_ring = &adev->sdma.instance[ring].ring; in amdgpu_cs_get_ring()
116 *out_ring = &adev->uvd.ring; in amdgpu_cs_get_ring()
119 if (ring < 2){ in amdgpu_cs_get_ring()
120 *out_ring = &adev->vce.ring[ring]; in amdgpu_cs_get_ring()
[all …]
amdgpu_sync.c  67 return a_fence->ring->adev == adev; in amdgpu_sync_same_dev()
70 struct amdgpu_ring *ring; in amdgpu_sync_same_dev() local
72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_sync_same_dev()
73 return ring->adev == adev; in amdgpu_sync_same_dev()
120 if (!fence || fence->ring->adev != adev) { in amdgpu_sync_fence()
138 amdgpu_sync_keep_later(&sync->sync_to[fence->ring->idx], f); in amdgpu_sync_fence()
279 struct amdgpu_ring *ring) in amdgpu_sync_rings() argument
281 struct amdgpu_device *adev = ring->adev; in amdgpu_sync_rings()
297 !amdgpu_fence_need_sync(fence, ring)) in amdgpu_sync_rings()
342 if (!amdgpu_semaphore_emit_wait(ring, semaphore)) { in amdgpu_sync_rings()
[all …]
amdgpu.h  325 u32 (*get_rptr)(struct amdgpu_ring *ring);
326 u32 (*get_wptr)(struct amdgpu_ring *ring);
327 void (*set_wptr)(struct amdgpu_ring *ring);
331 void (*emit_ib)(struct amdgpu_ring *ring,
333 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
335 bool (*emit_semaphore)(struct amdgpu_ring *ring,
338 void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
340 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
341 void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
346 int (*test_ring)(struct amdgpu_ring *ring);
[all …]
amdgpu_uvd.c  225 amdgpu_ring_fini(&adev->uvd.ring); in amdgpu_uvd_sw_fini()
234 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_suspend() local
247 r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); in amdgpu_uvd_suspend()
293 struct amdgpu_ring *ring = &adev->uvd.ring; in amdgpu_uvd_free_handles() local
303 r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence); in amdgpu_uvd_free_handles()
839 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, in amdgpu_uvd_send_msg() argument
848 struct amdgpu_device *adev = ring->adev; in amdgpu_uvd_send_msg()
875 r = amdgpu_ib_get(ring, NULL, 64, ib); in amdgpu_uvd_send_msg()
890 r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1, in amdgpu_uvd_send_msg()
906 amdgpu_ib_free(ring->adev, ib); in amdgpu_uvd_send_msg()
[all …]
/linux-4.4.14/drivers/gpu/drm/i915/
intel_ringbuffer.c  37 intel_ring_initialized(struct intel_engine_cs *ring) in intel_ring_initialized() argument
39 struct drm_device *dev = ring->dev; in intel_ring_initialized()
45 struct intel_context *dctx = ring->default_context; in intel_ring_initialized()
46 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf; in intel_ring_initialized()
50 return ring->buffer && ring->buffer->obj; in intel_ring_initialized()
78 bool intel_ring_stopped(struct intel_engine_cs *ring) in intel_ring_stopped() argument
80 struct drm_i915_private *dev_priv = ring->dev->dev_private; in intel_ring_stopped()
81 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); in intel_ring_stopped()
84 static void __intel_ring_advance(struct intel_engine_cs *ring) in __intel_ring_advance() argument
86 struct intel_ringbuffer *ringbuf = ring->buffer; in __intel_ring_advance()
[all …]
intel_lrc.c  224 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
283 static bool disable_lite_restore_wa(struct intel_engine_cs *ring) in disable_lite_restore_wa() argument
285 struct drm_device *dev = ring->dev; in disable_lite_restore_wa()
289 (ring->id == VCS || ring->id == VCS2); in disable_lite_restore_wa()
293 struct intel_engine_cs *ring) in intel_lr_context_descriptor() argument
295 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; in intel_lr_context_descriptor()
316 if (disable_lite_restore_wa(ring)) in intel_lr_context_descriptor()
326 struct intel_engine_cs *ring = rq0->ring; in execlists_elsp_write() local
327 struct drm_device *dev = ring->dev; in execlists_elsp_write()
332 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring); in execlists_elsp_write()
[all …]
intel_ringbuffer.h  34 #define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) argument
35 #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) argument
37 #define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) argument
38 #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) argument
40 #define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) argument
41 #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) argument
43 #define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) argument
44 #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) argument
46 #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) argument
47 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) argument
[all …]
i915_gem_context.c  333 struct intel_engine_cs *ring = &dev_priv->ring[i]; in i915_gem_context_reset() local
334 struct intel_context *lctx = ring->last_context; in i915_gem_context_reset()
341 ring->last_context = NULL; in i915_gem_context_reset()
345 if (ring->default_context) in i915_gem_context_reset()
346 ring->default_context->legacy_hw_ctx.initialized = false; in i915_gem_context_reset()
358 if (WARN_ON(dev_priv->ring[RCS].default_context)) in i915_gem_context_init()
389 struct intel_engine_cs *ring = &dev_priv->ring[i]; in i915_gem_context_init() local
392 ring->default_context = ctx; in i915_gem_context_init()
404 struct intel_context *dctx = dev_priv->ring[RCS].default_context; in i915_gem_context_fini()
419 WARN_ON(!dev_priv->ring[RCS].last_context); in i915_gem_context_fini()
[all …]
intel_lrc.h  32 #define RING_ELSP(ring) ((ring)->mmio_base+0x230) argument
33 #define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234) argument
34 #define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4) argument
35 #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) argument
39 #define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8) argument
40 #define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4) argument
41 #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) argument
46 void intel_logical_ring_stop(struct intel_engine_cs *ring);
47 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
83 struct intel_engine_cs *ring);
[all …]
i915_gpu_error.c  33 static const char *ring_str(int ring) in ring_str() argument
35 switch (ring) { in ring_str()
210 err_puts(m, err->ring != -1 ? " " : ""); in print_error_buffers()
211 err_puts(m, ring_str(err->ring)); in print_error_buffers()
249 struct drm_i915_error_ring *ring = &error->ring[ring_idx]; in i915_ring_error_state() local
251 if (!ring->valid) in i915_ring_error_state()
255 err_printf(m, " START: 0x%08x\n", ring->start); in i915_ring_error_state()
256 err_printf(m, " HEAD: 0x%08x\n", ring->head); in i915_ring_error_state()
257 err_printf(m, " TAIL: 0x%08x\n", ring->tail); in i915_ring_error_state()
258 err_printf(m, " CTL: 0x%08x\n", ring->ctl); in i915_ring_error_state()
[all …]
i915_cmd_parser.c  553 static bool validate_cmds_sorted(struct intel_engine_cs *ring, in validate_cmds_sorted() argument
575 ring->id, i, j, curr, previous); in validate_cmds_sorted()
609 static bool validate_regs_sorted(struct intel_engine_cs *ring) in validate_regs_sorted() argument
611 return check_sorted(ring->id, ring->reg_table, ring->reg_count) && in validate_regs_sorted()
612 check_sorted(ring->id, ring->master_reg_table, in validate_regs_sorted()
613 ring->master_reg_count); in validate_regs_sorted()
637 static int init_hash_table(struct intel_engine_cs *ring, in init_hash_table() argument
643 hash_init(ring->cmd_hash); in init_hash_table()
658 hash_add(ring->cmd_hash, &desc_node->node, in init_hash_table()
666 static void fini_hash_table(struct intel_engine_cs *ring) in fini_hash_table() argument
[all …]
i915_gem_debug.c  39 struct intel_engine_cs *ring; in i915_verify_lists() local
46 for_each_ring(ring, dev_priv, i) { in i915_verify_lists()
47 list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) { in i915_verify_lists()
51 ring->name, obj); in i915_verify_lists()
55 obj->last_read_req[ring->id] == NULL) { in i915_verify_lists()
57 ring->name, obj); in i915_verify_lists()
61 ring->name, in i915_verify_lists()
i915_gem_execbuffer.c  580 struct intel_engine_cs *ring, in i915_gem_execbuffer_reserve_vma() argument
689 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, in i915_gem_execbuffer_reserve() argument
698 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; in i915_gem_execbuffer_reserve()
701 i915_gem_retire_requests_ring(ring); in i915_gem_execbuffer_reserve()
759 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); in i915_gem_execbuffer_reserve()
769 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); in i915_gem_execbuffer_reserve()
792 struct intel_engine_cs *ring, in i915_gem_execbuffer_relocate_slow() argument
881 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); in i915_gem_execbuffer_relocate_slow()
909 const unsigned other_rings = ~intel_ring_flag(req->ring); in i915_gem_execbuffer_move_to_gpu()
919 ret = i915_gem_object_sync(obj, req->ring, &req); in i915_gem_execbuffer_move_to_gpu()
[all …]
i915_trace.h  478 __entry->sync_to = to_req->ring->id;
494 __field(u32, ring)
500 struct intel_engine_cs *ring =
502 __entry->dev = ring->dev->primary->index;
503 __entry->ring = ring->id;
506 i915_trace_irq_get(ring, req);
510 __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
519 __field(u32, ring)
525 __entry->dev = req->ring->dev->primary->index;
526 __entry->ring = req->ring->id;
[all …]
i915_gem.c  48 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
1144 struct intel_engine_cs *ring) in missed_irq() argument
1146 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); in missed_irq()
1196 if (req->ring->irq_refcount) in __i915_spin_request()
1246 struct intel_engine_cs *ring = i915_gem_request_get_ring(req); in __i915_wait_request() local
1247 struct drm_device *dev = ring->dev; in __i915_wait_request()
1250 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); in __i915_wait_request()
1288 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) { in __i915_wait_request()
1296 prepare_to_wait(&ring->irq_queue, &wait, state); in __i915_wait_request()
1325 if (timeout || missed_irq(dev_priv, ring)) { in __i915_wait_request()
[all …]
i915_irq.c  979 static void notify_ring(struct intel_engine_cs *ring) in notify_ring() argument
981 if (!intel_ring_initialized(ring)) in notify_ring()
984 trace_i915_gem_request_notify(ring); in notify_ring()
986 wake_up_all(&ring->irq_queue); in notify_ring()
1064 struct intel_engine_cs *ring; in any_waiters() local
1067 for_each_ring(ring, dev_priv, i) in any_waiters()
1068 if (ring->irq_refcount) in any_waiters()
1266 notify_ring(&dev_priv->ring[RCS]); in ilk_gt_irq_handler()
1268 notify_ring(&dev_priv->ring[VCS]); in ilk_gt_irq_handler()
1278 notify_ring(&dev_priv->ring[RCS]); in snb_gt_irq_handler()
[all …]
intel_overlay.c  236 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; in intel_overlay_on() local
243 ret = i915_gem_request_alloc(ring, ring->default_context, &req); in intel_overlay_on()
255 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); in intel_overlay_on()
256 intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); in intel_overlay_on()
257 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); in intel_overlay_on()
258 intel_ring_emit(ring, MI_NOOP); in intel_overlay_on()
259 intel_ring_advance(ring); in intel_overlay_on()
270 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; in intel_overlay_continue() local
286 ret = i915_gem_request_alloc(ring, ring->default_context, &req); in intel_overlay_continue()
296 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); in intel_overlay_continue()
[all …]
i915_gem_render_state.c  172 int i915_gem_render_state_prepare(struct intel_engine_cs *ring, in i915_gem_render_state_prepare() argument
177 if (WARN_ON(ring->id != RCS)) in i915_gem_render_state_prepare()
180 ret = render_state_init(so, ring->dev); in i915_gem_render_state_prepare()
201 ret = i915_gem_render_state_prepare(req->ring, &so); in i915_gem_render_state_init()
208 ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset, in i915_gem_render_state_init()
215 ret = req->ring->dispatch_execbuffer(req, in i915_gem_render_state_init()
/linux-4.4.14/drivers/thunderbolt/
nhi.c  22 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") argument
25 static int ring_interrupt_index(struct tb_ring *ring) in ring_interrupt_index() argument
27 int bit = ring->hop; in ring_interrupt_index()
28 if (!ring->is_tx) in ring_interrupt_index()
29 bit += ring->nhi->hop_count; in ring_interrupt_index()
38 static void ring_interrupt_active(struct tb_ring *ring, bool active) in ring_interrupt_active() argument
40 int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32; in ring_interrupt_active()
41 int bit = ring_interrupt_index(ring) & 31; in ring_interrupt_active()
44 old = ioread32(ring->nhi->iobase + reg); in ring_interrupt_active()
50 dev_info(&ring->nhi->pdev->dev, in ring_interrupt_active()
[all …]
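
The nhi.c hit shows how a Thunderbolt ring's interrupt bit is derived: TX rings use their hop number directly, RX rings are offset by the controller's hop count. A one-line sketch of that mapping, with a hypothetical signature:

/* Sketch only: interrupt bit index for a Thunderbolt ring, mirroring
 * ring_interrupt_index() above. TX rings occupy bits [0, hop_count),
 * RX rings the following hop_count bits. */
static int ring_irq_bit(int hop, int is_tx, int hop_count)
{
        return is_tx ? hop : hop + hop_count;
}
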
nhi.h  69 void ring_start(struct tb_ring *ring);
70 void ring_stop(struct tb_ring *ring);
71 void ring_free(struct tb_ring *ring);
73 int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
89 static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame) in ring_rx() argument
91 WARN_ON(ring->is_tx); in ring_rx()
92 return __ring_enqueue(ring, frame); in ring_rx()
108 static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame) in ring_tx() argument
110 WARN_ON(!ring->is_tx); in ring_tx()
111 return __ring_enqueue(ring, frame); in ring_tx()
/linux-4.4.14/drivers/net/wireless/b43legacy/
dma.c  45 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring, in op32_idx2desc() argument
51 *meta = &(ring->meta[slot]); in op32_idx2desc()
52 desc = ring->descbase; in op32_idx2desc()
58 static void op32_fill_descriptor(struct b43legacy_dmaring *ring, in op32_fill_descriptor() argument
63 struct b43legacy_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
70 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
75 addr |= ring->dev->dma.translation; in op32_fill_descriptor()
76 ctl = (bufsize - ring->frameoffset) in op32_fill_descriptor()
78 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
93 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) in op32_poke_tx() argument
[all …]
dma.h  167 u32 b43legacy_dma_read(struct b43legacy_dmaring *ring, in b43legacy_dma_read() argument
170 return b43legacy_read32(ring->dev, ring->mmio_base + offset); in b43legacy_dma_read()
174 void b43legacy_dma_write(struct b43legacy_dmaring *ring, in b43legacy_dma_write() argument
177 b43legacy_write32(ring->dev, ring->mmio_base + offset, value); in b43legacy_dma_write()
192 void b43legacy_dma_rx(struct b43legacy_dmaring *ring);
218 void b43legacy_dma_rx(struct b43legacy_dmaring *ring) in b43legacy_dma_rx() argument
/linux-4.4.14/drivers/gpu/drm/radeon/
radeon_ring.c  45 static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
58 struct radeon_ring *ring) in radeon_ring_supports_scratch_reg() argument
60 switch (ring->idx) { in radeon_ring_supports_scratch_reg()
78 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) in radeon_ring_free_size() argument
80 uint32_t rptr = radeon_ring_get_rptr(rdev, ring); in radeon_ring_free_size()
83 ring->ring_free_dw = rptr + (ring->ring_size / 4); in radeon_ring_free_size()
84 ring->ring_free_dw -= ring->wptr; in radeon_ring_free_size()
85 ring->ring_free_dw &= ring->ptr_mask; in radeon_ring_free_size()
86 if (!ring->ring_free_dw) { in radeon_ring_free_size()
88 ring->ring_free_dw = ring->ring_size / 4; in radeon_ring_free_size()
[all …]
evergreen_dma.c  44 struct radeon_ring *ring = &rdev->ring[fence->ring]; in evergreen_dma_fence_ring_emit() local
45 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in evergreen_dma_fence_ring_emit()
47 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0)); in evergreen_dma_fence_ring_emit()
48 radeon_ring_write(ring, addr & 0xfffffffc); in evergreen_dma_fence_ring_emit()
49 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); in evergreen_dma_fence_ring_emit()
50 radeon_ring_write(ring, fence->seq); in evergreen_dma_fence_ring_emit()
52 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0)); in evergreen_dma_fence_ring_emit()
54 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0)); in evergreen_dma_fence_ring_emit()
55 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); in evergreen_dma_fence_ring_emit()
56 radeon_ring_write(ring, 1); in evergreen_dma_fence_ring_emit()
[all …]
radeon_fence.c  62 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) in radeon_fence_write() argument
64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_write()
83 static u32 radeon_fence_read(struct radeon_device *rdev, int ring) in radeon_fence_read() argument
85 struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; in radeon_fence_read()
108 static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring) in radeon_fence_schedule_check() argument
115 &rdev->fence_drv[ring].lockup_work, in radeon_fence_schedule_check()
131 int ring) in radeon_fence_emit() argument
133 u64 seq = ++rdev->fence_drv[ring].sync_seq[ring]; in radeon_fence_emit()
142 (*fence)->ring = ring; in radeon_fence_emit()
145 &rdev->fence_queue.lock, rdev->fence_context + ring, seq); in radeon_fence_emit()
[all …]
r600_dma.c  52 struct radeon_ring *ring) in r600_dma_get_rptr() argument
57 rptr = rdev->wb.wb[ring->rptr_offs/4]; in r600_dma_get_rptr()
73 struct radeon_ring *ring) in r600_dma_get_wptr() argument
87 struct radeon_ring *ring) in r600_dma_set_wptr() argument
89 WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc); in r600_dma_set_wptr()
109 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; in r600_dma_stop()
122 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; in r600_dma_resume() local
131 rb_bufsz = order_base_2(ring->ring_size / 4); in r600_dma_resume()
151 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); in r600_dma_resume()
167 ring->wptr = 0; in r600_dma_resume()
[all …]
uvd_v1_0.c  40 struct radeon_ring *ring) in uvd_v1_0_get_rptr() argument
54 struct radeon_ring *ring) in uvd_v1_0_get_wptr() argument
68 struct radeon_ring *ring) in uvd_v1_0_set_wptr() argument
70 WREG32(UVD_RBC_RB_WPTR, ring->wptr); in uvd_v1_0_set_wptr()
84 struct radeon_ring *ring = &rdev->ring[fence->ring]; in uvd_v1_0_fence_emit() local
85 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; in uvd_v1_0_fence_emit()
87 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); in uvd_v1_0_fence_emit()
88 radeon_ring_write(ring, addr & 0xffffffff); in uvd_v1_0_fence_emit()
89 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); in uvd_v1_0_fence_emit()
90 radeon_ring_write(ring, fence->seq); in uvd_v1_0_fence_emit()
[all …]
cik_sdma.c  64 struct radeon_ring *ring) in cik_sdma_get_rptr() argument
69 rptr = rdev->wb.wb[ring->rptr_offs/4]; in cik_sdma_get_rptr()
71 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cik_sdma_get_rptr()
91 struct radeon_ring *ring) in cik_sdma_get_wptr() argument
95 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cik_sdma_get_wptr()
112 struct radeon_ring *ring) in cik_sdma_set_wptr() argument
116 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cik_sdma_set_wptr()
121 WREG32(reg, (ring->wptr << 2) & 0x3fffc); in cik_sdma_set_wptr()
136 struct radeon_ring *ring = &rdev->ring[ib->ring]; in cik_sdma_ring_ib_execute() local
137 u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf; in cik_sdma_ring_ib_execute()
[all …]
ni_dma.c  54 struct radeon_ring *ring) in cayman_dma_get_rptr() argument
59 rptr = rdev->wb.wb[ring->rptr_offs/4]; in cayman_dma_get_rptr()
61 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cayman_dma_get_rptr()
81 struct radeon_ring *ring) in cayman_dma_get_wptr() argument
85 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cayman_dma_get_wptr()
102 struct radeon_ring *ring) in cayman_dma_set_wptr() argument
106 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in cayman_dma_set_wptr()
111 WREG32(reg, (ring->wptr << 2) & 0x3fffc); in cayman_dma_set_wptr()
125 struct radeon_ring *ring = &rdev->ring[ib->ring]; in cayman_dma_ring_ib_execute() local
126 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; in cayman_dma_ring_ib_execute()
[all …]
radeon_trace.h  33 __field(u32, ring)
39 __entry->ring = p->ring;
42 p->rdev, p->ring);
45 __entry->ring, __entry->dw,
50 TP_PROTO(unsigned vmid, int ring),
51 TP_ARGS(vmid, ring),
54 __field(u32, ring)
59 __entry->ring = ring;
61 TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
107 TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
[all …]
uvd_v2_2.c  42 struct radeon_ring *ring = &rdev->ring[fence->ring]; in uvd_v2_2_fence_emit() local
43 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; in uvd_v2_2_fence_emit()
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); in uvd_v2_2_fence_emit()
46 radeon_ring_write(ring, fence->seq); in uvd_v2_2_fence_emit()
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); in uvd_v2_2_fence_emit()
48 radeon_ring_write(ring, lower_32_bits(addr)); in uvd_v2_2_fence_emit()
49 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); in uvd_v2_2_fence_emit()
50 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); in uvd_v2_2_fence_emit()
51 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); in uvd_v2_2_fence_emit()
52 radeon_ring_write(ring, 0); in uvd_v2_2_fence_emit()
[all …]
si_dma.c  41 bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) in si_dma_is_lockup() argument
46 if (ring->idx == R600_RING_TYPE_DMA_INDEX) in si_dma_is_lockup()
52 radeon_ring_lockup_update(rdev, ring); in si_dma_is_lockup()
55 return radeon_ring_test_lockup(rdev, ring); in si_dma_is_lockup()
187 void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, in si_dma_vm_flush() argument
191 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); in si_dma_vm_flush()
193 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2)); in si_dma_vm_flush()
195 …radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> … in si_dma_vm_flush()
197 radeon_ring_write(ring, pd_addr >> 12); in si_dma_vm_flush()
200 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); in si_dma_vm_flush()
[all …]
ni.c  1397 int ring, u32 cp_int_cntl) in cayman_cp_int_cntl_setup() argument
1401 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); in cayman_cp_int_cntl_setup()
1411 struct radeon_ring *ring = &rdev->ring[fence->ring]; in cayman_fence_ring_emit() local
1412 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in cayman_fence_ring_emit()
1417 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); in cayman_fence_ring_emit()
1418 radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); in cayman_fence_ring_emit()
1419 radeon_ring_write(ring, 0xFFFFFFFF); in cayman_fence_ring_emit()
1420 radeon_ring_write(ring, 0); in cayman_fence_ring_emit()
1421 radeon_ring_write(ring, 10); /* poll interval */ in cayman_fence_ring_emit()
1423 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); in cayman_fence_ring_emit()
[all …]
vce_v1_0.c  60 struct radeon_ring *ring) in vce_v1_0_get_rptr() argument
62 if (ring->idx == TN_RING_TYPE_VCE1_INDEX) in vce_v1_0_get_rptr()
77 struct radeon_ring *ring) in vce_v1_0_get_wptr() argument
79 if (ring->idx == TN_RING_TYPE_VCE1_INDEX) in vce_v1_0_get_wptr()
94 struct radeon_ring *ring) in vce_v1_0_set_wptr() argument
96 if (ring->idx == TN_RING_TYPE_VCE1_INDEX) in vce_v1_0_set_wptr()
97 WREG32(VCE_RB_WPTR, ring->wptr); in vce_v1_0_set_wptr()
99 WREG32(VCE_RB_WPTR2, ring->wptr); in vce_v1_0_set_wptr()
291 struct radeon_ring *ring; in vce_v1_0_start() local
297 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; in vce_v1_0_start()
[all …]
radeon_ib.c  55 int radeon_ib_get(struct radeon_device *rdev, int ring, in radeon_ib_get() argument
69 ib->ring = ring; in radeon_ib_get()
125 struct radeon_ring *ring = &rdev->ring[ib->ring]; in radeon_ib_schedule() local
128 if (!ib->length_dw || !ring->ready) { in radeon_ib_schedule()
135 r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); in radeon_ib_schedule()
144 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); in radeon_ib_schedule()
149 r = radeon_sync_rings(rdev, &ib->sync, ib->ring); in radeon_ib_schedule()
152 radeon_ring_unlock_undo(rdev, ring); in radeon_ib_schedule()
157 radeon_vm_flush(rdev, ib->vm, ib->ring, in radeon_ib_schedule()
161 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); in radeon_ib_schedule()
[all …]
rv770_dma.c  50 struct radeon_ring *ring = &rdev->ring[ring_index]; in rv770_copy_dma() local
59 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); in rv770_copy_dma()
67 radeon_sync_rings(rdev, &sync, ring->idx); in rv770_copy_dma()
74 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); in rv770_copy_dma()
75 radeon_ring_write(ring, dst_offset & 0xfffffffc); in rv770_copy_dma()
76 radeon_ring_write(ring, src_offset & 0xfffffffc); in rv770_copy_dma()
77 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); in rv770_copy_dma()
78 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); in rv770_copy_dma()
83 r = radeon_fence_emit(rdev, &fence, ring->idx); in rv770_copy_dma()
85 radeon_ring_unlock_undo(rdev, ring); in rv770_copy_dma()
[all …]
uvd_v3_1.c  41 struct radeon_ring *ring, in uvd_v3_1_semaphore_emit() argument
47 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); in uvd_v3_1_semaphore_emit()
48 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); in uvd_v3_1_semaphore_emit()
50 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); in uvd_v3_1_semaphore_emit()
51 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); in uvd_v3_1_semaphore_emit()
53 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); in uvd_v3_1_semaphore_emit()
54 radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); in uvd_v3_1_semaphore_emit()
radeon_vce.c  346 int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, in radeon_vce_get_create_msg() argument
354 r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); in radeon_vce_get_create_msg()
413 int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, in radeon_vce_get_destroy_msg() argument
421 r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4); in radeon_vce_get_destroy_msg()
696 struct radeon_ring *ring, in radeon_vce_semaphore_emit() argument
702 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)); in radeon_vce_semaphore_emit()
703 radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)); in radeon_vce_semaphore_emit()
704 radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)); in radeon_vce_semaphore_emit()
705 radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))); in radeon_vce_semaphore_emit()
707 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); in radeon_vce_semaphore_emit()
[all …]
radeon_sync.c  71 other = sync->sync_to[fence->ring]; in radeon_sync_fence()
72 sync->sync_to[fence->ring] = radeon_fence_later(fence, other); in radeon_sync_fence()
139 int ring) in radeon_sync_rings() argument
149 if (!radeon_fence_need_sync(fence, ring)) in radeon_sync_rings()
153 if (!rdev->ring[i].ready) { in radeon_sync_rings()
172 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); in radeon_sync_rings()
179 radeon_ring_undo(&rdev->ring[i]); in radeon_sync_rings()
187 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { in radeon_sync_rings()
189 radeon_ring_undo(&rdev->ring[i]); in radeon_sync_rings()
196 radeon_ring_commit(rdev, &rdev->ring[i], false); in radeon_sync_rings()
[all …]
radeon_semaphore.c  61 struct radeon_ring *ring = &rdev->ring[ridx]; in radeon_semaphore_emit_signal() local
65 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) { in radeon_semaphore_emit_signal()
69 ring->last_semaphore_signal_addr = semaphore->gpu_addr; in radeon_semaphore_emit_signal()
78 struct radeon_ring *ring = &rdev->ring[ridx]; in radeon_semaphore_emit_wait() local
82 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) { in radeon_semaphore_emit_wait()
86 ring->last_semaphore_wait_addr = semaphore->gpu_addr; in radeon_semaphore_emit_wait()
cik.c  3867 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) in cik_ring_test() argument
3880 r = radeon_ring_lock(rdev, ring, 3); in cik_ring_test()
3882 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); in cik_ring_test()
3886 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); in cik_ring_test()
3887 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); in cik_ring_test()
3888 radeon_ring_write(ring, 0xDEADBEEF); in cik_ring_test()
3889 radeon_ring_unlock_commit(rdev, ring, false); in cik_ring_test()
3898 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); in cik_ring_test()
3901 ring->idx, scratch, tmp); in cik_ring_test()
3919 struct radeon_ring *ring = &rdev->ring[ridx]; in cik_hdp_flush_cp_ring_emit() local
[all …]
r300.c  211 struct radeon_ring *ring = &rdev->ring[fence->ring]; in r300_fence_ring_emit() local
216 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0)); in r300_fence_ring_emit()
217 radeon_ring_write(ring, 0); in r300_fence_ring_emit()
218 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0)); in r300_fence_ring_emit()
219 radeon_ring_write(ring, 0); in r300_fence_ring_emit()
221 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); in r300_fence_ring_emit()
222 radeon_ring_write(ring, R300_RB3D_DC_FLUSH); in r300_fence_ring_emit()
223 radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); in r300_fence_ring_emit()
224 radeon_ring_write(ring, R300_ZC_FLUSH); in r300_fence_ring_emit()
226 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); in r300_fence_ring_emit()
[all …]
si.c  3374 struct radeon_ring *ring = &rdev->ring[fence->ring]; in si_fence_ring_emit() local
3375 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; in si_fence_ring_emit()
3378 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); in si_fence_ring_emit()
3379 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); in si_fence_ring_emit()
3380 radeon_ring_write(ring, 0); in si_fence_ring_emit()
3381 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); in si_fence_ring_emit()
3382 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | in si_fence_ring_emit()
3386 radeon_ring_write(ring, 0xFFFFFFFF); in si_fence_ring_emit()
3387 radeon_ring_write(ring, 0); in si_fence_ring_emit()
3388 radeon_ring_write(ring, 10); /* poll interval */ in si_fence_ring_emit()
[all …]
radeon_asic.h  73 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
113 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
147 struct radeon_ring *ring);
149 struct radeon_ring *ring);
151 struct radeon_ring *ring);
171 extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
287 void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
331 struct radeon_ring *ring,
335 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
342 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
[all …]
radeon_test.c  40 int i, r, ring; in radeon_do_test_moves() local
44 ring = radeon_copy_dma_ring_index(rdev); in radeon_do_test_moves()
47 ring = radeon_copy_blit_ring_index(rdev); in radeon_do_test_moves()
119 if (ring == R600_RING_TYPE_DMA_INDEX) in radeon_do_test_moves()
170 if (ring == R600_RING_TYPE_DMA_INDEX) in radeon_do_test_moves()
262 struct radeon_ring *ring, in radeon_test_create_and_emit_fence() argument
265 uint32_t handle = ring->idx ^ 0xdeafbeef; in radeon_test_create_and_emit_fence()
268 if (ring->idx == R600_RING_TYPE_UVD_INDEX) { in radeon_test_create_and_emit_fence()
269 r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL); in radeon_test_create_and_emit_fence()
275 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence); in radeon_test_create_and_emit_fence()
[all …]
radeon_cs.c  124 if (p->ring == R600_RING_TYPE_UVD_INDEX && in radeon_cs_parser_relocs()
182 r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); in radeon_cs_parser_relocs()
190 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) in radeon_cs_get_ring() argument
194 switch (ring) { in radeon_cs_get_ring()
196 DRM_ERROR("unknown ring id: %d\n", ring); in radeon_cs_get_ring()
199 p->ring = RADEON_RING_TYPE_GFX_INDEX; in radeon_cs_get_ring()
204 p->ring = CAYMAN_RING_TYPE_CP1_INDEX; in radeon_cs_get_ring()
206 p->ring = CAYMAN_RING_TYPE_CP2_INDEX; in radeon_cs_get_ring()
208 p->ring = RADEON_RING_TYPE_GFX_INDEX; in radeon_cs_get_ring()
213 p->ring = R600_RING_TYPE_DMA_INDEX; in radeon_cs_get_ring()
[all …]
r600.c  1909 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) in r600_gfx_is_lockup() argument
1916 radeon_ring_lockup_update(rdev, ring); in r600_gfx_is_lockup()
1919 return radeon_ring_test_lockup(rdev, ring); in r600_gfx_is_lockup()
2417 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; in r600_cp_stop()
2612 struct radeon_ring *ring) in r600_gfx_get_rptr() argument
2617 rptr = rdev->wb.wb[ring->rptr_offs/4]; in r600_gfx_get_rptr()
2625 struct radeon_ring *ring) in r600_gfx_get_wptr() argument
2635 struct radeon_ring *ring) in r600_gfx_set_wptr() argument
2637 WREG32(R600_CP_RB_WPTR, ring->wptr); in r600_gfx_set_wptr()
2685 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in r600_cp_start() local
[all …]
rv515.c  62 void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) in rv515_ring_start() argument
66 r = radeon_ring_lock(rdev, ring, 64); in rv515_ring_start()
70 radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0)); in rv515_ring_start()
71 radeon_ring_write(ring, in rv515_ring_start()
76 radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0)); in rv515_ring_start()
77 radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); in rv515_ring_start()
78 radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); in rv515_ring_start()
79 radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); in rv515_ring_start()
80 radeon_ring_write(ring, PACKET0(GB_SELECT, 0)); in rv515_ring_start()
81 radeon_ring_write(ring, 0); in rv515_ring_start()
[all …]
r420.c  209 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in r420_cp_errata_init() local
218 radeon_ring_lock(rdev, ring, 8); in r420_cp_errata_init()
219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); in r420_cp_errata_init()
220 radeon_ring_write(ring, rdev->config.r300.resync_scratch); in r420_cp_errata_init()
221 radeon_ring_write(ring, 0xDEADBEEF); in r420_cp_errata_init()
222 radeon_ring_unlock_commit(rdev, ring, false); in r420_cp_errata_init()
227 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in r420_cp_errata_fini() local
232 radeon_ring_lock(rdev, ring, 8); in r420_cp_errata_fini()
233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); in r420_cp_errata_fini()
234 radeon_ring_write(ring, R300_RB3D_DC_FINISH); in r420_cp_errata_fini()
[all …]
r100.c  839 static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring) in r100_ring_hdp_flush() argument
841 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); in r100_ring_hdp_flush()
842 radeon_ring_write(ring, rdev->config.r100.hdp_cntl | in r100_ring_hdp_flush()
844 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); in r100_ring_hdp_flush()
845 radeon_ring_write(ring, rdev->config.r100.hdp_cntl); in r100_ring_hdp_flush()
853 struct radeon_ring *ring = &rdev->ring[fence->ring]; in r100_fence_ring_emit() local
857 radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); in r100_fence_ring_emit()
858 radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL); in r100_fence_ring_emit()
859 radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); in r100_fence_ring_emit()
860 radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL); in r100_fence_ring_emit()
[all …]
r200.c  89 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; in r200_copy_dma() local
99 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); in r200_copy_dma()
105 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); in r200_copy_dma()
106 radeon_ring_write(ring, (1 << 16)); in r200_copy_dma()
113 radeon_ring_write(ring, PACKET0(0x720, 2)); in r200_copy_dma()
114 radeon_ring_write(ring, src_offset); in r200_copy_dma()
115 radeon_ring_write(ring, dst_offset); in r200_copy_dma()
116 radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30)); in r200_copy_dma()
120 radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); in r200_copy_dma()
121 radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); in r200_copy_dma()
[all …]
radeon.h  372 unsigned ring; member
378 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
381 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
382 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
383 void radeon_fence_process(struct radeon_device *rdev, int ring);
386 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
387 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
393 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
394 bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
395 void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
[all …]
/linux-4.4.14/drivers/net/wireless/b43/
dma.c  85 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, in op32_idx2desc() argument
91 *meta = &(ring->meta[slot]); in op32_idx2desc()
92 desc = ring->descbase; in op32_idx2desc()
98 static void op32_fill_descriptor(struct b43_dmaring *ring, in op32_fill_descriptor() argument
103 struct b43_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
110 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
112 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW); in op32_fill_descriptor()
113 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT); in op32_fill_descriptor()
116 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
131 static void op32_poke_tx(struct b43_dmaring *ring, int slot) in op32_poke_tx() argument
[all …]
dma.h  198 struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
202 void (*fill_descriptor) (struct b43_dmaring * ring,
206 void (*poke_tx) (struct b43_dmaring * ring, int slot);
207 void (*tx_suspend) (struct b43_dmaring * ring);
208 void (*tx_resume) (struct b43_dmaring * ring);
209 int (*get_current_rxslot) (struct b43_dmaring * ring);
210 void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
277 static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) in b43_dma_read() argument
279 return b43_read32(ring->dev, ring->mmio_base + offset); in b43_dma_read()
282 static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) in b43_dma_write() argument
[all …]
/linux-4.4.14/drivers/net/ethernet/apm/xgene/
xgene_enet_ring2.c  24 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_init() argument
26 u32 *ring_cfg = ring->state; in xgene_enet_ring_init()
27 u64 addr = ring->dma; in xgene_enet_ring_init()
29 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) { in xgene_enet_ring_init()
30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); in xgene_enet_ring_init()
39 ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize) in xgene_enet_ring_init()
46 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_type() argument
48 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_type()
52 is_bufpool = xgene_enet_is_bufpool(ring->id); in xgene_enet_ring_set_type()
59 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_recombbuf() argument
[all …]
xgene_enet_hw.c  25 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_init() argument
27 u32 *ring_cfg = ring->state; in xgene_enet_ring_init()
28 u64 addr = ring->dma; in xgene_enet_ring_init()
29 enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; in xgene_enet_ring_init()
45 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_type() argument
47 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_type()
51 is_bufpool = xgene_enet_is_bufpool(ring->id); in xgene_enet_ring_set_type()
62 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_recombbuf() argument
64 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_recombbuf()
72 static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, in xgene_enet_ring_wr32() argument
[all …]
Dxgene_enet_main.c96 static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) in xgene_enet_dst_ring_num() argument
98 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); in xgene_enet_dst_ring_num()
100 return ((u16)pdata->rm << 10) | ring->num; in xgene_enet_dst_ring_num()
272 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring) in xgene_enet_get_exp_bufs() argument
276 exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS]; in xgene_enet_get_exp_bufs()
278 ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1); in xgene_enet_get_exp_bufs()
283 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring) in xgene_get_frag_dma_array() argument
285 return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS]; in xgene_get_frag_dma_array()
536 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, in xgene_enet_process_ring() argument
539 struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); in xgene_enet_process_ring()
[all …]
/linux-4.4.14/net/rds/
Diw_ring.c66 void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr) in rds_iw_ring_init() argument
68 memset(ring, 0, sizeof(*ring)); in rds_iw_ring_init()
69 ring->w_nr = nr; in rds_iw_ring_init()
70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_iw_ring_init()
73 static inline u32 __rds_iw_ring_used(struct rds_iw_work_ring *ring) in __rds_iw_ring_used() argument
78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_iw_ring_used()
79 BUG_ON(diff > ring->w_nr); in __rds_iw_ring_used()
84 void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr) in rds_iw_ring_resize() argument
88 BUG_ON(__rds_iw_ring_used(ring)); in rds_iw_ring_resize()
89 ring->w_nr = nr; in rds_iw_ring_resize()
[all …]
Dib_ring.c66 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_init() argument
68 memset(ring, 0, sizeof(*ring)); in rds_ib_ring_init()
69 ring->w_nr = nr; in rds_ib_ring_init()
70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_ib_ring_init()
73 static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring) in __rds_ib_ring_used() argument
78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_ib_ring_used()
79 BUG_ON(diff > ring->w_nr); in __rds_ib_ring_used()
84 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_resize() argument
88 BUG_ON(__rds_ib_ring_used(ring)); in rds_ib_ring_resize()
89 ring->w_nr = nr; in rds_ib_ring_resize()
[all …]
Diw.h342 void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
343 void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
344 u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
345 void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
346 void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
347 int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
348 int rds_iw_ring_low(struct rds_iw_work_ring *ring);
349 u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
350 u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
Dib.h384 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
385 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
386 u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
387 void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
388 void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
389 int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
390 int rds_ib_ring_low(struct rds_ib_work_ring *ring);
391 u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
392 u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Den_tx.c53 struct mlx4_en_tx_ring *ring; in mlx4_en_create_tx_ring() local
57 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); in mlx4_en_create_tx_ring()
58 if (!ring) { in mlx4_en_create_tx_ring()
59 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in mlx4_en_create_tx_ring()
60 if (!ring) { in mlx4_en_create_tx_ring()
66 ring->size = size; in mlx4_en_create_tx_ring()
67 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
68 ring->stride = stride; in mlx4_en_create_tx_ring()
69 ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; in mlx4_en_create_tx_ring()
72 ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); in mlx4_en_create_tx_ring()
[all …]
Den_rx.c154 struct mlx4_en_rx_ring *ring) in mlx4_en_init_allocator() argument
162 if (mlx4_alloc_pages(priv, &ring->page_alloc[i], in mlx4_en_init_allocator()
167 i, ring->page_alloc[i].page_size, in mlx4_en_init_allocator()
168 atomic_read(&ring->page_alloc[i].page->_count)); in mlx4_en_init_allocator()
176 page_alloc = &ring->page_alloc[i]; in mlx4_en_init_allocator()
188 struct mlx4_en_rx_ring *ring) in mlx4_en_destroy_allocator() argument
196 page_alloc = &ring->page_alloc[i]; in mlx4_en_destroy_allocator()
212 struct mlx4_en_rx_ring *ring, int index) in mlx4_en_init_rx_desc() argument
214 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; in mlx4_en_init_rx_desc()
228 possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; in mlx4_en_init_rx_desc()
[all …]
/linux-4.4.14/drivers/crypto/qat/qat_common/
Dadf_transport.c80 static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) in adf_reserve_ring() argument
83 if (bank->ring_mask & (1 << ring)) { in adf_reserve_ring()
87 bank->ring_mask |= (1 << ring); in adf_reserve_ring()
92 static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring) in adf_unreserve_ring() argument
95 bank->ring_mask &= ~(1 << ring); in adf_unreserve_ring()
99 static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) in adf_enable_ring_irq() argument
102 bank->irq_mask |= (1 << ring); in adf_enable_ring_irq()
109 static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring) in adf_disable_ring_irq() argument
112 bank->irq_mask &= ~(1 << ring); in adf_disable_ring_irq()
117 int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg) in adf_send_message() argument
[all …]
Dadf_transport_debug.c59 struct adf_etr_ring_data *ring = sfile->private; in adf_ring_start() local
65 if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / in adf_ring_start()
66 ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) in adf_ring_start()
69 return ring->base_addr + in adf_ring_start()
70 (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); in adf_ring_start()
75 struct adf_etr_ring_data *ring = sfile->private; in adf_ring_next() local
77 if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) / in adf_ring_next()
78 ADF_MSG_SIZE_TO_BYTES(ring->msg_size))) in adf_ring_next()
81 return ring->base_addr + in adf_ring_next()
82 (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++); in adf_ring_next()
[all …]
Dadf_transport_access_macros.h119 #define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ argument
121 ADF_RING_CSR_RING_HEAD + (ring << 2))
122 #define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ argument
124 ADF_RING_CSR_RING_TAIL + (ring << 2))
128 #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ argument
130 ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
131 #define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ argument
137 ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
139 ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
141 #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ argument
[all …]
Dadf_transport_internal.h99 int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
100 void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
109 static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, in adf_ring_debugfs_add() argument
115 #define adf_ring_debugfs_rm(ring) do {} while (0) argument
/linux-4.4.14/tools/testing/selftests/net/
Dpsock_tpacket.c78 struct ring { struct
83 void (*walk)(int sock, struct ring *ring); argument
232 static void walk_v1_v2_rx(int sock, struct ring *ring) in walk_v1_v2_rx() argument
239 bug_on(ring->type != PACKET_RX_RING); in walk_v1_v2_rx()
252 while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, in walk_v1_v2_rx()
253 ring->version)) { in walk_v1_v2_rx()
254 ppd.raw = ring->rd[frame_num].iov_base; in walk_v1_v2_rx()
256 switch (ring->version) { in walk_v1_v2_rx()
273 __v1_v2_rx_user_ready(ppd.raw, ring->version); in walk_v1_v2_rx()
275 frame_num = (frame_num + 1) % ring->rd_num; in walk_v1_v2_rx()
[all …]
/linux-4.4.14/drivers/gpu/drm/msm/
Dmsm_ringbuffer.c23 struct msm_ringbuffer *ring; in msm_ringbuffer_new() local
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in msm_ringbuffer_new()
29 if (!ring) { in msm_ringbuffer_new()
34 ring->gpu = gpu; in msm_ringbuffer_new()
35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); in msm_ringbuffer_new()
36 if (IS_ERR(ring->bo)) { in msm_ringbuffer_new()
37 ret = PTR_ERR(ring->bo); in msm_ringbuffer_new()
38 ring->bo = NULL; in msm_ringbuffer_new()
42 ring->start = msm_gem_vaddr_locked(ring->bo); in msm_ringbuffer_new()
43 ring->end = ring->start + (size / 4); in msm_ringbuffer_new()
[all …]
Dmsm_ringbuffer.h31 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
36 OUT_RING(struct msm_ringbuffer *ring, uint32_t data) in OUT_RING() argument
38 if (ring->cur == ring->end) in OUT_RING()
39 ring->cur = ring->start; in OUT_RING()
40 *(ring->cur++) = data; in OUT_RING()
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/
Dhnae.h188 #define is_tx_ring(ring) ((ring)->flags & RINGF_DIR) argument
189 #define is_rx_ring(ring) (!is_tx_ring(ring)) argument
190 #define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \ argument
249 #define ring_ptr_move_fw(ring, p) \ argument
250 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
251 #define ring_ptr_move_bw(ring, p) \ argument
252 ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
259 #define assert_is_ring_idx(ring, idx) \ argument
260 assert((idx) >= 0 && (idx) < (ring)->desc_num)
265 static inline int ring_dist(struct hnae_ring *ring, int begin, int end) in ring_dist() argument
[all …]
Dhnae.c40 static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) in hnae_alloc_buffer() argument
42 unsigned int order = hnae_page_order(ring); in hnae_alloc_buffer()
52 cb->length = hnae_page_size(ring); in hnae_alloc_buffer()
58 static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) in hnae_free_buffer() argument
62 else if (unlikely(is_rx_ring(ring))) in hnae_free_buffer()
67 static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) in hnae_map_buffer() argument
69 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, in hnae_map_buffer()
70 cb->length, ring_to_dma_dir(ring)); in hnae_map_buffer()
72 if (dma_mapping_error(ring_to_dev(ring), cb->dma)) in hnae_map_buffer()
78 static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) in hnae_unmap_buffer() argument
[all …]
Dhns_enet.c37 static void fill_desc(struct hnae_ring *ring, void *priv, in fill_desc() argument
41 struct hnae_desc *desc = &ring->desc[ring->next_to_use]; in fill_desc()
42 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in fill_desc()
95 ring_ptr_move_fw(ring, next_to_use); in fill_desc()
98 static void unfill_desc(struct hnae_ring *ring) in unfill_desc() argument
100 ring_ptr_move_bw(ring, next_to_use); in unfill_desc()
109 struct hnae_ring *ring = ring_data->ring; in hns_nic_net_xmit_hw() local
118 assert(ring->max_desc_num_per_pkt <= ring->desc_num); in hns_nic_net_xmit_hw()
123 if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { in hns_nic_net_xmit_hw()
124 if (ring_space(ring) < 1) { in hns_nic_net_xmit_hw()
[all …]
Dhns_dsaf_rcb.c187 struct hnae_ring *ring = in hns_rcb_ring_init() local
189 dma_addr_t dma = ring->desc_dma_addr; in hns_rcb_ring_init()
220 void hns_rcb_init_hw(struct ring_pair_cb *ring) in hns_rcb_init_hw() argument
222 hns_rcb_ring_init(ring, RX_RING); in hns_rcb_init_hw()
223 hns_rcb_ring_init(ring, TX_RING); in hns_rcb_init_hw()
386 struct hnae_ring *ring; in hns_rcb_ring_get_cfg() local
395 ring = &q->rx_ring; in hns_rcb_ring_get_cfg()
396 ring->io_base = ring_pair_cb->q.io_base; in hns_rcb_ring_get_cfg()
399 ring = &q->tx_ring; in hns_rcb_ring_get_cfg()
400 ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + in hns_rcb_ring_get_cfg()
[all …]
/linux-4.4.14/drivers/net/ethernet/amd/xgbe/
Dxgbe-desc.c123 struct xgbe_ring *ring) in xgbe_free_ring() argument
128 if (!ring) in xgbe_free_ring()
131 if (ring->rdata) { in xgbe_free_ring()
132 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_free_ring()
133 rdata = XGBE_GET_DESC_DATA(ring, i); in xgbe_free_ring()
137 kfree(ring->rdata); in xgbe_free_ring()
138 ring->rdata = NULL; in xgbe_free_ring()
141 if (ring->rx_hdr_pa.pages) { in xgbe_free_ring()
142 dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, in xgbe_free_ring()
143 ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); in xgbe_free_ring()
[all …]
Dxgbe-drv.c223 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) in xgbe_tx_avail_desc() argument
225 return (ring->rdesc_count - (ring->cur - ring->dirty)); in xgbe_tx_avail_desc()
228 static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring) in xgbe_rx_dirty_desc() argument
230 return (ring->cur - ring->dirty); in xgbe_rx_dirty_desc()
234 struct xgbe_ring *ring, unsigned int count) in xgbe_maybe_stop_tx_queue() argument
238 if (count > xgbe_tx_avail_desc(ring)) { in xgbe_maybe_stop_tx_queue()
242 ring->tx.queue_stopped = 1; in xgbe_maybe_stop_tx_queue()
247 if (ring->tx.xmit_more) in xgbe_maybe_stop_tx_queue()
248 pdata->hw_if.tx_start_xmit(channel, ring); in xgbe_maybe_stop_tx_queue()
740 struct xgbe_ring *ring; in xgbe_free_tx_data() local
[all …]
Dxgbe-dev.c1078 struct xgbe_ring *ring = channel->tx_ring; in xgbe_tx_desc_init() local
1081 int start_index = ring->cur; in xgbe_tx_desc_init()
1086 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_tx_desc_init()
1087 rdata = XGBE_GET_DESC_DATA(ring, i); in xgbe_tx_desc_init()
1094 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); in xgbe_tx_desc_init()
1097 rdata = XGBE_GET_DESC_DATA(ring, start_index); in xgbe_tx_desc_init()
1157 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_desc_init() local
1159 unsigned int start_index = ring->cur; in xgbe_rx_desc_init()
1165 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_rx_desc_init()
1166 rdata = XGBE_GET_DESC_DATA(ring, i); in xgbe_rx_desc_init()
[all …]
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
Dflowring.c112 struct brcmf_flowring_ring *ring; in brcmf_flowring_create() local
152 ring = kzalloc(sizeof(*ring), GFP_ATOMIC); in brcmf_flowring_create()
153 if (!ring) in brcmf_flowring_create()
161 ring->hash_id = hash_idx; in brcmf_flowring_create()
162 ring->status = RING_CLOSED; in brcmf_flowring_create()
163 skb_queue_head_init(&ring->skblist); in brcmf_flowring_create()
164 flow->rings[i] = ring; in brcmf_flowring_create()
174 struct brcmf_flowring_ring *ring; in brcmf_flowring_tid() local
176 ring = flow->rings[flowid]; in brcmf_flowring_tid()
178 return flow->hash[ring->hash_id].fifo; in brcmf_flowring_tid()
[all …]
Dpcie.c960 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; in brcmf_pcie_ring_mb_write_rptr() local
961 struct brcmf_pciedev_info *devinfo = ring->devinfo; in brcmf_pcie_ring_mb_write_rptr()
962 struct brcmf_commonring *commonring = &ring->commonring; in brcmf_pcie_ring_mb_write_rptr()
968 commonring->w_ptr, ring->id); in brcmf_pcie_ring_mb_write_rptr()
970 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr); in brcmf_pcie_ring_mb_write_rptr()
978 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx; in brcmf_pcie_ring_mb_write_wptr() local
979 struct brcmf_pciedev_info *devinfo = ring->devinfo; in brcmf_pcie_ring_mb_write_wptr()
980 struct brcmf_commonring *commonring = &ring->commonring; in brcmf_pcie_ring_mb_write_wptr()
986 commonring->r_ptr, ring->id); in brcmf_pcie_ring_mb_write_wptr()
988 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); in brcmf_pcie_ring_mb_write_wptr()
[all …]
/linux-4.4.14/drivers/net/ethernet/broadcom/
Dbgmac.c49 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) in bgmac_dma_tx_reset() argument
54 if (!ring->mmio_base) in bgmac_dma_tx_reset()
61 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, in bgmac_dma_tx_reset()
64 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); in bgmac_dma_tx_reset()
76 ring->mmio_base, val); in bgmac_dma_tx_reset()
79 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); in bgmac_dma_tx_reset()
81 ring->mmio_base + BGMAC_DMA_TX_STATUS, in bgmac_dma_tx_reset()
85 ring->mmio_base); in bgmac_dma_tx_reset()
87 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); in bgmac_dma_tx_reset()
90 ring->mmio_base); in bgmac_dma_tx_reset()
[all …]
Dbcmsysport.c729 struct bcm_sysport_tx_ring *ring) in __bcm_sysport_tx_reclaim() argument
738 txq = netdev_get_tx_queue(ndev, ring->index); in __bcm_sysport_tx_reclaim()
741 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); in __bcm_sysport_tx_reclaim()
743 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); in __bcm_sysport_tx_reclaim()
745 last_c_index = ring->c_index; in __bcm_sysport_tx_reclaim()
746 num_tx_cbs = ring->size; in __bcm_sysport_tx_reclaim()
757 ring->index, c_index, last_tx_cn, last_c_index); in __bcm_sysport_tx_reclaim()
760 cb = ring->cbs + last_c_index; in __bcm_sysport_tx_reclaim()
763 ring->desc_count++; in __bcm_sysport_tx_reclaim()
768 ring->c_index = c_index; in __bcm_sysport_tx_reclaim()
[all …]
/linux-4.4.14/drivers/gpu/drm/qxl/
Dqxl_cmd.c33 struct ring { struct
39 struct ring *ring; member
47 void qxl_ring_free(struct qxl_ring *ring) in qxl_ring_free() argument
49 kfree(ring); in qxl_ring_free()
52 void qxl_ring_init_hdr(struct qxl_ring *ring) in qxl_ring_init_hdr() argument
54 ring->ring->header.notify_on_prod = ring->n_elements; in qxl_ring_init_hdr()
65 struct qxl_ring *ring; in qxl_ring_create() local
67 ring = kmalloc(sizeof(*ring), GFP_KERNEL); in qxl_ring_create()
68 if (!ring) in qxl_ring_create()
71 ring->ring = (struct ring *)header; in qxl_ring_create()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
Dfm10k_debugfs.c34 struct fm10k_ring *ring = s->private; in fm10k_dbg_desc_seq_start() local
36 return (*pos < ring->count) ? pos : NULL; in fm10k_dbg_desc_seq_start()
43 struct fm10k_ring *ring = s->private; in fm10k_dbg_desc_seq_next() local
45 return (++(*pos) < ring->count) ? pos : NULL; in fm10k_dbg_desc_seq_next()
64 struct fm10k_ring *ring = s->private; in fm10k_dbg_tx_desc_seq_show() local
76 if (!ring->desc) { in fm10k_dbg_tx_desc_seq_show()
79 struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i); in fm10k_dbg_tx_desc_seq_show()
91 struct fm10k_ring *ring = s->private; in fm10k_dbg_rx_desc_seq_show() local
103 if (!ring->desc) { in fm10k_dbg_rx_desc_seq_show()
106 union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i); in fm10k_dbg_rx_desc_seq_show()
[all …]
Dfm10k.h71 #define check_for_tx_hang(ring) \ argument
72 test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
73 #define set_check_for_tx_hang(ring) \ argument
74 set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
75 #define clear_check_for_tx_hang(ring) \ argument
76 clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
162 struct fm10k_ring *ring; /* pointer to linked list of rings */ member
177 static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) in txring_txq() argument
179 return &ring->netdev->_tx[ring->queue_index]; in txring_txq()
184 for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)
[all …]
Dfm10k_main.c374 static inline void fm10k_rx_checksum(struct fm10k_ring *ring, in fm10k_rx_checksum() argument
381 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in fm10k_rx_checksum()
390 ring->rx_stats.csum_err++; in fm10k_rx_checksum()
402 ring->rx_stats.csum_good++; in fm10k_rx_checksum()
411 static inline void fm10k_rx_hash(struct fm10k_ring *ring, in fm10k_rx_hash() argument
417 if (!(ring->netdev->features & NETIF_F_RXHASH)) in fm10k_rx_hash()
1142 static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) in fm10k_get_tx_completed() argument
1144 return ring->stats.packets; in fm10k_get_tx_completed()
1147 static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) in fm10k_get_tx_pending() argument
1150 u32 head = ring->next_to_clean; in fm10k_get_tx_pending()
[all …]
/linux-4.4.14/drivers/staging/comedi/drivers/
Dmite.c183 struct mite_dma_descriptor_ring *ring = in mite_alloc_ring() local
186 if (!ring) in mite_alloc_ring()
188 ring->hw_dev = get_device(&mite->pcidev->dev); in mite_alloc_ring()
189 if (!ring->hw_dev) { in mite_alloc_ring()
190 kfree(ring); in mite_alloc_ring()
193 ring->n_links = 0; in mite_alloc_ring()
194 ring->descriptors = NULL; in mite_alloc_ring()
195 ring->descriptors_dma_addr = 0; in mite_alloc_ring()
196 return ring; in mite_alloc_ring()
200 void mite_free_ring(struct mite_dma_descriptor_ring *ring) in mite_free_ring() argument
[all …]
/linux-4.4.14/drivers/gpu/drm/r128/
Dr128_drv.h82 drm_r128_ring_buffer_t ring; member
418 drm_r128_ring_buffer_t *ring = &dev_priv->ring; in r128_update_ring_snapshot() local
419 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); in r128_update_ring_snapshot()
420 if (ring->space <= 0) in r128_update_ring_snapshot()
421 ring->space += ring->size; in r128_update_ring_snapshot()
438 drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
439 if (ring->space < ring->high_mark) { \
442 if (ring->space >= ring->high_mark) \
477 int write, _nr; unsigned int tail_mask; volatile u32 *ring;
482 if (dev_priv->ring.space <= (n) * sizeof(u32)) { \
[all …]
/linux-4.4.14/drivers/scsi/fnic/
Dvnic_wq_copy.h31 struct vnic_dev_ring ring; member
38 return wq->ring.desc_avail; in vnic_wq_copy_desc_avail()
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; in vnic_wq_copy_desc_in_use()
48 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc()
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? in vnic_wq_copy_post()
57 wq->ring.desc_avail--; in vnic_wq_copy_post()
76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; in vnic_wq_copy_desc_process()
78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count); in vnic_wq_copy_desc_process()
79 wq->ring.desc_avail += cnt; in vnic_wq_copy_desc_process()
88 struct fcpio_host_req *wq_desc = wq->ring.descs; in vnic_wq_copy_service()
[all …]
Dvnic_dev.c160 unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, in vnic_dev_desc_ring_size() argument
173 ring->base_align = 512; in vnic_dev_desc_ring_size()
178 ring->desc_count = ALIGN(desc_count, count_align); in vnic_dev_desc_ring_size()
180 ring->desc_size = ALIGN(desc_size, desc_align); in vnic_dev_desc_ring_size()
182 ring->size = ring->desc_count * ring->desc_size; in vnic_dev_desc_ring_size()
183 ring->size_unaligned = ring->size + ring->base_align; in vnic_dev_desc_ring_size()
185 return ring->size_unaligned; in vnic_dev_desc_ring_size()
188 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) in vnic_dev_clear_desc_ring() argument
190 memset(ring->descs, 0, ring->size); in vnic_dev_clear_desc_ring()
193 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, in vnic_dev_alloc_desc_ring() argument
[all …]
Dvnic_wq.c31 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
48 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs()
49 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs()
74 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_free()
101 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_alloc()
120 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_init()
122 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); in vnic_wq_init()
173 wq->ring.desc_avail++; in vnic_wq_clean()
182 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_clean()
Dvnic_cq_copy.h35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
36 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service()
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
51 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
Dvnic_rq.c31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
75 vnic_dev_free_desc_ring(vdev, &rq->ring); in vnic_rq_free()
101 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); in vnic_rq_alloc()
121 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_rq_init()
123 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); in vnic_rq_init()
183 rq->ring.desc_avail++; in vnic_rq_clean()
195 vnic_dev_clear_desc_ring(&rq->ring); in vnic_rq_clean()
Dvnic_cq.h65 struct vnic_dev_ring ring; member
81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
82 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
93 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
99 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
Dvnic_cq.c26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc()
60 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_cq_init()
62 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
84 vnic_dev_clear_desc_ring(&cq->ring); in vnic_cq_clean()
Dvnic_wq.h89 struct vnic_dev_ring ring; member
99 return wq->ring.desc_avail; in vnic_wq_desc_avail()
105 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used()
136 wq->ring.desc_avail--; in vnic_wq_post()
152 wq->ring.desc_avail++; in vnic_wq_service()
Dvnic_rq.h96 struct vnic_dev_ring ring; member
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
145 rq->ring.desc_avail--; in vnic_rq_post()
172 rq->ring.desc_avail += count; in vnic_rq_return_descs()
197 rq->ring.desc_avail++; in vnic_rq_service()
Dvnic_wq_copy.c66 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_copy_clean()
74 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_copy_free()
95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_copy_alloc()
108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_copy_init()
110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); in vnic_wq_copy_init()
/linux-4.4.14/drivers/net/ethernet/broadcom/genet/
Dbcmgenet.c418 unsigned int ring, in bcmgenet_tdma_ring_readl() argument
422 (DMA_RING_SIZE * ring) + in bcmgenet_tdma_ring_readl()
427 unsigned int ring, u32 val, in bcmgenet_tdma_ring_writel() argument
431 (DMA_RING_SIZE * ring) + in bcmgenet_tdma_ring_writel()
436 unsigned int ring, in bcmgenet_rdma_ring_readl() argument
440 (DMA_RING_SIZE * ring) + in bcmgenet_rdma_ring_readl()
445 unsigned int ring, u32 val, in bcmgenet_rdma_ring_writel() argument
449 (DMA_RING_SIZE * ring) + in bcmgenet_rdma_ring_writel()
1092 struct bcmgenet_tx_ring *ring) in bcmgenet_get_txcb() argument
1096 tx_cb_ptr = ring->cbs; in bcmgenet_get_txcb()
[all …]
/linux-4.4.14/drivers/net/vmxnet3/
Dvmxnet3_int.h134 vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_adv_next2fill() argument
136 ring->next2fill++; in vmxnet3_cmd_ring_adv_next2fill()
137 if (unlikely(ring->next2fill == ring->size)) { in vmxnet3_cmd_ring_adv_next2fill()
138 ring->next2fill = 0; in vmxnet3_cmd_ring_adv_next2fill()
139 VMXNET3_FLIP_RING_GEN(ring->gen); in vmxnet3_cmd_ring_adv_next2fill()
144 vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_adv_next2comp() argument
146 VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); in vmxnet3_cmd_ring_adv_next2comp()
150 vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) in vmxnet3_cmd_ring_desc_avail() argument
152 return (ring->next2comp > ring->next2fill ? 0 : ring->size) + in vmxnet3_cmd_ring_desc_avail()
153 ring->next2comp - ring->next2fill - 1; in vmxnet3_cmd_ring_desc_avail()
[all …]
/linux-4.4.14/drivers/scsi/snic/
Dvnic_dev.c190 unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, in svnic_dev_desc_ring_size() argument
203 ring->base_align = 512; in svnic_dev_desc_ring_size()
208 ring->desc_count = ALIGN(desc_count, count_align); in svnic_dev_desc_ring_size()
210 ring->desc_size = ALIGN(desc_size, desc_align); in svnic_dev_desc_ring_size()
212 ring->size = ring->desc_count * ring->desc_size; in svnic_dev_desc_ring_size()
213 ring->size_unaligned = ring->size + ring->base_align; in svnic_dev_desc_ring_size()
215 return ring->size_unaligned; in svnic_dev_desc_ring_size()
218 void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) in svnic_dev_clear_desc_ring() argument
220 memset(ring->descs, 0, ring->size); in svnic_dev_clear_desc_ring()
223 int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, in svnic_dev_alloc_desc_ring() argument
[all …]
Dvnic_cq_fw.h35 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
36 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service()
45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_fw_service()
50 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
51 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service()
Dvnic_cq.h55 struct vnic_dev_ring ring; member
71 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
72 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
83 if (cq->to_clean == cq->ring.desc_count) { in svnic_cq_service()
88 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
89 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
Dvnic_wq.c39 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, in vnic_wq_alloc_ring()
46 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
62 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs()
63 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs()
88 svnic_dev_free_desc_ring(vdev, &wq->ring); in svnic_wq_free()
160 unsigned int count = wq->ring.desc_count; in vnic_wq_init_start()
162 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_wq_init_start()
227 wq->ring.desc_avail++; in svnic_wq_clean()
236 svnic_dev_clear_desc_ring(&wq->ring); in svnic_wq_clean()
Dvnic_cq.c26 svnic_dev_free_desc_ring(cq->vdev, &cq->ring); in svnic_cq_free()
46 err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in svnic_cq_alloc()
61 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in svnic_cq_init()
63 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in svnic_cq_init()
85 svnic_dev_clear_desc_ring(&cq->ring); in svnic_cq_clean()
Dvnic_wq.h78 struct vnic_dev_ring ring; member
88 return wq->ring.desc_avail; in svnic_wq_desc_avail()
94 return wq->ring.desc_count - wq->ring.desc_avail - 1; in svnic_wq_desc_used()
125 wq->ring.desc_avail--; in svnic_wq_post()
141 wq->ring.desc_avail++; in svnic_wq_service()
/linux-4.4.14/drivers/dma/
Dxgene-dma.c655 struct xgene_dma_ring *ring = &chan->tx_ring; in xgene_chan_xfer_request() local
659 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
665 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
666 ring->head = 0; in xgene_chan_xfer_request()
676 desc_hw = &ring->desc_hw[ring->head]; in xgene_chan_xfer_request()
678 if (++ring->head == ring->slots) in xgene_chan_xfer_request()
679 ring->head = 0; in xgene_chan_xfer_request()
690 2 : 1, ring->cmd); in xgene_chan_xfer_request()
747 struct xgene_dma_ring *ring = &chan->rx_ring; in xgene_dma_cleanup_descriptors() local
763 desc_hw = &ring->desc_hw[ring->head]; in xgene_dma_cleanup_descriptors()
[all …]
/linux-4.4.14/drivers/net/ethernet/qlogic/qlcnic/
Dqlcnic_io.c1136 struct qlcnic_host_rds_ring *ring, in qlcnic_process_rxbuf() argument
1142 buffer = &ring->rx_buf_arr[index]; in qlcnic_process_rxbuf()
1148 pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, in qlcnic_process_rxbuf()
1192 struct qlcnic_host_sds_ring *sds_ring, int ring, in qlcnic_process_rcv() argument
1203 if (unlikely(ring >= adapter->max_rds_rings)) in qlcnic_process_rcv()
1206 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_process_rcv()
1260 int ring, u64 sts_data0, u64 sts_data1) in qlcnic_process_lro() argument
1275 if (unlikely(ring >= adapter->max_rds_rings)) in qlcnic_process_lro()
1278 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_process_lro()
1363 u8 ring; in qlcnic_process_rcv_ring() local
[all …]
Dqlcnic_ctx.c415 int ring) in qlcnic_82xx_fw_cmd_create_tx_ctx() argument
465 index = temp_nsds_rings + ring; in qlcnic_82xx_fw_cmd_create_tx_ctx()
501 index = adapter->drv_sds_rings + ring; in qlcnic_82xx_fw_cmd_create_tx_ctx()
560 int err, ring; in qlcnic_alloc_hw_resources() local
571 for (ring = 0; ring < adapter->drv_tx_rings; ring++) { in qlcnic_alloc_hw_resources()
572 tx_ring = &adapter->tx_ring[ring]; in qlcnic_alloc_hw_resources()
592 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_alloc_hw_resources()
593 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_alloc_hw_resources()
605 for (ring = 0; ring < adapter->drv_sds_rings; ring++) { in qlcnic_alloc_hw_resources()
606 sds_ring = &recv_ctx->sds_rings[ring]; in qlcnic_alloc_hw_resources()
[all …]
Dqlcnic_main.c1723 int err, ring, num_sds_rings; in qlcnic_request_irq() local
1757 for (ring = 0; ring < num_sds_rings; ring++) { in qlcnic_request_irq()
1758 sds_ring = &recv_ctx->sds_rings[ring]; in qlcnic_request_irq()
1761 (ring == (num_sds_rings - 1))) { in qlcnic_request_irq()
1771 netdev->name, ring); in qlcnic_request_irq()
1776 netdev->name, ring); in qlcnic_request_irq()
1790 for (ring = 0; ring < adapter->drv_tx_rings; in qlcnic_request_irq()
1791 ring++) { in qlcnic_request_irq()
1792 tx_ring = &adapter->tx_ring[ring]; in qlcnic_request_irq()
1794 "%s-tx-%d", netdev->name, ring); in qlcnic_request_irq()
[all …]
Dqlcnic_init.c88 int i, ring; in qlcnic_release_rx_buffers() local
91 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_release_rx_buffers()
92 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_release_rx_buffers()
113 int i, ring; in qlcnic_reset_rx_buffers_list() local
116 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_reset_rx_buffers_list()
117 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_reset_rx_buffers_list()
170 int ring; in qlcnic_free_sw_resources() local
177 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in qlcnic_free_sw_resources()
178 rds_ring = &recv_ctx->rds_rings[ring]; in qlcnic_free_sw_resources()
191 int ring, i; in qlcnic_alloc_sw_resources() local
[all …]
/linux-4.4.14/drivers/gpu/drm/msm/adreno/
Dadreno_gpu.c87 static uint32_t get_wptr(struct msm_ringbuffer *ring) in get_wptr() argument
89 return ring->cur - ring->start; in get_wptr()
127 struct msm_ringbuffer *ring = gpu->rb; in adreno_submit() local
140 OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); in adreno_submit()
141 OUT_RING(ring, submit->cmd[i].iova); in adreno_submit()
142 OUT_RING(ring, submit->cmd[i].size); in adreno_submit()
153 OUT_PKT2(ring); in adreno_submit()
155 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); in adreno_submit()
156 OUT_RING(ring, submit->fence); in adreno_submit()
163 OUT_PKT3(ring, CP_EVENT_WRITE, 1); in adreno_submit()
[all …]
Da4xx_gpu.c112 struct msm_ringbuffer *ring = gpu->rb; in a4xx_me_init() local
114 OUT_PKT3(ring, CP_ME_INIT, 17); in a4xx_me_init()
115 OUT_RING(ring, 0x000003f7); in a4xx_me_init()
116 OUT_RING(ring, 0x00000000); in a4xx_me_init()
117 OUT_RING(ring, 0x00000000); in a4xx_me_init()
118 OUT_RING(ring, 0x00000000); in a4xx_me_init()
119 OUT_RING(ring, 0x00000080); in a4xx_me_init()
120 OUT_RING(ring, 0x00000100); in a4xx_me_init()
121 OUT_RING(ring, 0x00000180); in a4xx_me_init()
122 OUT_RING(ring, 0x00006600); in a4xx_me_init()
[all …]
Da3xx_gpu.c46 struct msm_ringbuffer *ring = gpu->rb; in a3xx_me_init() local
48 OUT_PKT3(ring, CP_ME_INIT, 17); in a3xx_me_init()
49 OUT_RING(ring, 0x000003f7); in a3xx_me_init()
50 OUT_RING(ring, 0x00000000); in a3xx_me_init()
51 OUT_RING(ring, 0x00000000); in a3xx_me_init()
52 OUT_RING(ring, 0x00000000); in a3xx_me_init()
53 OUT_RING(ring, 0x00000080); in a3xx_me_init()
54 OUT_RING(ring, 0x00000100); in a3xx_me_init()
55 OUT_RING(ring, 0x00000180); in a3xx_me_init()
56 OUT_RING(ring, 0x00006600); in a3xx_me_init()
[all …]
Dadreno_gpu.h254 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) in OUT_PKT0() argument
256 adreno_wait_ring(ring->gpu, cnt+1); in OUT_PKT0()
257 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); in OUT_PKT0()
262 OUT_PKT2(struct msm_ringbuffer *ring) in OUT_PKT2() argument
264 adreno_wait_ring(ring->gpu, 1); in OUT_PKT2()
265 OUT_RING(ring, CP_TYPE2_PKT); in OUT_PKT2()
269 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) in OUT_PKT3() argument
271 adreno_wait_ring(ring->gpu, cnt+1); in OUT_PKT3()
272 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); in OUT_PKT3()
/linux-4.4.14/virt/kvm/
Dcoalesced_mmio.c44 struct kvm_coalesced_mmio_ring *ring; in coalesced_mmio_has_room() local
53 ring = dev->kvm->coalesced_mmio_ring; in coalesced_mmio_has_room()
54 avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; in coalesced_mmio_has_room()
68 struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; in coalesced_mmio_write() local
82 ring->coalesced_mmio[ring->last].phys_addr = addr; in coalesced_mmio_write()
83 ring->coalesced_mmio[ring->last].len = len; in coalesced_mmio_write()
84 memcpy(ring->coalesced_mmio[ring->last].data, val, len); in coalesced_mmio_write()
86 ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; in coalesced_mmio_write()
/linux-4.4.14/Documentation/networking/
Dnetlink_mmap.txt14 Memory mapped netlink I/O used two circular ring buffers for RX and TX which
17 The RX ring is used by the kernel to directly construct netlink messages into
19 additionally as long as the ring contains messages no recvmsg() or poll()
22 The TX ring is used to process messages directly from user-space memory; the
23 kernel processes all messages contained in the ring using a single sendmsg()
31 - ring setup
32 - conversion of the RX path to get messages from the ring instead of recvmsg()
33 - conversion of the TX path to construct messages into the ring
35 Ring setup is done using setsockopt() to provide the ring parameters to the
36 kernel, then a call to mmap() to map the ring into the process's address space:
[all …]
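The netlink_mmap.txt lines above describe the sequence only in prose: setsockopt() hands the ring geometry to the kernel, then a single mmap() maps the rings into the process's address space. Below is a minimal user-space sketch of that sequence. It assumes the NETLINK_RX_RING/NETLINK_TX_RING socket options and struct nl_mmap_req that the full document defines (not shown in this excerpt, and only present in 4.4-era uapi headers built with CONFIG_NETLINK_MMAP); sizes are illustrative and error handling is omitted.

/* Sketch only: configure RX and TX netlink rings, then map both at once
 * (RX ring at the start of the mapping, TX ring right after it). */
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270		/* from <linux/socket.h> if glibc lacks it */
#endif

int setup_netlink_rings(int nl_protocol)
{
	struct nl_mmap_req req = {
		.nm_block_size	= 4096,			/* multiple of page size */
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,			/* divides nm_block_size */
		.nm_frame_nr	= 64 * (4096 / 2048),	/* frames filling all blocks */
	};
	unsigned int ring_size = req.nm_block_nr * req.nm_block_size;
	void *rings;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, nl_protocol);

	/* Provide the ring parameters to the kernel ... */
	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));

	/* ... then map both rings into the process's address space. */
	rings = mmap(NULL, 2u * ring_size, PROT_READ | PROT_WRITE,
		     MAP_SHARED, fd, 0);

	return rings == MAP_FAILED ? -1 : fd;
}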
Dpacket_mmap.txt75 setsockopt() ---> allocation of the circular buffer (ring)
103 allocated RX and TX buffer ring with a single mmap() call.
104 See "Mapping and use of the circular buffer (ring)".
116 setsockopt() ---> allocation of the circular buffer (ring)
124 the ring
215 circular buffer (ring) of unswappable memory.
252 buffer (ring)".
406 + Mapping and use of the circular buffer (ring)
423 RX and TX buffer ring has to be done with one call to mmap:
432 RX must be the first as the kernel maps the TX ring memory right
[all …]
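packet_mmap.txt, excerpted above, follows the same pattern for AF_PACKET sockets: setsockopt() allocates the circular buffer (ring) in the kernel, and one mmap() maps it, with the RX ring first and the TX ring memory right after it. The sketch below shows the capture (RX) side under the usual PACKET_RX_RING / struct tpacket_req interface, which the excerpt refers to only indirectly; the geometry is illustrative and error handling is omitted.

/* Sketch only: allocate an RX packet ring and map it into user space. */
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

void *map_rx_ring(int *sock_out)
{
	struct tpacket_req req = {
		.tp_block_size	= 4096,			/* multiple of PAGE_SIZE */
		.tp_block_nr	= 64,
		.tp_frame_size	= 2048,			/* divides tp_block_size */
		.tp_frame_nr	= 64 * (4096 / 2048),	/* frames filling all blocks */
	};
	unsigned int ring_len = req.tp_block_size * req.tp_block_nr;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	void *ring;

	/* Allocation of the circular buffer (ring) in the kernel. */
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));

	/* Map the ring; if a TX ring were configured too, RX would still come
	 * first and the TX ring memory would follow it in the same mapping. */
	ring = mmap(NULL, ring_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	*sock_out = fd;
	return ring == MAP_FAILED ? NULL : ring;
}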
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
Di40e_txrx.h220 #define ring_is_ps_enabled(ring) \ argument
221 test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
222 #define set_ring_ps_enabled(ring) \ argument
223 set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
224 #define clear_ring_ps_enabled(ring) \ argument
225 clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
226 #define ring_is_16byte_desc_enabled(ring) \ argument
227 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
228 #define set_ring_16byte_desc_enabled(ring) \ argument
229 set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/
Di40e_txrx.h219 #define ring_is_ps_enabled(ring) \ argument
220 test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
221 #define set_ring_ps_enabled(ring) \ argument
222 set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
223 #define clear_ring_ps_enabled(ring) \ argument
224 clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
225 #define ring_is_16byte_desc_enabled(ring) \ argument
226 test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
227 #define set_ring_16byte_desc_enabled(ring) \ argument
228 set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
[all …]
/linux-4.4.14/drivers/net/ethernet/qlogic/netxen/
Dnetxen_nic_ctx.c707 int ring; in netxen_init_old_ctx() local
720 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_init_old_ctx()
721 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_init_old_ctx()
723 hwctx->rcv_rings[ring].addr = in netxen_init_old_ctx()
725 hwctx->rcv_rings[ring].size = in netxen_init_old_ctx()
729 for (ring = 0; ring < adapter->max_sds_rings; ring++) { in netxen_init_old_ctx()
730 sds_ring = &recv_ctx->sds_rings[ring]; in netxen_init_old_ctx()
732 if (ring == 0) { in netxen_init_old_ctx()
736 hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); in netxen_init_old_ctx()
737 hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); in netxen_init_old_ctx()
[all …]
Dnetxen_nic_init.c112 int i, ring; in netxen_release_rx_buffers() local
115 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_release_rx_buffers()
116 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_release_rx_buffers()
170 int ring; in netxen_free_sw_resources() local
177 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_free_sw_resources()
178 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_free_sw_resources()
201 int ring, i; in netxen_alloc_sw_resources() local
230 for (ring = 0; ring < adapter->max_rds_rings; ring++) { in netxen_alloc_sw_resources()
231 rds_ring = &recv_ctx->rds_rings[ring]; in netxen_alloc_sw_resources()
232 switch (ring) { in netxen_alloc_sw_resources()
[all …]
Dnetxen_nic_ethtool.c298 int ring, i = 0; in netxen_nic_get_regs() local
354 for (ring = 0; ring < adapter->max_sds_rings; ring++) { in netxen_nic_get_regs()
355 sds_ring = &(recv_ctx->sds_rings[ring]); in netxen_nic_get_regs()
403 struct ethtool_ringparam *ring) in netxen_nic_get_ringparam() argument
407 ring->rx_pending = adapter->num_rxd; in netxen_nic_get_ringparam()
408 ring->rx_jumbo_pending = adapter->num_jumbo_rxd; in netxen_nic_get_ringparam()
409 ring->rx_jumbo_pending += adapter->num_lro_rxd; in netxen_nic_get_ringparam()
410 ring->tx_pending = adapter->num_txd; in netxen_nic_get_ringparam()
413 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; in netxen_nic_get_ringparam()
414 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; in netxen_nic_get_ringparam()
[all …]
/linux-4.4.14/drivers/net/ethernet/cisco/enic/
Dvnic_rq.c34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
73 vnic_dev_free_desc_ring(vdev, &rq->ring); in vnic_rq_free()
101 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); in vnic_rq_alloc()
120 unsigned int count = rq->ring.desc_count; in vnic_rq_init_start()
122 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_rq_init_start()
192 unsigned int count = rq->ring.desc_count; in vnic_rq_clean()
197 for (i = 0; i < rq->ring.desc_count; i++) { in vnic_rq_clean()
201 rq->ring.desc_avail = rq->ring.desc_count - 1; in vnic_rq_clean()
[all …]
Dvnic_wq.c34 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs()
47 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs()
48 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs()
76 vnic_dev_free_desc_ring(vdev, &wq->ring); in vnic_wq_free()
104 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in vnic_wq_alloc()
129 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); in enic_wq_devcmd2_alloc()
140 unsigned int count = wq->ring.desc_count; in enic_wq_init_start()
142 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; in enic_wq_init_start()
207 wq->ring.desc_avail++; in vnic_wq_clean()
216 vnic_dev_clear_desc_ring(&wq->ring); in vnic_wq_clean()
Dvnic_dev.c159 static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, in vnic_dev_desc_ring_size() argument
171 ring->base_align = 512; in vnic_dev_desc_ring_size()
176 ring->desc_count = ALIGN(desc_count, count_align); in vnic_dev_desc_ring_size()
178 ring->desc_size = ALIGN(desc_size, desc_align); in vnic_dev_desc_ring_size()
180 ring->size = ring->desc_count * ring->desc_size; in vnic_dev_desc_ring_size()
181 ring->size_unaligned = ring->size + ring->base_align; in vnic_dev_desc_ring_size()
183 return ring->size_unaligned; in vnic_dev_desc_ring_size()
186 void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) in vnic_dev_clear_desc_ring() argument
188 memset(ring->descs, 0, ring->size); in vnic_dev_clear_desc_ring()
191 int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, in vnic_dev_alloc_desc_ring() argument
[all …]
Dvnic_cq.h62 struct vnic_dev_ring ring; member
83 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
84 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
95 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
100 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
101 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
Dvnic_cq.c31 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
50 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc()
65 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_cq_init()
67 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
91 vnic_dev_clear_desc_ring(&cq->ring); in vnic_cq_clean()
Dvnic_rq.h89 struct vnic_dev_ring ring; member
103 return rq->ring.desc_avail; in vnic_rq_desc_avail()
109 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
137 rq->ring.desc_avail--; in vnic_rq_post()
159 rq->ring.desc_avail += count; in vnic_rq_return_descs()
184 rq->ring.desc_avail++; in vnic_rq_service()
Dvnic_wq.h84 struct vnic_dev_ring ring; member
106 return wq->ring.desc_avail; in vnic_wq_desc_avail()
112 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used()
151 wq->ring.desc_avail -= desc_skip_cnt; in vnic_wq_post()
167 wq->ring.desc_avail++; in vnic_wq_service()
/linux-4.4.14/drivers/dma/ioat/
Ddma.c331 struct ioat_ring_ent **ring; in ioat_alloc_ring() local
339 ring = kcalloc(descs, sizeof(*ring), flags); in ioat_alloc_ring()
340 if (!ring) in ioat_alloc_ring()
343 ring[i] = ioat_alloc_ring_ent(c, flags); in ioat_alloc_ring()
344 if (!ring[i]) { in ioat_alloc_ring()
346 ioat_free_ring_ent(ring[i], c); in ioat_alloc_ring()
347 kfree(ring); in ioat_alloc_ring()
350 set_desc_id(ring[i], i); in ioat_alloc_ring()
355 struct ioat_ring_ent *next = ring[i+1]; in ioat_alloc_ring()
356 struct ioat_dma_descriptor *hw = ring[i]->hw; in ioat_alloc_ring()
[all …]
/linux-4.4.14/drivers/usb/host/
Dxhci-mem.c128 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, in xhci_link_rings() argument
134 if (!ring || !first || !last) in xhci_link_rings()
137 next = ring->enq_seg->next; in xhci_link_rings()
138 xhci_link_segments(xhci, ring->enq_seg, first, ring->type); in xhci_link_rings()
139 xhci_link_segments(xhci, last, next, ring->type); in xhci_link_rings()
140 ring->num_segs += num_segs; in xhci_link_rings()
141 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; in xhci_link_rings()
143 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { in xhci_link_rings()
144 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control in xhci_link_rings()
148 ring->last_seg = last; in xhci_link_rings()
[all …]
Du132-hcd.c154 struct u132_ring *ring; member
189 struct u132_ring ring[MAX_U132_RINGS]; member
304 static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring) in u132_ring_put_kref() argument
309 static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_requeue_work() argument
313 if (queue_delayed_work(workqueue, &ring->scheduler, delta)) in u132_ring_requeue_work()
315 } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) in u132_ring_requeue_work()
320 static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring, in u132_ring_queue_work() argument
324 u132_ring_requeue_work(u132, ring, delta); in u132_ring_queue_work()
327 static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring) in u132_ring_cancel_work() argument
329 if (cancel_delayed_work(&ring->scheduler)) in u132_ring_cancel_work()
[all …]
Dxhci-dbg.c346 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_dbg_ring_ptrs() argument
349 ring->dequeue, in xhci_dbg_ring_ptrs()
350 (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg, in xhci_dbg_ring_ptrs()
351 ring->dequeue)); in xhci_dbg_ring_ptrs()
353 ring->deq_updates); in xhci_dbg_ring_ptrs()
355 ring->enqueue, in xhci_dbg_ring_ptrs()
356 (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg, in xhci_dbg_ring_ptrs()
357 ring->enqueue)); in xhci_dbg_ring_ptrs()
359 ring->enq_updates); in xhci_dbg_ring_ptrs()
371 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring) in xhci_debug_ring() argument
[all …]
Dxhci-ring.c93 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb_on_last_seg() argument
96 if (ring == xhci->event_ring) in last_trb_on_last_seg()
107 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in last_trb() argument
110 if (ring == xhci->event_ring) in last_trb()
116 static int enqueue_is_link_trb(struct xhci_ring *ring) in enqueue_is_link_trb() argument
118 struct xhci_link_trb *link = &ring->enqueue->link; in enqueue_is_link_trb()
127 struct xhci_ring *ring, in next_trb() argument
131 if (last_trb(xhci, ring, *seg, *trb)) { in next_trb()
143 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) in inc_deq() argument
145 ring->deq_updates++; in inc_deq()
[all …]
/linux-4.4.14/drivers/net/irda/
Ddonauboe.c272 if (self->ring) in toshoboe_dumpregs()
275 ringbase = virt_to_bus (self->ring); in toshoboe_dumpregs()
279 printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control); in toshoboe_dumpregs()
283 printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control); in toshoboe_dumpregs()
490 self->ring->tx[i].len = 0; in toshoboe_initring()
491 self->ring->tx[i].control = 0x00; in toshoboe_initring()
492 self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]); in toshoboe_initring()
497 self->ring->rx[i].len = RX_LEN; in toshoboe_initring()
498 self->ring->rx[i].len = 0; in toshoboe_initring()
499 self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]); in toshoboe_initring()
[all …]
/linux-4.4.14/drivers/net/ethernet/freescale/
Ducc_geth_ethtool.c215 struct ethtool_ringparam *ring) in uec_get_ringparam() argument
221 ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam()
222 ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam()
223 ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam()
224 ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; in uec_get_ringparam()
226 ring->rx_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
227 ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
228 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
229 ring->tx_pending = ug_info->bdRingLenTx[queue]; in uec_get_ringparam()
234 struct ethtool_ringparam *ring) in uec_set_ringparam() argument
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
Dixgbe.h246 #define check_for_tx_hang(ring) \ argument
247 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
248 #define set_check_for_tx_hang(ring) \ argument
249 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
250 #define clear_check_for_tx_hang(ring) \ argument
251 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
252 #define ring_is_rsc_enabled(ring) \ argument
253 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
254 #define set_ring_rsc_enabled(ring) \ argument
255 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
[all …]
Dixgbe_lib.c780 static void ixgbe_add_ring(struct ixgbe_ring *ring, in ixgbe_add_ring() argument
783 ring->next = head->ring; in ixgbe_add_ring()
784 head->ring = ring; in ixgbe_add_ring()
806 struct ixgbe_ring *ring; in ixgbe_alloc_q_vector() local
863 ring = q_vector->ring; in ixgbe_alloc_q_vector()
882 ring->dev = &adapter->pdev->dev; in ixgbe_alloc_q_vector()
883 ring->netdev = adapter->netdev; in ixgbe_alloc_q_vector()
886 ring->q_vector = q_vector; in ixgbe_alloc_q_vector()
889 ixgbe_add_ring(ring, &q_vector->tx); in ixgbe_alloc_q_vector()
892 ring->count = adapter->tx_ring_count; in ixgbe_alloc_q_vector()
[all …]
/linux-4.4.14/drivers/net/ethernet/neterion/vxge/
Dvxge-traffic.c243 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) in vxge_hw_vpath_dynamic_rti_ci_set() argument
245 u64 val64 = ring->tim_rti_cfg1_saved; in vxge_hw_vpath_dynamic_rti_ci_set()
248 ring->tim_rti_cfg1_saved = val64; in vxge_hw_vpath_dynamic_rti_ci_set()
249 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); in vxge_hw_vpath_dynamic_rti_ci_set()
268 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) in vxge_hw_vpath_dynamic_rti_rtimer_set() argument
270 u64 val64 = ring->tim_rti_cfg3_saved; in vxge_hw_vpath_dynamic_rti_rtimer_set()
271 u64 timer = (ring->rtimer * 1000) / 272; in vxge_hw_vpath_dynamic_rti_rtimer_set()
278 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); in vxge_hw_vpath_dynamic_rti_rtimer_set()
1132 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring, in vxge_hw_ring_rxd_reserve() argument
1138 channel = &ring->channel; in vxge_hw_ring_rxd_reserve()
[all …]
Dvxge-main.c133 struct vxge_ring *ring; in VXGE_COMPLETE_ALL_RX() local
137 ring = &vdev->vpaths[i].ring; in VXGE_COMPLETE_ALL_RX()
138 vxge_hw_vpath_poll_rx(ring->handle); in VXGE_COMPLETE_ALL_RX()
194 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) in vxge_rx_alloc() argument
200 dev = ring->ndev; in vxge_rx_alloc()
202 ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc()
212 ring->stats.skb_alloc_fail++; in vxge_rx_alloc()
217 "%s: %s:%d Skb : 0x%p", ring->ndev->name, in vxge_rx_alloc()
226 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); in vxge_rx_alloc()
234 static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) in vxge_rx_map() argument
[all …]
Dvxge-config.c1363 if (device_config->vp_config[i].ring.enable == in vxge_hw_device_initialize()
1365 nblocks += device_config->vp_config[i].ring.ring_blocks; in vxge_hw_device_initialize()
2022 static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) in __vxge_hw_ring_first_block_address_get() argument
2026 dma_object = ring->mempool->memblocks_dma_arr; in __vxge_hw_ring_first_block_address_get()
2064 struct __vxge_hw_ring *ring, u32 from, in __vxge_hw_ring_rxdblock_link() argument
2100 struct __vxge_hw_ring *ring = in __vxge_hw_ring_mempool_item_alloc() local
2104 for (i = 0; i < ring->rxds_per_block; i++) { in __vxge_hw_ring_mempool_item_alloc()
2109 u32 reserve_index = ring->channel.reserve_ptr - in __vxge_hw_ring_mempool_item_alloc()
2110 (index * ring->rxds_per_block + i + 1); in __vxge_hw_ring_mempool_item_alloc()
2113 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + in __vxge_hw_ring_mempool_item_alloc()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/
Dixgbevf.h100 #define check_for_tx_hang(ring) \ argument
101 test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
102 #define set_check_for_tx_hang(ring) \ argument
103 set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
104 #define clear_check_for_tx_hang(ring) \ argument
105 clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
178 struct ixgbevf_ring *ring; /* pointer to linked list of rings */ member
187 for (pos = (head).ring; pos != NULL; pos = pos->next)
238 q_vector->tx.ring->stats.yields++; in ixgbevf_qv_lock_napi()
275 q_vector->rx.ring->stats.yields++; in ixgbevf_qv_lock_poll()
[all …]
Dixgbevf_main.c218 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) in ixgbevf_get_tx_completed() argument
220 return ring->stats.packets; in ixgbevf_get_tx_completed()
223 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) in ixgbevf_get_tx_pending() argument
225 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending()
228 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); in ixgbevf_get_tx_pending()
229 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); in ixgbevf_get_tx_pending()
233 tail - head : (tail + ring->count - head); in ixgbevf_get_tx_pending()
466 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, in ixgbevf_rx_hash() argument
472 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbevf_rx_hash()
492 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, in ixgbevf_rx_checksum() argument
[all …]
/linux-4.4.14/drivers/net/ethernet/pasemi/
Dpasemi_mac.c316 struct pasemi_mac_csring *ring; in pasemi_mac_setup_csring() local
321 ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), in pasemi_mac_setup_csring()
324 if (!ring) { in pasemi_mac_setup_csring()
329 chno = ring->chan.chno; in pasemi_mac_setup_csring()
331 ring->size = CS_RING_SIZE; in pasemi_mac_setup_csring()
332 ring->next_to_fill = 0; in pasemi_mac_setup_csring()
335 if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) in pasemi_mac_setup_csring()
339 PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); in pasemi_mac_setup_csring()
340 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); in pasemi_mac_setup_csring()
345 ring->events[0] = pasemi_dma_alloc_flag(); in pasemi_mac_setup_csring()
[all …]
Dpasemi_mac.h117 #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ argument
118 & ((ring)->size - 1))
119 #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) argument
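
The RING_USED()/RING_AVAIL() macros above rely on the ring size being a power of two: the difference of two free-running indices is masked with size - 1, so the occupancy stays correct even after either counter wraps. A minimal standalone sketch of the same arithmetic (the struct, names and values below are illustrative, not taken from pasemi_mac.h):

#include <stdio.h>

/* Illustrative ring with free-running fill/clean counters; size must be a power of two. */
struct demo_ring {
	unsigned int next_to_fill;   /* incremented by the producer */
	unsigned int next_to_clean;  /* incremented by the consumer */
	unsigned int size;           /* number of slots, power of two */
};

#define DEMO_RING_USED(r)  (((r)->next_to_fill - (r)->next_to_clean) & ((r)->size - 1))
#define DEMO_RING_AVAIL(r) ((r)->size - DEMO_RING_USED(r))

int main(void)
{
	/* Counters have wrapped past UINT_MAX; the mask still yields the right occupancy. */
	struct demo_ring r = { .next_to_fill = 5, .next_to_clean = 0xfffffffeu, .size = 64 };

	printf("used=%u avail=%u\n", DEMO_RING_USED(&r), DEMO_RING_AVAIL(&r)); /* used=7 avail=57 */
	return 0;
}
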
/linux-4.4.14/arch/powerpc/boot/dts/fsl/
Dqoriq-sec6.0-0.dtsi42 compatible = "fsl,sec-v6.0-job-ring",
43 "fsl,sec-v5.2-job-ring",
44 "fsl,sec-v5.0-job-ring",
45 "fsl,sec-v4.4-job-ring",
46 "fsl,sec-v4.0-job-ring";
51 compatible = "fsl,sec-v6.0-job-ring",
52 "fsl,sec-v5.2-job-ring",
53 "fsl,sec-v5.0-job-ring",
54 "fsl,sec-v4.4-job-ring",
55 "fsl,sec-v4.0-job-ring";
Dqoriq-sec5.3-0.dtsi45 compatible = "fsl,sec-v5.3-job-ring",
46 "fsl,sec-v5.0-job-ring",
47 "fsl,sec-v4.0-job-ring";
53 compatible = "fsl,sec-v5.3-job-ring",
54 "fsl,sec-v5.0-job-ring",
55 "fsl,sec-v4.0-job-ring";
61 compatible = "fsl,sec-v5.3-job-ring",
62 "fsl,sec-v5.0-job-ring",
63 "fsl,sec-v4.0-job-ring";
69 compatible = "fsl,sec-v5.3-job-ring",
[all …]
Dqoriq-sec5.2-0.dtsi45 compatible = "fsl,sec-v5.2-job-ring",
46 "fsl,sec-v5.0-job-ring",
47 "fsl,sec-v4.0-job-ring";
53 compatible = "fsl,sec-v5.2-job-ring",
54 "fsl,sec-v5.0-job-ring",
55 "fsl,sec-v4.0-job-ring";
61 compatible = "fsl,sec-v5.2-job-ring",
62 "fsl,sec-v5.0-job-ring",
63 "fsl,sec-v4.0-job-ring";
69 compatible = "fsl,sec-v5.2-job-ring",
[all …]
Dpq3-sec4.4-0.dtsi45 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
51 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
57 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
63 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
Dqoriq-raid1.0-0.dtsi50 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
57 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring";
72 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
79 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-lp-ring";
Dqoriq-sec5.0-0.dtsi45 compatible = "fsl,sec-v5.0-job-ring",
46 "fsl,sec-v4.0-job-ring";
52 compatible = "fsl,sec-v5.0-job-ring",
53 "fsl,sec-v4.0-job-ring";
59 compatible = "fsl,sec-v5.0-job-ring",
60 "fsl,sec-v4.0-job-ring";
66 compatible = "fsl,sec-v5.0-job-ring",
67 "fsl,sec-v4.0-job-ring";
Dqoriq-sec4.2-0.dtsi45 compatible = "fsl,sec-v4.2-job-ring",
46 "fsl,sec-v4.0-job-ring";
52 compatible = "fsl,sec-v4.2-job-ring",
53 "fsl,sec-v4.0-job-ring";
59 compatible = "fsl,sec-v4.2-job-ring",
60 "fsl,sec-v4.0-job-ring";
66 compatible = "fsl,sec-v4.2-job-ring",
67 "fsl,sec-v4.0-job-ring";
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/fsl/
Draideng.txt48 There must be a sub-node for each job ring present in RAID Engine
51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
52 This identifies job ring. Should contain either
53 "fsl,raideng-v1.0-hp-ring" or "fsl,raideng-v1.0-lp-ring"
54 depending upon whether ring has high or low priority
55 - reg: offset and length of the register set for job ring
56 - interrupts: interrupt mapping for job ring IRQ
75 compatible = "fsl,raideng-v1.0-job-ring", "fsl,raideng-v1.0-hp-ring";
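
The binding text above is plain configuration: each job-ring sub-node is identified by its compatible strings, with a second compatible marking the ring as high or low priority. Below is a hedged sketch of how a platform driver could match such nodes; the demo_* names are invented for illustration and this is not the in-tree RAID Engine driver.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Illustrative match table for the job-ring compatible described above. */
static const struct of_device_id demo_re_jr_ids[] = {
	{ .compatible = "fsl,raideng-v1.0-job-ring" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_re_jr_ids);

static int demo_re_jr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	/* The binding distinguishes high- and low-priority rings by an extra compatible. */
	if (of_device_is_compatible(np, "fsl,raideng-v1.0-hp-ring"))
		dev_info(&pdev->dev, "high-priority job ring\n");
	else if (of_device_is_compatible(np, "fsl,raideng-v1.0-lp-ring"))
		dev_info(&pdev->dev, "low-priority job ring\n");

	return 0;
}

static struct platform_driver demo_re_jr_driver = {
	.driver = {
		.name = "demo-raideng-jr",
		.of_match_table = demo_re_jr_ids,
	},
	.probe = demo_re_jr_probe,
};
module_platform_driver(demo_re_jr_driver);
MODULE_LICENSE("GPL");
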
/linux-4.4.14/drivers/net/wireless/realtek/rtlwifi/
Dpci.c532 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; in _rtl_pci_tx_chk_waitq() local
540 (ring->entries - skb_queue_len(&ring->queue) > in _rtl_pci_tx_chk_waitq()
567 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; in _rtl_pci_tx_isr() local
569 while (skb_queue_len(&ring->queue)) { in _rtl_pci_tx_isr()
577 entry = (u8 *)(&ring->buffer_desc[ring->idx]); in _rtl_pci_tx_isr()
579 entry = (u8 *)(&ring->desc[ring->idx]); in _rtl_pci_tx_isr()
588 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) in _rtl_pci_tx_isr()
590 ring->idx = (ring->idx + 1) % ring->entries; in _rtl_pci_tx_isr()
592 skb = __skb_dequeue(&ring->queue); in _rtl_pci_tx_isr()
605 ring->idx, in _rtl_pci_tx_isr()
[all …]
/linux-4.4.14/drivers/gpu/drm/i810/
Di810_dma.c215 if (dev_priv->ring.virtual_start) in i810_dma_cleanup()
216 drm_legacy_ioremapfree(&dev_priv->ring.map, dev); in i810_dma_cleanup()
239 drm_i810_ring_buffer_t *ring = &(dev_priv->ring); in i810_wait_ring() local
245 while (ring->space < n) { in i810_wait_ring()
246 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR; in i810_wait_ring()
247 ring->space = ring->head - (ring->tail + 8); in i810_wait_ring()
248 if (ring->space < 0) in i810_wait_ring()
249 ring->space += ring->Size; in i810_wait_ring()
251 if (ring->head != last_head) { in i810_wait_ring()
253 last_head = ring->head; in i810_wait_ring()
[all …]
Di810_drv.h86 drm_i810_ring_buffer_t ring; member
150 if (dev_priv->ring.space < n*4) \
152 dev_priv->ring.space -= n*4; \
153 outring = dev_priv->ring.tail; \
154 ringmask = dev_priv->ring.tail_mask; \
155 virt = dev_priv->ring.virtual_start; \
161 dev_priv->ring.tail = outring; \
/linux-4.4.14/drivers/net/ethernet/sun/
Dcassini.c294 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument
297 if (ring == 0) { in cas_disable_irq()
304 switch (ring) { in cas_disable_irq()
316 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
321 REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
335 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument
337 if (ring == 0) { /* all but TX_DONE */ in cas_enable_irq()
343 switch (ring) { in cas_enable_irq()
355 REG_PLUS_INTRN_MASK(ring)); in cas_enable_irq()
1388 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/crypto/
Dfsl-sec6.txt84 Definition: Must include "fsl,sec-v6.0-job-ring".
103 compatible = "fsl,sec-v6.0-job-ring";
123 compatible = "fsl,sec-v6.0-job-ring",
124 "fsl,sec-v5.2-job-ring",
125 "fsl,sec-v5.0-job-ring",
126 "fsl,sec-v4.4-job-ring",
127 "fsl,sec-v4.0-job-ring";
132 compatible = "fsl,sec-v6.0-job-ring",
133 "fsl,sec-v5.2-job-ring",
134 "fsl,sec-v5.0-job-ring",
[all …]
/linux-4.4.14/net/dccp/ccids/lib/
Dloss_interval.c28 return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL; in tfrc_lh_peek()
35 return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length; in tfrc_lh_get_interval()
43 if (lh->ring[LIH_INDEX(lh->counter)] == NULL) in tfrc_lh_demand_next()
44 lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab, in tfrc_lh_demand_next()
46 return lh->ring[LIH_INDEX(lh->counter)]; in tfrc_lh_demand_next()
55 if (lh->ring[LIH_INDEX(lh->counter)] != NULL) { in tfrc_lh_cleanup()
57 lh->ring[LIH_INDEX(lh->counter)]); in tfrc_lh_cleanup()
58 lh->ring[LIH_INDEX(lh->counter)] = NULL; in tfrc_lh_cleanup()
Dpacket_history.c152 struct tfrc_rx_hist_entry *tmp = h->ring[idx_a]; in tfrc_rx_hist_swap()
154 h->ring[idx_a] = h->ring[idx_b]; in tfrc_rx_hist_swap()
155 h->ring[idx_b] = tmp; in tfrc_rx_hist_swap()
354 h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC); in tfrc_rx_hist_alloc()
355 if (h->ring[i] == NULL) in tfrc_rx_hist_alloc()
364 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_alloc()
365 h->ring[i] = NULL; in tfrc_rx_hist_alloc()
375 if (h->ring[i] != NULL) { in tfrc_rx_hist_purge()
376 kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]); in tfrc_rx_hist_purge()
377 h->ring[i] = NULL; in tfrc_rx_hist_purge()
[all …]
Dpacket_history.h95 struct tfrc_rx_hist_entry *ring[TFRC_NDUPACK + 1]; member
115 return h->ring[tfrc_rx_hist_index(h, h->loss_count)]; in tfrc_rx_hist_last_rcv()
124 return h->ring[tfrc_rx_hist_index(h, n)]; in tfrc_rx_hist_entry()
133 return h->ring[h->loss_start]; in tfrc_rx_hist_loss_prev()
/linux-4.4.14/drivers/net/ethernet/qualcomm/
Dqca_debug.c263 qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) in qcaspi_get_ringparam() argument
267 ring->rx_max_pending = 4; in qcaspi_get_ringparam()
268 ring->tx_max_pending = TX_RING_MAX_LEN; in qcaspi_get_ringparam()
269 ring->rx_pending = 4; in qcaspi_get_ringparam()
270 ring->tx_pending = qca->txr.count; in qcaspi_get_ringparam()
274 qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) in qcaspi_set_ringparam() argument
278 if ((ring->rx_pending) || in qcaspi_set_ringparam()
279 (ring->rx_mini_pending) || in qcaspi_set_ringparam()
280 (ring->rx_jumbo_pending)) in qcaspi_set_ringparam()
286 qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); in qcaspi_set_ringparam()
/linux-4.4.14/net/netlink/
Daf_netlink.c385 struct netlink_ring *ring; in __netlink_set_ring() local
388 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; in __netlink_set_ring()
392 ring->frame_max = req->nm_frame_nr - 1; in __netlink_set_ring()
393 ring->head = 0; in __netlink_set_ring()
394 ring->frame_size = req->nm_frame_size; in __netlink_set_ring()
395 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; in __netlink_set_ring()
397 swap(ring->pg_vec_len, req->nm_block_nr); in __netlink_set_ring()
398 swap(ring->pg_vec_order, order); in __netlink_set_ring()
399 swap(ring->pg_vec, pg_vec); in __netlink_set_ring()
414 struct netlink_ring *ring; in netlink_set_ring() local
[all …]
/linux-4.4.14/arch/tile/include/gxio/
Dmpipe.h559 unsigned int ring,
579 unsigned int ring);
588 *context, unsigned int ring);
628 gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring) in gxio_mpipe_notif_group_add_ring() argument
630 bits->ring_mask[ring / 64] |= (1ull << (ring % 64)); in gxio_mpipe_notif_group_add_ring()
762 unsigned int ring,
777 int ring, int bucket, unsigned int count) in gxio_mpipe_credit() argument
791 offset.ring = ring; in gxio_mpipe_credit()
793 offset.ring_enable = (ring >= 0); in gxio_mpipe_credit()
1137 unsigned int ring; member
[all …]
/linux-4.4.14/include/uapi/linux/
Dvirtio_ring.h80 __virtio16 ring[]; member
94 struct vring_used_elem ring[]; member
140 #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
141 #define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
149 vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16) in vring_init()
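
The vring_init() excerpt shows the legacy split-virtqueue layout: descriptors first, then the avail ring, then the used ring starting at the next boundary of the alignment passed in. A small illustrative calculation of that layout follows; demo_vring_bytes() is a helper written for this note, not the header's own vring_size(), and the element sizes assumed are those of the standard vring structs (16-byte descriptors, 2-byte avail entries, 8-byte used entries, plus one __virtio16 event slot after each ring).

#include <stddef.h>
#include <stdio.h>

static size_t demo_vring_bytes(unsigned int num, unsigned long align)
{
	size_t desc  = 16u * num;                 /* struct vring_desc[num] */
	size_t avail = 2 + 2 + 2u * num + 2;      /* flags, idx, ring[num], used_event */
	size_t used  = 2 + 2 + 8u * num + 2;      /* flags, idx, ring[num], avail_event */
	size_t used_off = (desc + avail + align - 1) & ~(align - 1); /* round up to align */

	return used_off + used;
}

int main(void)
{
	/* A 256-entry queue with 4 KiB alignment, as a worked example: 10246 bytes. */
	printf("%zu bytes\n", demo_vring_bytes(256, 4096));
	return 0;
}
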
/linux-4.4.14/arch/tile/gxio/
Diorpc_mpipe.c102 unsigned int ring; member
107 unsigned int ring) in gxio_mpipe_init_notif_ring_aux() argument
122 params->ring = ring; in gxio_mpipe_init_notif_ring_aux()
133 unsigned int ring; member
139 unsigned int ring) in gxio_mpipe_request_notif_ring_interrupt() argument
148 params->ring = ring; in gxio_mpipe_request_notif_ring_interrupt()
158 unsigned int ring; member
162 unsigned int ring) in gxio_mpipe_enable_notif_ring_interrupt() argument
167 params->ring = ring; in gxio_mpipe_enable_notif_ring_interrupt()
287 unsigned int ring; member
[all …]
Dmpipe.c160 unsigned int ring, in gxio_mpipe_init_notif_ring() argument
165 mem_flags, ring); in gxio_mpipe_init_notif_ring()
172 unsigned int ring, in gxio_mpipe_init_notif_group_and_buckets() argument
190 gxio_mpipe_notif_group_add_ring(&bits, ring + i); in gxio_mpipe_init_notif_group_and_buckets()
197 bucket_info.notifring = ring + (i % num_rings); in gxio_mpipe_init_notif_group_and_buckets()
211 unsigned int ring, unsigned int channel, in gxio_mpipe_init_edma_ring() argument
218 ring, channel); in gxio_mpipe_init_edma_ring()
366 unsigned int ring, in gxio_mpipe_iqueue_init() argument
374 iqueue->ring = ring; in gxio_mpipe_iqueue_init()
386 return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size, in gxio_mpipe_iqueue_init()
[all …]
/linux-4.4.14/drivers/xen/
Devtchn.c67 evtchn_port_t *ring; member
87 evtchn_port_t *ring; in evtchn_alloc_ring() local
88 size_t s = size * sizeof(*ring); in evtchn_alloc_ring()
90 ring = kmalloc(s, GFP_KERNEL); in evtchn_alloc_ring()
91 if (!ring) in evtchn_alloc_ring()
92 ring = vmalloc(s); in evtchn_alloc_ring()
94 return ring; in evtchn_alloc_ring()
97 static void evtchn_free_ring(evtchn_port_t *ring) in evtchn_free_ring() argument
99 kvfree(ring); in evtchn_free_ring()
111 return u->ring + evtchn_ring_offset(u, idx); in evtchn_ring_entry()
[all …]
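
evtchn_alloc_ring()/evtchn_free_ring() above use the common pattern of trying a physically contiguous kmalloc() first, falling back to vmalloc() when that fails, and releasing either kind of allocation with kvfree(). A minimal hedged sketch of the same pattern outside the evtchn driver (the demo_* names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative only: allocate a ring of 'count' u32 entries. */
static u32 *demo_alloc_ring(size_t count)
{
	size_t bytes = count * sizeof(u32);
	u32 *ring;

	ring = kmalloc(bytes, GFP_KERNEL);   /* fast path: physically contiguous */
	if (!ring)
		ring = vmalloc(bytes);       /* fall back to virtually contiguous */

	return ring;
}

static void demo_free_ring(u32 *ring)
{
	kvfree(ring);                        /* handles both kmalloc and vmalloc memory */
}
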
/linux-4.4.14/drivers/staging/iio/accel/
Dsca3000_ring.c236 struct iio_hw_buffer *ring; in sca3000_rb_allocate() local
238 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in sca3000_rb_allocate()
239 if (!ring) in sca3000_rb_allocate()
242 ring->private = indio_dev; in sca3000_rb_allocate()
243 buf = &ring->buf; in sca3000_rb_allocate()
344 void sca3000_ring_int_process(u8 val, struct iio_buffer *ring) in sca3000_ring_int_process() argument
348 ring->stufftoread = true; in sca3000_ring_int_process()
349 wake_up_interruptible(&ring->pollq); in sca3000_ring_int_process()
/linux-4.4.14/drivers/staging/rtl8192e/rtl8192e/
Drtl_core.c274 struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; in _rtl92e_check_nic_enough_desc() local
276 if (ring->entries - skb_queue_len(&ring->queue) >= 2) in _rtl92e_check_nic_enough_desc()
535 struct rtl8192_tx_ring *ring = NULL; in _rtl92e_prepare_beacon() local
538 ring = &priv->tx_ring[BEACON_QUEUE]; in _rtl92e_prepare_beacon()
539 pskb = __skb_dequeue(&ring->queue); in _rtl92e_prepare_beacon()
554 pdesc = &ring->desc[0]; in _rtl92e_prepare_beacon()
556 __skb_queue_tail(&ring->queue, pnewskb); in _rtl92e_prepare_beacon()
1130 struct rtl8192_tx_ring *ring = NULL; in _rtl92e_tx_check_stuck() local
1151 ring = &priv->tx_ring[QueueID]; in _rtl92e_tx_check_stuck()
1153 if (skb_queue_len(&ring->queue) == 0) { in _rtl92e_tx_check_stuck()
[all …]
/linux-4.4.14/drivers/gpu/drm/via/
Dvia_dma.c161 if (dev_priv->ring.virtual_start) { in via_dma_cleanup()
164 drm_legacy_ioremapfree(&dev_priv->ring.map, dev); in via_dma_cleanup()
165 dev_priv->ring.virtual_start = NULL; in via_dma_cleanup()
182 if (dev_priv->ring.virtual_start != NULL) { in via_initialize()
197 dev_priv->ring.map.offset = dev->agp->base + init->offset; in via_initialize()
198 dev_priv->ring.map.size = init->size; in via_initialize()
199 dev_priv->ring.map.type = 0; in via_initialize()
200 dev_priv->ring.map.flags = 0; in via_initialize()
201 dev_priv->ring.map.mtrr = 0; in via_initialize()
203 drm_legacy_ioremap(&dev_priv->ring.map, dev); in via_initialize()
[all …]
/linux-4.4.14/fs/
Daio.c438 struct aio_ring *ring; in aio_setup_ring() local
515 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring()
516 ring->nr = nr_events; /* user copy */ in aio_setup_ring()
517 ring->id = ~0U; in aio_setup_ring()
518 ring->head = ring->tail = 0; in aio_setup_ring()
519 ring->magic = AIO_RING_MAGIC; in aio_setup_ring()
520 ring->compat_features = AIO_RING_COMPAT_FEATURES; in aio_setup_ring()
521 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; in aio_setup_ring()
522 ring->header_length = sizeof(struct aio_ring); in aio_setup_ring()
523 kunmap_atomic(ring); in aio_setup_ring()
[all …]
/linux-4.4.14/arch/tile/include/hv/
Ddrv_trio_intf.h130 #define HV_TRIO_PUSH_DMA_OFFSET(ring) \ argument
133 ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT))
136 #define HV_TRIO_PULL_DMA_OFFSET(ring) \ argument
139 ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT))
/linux-4.4.14/net/packet/
Ddiag.c71 static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type, in pdiag_put_ring() argument
76 if (!ring->pg_vec || ((ver > TPACKET_V2) && in pdiag_put_ring()
80 pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT; in pdiag_put_ring()
81 pdr.pdr_block_nr = ring->pg_vec_len; in pdiag_put_ring()
82 pdr.pdr_frame_size = ring->frame_size; in pdiag_put_ring()
83 pdr.pdr_frame_nr = ring->frame_max + 1; in pdiag_put_ring()
86 pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov; in pdiag_put_ring()
87 pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv; in pdiag_put_ring()
88 pdr.pdr_features = ring->prb_bdqc.feature_req_word; in pdiag_put_ring()
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
Dbnxt.c1586 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) in bnxt_free_ring() argument
1591 for (i = 0; i < ring->nr_pages; i++) { in bnxt_free_ring()
1592 if (!ring->pg_arr[i]) in bnxt_free_ring()
1595 dma_free_coherent(&pdev->dev, ring->page_size, in bnxt_free_ring()
1596 ring->pg_arr[i], ring->dma_arr[i]); in bnxt_free_ring()
1598 ring->pg_arr[i] = NULL; in bnxt_free_ring()
1600 if (ring->pg_tbl) { in bnxt_free_ring()
1601 dma_free_coherent(&pdev->dev, ring->nr_pages * 8, in bnxt_free_ring()
1602 ring->pg_tbl, ring->pg_tbl_map); in bnxt_free_ring()
1603 ring->pg_tbl = NULL; in bnxt_free_ring()
[all …]
/linux-4.4.14/include/xen/interface/io/
Dring.h28 (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
29 sizeof(((struct _s##_sring *)0)->ring[0])))
35 (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
84 union __name##_sring_entry ring[1]; /* variable-length */ \
182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
199 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
/linux-4.4.14/drivers/net/wireless/ath/ath10k/
Dhtt_tx.c314 struct htt_rx_ring_setup_ring *ring; in ath10k_htt_send_rx_ring_cfg_ll() local
329 + (sizeof(*ring) * num_rx_ring); in ath10k_htt_send_rx_ring_cfg_ll()
337 ring = &cmd->rx_setup.rings[0]; in ath10k_htt_send_rx_ring_cfg_ll()
363 ring->fw_idx_shadow_reg_paddr = in ath10k_htt_send_rx_ring_cfg_ll()
365 ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); in ath10k_htt_send_rx_ring_cfg_ll()
366 ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); in ath10k_htt_send_rx_ring_cfg_ll()
367 ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); in ath10k_htt_send_rx_ring_cfg_ll()
368 ring->flags = __cpu_to_le16(flags); in ath10k_htt_send_rx_ring_cfg_ll()
369 ring->fw_idx_init_val = __cpu_to_le16(fw_idx); in ath10k_htt_send_rx_ring_cfg_ll()
373 ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); in ath10k_htt_send_rx_ring_cfg_ll()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/igb/
Digb.h225 struct igb_ring *ring; /* pointer to linked list of rings */ member
288 struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; member
315 static inline int igb_desc_unused(struct igb_ring *ring) in igb_desc_unused() argument
317 if (ring->next_to_clean > ring->next_to_use) in igb_desc_unused()
318 return ring->next_to_clean - ring->next_to_use - 1; in igb_desc_unused()
320 return ring->count + ring->next_to_clean - ring->next_to_use - 1; in igb_desc_unused()
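
igb_desc_unused() above computes how many descriptors the driver may still hand to hardware; the "- 1" keeps one slot in reserve so a completely full ring can be told apart from an empty one. A standalone restatement with a worked example (demo_desc_unused() is illustrative, not the driver function):

#include <stdio.h>

/*
 * next_to_use is where the driver writes the next descriptor, next_to_clean is
 * the first descriptor still owned by hardware; one slot is always left unused.
 */
static unsigned int demo_desc_unused(unsigned int count,
				     unsigned int next_to_use,
				     unsigned int next_to_clean)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;

	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* 512-entry ring, producer at 500, cleaner at 10: 21 descriptors remain usable. */
	printf("%u\n", demo_desc_unused(512, 500, 10));
	return 0;
}
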
/linux-4.4.14/drivers/net/wireless/realtek/rtl818x/rtl8180/
Ddev.c345 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; in rtl8180_handle_tx() local
347 while (skb_queue_len(&ring->queue)) { in rtl8180_handle_tx()
348 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; in rtl8180_handle_tx()
356 ring->idx = (ring->idx + 1) % ring->entries; in rtl8180_handle_tx()
357 skb = __skb_dequeue(&ring->queue); in rtl8180_handle_tx()
371 if (ring->entries - skb_queue_len(&ring->queue) == 2) in rtl8180_handle_tx()
461 struct rtl8180_tx_ring *ring; in rtl8180_tx() local
474 ring = &priv->tx_ring[prio]; in rtl8180_tx()
544 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; in rtl8180_tx()
545 entry = &ring->desc[idx]; in rtl8180_tx()
[all …]
/linux-4.4.14/drivers/crypto/caam/
Dctrl.c301 int ring; in caam_remove() local
308 for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { in caam_remove()
309 if (ctrlpriv->jrpdev[ring]) in caam_remove()
310 of_device_unregister(ctrlpriv->jrpdev[ring]); in caam_remove()
412 int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; in caam_probe() local
592 ring = 0; in caam_probe()
597 ctrlpriv->jrpdev[ring] = in caam_probe()
599 if (!ctrlpriv->jrpdev[ring]) { in caam_probe()
601 ring); in caam_probe()
604 ctrlpriv->jr[ring] = (struct caam_job_ring __force *) in caam_probe()
[all …]
/linux-4.4.14/drivers/scsi/
Dxen-scsifront.c110 struct vscsiif_front_ring ring; member
177 struct vscsiif_front_ring *ring = &(info->ring); in scsifront_pre_req() local
185 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); in scsifront_pre_req()
187 ring->req_prod_pvt++; in scsifront_pre_req()
196 struct vscsiif_front_ring *ring = &(info->ring); in scsifront_do_request() local
199 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); in scsifront_do_request()
303 rp = info->ring.sring->rsp_prod; in scsifront_ring_drain()
305 for (i = info->ring.rsp_cons; i != rp; i++) { in scsifront_ring_drain()
306 ring_rsp = RING_GET_RESPONSE(&info->ring, i); in scsifront_ring_drain()
310 info->ring.rsp_cons = i; in scsifront_ring_drain()
[all …]
